import datetime
import re
import sys
from decimal import Decimal
from pip._vendor.toml.decoder import InlineTableDict
if sys.version_info >= (3,):
unicode = str
def dump(o, f, encoder=None):
"""Writes out dict as toml to a file
Args:
o: Object to dump into toml
f: File descriptor where the toml should be stored
encoder: The ``TomlEncoder`` to use for constructing the output string
Returns:
String containing the toml corresponding to dictionary
Raises:
TypeError: When anything other than a file descriptor is passed
"""
if not f.write:
raise TypeError("You can only dump an object to a file descriptor")
d = dumps(o, encoder=encoder)
f.write(d)
return d
def dumps(o, encoder=None):
"""Stringifies input dict as toml
Args:
o: Object to dump into toml
encoder: The ``TomlEncoder`` to use for constructing the output string
Returns:
String containing the toml corresponding to dict
Examples:
```python
>>> import toml
>>> output = {
... 'a': "I'm a string",
... 'b': ["I'm", "a", "list"],
... 'c': 2400
... }
>>> toml.dumps(output)
'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n'
```
"""
retval = ""
if encoder is None:
encoder = TomlEncoder(o.__class__)
addtoretval, sections = encoder.dump_sections(o, "")
retval += addtoretval
outer_objs = [id(o)]
while sections:
section_ids = [id(section) for section in sections]
for outer_obj in outer_objs:
if outer_obj in section_ids:
raise ValueError("Circular reference detected")
outer_objs += section_ids
newsections = encoder.get_empty_table()
for section in sections:
addtoretval, addtosections = encoder.dump_sections(
sections[section], section)
if addtoretval or not addtosections:
if retval and retval[-2:] != "\n\n":
retval += "\n"
retval += "[" + section + "]\n"
if addtoretval:
retval += addtoretval
for s in addtosections:
newsections[section + "." + s] = addtosections[s]
sections = newsections
return retval
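# Hypothetical illustration (not part of the vendored module): the while loop
# in dumps() above records the id() of every table it has already expanded, so
# a self-referencing dict fails fast instead of recursing forever. The helper
# below is ours, for illustration only.
def _demo_circular_reference():
    d = {"a": 1}
    d["self"] = d  # the table now refers to itself
    try:
        dumps(d)
    except ValueError as exc:
        print(exc)  # -> Circular reference detected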
def _dump_str(v):
if sys.version_info < (3,) and hasattr(v, 'decode') and isinstance(v, str):
v = v.decode('utf-8')
v = "%r" % v
if v[0] == 'u':
v = v[1:]
singlequote = v.startswith("'")
if singlequote or v.startswith('"'):
v = v[1:-1]
if singlequote:
v = v.replace("\\'", "'")
v = v.replace('"', '\\"')
v = v.split("\\x")
while len(v) > 1:
i = -1
if not v[0]:
v = v[1:]
v[0] = v[0].replace("\\\\", "\\")
# Use the parity of the run of backslashes before the split point to pick
# the joiner ("x" vs "u00"), so an escaped backslash followed by a literal
# 'x' is not mistaken for a repr byte escape.
joinx = v[0][i] != "\\"
while v[0][:i] and v[0][i] == "\\":
joinx = not joinx
i -= 1
if joinx:
joiner = "x"
else:
joiner = "u00"
v = [v[0] + joiner + v[1]] + v[2:]
return unicode('"' + v[0] + '"')
def _dump_float(v):
return "{}".format(v).replace("e+0", "e+").replace("e-0", "e-")
def _dump_time(v):
utcoffset = v.utcoffset()
if utcoffset is None:
return v.isoformat()
# The TOML spec treats such values as local time, so we drop the offset
return v.isoformat()[:-6]
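# Hypothetical illustration (not part of the vendored module): an offset-aware
# datetime keeps its offset (UTC rendered as 'Z'), while an offset-aware time
# has its offset dropped by _dump_time() above. A minimal Python 3 sketch; the
# helper name is ours.
def _demo_time_dump():
    when = datetime.datetime(2020, 1, 1, 12, 0, tzinfo=datetime.timezone.utc)
    print(dumps({"when": when}))  # -> when = 2020-01-01T12:00:00Z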
class TomlEncoder(object):
def __init__(self, _dict=dict, preserve=False):
self._dict = _dict
self.preserve = preserve
self.dump_funcs = {
str: _dump_str,
unicode: _dump_str,
list: self.dump_list,
bool: lambda v: unicode(v).lower(),
int: lambda v: v,
float: _dump_float,
Decimal: _dump_float,
datetime.datetime: lambda v: v.isoformat().replace('+00:00', 'Z'),
datetime.time: _dump_time,
datetime.date: lambda v: v.isoformat()
}
def get_empty_table(self):
return self._dict()
def dump_list(self, v):
retval = "["
for u in v:
retval += " " + unicode(self.dump_value(u)) + ","
retval += "]"
return retval
def dump_inline_table(self, section):
"""Preserve inline table in its compact syntax instead of expanding
into subsection.
https://github.com/toml-lang/toml#user-content-inline-table
"""
retval = ""
if isinstance(section, dict):
val_list = []
for k, v in section.items():
val = self.dump_inline_table(v)
val_list.append(k + " = " + val)
retval += "{ " + ", ".join(val_list) + " }\n"
return retval
else:
return unicode(self.dump_value(section))
def dump_value(self, v):
# Lookup function corresponding to v's type
dump_fn = self.dump_funcs.get(type(v))
if dump_fn is None and hasattr(v, '__iter__'):
dump_fn = self.dump_funcs[list]
# Evaluate function (if it exists) else return v
return dump_fn(v) if dump_fn is not None else self.dump_funcs[str](v)
def dump_sections(self, o, sup):
retstr = ""
if sup != "" and sup[-1] != ".":
sup += '.'
retdict = self._dict()
arraystr = ""
for section in o:
section = unicode(section)
qsection = section
if not re.match(r'^[A-Za-z0-9_-]+$', section):
qsection = _dump_str(section)
if not isinstance(o[section], dict):
arrayoftables = False
if isinstance(o[section], list):
for a in o[section]:
if isinstance(a, dict):
arrayoftables = True
if arrayoftables:
for a in o[section]:
arraytabstr = "\n"
arraystr += "[[" + sup + qsection + "]]\n"
s, d = self.dump_sections(a, sup + qsection)
if s:
if s[0] == "[":
arraytabstr += s
else:
arraystr += s
while d:
newd = self._dict()
for dsec in d:
s1, d1 = self.dump_sections(d[dsec], sup +
qsection + "." +
dsec)
if s1:
arraytabstr += ("[" + sup + qsection +
"." + dsec + "]\n")
arraytabstr += s1
for s1 in d1:
newd[dsec + "." + s1] = d1[s1]
d = newd
arraystr += arraytabstr
else:
if o[section] is not None:
retstr += (qsection + " = " +
unicode(self.dump_value(o[section])) + '\n')
elif self.preserve and isinstance(o[section], InlineTableDict):
retstr += (qsection + " = " +
self.dump_inline_table(o[section]))
else:
retdict[qsection] = o[section]
retstr += arraystr
return (retstr, retdict)
class TomlPreserveInlineDictEncoder(TomlEncoder):
def __init__(self, _dict=dict):
super(TomlPreserveInlineDictEncoder, self).__init__(_dict, True)
class TomlArraySeparatorEncoder(TomlEncoder):
def __init__(self, _dict=dict, preserve=False, separator=","):
super(TomlArraySeparatorEncoder, self).__init__(_dict, preserve)
if separator.strip() == "":
separator = "," + separator
elif separator.strip(' \t\n\r,'):
raise ValueError("Invalid separator for arrays")
self.separator = separator
def dump_list(self, v):
t = []
retval = "["
for u in v:
t.append(self.dump_value(u))
while t != []:
s = []
for u in t:
if isinstance(u, list):
for r in u:
s.append(r)
else:
retval += " " + unicode(u) + self.separator
t = s
retval += "]"
return retval
class TomlNumpyEncoder(TomlEncoder):
def __init__(self, _dict=dict, preserve=False):
import numpy as np
super(TomlNumpyEncoder, self).__init__(_dict, preserve)
self.dump_funcs[np.float16] = _dump_float
self.dump_funcs[np.float32] = _dump_float
self.dump_funcs[np.float64] = _dump_float
self.dump_funcs[np.int16] = self._dump_int
self.dump_funcs[np.int32] = self._dump_int
self.dump_funcs[np.int64] = self._dump_int
def _dump_int(self, v):
return "{}".format(int(v))
class TomlPreserveCommentEncoder(TomlEncoder):
def __init__(self, _dict=dict, preserve=False):
from pip._vendor.toml.decoder import CommentValue
super(TomlPreserveCommentEncoder, self).__init__(_dict, preserve)
self.dump_funcs[CommentValue] = lambda v: v.dump(self.dump_value)
class TomlPathlibEncoder(TomlEncoder):
def _dump_pathlib_path(self, v):
return _dump_str(str(v))
def dump_value(self, v):
if (3, 4) <= sys.version_info:
import pathlib
if isinstance(v, pathlib.PurePath):
v = str(v)
return super(TomlPathlibEncoder, self).dump_value(v)
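# Hypothetical usage sketch (not part of the vendored module): how the encoder
# subclasses above change the rendered output. TomlArraySeparatorEncoder joins
# array items with a custom separator, and TomlPreserveInlineDictEncoder keeps
# InlineTableDict values in compact { ... } form instead of expanding them into
# [sub.tables]. The _Demo* names are ours, for illustration only.
class _DemoInlineDict(dict, InlineTableDict):
    """A dict tagged as an inline table, mirroring what the decoder builds."""

def _demo_encoders():
    data = {"nums": [1, 2, 3], "point": _DemoInlineDict(x=1, y=2)}
    print(dumps(data))  # default encoder expands 'point' into a [point] table
    print(dumps(data, encoder=TomlPreserveInlineDictEncoder()))
    print(dumps(data, encoder=TomlArraySeparatorEncoder(separator=",\n ")))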
# === source: Django-locallibrary/env/Lib/site-packages/pip/_vendor/toml/encoder.py ===
from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection,
HTTPSConnection,
VerifiedHTTPSConnection,
HTTPException,
BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import (
get_host,
parse_url,
Url,
_normalize_host as normalize_host,
_encode_target,
)
from .util.queue import LifoQueue
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
.. note::
ConnectionPool.urlopen() does not normalize or percent-encode target URIs
which is useful if your target server doesn't support percent-encoded
target URIs.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
self.host = _normalize_host(host, scheme=self.scheme)
self._proxy_host = host.lower()
self.port = port
def __str__(self):
return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = "http"
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
**conn_kw
):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
**self.conn_kw
)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
)
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, "auto_open", 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning("Connection pool is full, discarding connection: %s", self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if "timed out" in str(err) or "did not complete (read)" in str(
err
): # Python < 2.7.4
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
def _make_request(
self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, "sock", None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout
)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try:
# Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError:
# Python 3
try:
httplib_response = conn.getresponse()
except BaseException as e:
# Remove the TypeError from the exception chain in
# Python 3 (including for exceptions like SystemExit).
# Otherwise it looks like a bug in the code.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
http_version,
httplib_response.status,
httplib_response.length,
)
try:
assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
log.warning(
"Failed to parse headers (url=%s): %s",
self._absolute_url(url),
hpe,
exc_info=True,
)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=None,
redirect=True,
assert_same_host=True,
timeout=_Default,
pool_timeout=None,
release_conn=None,
chunked=False,
body_pos=None,
**response_kw
):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get("preload_content", True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
# Ensure that the URL we're connecting to is properly encoded
if url.startswith("/"):
url = six.ensure_str(_encode_target(url))
else:
url = six.ensure_str(parse_url(url).url)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/urllib3/urllib3/issues/651>
release_this_conn = release_conn
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == "http":
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(
conn, "sock", None
)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(
conn,
method,
url,
timeout=timeout_obj,
body=body,
headers=headers,
chunked=chunked,
)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw["request_method"] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(
httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw
)
# Everything went great!
clean_exit = True
except queue.Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
if isinstance(e, (BaseSSLError, CertificateError)):
e = SSLError(e)
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError("Cannot connect to proxy.", e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError("Connection aborted.", e)
retries = retries.increment(
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
)
return self.urlopen(
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = "GET"
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.getheader("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
response.drain_conn()
raise
return response
response.drain_conn()
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
return response
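# Hypothetical usage sketch (not part of the vendored module): the retry and
# redirect plumbing in urlopen() above is driven by a Retry instance, while
# maxsize/block bound how many sockets the pool hands out. A minimal sketch,
# assuming a reachable host at example.com; the helper name is ours.
def _demo_pool_usage():
    with HTTPConnectionPool("example.com", maxsize=2, block=True) as pool:
        r = pool.urlopen("GET", "/", retries=Retry(total=3, redirect=2))
        print(r.status)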
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
"""
scheme = "https"
ConnectionCls = HTTPSConnection
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
key_file=None,
cert_file=None,
cert_reqs=None,
key_password=None,
ca_certs=None,
ssl_version=None,
assert_hostname=None,
assert_fingerprint=None,
ca_cert_dir=None,
**conn_kw
):
HTTPConnectionPool.__init__(
self,
host,
port,
strict,
timeout,
maxsize,
block,
headers,
retries,
_proxy,
_proxy_headers,
**conn_kw
)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(
key_file=self.key_file,
key_password=self.key_password,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTPS connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "443",
)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError(
"Can't connect to HTTPS URL because the SSL module is not available."
)
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(
host=actual_host,
port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
cert_file=self.cert_file,
key_file=self.key_file,
key_password=self.key_password,
**self.conn_kw
)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn(
(
"Unverified HTTPS request is being made to host '%s'. "
"Adding certificate verification is strongly advised. See: "
"https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
"#ssl-warnings" % conn.host
),
InsecureRequestWarning,
)
def connection_from_url(url, **kw):
"""
Given a url, return a :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating a :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \\**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
if scheme == "https":
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
def _normalize_host(host, scheme):
"""
Normalize hosts for comparisons and use with sockets.
"""
host = normalize_host(host, scheme)
# httplib doesn't like it when we include brackets in IPv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that. See http://bugs.python.org/issue28539
if host.startswith("[") and host.endswith("]"):
host = host[1:-1]
return host
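# Hypothetical illustration (not part of the vendored module): _normalize_host()
# above strips the brackets kept around IPv6 literals so httplib does not double
# them in the Host header. The helper name is ours, for illustration only.
def _demo_normalize_host():
    print(_normalize_host("[::1]", scheme="http"))  # -> ::1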
# === source: Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/connectionpool.py ===
"""
This module provides a pool manager that uses Google App Engine's
`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Example usage::
from pip._vendor.urllib3 import PoolManager
from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
if is_appengine_sandbox():
# AppEngineManager uses AppEngine's URLFetch API behind the scenes
http = AppEngineManager()
else:
# PoolManager uses a socket-level API behind the scenes
http = PoolManager()
r = http.request('GET', 'https://google.com/')
There are `limitations <https://cloud.google.com/appengine/docs/python/\
urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
the best choice for your application. There are three options for using
urllib3 on Google App Engine:
1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
cost-effective in many circumstances as long as your usage is within the
limitations.
2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
Sockets also have `limitations and restrictions
<https://cloud.google.com/appengine/docs/python/sockets/\
#limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
To use sockets, be sure to specify the following in your ``app.yaml``::
env_variables:
GAE_USE_SOCKETS_HTTPLIB : 'true'
3. If you are using `App Engine Flexible
<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
:class:`PoolManager` without any configuration or special environment variables.
"""
from __future__ import absolute_import
import io
import logging
import warnings
from ..packages.six.moves.urllib.parse import urljoin
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError,
)
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
from . import _appengine_environ
try:
from google.appengine.api import urlfetch
except ImportError:
urlfetch = None
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
pass
class AppEnginePlatformError(HTTPError):
pass
class AppEngineManager(RequestMethods):
"""
Connection manager for Google App Engine sandbox applications.
This manager uses the URLFetch service directly instead of using the
emulated httplib, and is subject to URLFetch limitations as described in
the App Engine documentation `here
<https://cloud.google.com/appengine/docs/python/urlfetch>`_.
Notably it will raise an :class:`AppEnginePlatformError` if:
* URLFetch is not available.
* You attempt to use this on App Engine Flexible, as full socket
support is available.
* A request size is more than 10 megabytes.
* A response size is more than 32 megabytes.
* You use an unsupported request method such as OPTIONS.
Beyond those cases, it will raise normal urllib3 errors.
"""
def __init__(
self,
headers=None,
retries=None,
validate_certificate=True,
urlfetch_retries=True,
):
if not urlfetch:
raise AppEnginePlatformError(
"URLFetch is not available in this environment."
)
warnings.warn(
"urllib3 is using URLFetch on Google App Engine sandbox instead "
"of sockets. To use sockets directly instead of URLFetch see "
"https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
AppEnginePlatformWarning,
)
RequestMethods.__init__(self, headers)
self.validate_certificate = validate_certificate
self.urlfetch_retries = urlfetch_retries
self.retries = retries or Retry.DEFAULT
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Return False to re-raise any potential exceptions
return False
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=None,
redirect=True,
timeout=Timeout.DEFAULT_TIMEOUT,
**response_kw
):
retries = self._get_retries(retries, redirect)
try:
follow_redirects = redirect and retries.redirect != 0 and retries.total
response = urlfetch.fetch(
url,
payload=body,
method=method,
headers=headers or {},
allow_truncated=False,
follow_redirects=self.urlfetch_retries and follow_redirects,
deadline=self._get_absolute_timeout(timeout),
validate_certificate=self.validate_certificate,
)
except urlfetch.DeadlineExceededError as e:
raise TimeoutError(self, e)
except urlfetch.InvalidURLError as e:
if "too large" in str(e):
raise AppEnginePlatformError(
"URLFetch request too large, URLFetch only "
"supports requests up to 10mb in size.",
e,
)
raise ProtocolError(e)
except urlfetch.DownloadError as e:
if "Too many redirects" in str(e):
raise MaxRetryError(self, url, reason=e)
raise ProtocolError(e)
except urlfetch.ResponseTooLargeError as e:
raise AppEnginePlatformError(
"URLFetch response too large, URLFetch only supports"
"responses up to 32mb in size.",
e,
)
except urlfetch.SSLCertificateError as e:
raise SSLError(e)
except urlfetch.InvalidMethodError as e:
raise AppEnginePlatformError(
"URLFetch does not support method: %s" % method, e
)
http_response = self._urlfetch_response_to_http_response(
response, retries=retries, **response_kw
)
# Handle redirect?
redirect_location = redirect and http_response.get_redirect_location()
if redirect_location:
# Check for redirect response
if self.urlfetch_retries and retries.raise_on_redirect:
raise MaxRetryError(self, url, "too many redirects")
else:
if http_response.status == 303:
method = "GET"
try:
retries = retries.increment(
method, url, response=http_response, _pool=self
)
except MaxRetryError:
if retries.raise_on_redirect:
raise MaxRetryError(self, url, "too many redirects")
return http_response
retries.sleep_for_retry(http_response)
log.debug("Redirecting %s -> %s", url, redirect_location)
redirect_url = urljoin(url, redirect_location)
return self.urlopen(
method,
redirect_url,
body,
headers,
retries=retries,
redirect=redirect,
timeout=timeout,
**response_kw
)
# Check if we should retry the HTTP response.
has_retry_after = bool(http_response.getheader("Retry-After"))
if retries.is_retry(method, http_response.status, has_retry_after):
retries = retries.increment(method, url, response=http_response, _pool=self)
log.debug("Retry: %s", url)
retries.sleep(http_response)
return self.urlopen(
method,
url,
body=body,
headers=headers,
retries=retries,
redirect=redirect,
timeout=timeout,
**response_kw
)
return http_response
def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
if is_prod_appengine():
# Production GAE handles deflate encoding automatically, but does
# not remove the encoding header.
content_encoding = urlfetch_resp.headers.get("content-encoding")
if content_encoding == "deflate":
del urlfetch_resp.headers["content-encoding"]
transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
# We have a full response's content,
# so let's make sure we don't report ourselves as chunked data.
if transfer_encoding == "chunked":
encodings = transfer_encoding.split(",")
encodings.remove("chunked")
urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
original_response = HTTPResponse(
# In order for decoding to work, we must present the content as
# a file-like object.
body=io.BytesIO(urlfetch_resp.content),
msg=urlfetch_resp.header_msg,
headers=urlfetch_resp.headers,
status=urlfetch_resp.status_code,
**response_kw
)
return HTTPResponse(
body=io.BytesIO(urlfetch_resp.content),
headers=urlfetch_resp.headers,
status=urlfetch_resp.status_code,
original_response=original_response,
**response_kw
)
def _get_absolute_timeout(self, timeout):
if timeout is Timeout.DEFAULT_TIMEOUT:
return None # Defer to URLFetch's default.
if isinstance(timeout, Timeout):
if timeout._read is not None or timeout._connect is not None:
warnings.warn(
"URLFetch does not support granular timeout settings, "
"reverting to total or default URLFetch timeout.",
AppEnginePlatformWarning,
)
return timeout.total
return timeout
def _get_retries(self, retries, redirect):
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if retries.connect or retries.read or retries.redirect:
warnings.warn(
"URLFetch only supports total retries and does not "
"recognize connect, read, or redirect retry parameters.",
AppEnginePlatformWarning,
)
return retries
# Alias methods from _appengine_environ to maintain public API interface.
is_appengine = _appengine_environ.is_appengine
is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
is_local_appengine = _appengine_environ.is_local_appengine
is_prod_appengine = _appengine_environ.is_prod_appengine
is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
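# Hypothetical usage sketch (not part of the vendored module): AppEngineManager
# implements RequestMethods, so it is exercised like a PoolManager. This is only
# meaningful inside a first-generation App Engine sandbox where the urlfetch API
# is importable; the helper name is ours.
def _demo_appengine_fetch():
    if is_appengine_sandbox():
        http = AppEngineManager(urlfetch_retries=False)
        r = http.request("GET", "https://www.example.com/")
        print(r.status, len(r.data))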
# === source: Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/contrib/appengine.py ===
import sys
try:
# Our match_hostname function is the same as 3.5's, so we only want to
# import the match_hostname function if it's at least that good.
if sys.version_info < (3, 5):
raise ImportError("Fallback to vendored code")
from ssl import CertificateError, match_hostname
except ImportError:
try:
# Backport of the function from a pypi module
from backports.ssl_match_hostname import CertificateError, match_hostname
except ImportError:
# Our vendored copy
from ._implementation import CertificateError, match_hostname
# Not needed, but documenting what we provide.
__all__ = ("CertificateError", "match_hostname")
# === source: Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/packages/ssl_match_hostname/__init__.py ===
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
# Disabled for now due to: #2228, #2230
import setuptools.distutils_patch # noqa: F401
import distutils.core
import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path
from fnmatch import fnmatchcase
from ._deprecation_warning import SetuptoolsDeprecationWarning
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution
from setuptools.depends import Require
from . import monkey
__metaclass__ = type
__all__ = [
'setup', 'Distribution', 'Command', 'Extension', 'Require',
'SetuptoolsDeprecationWarning',
'find_packages'
]
if PY3:
__all__.append('find_namespace_packages')
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder:
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def find(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.find
if PY3:
find_namespace_packages = PEP420PackageFinder.find
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
# our PEP 517 backend patches `distutils.core.Distribution`.
class MinimalDistribution(distutils.core.Distribution):
"""
A minimal version of a distribution for supporting the
fetch_build_eggs interface.
"""
def __init__(self, attrs):
_incl = 'dependency_links', 'setup_requires'
filtered = {
k: attrs[k]
for k in set(_incl) & set(attrs)
}
distutils.core.Distribution.__init__(self, filtered)
def finalize_options(self):
"""
Disable finalize_options to avoid building the working set.
Ref #2158.
"""
dist = MinimalDistribution(attrs)
# Honor setup.cfg's options.
dist.parse_config_files(ignore_option_errors=True)
if dist.setup_requires:
dist.fetch_build_eggs(dist.setup_requires)
def setup(**attrs):
# Make sure we have any requirements needed to interpret 'attrs'.
_install_setup_requires(attrs)
return distutils.core.setup(**attrs)
setup.__doc__ = distutils.core.setup.__doc__
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif not isinstance(val, string_types):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val
def ensure_string_list(self, option):
r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif isinstance(val, string_types):
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if isinstance(val, list):
ok = all(isinstance(v, string_types) for v in val)
else:
ok = False
if not ok:
raise DistutilsOptionError(
"'%s' must be a list of strings (got %r)"
% (option, val))
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
class sic(str):
"""Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
# Apply monkey patches
monkey.patch_all()
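# Hypothetical usage sketch (not part of the vendored module): PackageFinder
# keeps only directories that contain an __init__.py, while the PEP 420 variant
# accepts any directory. A minimal sketch against a scratch tree; the helper
# name is ours.
def _demo_find_packages():
    import tempfile
    root = tempfile.mkdtemp()
    os.makedirs(os.path.join(root, "mypkg", "sub"))
    open(os.path.join(root, "mypkg", "__init__.py"), "w").close()
    print(find_packages(root))             # -> ['mypkg']
    print(PEP420PackageFinder.find(root))  # -> ['mypkg', 'mypkg.sub']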
# === source: Django-locallibrary/env/Lib/site-packages/setuptools/__init__.py ===
"""distutils
The main package for the Python Module Distribution Utilities. Normally
used from a setup script as
from distutils.core import setup
setup (...)
"""
import sys
__version__ = sys.version[:sys.version.index(' ')]
local = True
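# Hypothetical illustration (not part of the vendored module): __version__ is
# the interpreter version up to the first space of sys.version, e.g.
# '3.8.5 (default, ...)' becomes '3.8.5'.
if __name__ == "__main__":
    print(__version__ == sys.version.split(" ")[0])  # -> True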
# === source: Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/__init__.py ===
"""distutils.ccompiler
Contains CCompiler, an abstract base class that defines the interface
for the Distutils compiler abstraction model."""
import sys, os, re
from distutils.errors import *
from distutils.spawn import spawn
from distutils.file_util import move_file
from distutils.dir_util import mkpath
from distutils.dep_util import newer_group
from distutils.util import split_quoted, execute
from distutils import log
class CCompiler:
"""Abstract base class to define the interface that must be implemented
by real compiler classes. Also has some utility methods used by
several compiler classes.
The basic idea behind a compiler abstraction class is that each
instance can be used for all the compile/link steps in building a
single project. Thus, attributes common to all of those compile and
link steps -- include directories, macros to define, libraries to link
against, etc. -- are attributes of the compiler instance. To allow for
variability in how individual files are treated, most of those
attributes may be varied on a per-compilation or per-link basis.
"""
# 'compiler_type' is a class attribute that identifies this class. It
# keeps code that wants to know what kind of compiler it's dealing with
# from having to import all possible compiler classes just to do an
# 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
# should really, really be one of the keys of the 'compiler_class'
# dictionary (see below -- used by the 'new_compiler()' factory
# function) -- authors of new compiler interface classes are
# responsible for updating 'compiler_class'!
compiler_type = None
# XXX things not handled by this compiler abstraction model:
# * client can't provide additional options for a compiler,
# e.g. warning, optimization, debugging flags. Perhaps this
# should be the domain of concrete compiler abstraction classes
# (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
# class should have methods for the common ones.
# * can't completely override the include or library search
# path, i.e. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
# I'm not sure how widely supported this is even by Unix
# compilers, much less on other platforms. And I'm even less
# sure how useful it is; maybe for cross-compiling, but
# support for that is a ways off. (And anyways, cross
# compilers probably have a dedicated binary with the
# right paths compiled in. I hope.)
# * can't do really freaky things with the library list/library
# dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
# different versions of libfoo.a in different locations. I
# think this is useless without the ability to null out the
# library search path anyways.
# Subclasses that rely on the standard filename generation methods
# implemented below should override these; see the comment near
# those methods ('object_filenames()' et al.) for details:
src_extensions = None # list of strings
obj_extension = None # string
static_lib_extension = None
shared_lib_extension = None # string
static_lib_format = None # format string
shared_lib_format = None # prob. same as static_lib_format
exe_extension = None # string
# Default language settings. language_map is used to detect a source
# file or Extension target language, checking source filenames.
# language_order is used to detect the language precedence, when deciding
# what language to use when mixing source types. For example, if some
# extension has two files with ".c" extension, and one with ".cpp", it
# is still linked as c++.
language_map = {".c" : "c",
".cc" : "c++",
".cpp" : "c++",
".cxx" : "c++",
".m" : "objc",
}
language_order = ["c++", "objc", "c"]
def __init__(self, verbose=0, dry_run=0, force=0):
self.dry_run = dry_run
self.force = force
self.verbose = verbose
# 'output_dir': a common output directory for object, library,
# shared object, and shared library files
self.output_dir = None
# 'macros': a list of macro definitions (or undefinitions). A
# macro definition is a 2-tuple (name, value), where the value is
# either a string or None (no explicit value). A macro
# undefinition is a 1-tuple (name,).
self.macros = []
# 'include_dirs': a list of directories to search for include files
self.include_dirs = []
# 'libraries': a list of libraries to include in any link
# (library names, not filenames: eg. "foo" not "libfoo.a")
self.libraries = []
# 'library_dirs': a list of directories to search for libraries
self.library_dirs = []
# 'runtime_library_dirs': a list of directories to search for
# shared libraries/objects at runtime
self.runtime_library_dirs = []
# 'objects': a list of object files (or similar, such as explicitly
# named library files) to include on any link
self.objects = []
for key in self.executables.keys():
self.set_executable(key, self.executables[key])
def set_executables(self, **kwargs):
"""Define the executables (and options for them) that will be run
to perform the various stages of compilation. The exact set of
executables that may be specified here depends on the compiler
class (via the 'executables' class attribute), but most will have:
compiler the C/C++ compiler
linker_so linker used to create shared objects and libraries
linker_exe linker used to create binary executables
archiver static library creator
On platforms with a command-line (Unix, DOS/Windows), each of these
is a string that will be split into executable name and (optional)
list of arguments. (Splitting the string is done similarly to how
Unix shells operate: words are delimited by spaces, but quotes and
backslashes can override this. See
'distutils.util.split_quoted()'.)
"""
# Note that some CCompiler implementation classes will define class
# attributes 'cpp', 'cc', etc. with hard-coded executable names;
# this is appropriate when a compiler class is for exactly one
# compiler/OS combination (eg. MSVCCompiler). Other compiler
# classes (UnixCCompiler, in particular) are driven by information
# discovered at run-time, since there are many different ways to do
# basically the same things with Unix C compilers.
for key in kwargs:
if key not in self.executables:
raise ValueError("unknown executable '%s' for class %s" %
(key, self.__class__.__name__))
self.set_executable(key, kwargs[key])
def set_executable(self, key, value):
if isinstance(value, str):
setattr(self, key, split_quoted(value))
else:
setattr(self, key, value)
def _find_macro(self, name):
i = 0
for defn in self.macros:
if defn[0] == name:
return i
i += 1
return None
def _check_macro_definitions(self, definitions):
"""Ensures that every element of 'definitions' is a valid macro
definition, ie. either (name,value) 2-tuple or a (name,) tuple. Do
nothing if all definitions are OK, raise TypeError otherwise.
"""
for defn in definitions:
if not (isinstance(defn, tuple) and
(len(defn) in (1, 2) and
(isinstance (defn[1], str) or defn[1] is None)) and
isinstance (defn[0], str)):
raise TypeError(("invalid macro definition '%s': " % defn) + \
"must be tuple (string,), (string, string), or " + \
"(string, None)")
# -- Bookkeeping methods -------------------------------------------
def define_macro(self, name, value=None):
"""Define a preprocessor macro for all compilations driven by this
compiler object. The optional parameter 'value' should be a
string; if it is not supplied, then the macro will be defined
without an explicit value and the exact outcome depends on the
compiler used (XXX true? does ANSI say anything about this?)
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
        i = self._find_macro(name)
if i is not None:
del self.macros[i]
self.macros.append((name, value))
def undefine_macro(self, name):
"""Undefine a preprocessor macro for all compilations driven by
this compiler object. If the same macro is defined by
'define_macro()' and undefined by 'undefine_macro()' the last call
takes precedence (including multiple redefinitions or
undefinitions). If the macro is redefined/undefined on a
per-compilation basis (ie. in the call to 'compile()'), then that
takes precedence.
"""
# Delete from the list of macro definitions/undefinitions if
# already there (so that this one will take precedence).
        i = self._find_macro(name)
if i is not None:
del self.macros[i]
undefn = (name,)
self.macros.append(undefn)
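    # A minimal usage sketch (hypothetical macro names): later calls replace
    # earlier definitions/undefinitions of the same macro, so after
    #
    #   cc.define_macro("VERSION", "1.0")
    #   cc.define_macro("NDEBUG")
    #   cc.undefine_macro("VERSION")
    #
    # self.macros == [("NDEBUG", None), ("VERSION",)].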
def add_include_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
header files. The compiler is instructed to search directories in
the order in which they are supplied by successive calls to
'add_include_dir()'.
"""
self.include_dirs.append(dir)
def set_include_dirs(self, dirs):
"""Set the list of directories that will be searched to 'dirs' (a
list of strings). Overrides any preceding calls to
        'add_include_dir()'; subsequent calls to 'add_include_dir()' add
to the list passed to 'set_include_dirs()'. This does not affect
any list of standard include directories that the compiler may
search by default.
"""
self.include_dirs = dirs[:]
def add_library(self, libname):
"""Add 'libname' to the list of libraries that will be included in
all links driven by this compiler object. Note that 'libname'
should *not* be the name of a file containing a library, but the
name of the library itself: the actual filename will be inferred by
the linker, the compiler, or the compiler class (depending on the
platform).
The linker will be instructed to link against libraries in the
order they were supplied to 'add_library()' and/or
'set_libraries()'. It is perfectly valid to duplicate library
names; the linker will be instructed to link against libraries as
many times as they are mentioned.
"""
self.libraries.append(libname)
def set_libraries(self, libnames):
"""Set the list of libraries to be included in all links driven by
this compiler object to 'libnames' (a list of strings). This does
not affect any standard system libraries that the linker may
include by default.
"""
self.libraries = libnames[:]
def add_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
libraries specified to 'add_library()' and 'set_libraries()'. The
linker will be instructed to search for libraries in the order they
are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
"""
self.library_dirs.append(dir)
def set_library_dirs(self, dirs):
"""Set the list of library search directories to 'dirs' (a list of
strings). This does not affect any standard library search path
that the linker may search by default.
"""
self.library_dirs = dirs[:]
def add_runtime_library_dir(self, dir):
"""Add 'dir' to the list of directories that will be searched for
shared libraries at runtime.
"""
self.runtime_library_dirs.append(dir)
def set_runtime_library_dirs(self, dirs):
"""Set the list of directories to search for shared libraries at
runtime to 'dirs' (a list of strings). This does not affect any
standard search path that the runtime linker may search by
default.
"""
self.runtime_library_dirs = dirs[:]
def add_link_object(self, object):
"""Add 'object' to the list of object files (or analogues, such as
explicitly named library files or the output of "resource
compilers") to be included in every link driven by this compiler
object.
"""
self.objects.append(object)
def set_link_objects(self, objects):
"""Set the list of object files (or analogues) to be included in
every link to 'objects'. This does not affect any standard object
files that the linker may include by default (such as system
libraries).
"""
self.objects = objects[:]
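    # Putting the bookkeeping calls together -- a hedged sketch with made-up
    # paths; on Unix-style compilers this typically contributes
    # "-L/opt/foo/lib -lfoo extra.o" to every subsequent link:
    #
    #   cc.add_library_dir("/opt/foo/lib")
    #   cc.add_library("foo")
    #   cc.add_link_object("extra.o")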
# -- Private utility methods --------------------------------------
# (here for the convenience of subclasses)
# Helper method to prep compiler in subclass compile() methods
def _setup_compile(self, outdir, macros, incdirs, sources, depends,
extra):
"""Process arguments and decide which source files to compile."""
if outdir is None:
outdir = self.output_dir
elif not isinstance(outdir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if incdirs is None:
incdirs = self.include_dirs
elif isinstance(incdirs, (list, tuple)):
incdirs = list(incdirs) + (self.include_dirs or [])
else:
raise TypeError(
"'include_dirs' (if supplied) must be a list of strings")
if extra is None:
extra = []
# Get the list of expected output (object) files
objects = self.object_filenames(sources, strip_dir=0,
output_dir=outdir)
assert len(objects) == len(sources)
pp_opts = gen_preprocess_options(macros, incdirs)
build = {}
for i in range(len(sources)):
src = sources[i]
obj = objects[i]
ext = os.path.splitext(src)[1]
self.mkpath(os.path.dirname(obj))
build[obj] = (src, ext)
return macros, objects, extra, pp_opts, build
def _get_cc_args(self, pp_opts, debug, before):
# works for unixccompiler, cygwinccompiler
cc_args = pp_opts + ['-c']
if debug:
cc_args[:0] = ['-g']
if before:
cc_args[:0] = before
return cc_args
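    # Sketch of the resulting argument list (inputs assumed): with
    # pp_opts == ["-DNDEBUG", "-Iinclude"], a true 'debug' flag and
    # before == ["-O2"], this returns
    # ["-O2", "-g", "-DNDEBUG", "-Iinclude", "-c"].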
def _fix_compile_args(self, output_dir, macros, include_dirs):
"""Typecheck and fix-up some of the arguments to the 'compile()'
method, and return fixed-up values. Specifically: if 'output_dir'
is None, replaces it with 'self.output_dir'; ensures that 'macros'
is a list, and augments it with 'self.macros'; ensures that
'include_dirs' is a list, and augments it with 'self.include_dirs'.
Guarantees that the returned values are of the correct type,
i.e. for 'output_dir' either string or None, and for 'macros' and
'include_dirs' either list or None.
"""
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
if macros is None:
macros = self.macros
elif isinstance(macros, list):
macros = macros + (self.macros or [])
else:
raise TypeError("'macros' (if supplied) must be a list of tuples")
if include_dirs is None:
include_dirs = self.include_dirs
elif isinstance(include_dirs, (list, tuple)):
include_dirs = list(include_dirs) + (self.include_dirs or [])
else:
raise TypeError(
"'include_dirs' (if supplied) must be a list of strings")
return output_dir, macros, include_dirs
def _prep_compile(self, sources, output_dir, depends=None):
"""Decide which souce files must be recompiled.
Determine the list of object files corresponding to 'sources',
and figure out which ones really need to be recompiled.
Return a list of all object files and a dictionary telling
which source files can be skipped.
"""
# Get the list of expected output (object) files
objects = self.object_filenames(sources, output_dir=output_dir)
assert len(objects) == len(sources)
# Return an empty dict for the "which source files can be skipped"
# return value to preserve API compatibility.
return objects, {}
def _fix_object_args(self, objects, output_dir):
"""Typecheck and fix up some arguments supplied to various methods.
Specifically: ensure that 'objects' is a list; if output_dir is
None, replace with self.output_dir. Return fixed versions of
'objects' and 'output_dir'.
"""
if not isinstance(objects, (list, tuple)):
raise TypeError("'objects' must be a list or tuple of strings")
objects = list(objects)
if output_dir is None:
output_dir = self.output_dir
elif not isinstance(output_dir, str):
raise TypeError("'output_dir' must be a string or None")
return (objects, output_dir)
def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs):
"""Typecheck and fix up some of the arguments supplied to the
'link_*' methods. Specifically: ensure that all arguments are
lists, and augment them with their permanent versions
(eg. 'self.libraries' augments 'libraries'). Return a tuple with
fixed versions of all arguments.
"""
if libraries is None:
libraries = self.libraries
elif isinstance(libraries, (list, tuple)):
            libraries = list(libraries) + (self.libraries or [])
else:
raise TypeError(
"'libraries' (if supplied) must be a list of strings")
if library_dirs is None:
library_dirs = self.library_dirs
elif isinstance(library_dirs, (list, tuple)):
            library_dirs = list(library_dirs) + (self.library_dirs or [])
else:
raise TypeError(
"'library_dirs' (if supplied) must be a list of strings")
if runtime_library_dirs is None:
runtime_library_dirs = self.runtime_library_dirs
elif isinstance(runtime_library_dirs, (list, tuple)):
runtime_library_dirs = (list(runtime_library_dirs) +
(self.runtime_library_dirs or []))
else:
raise TypeError("'runtime_library_dirs' (if supplied) "
"must be a list of strings")
return (libraries, library_dirs, runtime_library_dirs)
def _need_link(self, objects, output_file):
"""Return true if we need to relink the files listed in 'objects'
to recreate 'output_file'.
"""
if self.force:
return True
else:
            if self.dry_run:
                newer = newer_group(objects, output_file, missing='newer')
            else:
                newer = newer_group(objects, output_file)
return newer
def detect_language(self, sources):
"""Detect the language of a given file, or list of files. Uses
language_map, and language_order to do the job.
"""
if not isinstance(sources, list):
sources = [sources]
lang = None
index = len(self.language_order)
for source in sources:
base, ext = os.path.splitext(source)
extlang = self.language_map.get(ext)
try:
extindex = self.language_order.index(extlang)
if extindex < index:
lang = extlang
index = extindex
except ValueError:
pass
return lang
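    # Usage sketch (hypothetical filenames): the lowest index in
    # language_order wins, so mixed C/C++ sources come out as "c++":
    #
    #   cc.detect_language(["util.c", "core.cpp", "glue.c"])  # -> "c++"
    #   cc.detect_language("util.c")                          # -> "c"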
# -- Worker methods ------------------------------------------------
# (must be implemented by subclasses)
def preprocess(self, source, output_file=None, macros=None,
include_dirs=None, extra_preargs=None, extra_postargs=None):
"""Preprocess a single C/C++ source file, named in 'source'.
Output will be written to file named 'output_file', or stdout if
'output_file' not supplied. 'macros' is a list of macro
definitions as for 'compile()', which will augment the macros set
with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
list of directory names that will be added to the default list.
Raises PreprocessError on failure.
"""
pass
def compile(self, sources, output_dir=None, macros=None,
include_dirs=None, debug=0, extra_preargs=None,
extra_postargs=None, depends=None):
"""Compile one or more source files.
'sources' must be a list of filenames, most likely C/C++
files, but in reality anything that can be handled by a
particular compiler and compiler class (eg. MSVCCompiler can
handle resource files in 'sources'). Return a list of object
filenames, one per source filename in 'sources'. Depending on
the implementation, not all source files will necessarily be
compiled, but all corresponding object filenames will be
returned.
If 'output_dir' is given, object files will be put under it, while
retaining their original path component. That is, "foo/bar.c"
normally compiles to "foo/bar.o" (for a Unix implementation); if
'output_dir' is "build", then it would compile to
"build/foo/bar.o".
'macros', if given, must be a list of macro definitions. A macro
definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
The former defines a macro; if the value is None, the macro is
defined without an explicit value. The 1-tuple case undefines a
        macro. Later definitions/redefinitions/undefinitions take
precedence.
'include_dirs', if given, must be a list of strings, the
directories to add to the default include file search path for this
compilation only.
'debug' is a boolean; if true, the compiler will be instructed to
output debug symbols in (or alongside) the object file(s).
        'extra_preargs' and 'extra_postargs' are implementation-dependent.
On platforms that have the notion of a command-line (e.g. Unix,
DOS/Windows), they are most likely lists of strings: extra
command-line arguments to prepend/append to the compiler command
line. On other platforms, consult the implementation class
documentation. In any event, they are intended as an escape hatch
for those occasions when the abstract compiler framework doesn't
cut the mustard.
'depends', if given, is a list of filenames that all targets
depend on. If a source file is older than any file in
depends, then the source file will be recompiled. This
supports dependency tracking, but only at a coarse
granularity.
Raises CompileError on failure.
"""
# A concrete compiler class can either override this method
# entirely or implement _compile().
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# Return *all* object filenames, not just the ones we just built.
return objects
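    # Hedged usage sketch (made-up filenames, Unix-flavored output): a
    # concrete subclass such as UnixCCompiler would turn
    #
    #   objs = cc.compile(["src/foo.c", "src/bar.c"], output_dir="build",
    #                     macros=[("NDEBUG", None)],
    #                     include_dirs=["include"])
    #
    # into ["build/src/foo.o", "build/src/bar.o"], compiling each file
    # with options along the lines of "-DNDEBUG -Iinclude -c".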
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compile 'src' to product 'obj'."""
# A concrete compiler class that does not override compile()
# should implement _compile().
pass
def create_static_lib(self, objects, output_libname, output_dir=None,
debug=0, target_lang=None):
"""Link a bunch of stuff together to create a static library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects', the extra object files supplied to
'add_link_object()' and/or 'set_link_objects()', the libraries
supplied to 'add_library()' and/or 'set_libraries()', and the
libraries supplied as 'libraries' (if any).
'output_libname' should be a library name, not a filename; the
filename will be inferred from the library name. 'output_dir' is
the directory where the library file will be put.
'debug' is a boolean; if true, debugging information will be
included in the library (note that on most platforms, it is the
compile step where this matters: the 'debug' flag is included here
just for consistency).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LibError on failure.
"""
pass
# values for target_desc parameter in link()
SHARED_OBJECT = "shared_object"
SHARED_LIBRARY = "shared_library"
EXECUTABLE = "executable"
def link(self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
"""Link a bunch of stuff together to create an executable or
shared library file.
The "bunch of stuff" consists of the list of object files supplied
as 'objects'. 'output_filename' should be a filename. If
'output_dir' is supplied, 'output_filename' is relative to it
(i.e. 'output_filename' can provide directory components if
needed).
'libraries' is a list of libraries to link against. These are
library names, not filenames, since they're translated into
filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
on Unix and "foo.lib" on DOS/Windows). However, they can include a
directory component, which means the linker will look in that
specific directory rather than searching all the normal locations.
'library_dirs', if supplied, should be a list of directories to
search for libraries that were specified as bare library names
(ie. no directory component). These are on top of the system
default and those supplied to 'add_library_dir()' and/or
'set_library_dirs()'. 'runtime_library_dirs' is a list of
directories that will be embedded into the shared library and used
to search for other shared libraries that *it* depends on at
run-time. (This may only be relevant on Unix.)
'export_symbols' is a list of symbols that the shared library will
export. (This appears to be relevant only on Windows.)
'debug' is as for 'compile()' and 'create_static_lib()', with the
slight distinction that it actually matters on most platforms (as
opposed to 'create_static_lib()', which includes a 'debug' flag
mostly for form's sake).
'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
of course that they supply command-line arguments for the
particular linker being used).
'target_lang' is the target language for which the given objects
are being compiled. This allows specific linkage time treatment of
certain languages.
Raises LinkError on failure.
"""
raise NotImplementedError
# Old 'link_*()' methods, rewritten to use the new 'link()' method.
def link_shared_lib(self,
objects,
output_libname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_LIBRARY, objects,
self.library_filename(output_libname, lib_type='shared'),
output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_shared_object(self,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
self.link(CCompiler.SHARED_OBJECT, objects,
output_filename, output_dir,
libraries, library_dirs, runtime_library_dirs,
export_symbols, debug,
extra_preargs, extra_postargs, build_temp, target_lang)
def link_executable(self,
objects,
output_progname,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
target_lang=None):
self.link(CCompiler.EXECUTABLE, objects,
self.executable_filename(output_progname), output_dir,
libraries, library_dirs, runtime_library_dirs, None,
debug, extra_preargs, extra_postargs, None, target_lang)
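    # End-to-end link sketch (hypothetical names): the convenience wrappers
    # above simply forward to link(); e.g.
    #
    #   cc.link_executable(objs, "app", libraries=["m"],
    #                      library_dirs=["/usr/local/lib"])
    #
    # becomes link(CCompiler.EXECUTABLE, objs, executable_filename("app"), ...)
    # in a concrete subclass.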
# -- Miscellaneous methods -----------------------------------------
    # These are all used by the 'gen_lib_options()' function; there is
# no appropriate default implementation so subclasses should
# implement all of these.
def library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for libraries.
"""
raise NotImplementedError
def runtime_library_dir_option(self, dir):
"""Return the compiler option to add 'dir' to the list of
directories searched for runtime libraries.
"""
raise NotImplementedError
def library_option(self, lib):
"""Return the compiler option to add 'lib' to the list of libraries
linked into the shared library or executable.
"""
raise NotImplementedError
def has_function(self, funcname, includes=None, include_dirs=None,
libraries=None, library_dirs=None):
"""Return a boolean indicating whether funcname is supported on
the current platform. The optional arguments can be used to
augment the compilation environment.
"""
# this can't be included at module scope because it tries to
# import math which might not be available at that point - maybe
# the necessary logic should just be inlined?
import tempfile
if includes is None:
includes = []
if include_dirs is None:
include_dirs = []
if libraries is None:
libraries = []
if library_dirs is None:
library_dirs = []
fd, fname = tempfile.mkstemp(".c", funcname, text=True)
f = os.fdopen(fd, "w")
try:
for incl in includes:
f.write("""#include "%s"\n""" % incl)
f.write("""\
int main (int argc, char **argv) {
%s();
return 0;
}
""" % funcname)
finally:
f.close()
try:
objects = self.compile([fname], include_dirs=include_dirs)
except CompileError:
return False
try:
self.link_executable(objects, "a.out",
libraries=libraries,
library_dirs=library_dirs)
except (LinkError, TypeError):
return False
return True
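    # Usage sketch (assumed symbols): this probes by actually compiling and
    # linking a tiny program, so it is relatively slow but portable:
    #
    #   if cc.has_function("clock_gettime", libraries=["rt"]):
    #       cc.define_macro("HAVE_CLOCK_GETTIME")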
    def find_library_file(self, dirs, lib, debug=0):
"""Search the specified list of directories for a static or shared
library file 'lib' and return the full path to that file. If
'debug' true, look for a debugging version (if that makes sense on
the current platform). Return None if 'lib' wasn't found in any of
the specified directories.
"""
raise NotImplementedError
# -- Filename generation methods -----------------------------------
# The default implementation of the filename generating methods are
# prejudiced towards the Unix/DOS/Windows view of the world:
# * object files are named by replacing the source file extension
# (eg. .c/.cpp -> .o/.obj)
    #   * library files (shared or static) are named by plugging the
    #     library name and extension into a format string, eg.
    #     "lib%s%s" % (lib_name, ".a") for Unix static libraries
# * executables are named by appending an extension (possibly
# empty) to the program name: eg. progname + ".exe" for
# Windows
#
# To reduce redundant code, these methods expect to find
# several attributes in the current object (presumably defined
# as class attributes):
# * src_extensions -
# list of C/C++ source file extensions, eg. ['.c', '.cpp']
# * obj_extension -
# object file extension, eg. '.o' or '.obj'
# * static_lib_extension -
# extension for static library files, eg. '.a' or '.lib'
# * shared_lib_extension -
# extension for shared library/object files, eg. '.so', '.dll'
    #   * static_lib_format -
    #     format string for generating static library filenames,
    #     eg. 'lib%s%s' or '%s%s'
# * shared_lib_format
# format string for generating shared library filenames
# (probably same as static_lib_format, since the extension
# is one of the intended parameters to the format string)
# * exe_extension -
# extension for executable files, eg. '' or '.exe'
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
base, ext = os.path.splitext(src_name)
base = os.path.splitdrive(base)[1] # Chop off the drive
base = base[os.path.isabs(base):] # If abs, chop off leading /
if ext not in self.src_extensions:
raise UnknownFileError(
"unknown file type '%s' (from '%s')" % (ext, src_name))
if strip_dir:
base = os.path.basename(base)
obj_names.append(os.path.join(output_dir,
base + self.obj_extension))
return obj_names
def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + self.shared_lib_extension)
def executable_filename(self, basename, strip_dir=0, output_dir=''):
assert output_dir is not None
if strip_dir:
basename = os.path.basename(basename)
return os.path.join(output_dir, basename + (self.exe_extension or ''))
def library_filename(self, libname, lib_type='static', # or 'shared'
strip_dir=0, output_dir=''):
assert output_dir is not None
if lib_type not in ("static", "shared", "dylib", "xcode_stub"):
raise ValueError(
"'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"")
fmt = getattr(self, lib_type + "_lib_format")
ext = getattr(self, lib_type + "_lib_extension")
dir, base = os.path.split(libname)
filename = fmt % (base, ext)
if strip_dir:
dir = ''
return os.path.join(output_dir, dir, filename)
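    # Filename generation sketch, assuming UnixCCompiler-style settings
    # (static_lib_format == "lib%s%s", static_lib_extension == ".a",
    # shared_lib_extension == ".so"):
    #
    #   cc.library_filename("foo")                         # -> "libfoo.a"
    #   cc.library_filename("sub/foo", lib_type="shared")  # -> "sub/libfoo.so"
    #   cc.object_filenames(["pkg/mod.c"], output_dir="build")
    #                                                      # -> ["build/pkg/mod.o"]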
# -- Utility methods -----------------------------------------------
def announce(self, msg, level=1):
log.debug(msg)
def debug_print(self, msg):
from distutils.debug import DEBUG
if DEBUG:
print(msg)
def warn(self, msg):
sys.stderr.write("warning: %s\n" % msg)
def execute(self, func, args, msg=None, level=1):
execute(func, args, msg, self.dry_run)
def spawn(self, cmd, **kwargs):
spawn(cmd, dry_run=self.dry_run, **kwargs)
def move_file(self, src, dst):
return move_file(src, dst, dry_run=self.dry_run)
    def mkpath(self, name, mode=0o777):
mkpath(name, mode, dry_run=self.dry_run)
# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
# type for that platform. Keys are interpreted as re match
# patterns. Order is important; platform mappings are preferred over
# OS names.
_default_compilers = (
# Platform string mappings
    # on a Cygwin-built Python we can use gcc like an ordinary UNIXish
# compiler
('cygwin.*', 'unix'),
# OS name mappings
('posix', 'unix'),
('nt', 'msvc'),
)
def get_default_compiler(osname=None, platform=None):
"""Determine the default compiler to use for the given platform.
osname should be one of the standard Python OS names (i.e. the
ones returned by os.name) and platform the common value
returned by sys.platform for the platform in question.
The default values are os.name and sys.platform in case the
parameters are not given.
"""
if osname is None:
osname = os.name
if platform is None:
platform = sys.platform
for pattern, compiler in _default_compilers:
if re.match(pattern, platform) is not None or \
re.match(pattern, osname) is not None:
return compiler
# Default to Unix compiler
return 'unix'
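# Usage sketch: both arguments default to the running interpreter's values,
# so on a stock CPython these hold:
#
#   get_default_compiler('posix', 'linux')    # -> 'unix'
#   get_default_compiler('nt', 'win32')       # -> 'msvc'
#   get_default_compiler('posix', 'cygwin')   # -> 'unix' ('cygwin.*' pattern)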
# Map compiler types to (module_name, class_name) pairs -- ie. where to
# find the code that implements an interface to this compiler. (The module
# is assumed to be in the 'distutils' package.)
compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
"standard UNIX-style compiler"),
'msvc': ('_msvccompiler', 'MSVCCompiler',
"Microsoft Visual C++"),
'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
"Cygwin port of GNU C Compiler for Win32"),
'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
"Mingw32 port of GNU C Compiler for Win32"),
'bcpp': ('bcppcompiler', 'BCPPCompiler',
"Borland C++ Compiler"),
}
def show_compilers():
"""Print list of available compilers (used by the "--help-compiler"
options to "build", "build_ext", "build_clib").
"""
# XXX this "knows" that the compiler option it's describing is
# "--compiler", which just happens to be the case for the three
# commands that use it.
from distutils.fancy_getopt import FancyGetopt
compilers = []
for compiler in compiler_class.keys():
compilers.append(("compiler="+compiler, None,
compiler_class[compiler][2]))
compilers.sort()
pretty_printer = FancyGetopt(compilers)
pretty_printer.print_help("List of available compilers:")
def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
"""Generate an instance of some CCompiler subclass for the supplied
platform/compiler combination. 'plat' defaults to 'os.name'
(eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
for that platform. Currently only 'posix' and 'nt' are supported, and
the default compilers are "traditional Unix interface" (UnixCCompiler
class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
possible to ask for a Unix compiler object under Windows, and a
Microsoft compiler object under Unix -- if you supply a value for
'compiler', 'plat' is ignored.
"""
if plat is None:
plat = os.name
try:
if compiler is None:
compiler = get_default_compiler(plat)
(module_name, class_name, long_description) = compiler_class[compiler]
except KeyError:
msg = "don't know how to compile C/C++ code on platform '%s'" % plat
if compiler is not None:
msg = msg + " with '%s' compiler" % compiler
raise DistutilsPlatformError(msg)
try:
module_name = "distutils." + module_name
        __import__(module_name)
module = sys.modules[module_name]
klass = vars(module)[class_name]
except ImportError:
raise DistutilsModuleError(
"can't compile C/C++ code: unable to load module '%s'" % \
module_name)
except KeyError:
raise DistutilsModuleError(
"can't compile C/C++ code: unable to find class '%s' "
"in module '%s'" % (class_name, module_name))
# XXX The None is necessary to preserve backwards compatibility
# with classes that expect verbose to be the first positional
# argument.
return klass(None, dry_run, force)
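# Usage sketch (hedged): this is the usual entry point for callers such as
# the build_ext and build_clib commands --
#
#   from distutils.ccompiler import new_compiler
#   cc = new_compiler()                     # default type for this platform
#   cc = new_compiler(compiler='mingw32')   # explicit type; 'plat' is ignored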
def gen_preprocess_options(macros, include_dirs):
"""Generate C pre-processor options (-D, -U, -I) as used by at least
two types of compilers: the typical Unix compiler and Visual C++.
'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
means undefine (-U) macro 'name', and (name,value) means define (-D)
macro 'name' to 'value'. 'include_dirs' is just a list of directory
names to be added to the header file search path (-I). Returns a list
of command-line options suitable for either Unix compilers or Visual
C++.
"""
# XXX it would be nice (mainly aesthetic, and so we don't generate
# stupid-looking command lines) to go over 'macros' and eliminate
# redundant definitions/undefinitions (ie. ensure that only the
# latest mention of a particular macro winds up on the command
# line). I don't think it's essential, though, since most (all?)
# Unix C compilers only pay attention to the latest -D or -U
# mention of a macro on their command line. Similar situation for
# 'include_dirs'. I'm punting on both for now. Anyways, weeding out
# redundancies like this should probably be the province of
# CCompiler, since the data structures used are inherited from it
# and therefore common to all CCompiler classes.
pp_opts = []
for macro in macros:
if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
raise TypeError(
"bad macro definition '%s': "
"each element of 'macros' list must be a 1- or 2-tuple"
% macro)
if len(macro) == 1: # undefine this macro
pp_opts.append("-U%s" % macro[0])
elif len(macro) == 2:
if macro[1] is None: # define with no explicit value
pp_opts.append("-D%s" % macro[0])
else:
# XXX *don't* need to be clever about quoting the
# macro value here, because we're going to avoid the
# shell at all costs when we spawn the command!
pp_opts.append("-D%s=%s" % macro)
for dir in include_dirs:
pp_opts.append("-I%s" % dir)
return pp_opts
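# Worked example (inputs assumed): macros and include_dirs map directly onto
# -D/-U/-I options, in order:
#
#   gen_preprocess_options([("NDEBUG", None), ("VERSION", "1.0"), ("DEBUG",)],
#                          ["include", "/usr/local/include"])
#   # -> ['-DNDEBUG', '-DVERSION=1.0', '-UDEBUG',
#   #     '-Iinclude', '-I/usr/local/include']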
def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
"""Generate linker options for searching library directories and
linking with specific libraries. 'libraries' and 'library_dirs' are,
respectively, lists of library names (not filenames!) and search
directories. Returns a list of command-line options suitable for use
with some compiler (depending on the two format strings passed in).
"""
lib_opts = []
for dir in library_dirs:
lib_opts.append(compiler.library_dir_option(dir))
for dir in runtime_library_dirs:
opt = compiler.runtime_library_dir_option(dir)
if isinstance(opt, list):
lib_opts = lib_opts + opt
else:
lib_opts.append(opt)
# XXX it's important that we *not* remove redundant library mentions!
# sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
# resolve all symbols. I just hope we never have to say "-lfoo obj.o
# -lbar" to get things to work -- that's certainly a possibility, but a
# pretty nasty way to arrange your C code.
for lib in libraries:
(lib_dir, lib_name) = os.path.split(lib)
if lib_dir:
lib_file = compiler.find_library_file([lib_dir], lib_name)
if lib_file:
lib_opts.append(lib_file)
else:
compiler.warn("no library file corresponding to "
"'%s' found (skipping)" % lib)
else:
            lib_opts.append(compiler.library_option(lib))
return lib_opts
 |
Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/ccompiler.py
 |
"""distutils.command.build_clib
Implements the Distutils 'build_clib' command, to build a C/C++ library
that is included in the module distribution and needed by an extension
module."""
# XXX this module has *lots* of code ripped-off quite transparently from
# build_ext.py -- not surprisingly really, as the work required to build
# a static library from a collection of C source files is not really all
# that different from what's required to build a shared object file from
# a collection of C source files. Nevertheless, I haven't done the
# necessary refactoring to account for the overlap in code between the
# two modules, mainly because a number of subtle details changed in the
# cut 'n paste. Sigh.
import os
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler
from distutils import log
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build_clib(Command):
description = "build C/C++ libraries used by Python extensions"
user_options = [
('build-clib=', 'b',
"directory to build C/C++ libraries to"),
('build-temp=', 't',
"directory to put temporary build by-products"),
('debug', 'g',
"compile with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
]
boolean_options = ['debug', 'force']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.build_clib = None
self.build_temp = None
# List of libraries to build
self.libraries = None
# Compilation options for all libraries
self.include_dirs = None
self.define = None
self.undef = None
self.debug = None
self.force = 0
self.compiler = None
def finalize_options(self):
# This might be confusing: both build-clib and build-temp default
# to build-temp as defined by the "build" command. This is because
# I think that C libraries are really just temporary build
# by-products, at least from the point of view of building Python
# extensions -- but I want to keep my options open.
self.set_undefined_options('build',
('build_temp', 'build_clib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'))
self.libraries = self.distribution.libraries
if self.libraries:
self.check_library_list(self.libraries)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# XXX same as for build_ext -- what about 'self.define' and
# 'self.undef' ?
def run(self):
if not self.libraries:
return
# Yech -- this is cut 'n pasted from build_ext.py!
from distutils.ccompiler import new_compiler
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
self.build_libraries(self.libraries)
def check_library_list(self, libraries):
"""Ensure that the list of libraries is valid.
        'libraries' is presumably provided as the command option of the
        same name.
This method checks that it is a list of 2-tuples, where the tuples
are (library_name, build_info_dict).
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(libraries, list):
raise DistutilsSetupError(
"'libraries' option must be a list of tuples")
        for lib in libraries:
            if not isinstance(lib, tuple) or len(lib) != 2:
                raise DistutilsSetupError(
                    "each element of 'libraries' must be a 2-tuple")
name, build_info = lib
if not isinstance(name, str):
raise DistutilsSetupError(
"first element of each tuple in 'libraries' "
"must be a string (the library name)")
if '/' in name or (os.sep != '/' and os.sep in name):
raise DistutilsSetupError("bad library name '%s': "
"may not contain directory separators" % lib[0])
if not isinstance(build_info, dict):
raise DistutilsSetupError(
"second element of each tuple in 'libraries' "
"must be a dictionary (build info)")
def get_library_names(self):
# Assume the library list is valid -- 'check_library_list()' is
# called from 'finalize_options()', so it should be!
if not self.libraries:
return None
lib_names = []
for (lib_name, build_info) in self.libraries:
lib_names.append(lib_name)
return lib_names
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
for (lib_name, build_info) in self.libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
filenames.extend(sources)
return filenames
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
sources = list(sources)
log.info("building '%s' library", lib_name)
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
debug=self.debug)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(objects, lib_name,
output_dir=self.build_clib,
debug=self.debug)
 |
Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/build_clib.py
 |
"""distutils.pypirc
Provides the PyPIRCCommand class, the base class for the command classes
that use .pypirc in the distutils.command package.
"""
import os
from configparser import RawConfigParser
from distutils.cmd import Command
DEFAULT_PYPIRC = """\
[distutils]
index-servers =
pypi
[pypi]
username:%s
password:%s
"""
class PyPIRCCommand(Command):
"""Base command that knows how to handle the .pypirc file
"""
DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
DEFAULT_REALM = 'pypi'
repository = None
realm = None
user_options = [
('repository=', 'r',
"url of repository [default: %s]" % \
DEFAULT_REPOSITORY),
('show-response', None,
'display full response text from server')]
boolean_options = ['show-response']
def _get_rc_file(self):
"""Returns rc file path."""
return os.path.join(os.path.expanduser('~'), '.pypirc')
def _store_pypirc(self, username, password):
"""Creates a default .pypirc file."""
rc = self._get_rc_file()
with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
f.write(DEFAULT_PYPIRC % (username, password))
def _read_pypirc(self):
"""Reads the .pypirc file."""
rc = self._get_rc_file()
if os.path.exists(rc):
self.announce('Using PyPI login from %s' % rc)
repository = self.repository or self.DEFAULT_REPOSITORY
config = RawConfigParser()
config.read(rc)
sections = config.sections()
if 'distutils' in sections:
# let's get the list of servers
index_servers = config.get('distutils', 'index-servers')
_servers = [server.strip() for server in
index_servers.split('\n')
if server.strip() != '']
if _servers == []:
# nothing set, let's try to get the default pypi
if 'pypi' in sections:
_servers = ['pypi']
else:
# the file is not properly defined, returning
# an empty dict
return {}
for server in _servers:
current = {'server': server}
current['username'] = config.get(server, 'username')
# optional params
for key, default in (('repository',
self.DEFAULT_REPOSITORY),
('realm', self.DEFAULT_REALM),
('password', None)):
if config.has_option(server, key):
current[key] = config.get(server, key)
else:
current[key] = default
# work around people having "repository" for the "pypi"
# section of their config set to the HTTP (rather than
# HTTPS) URL
if (server == 'pypi' and
repository in (self.DEFAULT_REPOSITORY, 'pypi')):
current['repository'] = self.DEFAULT_REPOSITORY
return current
if (current['server'] == repository or
current['repository'] == repository):
return current
elif 'server-login' in sections:
# old format
server = 'server-login'
if config.has_option(server, 'repository'):
repository = config.get(server, 'repository')
else:
repository = self.DEFAULT_REPOSITORY
return {'username': config.get(server, 'username'),
'password': config.get(server, 'password'),
'repository': repository,
'server': server,
'realm': self.DEFAULT_REALM}
return {}
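    # Sketch of the dictionary returned for a matching server entry
    # (values are placeholders):
    #
    #   {'server': 'pypi', 'username': 'user', 'password': 'secret',
    #    'repository': 'https://upload.pypi.org/legacy/', 'realm': 'pypi'}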
def _read_pypi_response(self, response):
"""Read and decode a PyPI HTTP response."""
import cgi
content_type = response.getheader('content-type', 'text/plain')
encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii')
return response.read().decode(encoding)
def initialize_options(self):
"""Initialize options."""
self.repository = None
self.realm = None
self.show_response = 0
def finalize_options(self):
"""Finalizes options."""
if self.repository is None:
self.repository = self.DEFAULT_REPOSITORY
if self.realm is None:
self.realm = self.DEFAULT_REALM
 |
Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/config.py
 |
"""Provide access to Python's configuration information. The specific
configuration variables available depend heavily on the platform and
configuration. The values may be retrieved using
get_config_var(name), and the list of variables is available via
get_config_vars().keys(). Additional convenience functions are also
available.
Written by: Fred L. Drake, Jr.
Email: <fdrake@acm.org>
"""
import _imp
import os
import re
import sys
from .errors import DistutilsPlatformError
IS_PYPY = '__pypy__' in sys.builtin_module_names
# These are needed in a couple of spots, so just compute them once.
PREFIX = os.path.normpath(sys.prefix)
EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
BASE_PREFIX = os.path.normpath(sys.base_prefix)
BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
# Path to the base directory of the project. On Windows the binary may
# live in project/PCbuild/win32 or project/PCbuild/amd64.
# set for cross builds
if "_PYTHON_PROJECT_BASE" in os.environ:
project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
else:
if sys.executable:
project_base = os.path.dirname(os.path.abspath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
project_base = os.getcwd()
# python_build: (Boolean) if true, we're either building Python or
# building an extension with an un-installed Python, so we use
# different (hard-wired) directories.
def _is_python_source_dir(d):
for fn in ("Setup", "Setup.local"):
if os.path.isfile(os.path.join(d, "Modules", fn)):
return True
return False
_sys_home = getattr(sys, '_home', None)
if os.name == 'nt':
def _fix_pcbuild(d):
if d and os.path.normcase(d).startswith(
os.path.normcase(os.path.join(PREFIX, "PCbuild"))):
return PREFIX
return d
project_base = _fix_pcbuild(project_base)
_sys_home = _fix_pcbuild(_sys_home)
def _python_build():
if _sys_home:
return _is_python_source_dir(_sys_home)
return _is_python_source_dir(project_base)
python_build = _python_build()
# Calculate the build qualifier flags if they are defined. Adding the flags
# to the include and lib directories only makes sense for an installation, not
# an in-source build.
build_flags = ''
try:
if not python_build:
build_flags = sys.abiflags
except AttributeError:
# It's not a configure-based build, so the sys module doesn't have
# this attribute, which is fine.
pass
def get_python_version():
"""Return a string containing the major and minor Python version,
leaving off the patchlevel. Sample return values could be '1.5'
or '2.2'.
"""
return '%d.%d' % sys.version_info[:2]
def get_python_inc(plat_specific=0, prefix=None):
"""Return the directory containing installed Python header files.
If 'plat_specific' is false (the default), this is the path to the
non-platform-specific header files, i.e. Python.h and so on;
otherwise, this is the path to platform-specific header files
(namely pyconfig.h).
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
if prefix is None:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
if IS_PYPY:
return os.path.join(prefix, 'include')
elif os.name == "posix":
if python_build:
# Assume the executable is in the build directory. The
# pyconfig.h file should be in the same directory. Since
# the build directory may not be the source directory, we
# must use "srcdir" from the makefile to find the "Include"
# directory.
if plat_specific:
return _sys_home or project_base
else:
incdir = os.path.join(get_config_var('srcdir'), 'Include')
return os.path.normpath(incdir)
python_dir = 'python' + get_python_version() + build_flags
return os.path.join(prefix, "include", python_dir)
elif os.name == "nt":
if python_build:
# Include both the include and PC dir to ensure we can find
# pyconfig.h
return (os.path.join(prefix, "include") + os.path.pathsep +
os.path.join(prefix, "PC"))
return os.path.join(prefix, "include")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its C header files "
"on platform '%s'" % os.name)
def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
"""Return the directory containing the Python library (standard or
site additions).
If 'plat_specific' is true, return the directory containing
platform-specific modules, i.e. any module from a non-pure-Python
module distribution; otherwise, return the platform-shared library
directory. If 'standard_lib' is true, return the directory
containing standard Python library modules; otherwise, return the
directory for site-specific modules.
If 'prefix' is supplied, use it instead of sys.base_prefix or
sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
"""
if IS_PYPY:
# PyPy-specific schema
if prefix is None:
prefix = PREFIX
if standard_lib:
return os.path.join(prefix, "lib-python", sys.version[0])
return os.path.join(prefix, 'site-packages')
if prefix is None:
if standard_lib:
prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
else:
prefix = plat_specific and EXEC_PREFIX or PREFIX
if os.name == "posix":
if plat_specific or standard_lib:
# Platform-specific modules (any module from a non-pure-Python
# module distribution) or standard Python library modules.
libdir = getattr(sys, "platlibdir", "lib")
else:
# Pure Python
libdir = "lib"
libpython = os.path.join(prefix, libdir,
"python" + get_python_version())
if standard_lib:
return libpython
else:
return os.path.join(libpython, "site-packages")
elif os.name == "nt":
if standard_lib:
return os.path.join(prefix, "Lib")
else:
return os.path.join(prefix, "Lib", "site-packages")
else:
raise DistutilsPlatformError(
"I don't know where Python installs its library "
"on platform '%s'" % os.name)
def customize_compiler(compiler):
"""Do any platform-specific customization of a CCompiler instance.
Mainly needed on Unix, so we can plug in the information that
varies across Unices and is stored in Python's Makefile.
"""
if compiler.compiler_type == "unix":
if sys.platform == "darwin":
# Perform first-time customization of compiler-related
# config vars on OS X now that we know we need a compiler.
# This is primarily to support Pythons from binary
# installers. The kind and paths to build tools on
# the user system may vary significantly from the system
# that Python itself was built on. Also the user OS
# version and build tools may not support the same set
# of CPU architectures for universal builds.
global _config_vars
# Use get_config_var() to ensure _config_vars is initialized.
if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
import _osx_support
_osx_support.customize_compiler(_config_vars)
_config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
(cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
get_config_vars('CC', 'CXX', 'CFLAGS',
'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
if 'CC' in os.environ:
newcc = os.environ['CC']
if (sys.platform == 'darwin'
and 'LDSHARED' not in os.environ
and ldshared.startswith(cc)):
# On OS X, if CC is overridden, use that as the default
# command for LDSHARED as well
ldshared = newcc + ldshared[len(cc):]
cc = newcc
if 'CXX' in os.environ:
cxx = os.environ['CXX']
if 'LDSHARED' in os.environ:
ldshared = os.environ['LDSHARED']
if 'CPP' in os.environ:
cpp = os.environ['CPP']
else:
cpp = cc + " -E" # not always
if 'LDFLAGS' in os.environ:
ldshared = ldshared + ' ' + os.environ['LDFLAGS']
if 'CFLAGS' in os.environ:
cflags = cflags + ' ' + os.environ['CFLAGS']
ldshared = ldshared + ' ' + os.environ['CFLAGS']
if 'CPPFLAGS' in os.environ:
cpp = cpp + ' ' + os.environ['CPPFLAGS']
cflags = cflags + ' ' + os.environ['CPPFLAGS']
ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
if 'AR' in os.environ:
ar = os.environ['AR']
if 'ARFLAGS' in os.environ:
archiver = ar + ' ' + os.environ['ARFLAGS']
else:
archiver = ar + ' ' + ar_flags
cc_cmd = cc + ' ' + cflags
compiler.set_executables(
preprocessor=cpp,
compiler=cc_cmd,
compiler_so=cc_cmd + ' ' + ccshared,
compiler_cxx=cxx,
linker_so=ldshared,
linker_exe=cc,
archiver=archiver)
compiler.shared_lib_extension = shlib_suffix
def get_config_h_filename():
"""Return full pathname of installed pyconfig.h file."""
if python_build:
if os.name == "nt":
inc_dir = os.path.join(_sys_home or project_base, "PC")
else:
inc_dir = _sys_home or project_base
else:
inc_dir = get_python_inc(plat_specific=1)
return os.path.join(inc_dir, 'pyconfig.h')
def get_makefile_filename():
"""Return full pathname of installed Makefile from the Python build."""
if python_build:
return os.path.join(_sys_home or project_base, "Makefile")
lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
config_file = 'config-{}{}'.format(get_python_version(), build_flags)
if hasattr(sys.implementation, '_multiarch'):
config_file += '-%s' % sys.implementation._multiarch
return os.path.join(lib_dir, config_file, 'Makefile')
def parse_config_h(fp, g=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if g is None:
g = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
#
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try: v = int(v)
except ValueError: pass
g[n] = v
else:
m = undef_rx.match(line)
if m:
g[m.group(1)] = 0
return g
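# Worked example (assumed input lines): given a pyconfig.h containing
#
#   #define HAVE_FORK 1
#   /* #undef HAVE_KQUEUE */
#
# parse_config_h() yields {'HAVE_FORK': 1, 'HAVE_KQUEUE': 0}.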
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
def parse_makefile(fn, g=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
from distutils.text_file import TextFile
fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
if g is None:
g = {}
done = {}
notdone = {}
while True:
line = fp.readline()
if line is None: # eof
break
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
# do variable interpolation here
while notdone:
for name in list(notdone):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if name.startswith('PY_') and name[3:] in renamed_variables:
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try: value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
del notdone[name]
if name.startswith('PY_') \
and name[3:] in renamed_variables:
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference; just drop it since we can't deal
del notdone[name]
fp.close()
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
g.update(done)
return g
def expand_makefile_vars(s, vars):
"""Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
'string' according to 'vars' (a dictionary mapping variable names to
values). Variables not present in 'vars' are silently expanded to the
empty string. The variable values in 'vars' should not contain further
variable expansions; if 'vars' is the output of 'parse_makefile()',
you're fine. Returns a variable-expanded version of 's'.
"""
# This algorithm does multiple expansion, so if vars['foo'] contains
# "${bar}", it will expand ${foo} to ${bar}, and then expand
# ${bar}... and so forth. This is fine as long as 'vars' comes from
# 'parse_makefile()', which takes care of such expansions eagerly,
# according to make's variable expansion semantics.
while True:
m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
if m:
(beg, end) = m.span()
            s = s[0:beg] + vars.get(m.group(1), '') + s[end:]
else:
break
return s
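# Minimal example (inputs assumed): both "$(...)" and "${...}" forms are
# expanded, and unknown names become the empty string:
#
#   expand_makefile_vars("$(CC) ${CFLAGS} -c", {"CC": "gcc", "CFLAGS": "-O2"})
#   # -> "gcc -O2 -c"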
_config_vars = None
def _init_posix():
"""Initialize the module as appropriate for POSIX systems."""
# _sysconfigdata is generated at build time, see the sysconfig module
name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
'_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
abi=sys.abiflags,
platform=sys.platform,
multiarch=getattr(sys.implementation, '_multiarch', ''),
))
try:
_temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
except ImportError:
# Python 3.5 and pypy 7.3.1
_temp = __import__(
'_sysconfigdata', globals(), locals(), ['build_time_vars'], 0)
build_time_vars = _temp.build_time_vars
global _config_vars
_config_vars = {}
_config_vars.update(build_time_vars)
def _init_nt():
"""Initialize the module as appropriate for NT"""
g = {}
# set basic install directories
g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
# XXX hmmm.. a normal install puts include files here
g['INCLUDEPY'] = get_python_inc(plat_specific=0)
g['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
g['EXE'] = ".exe"
g['VERSION'] = get_python_version().replace(".", "")
g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
global _config_vars
_config_vars = g
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform. Generally this includes
everything needed to build extensions and install both pure modules and
extensions. On Unix, this means every variable defined in Python's
installed Makefile; on Windows it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _config_vars
if _config_vars is None:
func = globals().get("_init_" + os.name)
if func:
func()
else:
_config_vars = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# Distutils.
_config_vars['prefix'] = PREFIX
_config_vars['exec_prefix'] = EXEC_PREFIX
if not IS_PYPY:
# For backward compatibility, see issue19555
SO = _config_vars.get('EXT_SUFFIX')
if SO is not None:
_config_vars['SO'] = SO
# Always convert srcdir to an absolute path
srcdir = _config_vars.get('srcdir', project_base)
if os.name == 'posix':
if python_build:
# If srcdir is a relative path (typically '.' or '..')
# then it should be interpreted relative to the directory
# containing Makefile.
base = os.path.dirname(get_makefile_filename())
srcdir = os.path.join(base, srcdir)
else:
# srcdir is not meaningful since the installation is
# spread about the filesystem. We choose the
# directory containing the Makefile since we know it
# exists.
srcdir = os.path.dirname(get_makefile_filename())
_config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if python_build and os.name == "posix":
base = project_base
if (not os.path.isabs(_config_vars['srcdir']) and
base != os.getcwd()):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _config_vars['srcdir'])
_config_vars['srcdir'] = os.path.normpath(srcdir)
# OS X platforms require special customization to handle
# multi-architecture, multi-os-version installers
if sys.platform == 'darwin':
import _osx_support
_osx_support.customize_config_vars(_config_vars)
if args:
vals = []
for name in args:
vals.append(_config_vars.get(name))
return vals
else:
return _config_vars
def get_config_var(name):
"""Return the value of a single variable using the dictionary
returned by 'get_config_vars()'. Equivalent to
get_config_vars().get(name)
"""
if name == 'SO':
import warnings
warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
return get_config_vars().get(name)
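# Example lookups (hedged; actual values depend on the interpreter build):
#   get_config_vars('prefix', 'exec_prefix')  # -> e.g. ['/usr', '/usr']
#   get_config_var('EXT_SUFFIX')              # -> e.g. '.cpython-38-x86_64-linux-gnu.so'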
|
Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/sysconfig.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/sysconfig.py",
"repo_id": "Django-locallibrary",
"token_count": 9702
}
| 34 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import collections
import itertools
import re
from ._structures import Infinity
__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
_Version = collections.namedtuple(
"_Version", ["epoch", "release", "dev", "pre", "post", "local"]
)
def parse(version):
"""
Parse the given version string and return either a :class:`Version` object
or a :class:`LegacyVersion` object depending on if the given version is
a valid PEP 440 version or a legacy version.
"""
try:
return Version(version)
except InvalidVersion:
return LegacyVersion(version)
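# Usage sketch: parse() never raises; anything that is not valid PEP 440
# falls back to LegacyVersion.
#   parse("1.0.post1")     # -> <Version('1.0.post1')>
#   parse("french toast")  # -> <LegacyVersion('french toast')>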
class InvalidVersion(ValueError):
"""
An invalid version was found, users should refer to PEP 440.
"""
class _BaseVersion(object):
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, _BaseVersion):
return NotImplemented
return method(self._key, other._key)
class LegacyVersion(_BaseVersion):
def __init__(self, version):
self._version = str(version)
self._key = _legacy_cmpkey(self._version)
def __str__(self):
return self._version
def __repr__(self):
return "<LegacyVersion({0})>".format(repr(str(self)))
@property
def public(self):
return self._version
@property
def base_version(self):
return self._version
@property
def epoch(self):
return -1
@property
def release(self):
return None
@property
def pre(self):
return None
@property
def post(self):
return None
@property
def dev(self):
return None
@property
def local(self):
return None
@property
def is_prerelease(self):
return False
@property
def is_postrelease(self):
return False
@property
def is_devrelease(self):
return False
_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
_legacy_version_replacement_map = {
"pre": "c",
"preview": "c",
"-": "final-",
"rc": "c",
"dev": "@",
}
def _parse_version_parts(s):
for part in _legacy_version_component_re.split(s):
part = _legacy_version_replacement_map.get(part, part)
if not part or part == ".":
continue
if part[:1] in "0123456789":
# pad for numeric comparison
yield part.zfill(8)
else:
yield "*" + part
# ensure that alpha/beta/candidate are before final
yield "*final"
def _legacy_cmpkey(version):
    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
    # greater than or equal to 0. This will effectively put the LegacyVersion,
    # which uses the de facto standard originally implemented by setuptools,
    # before all PEP 440 versions.
    epoch = -1
    # This scheme is taken from pkg_resources.parse_version of setuptools prior
    # to its adoption of the packaging library.
parts = []
for part in _parse_version_parts(version.lower()):
if part.startswith("*"):
# remove "-" before a prerelease tag
if part < "*final":
while parts and parts[-1] == "*final-":
parts.pop()
# remove trailing zeros from each series of numeric parts
while parts and parts[-1] == "00000000":
parts.pop()
parts.append(part)
parts = tuple(parts)
return epoch, parts
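# Illustrative keys produced by the helpers above (hedged examples):
#   _legacy_cmpkey("1.0")    -> (-1, ('00000001', '*final'))
#   _legacy_cmpkey("1.0dev") -> (-1, ('00000001', '*@', '*final'))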
# Deliberately not anchored to the start and end of the string, to make it
# easier for 3rd party code to reuse
VERSION_PATTERN = r"""
v?
(?:
(?:(?P<epoch>[0-9]+)!)? # epoch
(?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
(?P<pre> # pre-release
[-_\.]?
(?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
[-_\.]?
(?P<pre_n>[0-9]+)?
)?
(?P<post> # post release
(?:-(?P<post_n1>[0-9]+))
|
(?:
[-_\.]?
(?P<post_l>post|rev|r)
[-_\.]?
(?P<post_n2>[0-9]+)?
)
)?
(?P<dev> # dev release
[-_\.]?
(?P<dev_l>dev)
[-_\.]?
(?P<dev_n>[0-9]+)?
)?
)
(?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
"""
class Version(_BaseVersion):
_regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
def __init__(self, version):
# Validate the version and parse it into pieces
match = self._regex.search(version)
if not match:
raise InvalidVersion("Invalid version: '{0}'".format(version))
# Store the parsed out pieces of the version
self._version = _Version(
epoch=int(match.group("epoch")) if match.group("epoch") else 0,
release=tuple(int(i) for i in match.group("release").split(".")),
pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
post=_parse_letter_version(
match.group("post_l"), match.group("post_n1") or match.group("post_n2")
),
dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
local=_parse_local_version(match.group("local")),
)
# Generate a key which will be used for sorting
self._key = _cmpkey(
self._version.epoch,
self._version.release,
self._version.pre,
self._version.post,
self._version.dev,
self._version.local,
)
def __repr__(self):
return "<Version({0})>".format(repr(str(self)))
def __str__(self):
parts = []
# Epoch
if self.epoch != 0:
parts.append("{0}!".format(self.epoch))
# Release segment
parts.append(".".join(str(x) for x in self.release))
# Pre-release
if self.pre is not None:
parts.append("".join(str(x) for x in self.pre))
# Post-release
if self.post is not None:
parts.append(".post{0}".format(self.post))
# Development release
if self.dev is not None:
parts.append(".dev{0}".format(self.dev))
# Local version segment
if self.local is not None:
parts.append("+{0}".format(self.local))
return "".join(parts)
@property
def epoch(self):
return self._version.epoch
@property
def release(self):
return self._version.release
@property
def pre(self):
return self._version.pre
@property
def post(self):
return self._version.post[1] if self._version.post else None
@property
def dev(self):
return self._version.dev[1] if self._version.dev else None
@property
def local(self):
if self._version.local:
return ".".join(str(x) for x in self._version.local)
else:
return None
@property
def public(self):
return str(self).split("+", 1)[0]
@property
def base_version(self):
parts = []
# Epoch
if self.epoch != 0:
parts.append("{0}!".format(self.epoch))
# Release segment
parts.append(".".join(str(x) for x in self.release))
return "".join(parts)
@property
def is_prerelease(self):
return self.dev is not None or self.pre is not None
@property
def is_postrelease(self):
return self.post is not None
@property
def is_devrelease(self):
return self.dev is not None
def _parse_letter_version(letter, number):
if letter:
# We consider there to be an implicit 0 in a pre-release if there is
# not a numeral associated with it.
if number is None:
number = 0
# We normalize any letters to their lower case form
letter = letter.lower()
# We consider some words to be alternate spellings of other words and
# in those cases we want to normalize the spellings to our preferred
# spelling.
if letter == "alpha":
letter = "a"
elif letter == "beta":
letter = "b"
elif letter in ["c", "pre", "preview"]:
letter = "rc"
elif letter in ["rev", "r"]:
letter = "post"
return letter, int(number)
if not letter and number:
# We assume if we are given a number, but we are not given a letter
# then this is using the implicit post release syntax (e.g. 1.0-1)
letter = "post"
return letter, int(number)
_local_version_separators = re.compile(r"[\._-]")
def _parse_local_version(local):
"""
Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
"""
if local is not None:
return tuple(
part.lower() if not part.isdigit() else int(part)
for part in _local_version_separators.split(local)
)
def _cmpkey(epoch, release, pre, post, dev, local):
# When we compare a release version, we want to compare it with all of the
# trailing zeros removed. So we'll use a reverse the list, drop all the now
# leading zeros until we come to something non zero, then take the rest
# re-reverse it back into the correct order and make it a tuple and use
# that for our sorting key.
release = tuple(
reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
)
# We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
# We'll do this by abusing the pre segment, but we _only_ want to do this
# if there is not a pre or a post segment. If we have one of those then
# the normal sorting rules will handle this case correctly.
if pre is None and post is None and dev is not None:
pre = -Infinity
# Versions without a pre-release (except as noted above) should sort after
# those with one.
elif pre is None:
pre = Infinity
# Versions without a post segment should sort before those with one.
if post is None:
post = -Infinity
# Versions without a development segment should sort after those with one.
if dev is None:
dev = Infinity
if local is None:
# Versions without a local segment should sort before those with one.
local = -Infinity
else:
# Versions with a local segment need that segment parsed to implement
# the sorting rules in PEP440.
# - Alpha numeric segments sort before numeric segments
# - Alpha numeric segments sort lexicographically
# - Numeric segments sort numerically
# - Shorter versions sort before longer versions when the prefixes
# match exactly
local = tuple((i, "") if isinstance(i, int) else (-Infinity, i) for i in local)
return epoch, release, pre, post, dev, local
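# Sanity checks on the resulting ordering (hedged illustration):
#   Version("1.0.dev0") < Version("1.0a1") < Version("1.0") < Version("1.0.post1")
#   Version("1.0") < Version("1.0+local")  # local segments sort after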
|
Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/version.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_vendor/packaging/version.py",
"repo_id": "Django-locallibrary",
"token_count": 5210
}
| 35 |
from distutils import log
import distutils.command.install_scripts as orig
import os
import sys
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
orig.install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
import setuptools.command.easy_install as ei
self.run_command("egg_info")
if self.distribution.scripts:
orig.install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
exec_param = getattr(bs_cmd, 'executable', None)
try:
bw_cmd = self.get_finalized_command("bdist_wininst")
is_wininst = getattr(bw_cmd, '_is_running', False)
except ImportError:
is_wininst = False
writer = ei.ScriptWriter
if is_wininst:
exec_param = "python.exe"
writer = ei.WindowsScriptWriter
if exec_param == sys.executable:
# In case the path to the Python executable contains a space, wrap
# it so it's not split up.
exec_param = [exec_param]
# resolve the writer to the environment
writer = writer.best()
cmd = writer.command_spec_class.best().from_param(exec_param)
for args in writer.get_args(dist, cmd.as_header()):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
from setuptools.command.easy_install import chmod, current_umask
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
f = open(target, "w" + mode)
f.write(contents)
f.close()
chmod(target, 0o777 - mask)
|
Django-locallibrary/env/Lib/site-packages/setuptools/command/install_scripts.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/install_scripts.py",
"repo_id": "Django-locallibrary",
"token_count": 1104
}
| 36 |
"""setuptools.errors
Provides exceptions used by setuptools modules.
"""
from distutils.errors import DistutilsError
class RemovedCommandError(DistutilsError, RuntimeError):
"""Error used for commands that have been removed in setuptools.
Since ``setuptools`` is built on ``distutils``, simply removing a command
from ``setuptools`` will make the behavior fall back to ``distutils``; this
error is raised if a command exists in ``distutils`` but has been actively
removed in ``setuptools``.
"""
|
Django-locallibrary/env/Lib/site-packages/setuptools/errors.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/errors.py",
"repo_id": "Django-locallibrary",
"token_count": 146
}
| 37 |
__all__ = []
__metaclass__ = type
try:
# Python >=3.2
from tempfile import TemporaryDirectory
except ImportError:
import shutil
import tempfile
class TemporaryDirectory:
"""
Very simple temporary directory context manager.
Will try to delete afterward, but will also ignore OS and similar
errors on deletion.
"""
def __init__(self, **kwargs):
self.name = None # Handle mkdtemp raising an exception
self.name = tempfile.mkdtemp(**kwargs)
def __enter__(self):
return self.name
def __exit__(self, exctype, excvalue, exctrace):
try:
shutil.rmtree(self.name, True)
except OSError: # removal errors are not the only possible
pass
self.name = None
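# Usage sketch (works with both the stdlib and the fallback class):
#   with TemporaryDirectory() as tmp:
#       ...  # create files under ``tmp``; cleanup is best-effort on exit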
|
Django-locallibrary/env/Lib/site-packages/setuptools/py31compat.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/py31compat.py",
"repo_id": "Django-locallibrary",
"token_count": 365
}
| 38 |
//+------------------------------------------------------------------+
//| TestScript.mq5 |
//| Copyright 2022, MetaQuotes Ltd. |
//| https://www.mql5.com |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, MetaQuotes Ltd."
#property link "https://www.mql5.com"
#property version "1.00"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#include "LogisticRegressionLib.mqh";
CLogisticRegression *log_reg;
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
//---
string file_name = "Apple Dataset.csv";
string delimiter = ",";
log_reg = new CLogisticRegression();
Print("APPLE");
log_reg.Init(file_name,delimiter,2,"3,5,6",0.7);
double accuracy = 0;
log_reg.LogisticRegressionMain(accuracy);
printf("Tested model accuracy =%.4f",accuracy);
delete log_reg;
//---
file_name = "Netflix Dataset.csv";
Print("NETFLIX");
log_reg = new CLogisticRegression();
log_reg.Init(file_name,delimiter,2,"4,5,6",0.7);
log_reg.LogisticRegressionMain(accuracy);
printf("Tested model accuracy =%.4f",accuracy);
delete log_reg;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
|
LogisticRegression-MQL5-and-python/LogisticRegression mql5/TestScript.mq5/0
|
{
"file_path": "LogisticRegression-MQL5-and-python/LogisticRegression mql5/TestScript.mq5",
"repo_id": "LogisticRegression-MQL5-and-python",
"token_count": 746
}
| 39 |
//+------------------------------------------------------------------+
//| AdaBoost.mqh |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
#include <MALE5\MatrixExtend.mqh>
#include <MALE5\Naive Bayes\Naive Bayes.mqh>
//+------------------------------------------------------------------+
//| Model class |
//+------------------------------------------------------------------+
#include <MALE5\Decision Tree\tree.mqh>
#include <MALE5\Linear Models\Logistic Regression.mqh>
//+------------------------------------------------------------------+
//| AdaBoost class for Decision Tree |
//+------------------------------------------------------------------+
namespace DecisionTree
{
class AdaBoost
{
protected:
vector m_alphas;
vector classes_in_data;
int m_random_state;
bool m_boostrapping;
uint m_min_split, m_max_depth;
CDecisionTreeClassifier *weak_learners[]; //store weak_learner pointers for memory allocation tracking
CDecisionTreeClassifier *weak_learner;
uint m_estimators;
public:
AdaBoost(uint min_split, uint max_split, uint n_estimators=50, int random_state=42, bool bootstrapping=true);
~AdaBoost(void);
void fit(matrix &x, vector &y);
int predict(vector &x);
vector predict(matrix &x);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
AdaBoost::AdaBoost(uint min_split, uint max_split, uint n_estimators=50, int random_state=42, bool bootstrapping=true)
:m_estimators(n_estimators),
m_random_state(random_state),
m_boostrapping(bootstrapping),
m_min_split(min_split),
m_max_depth(max_split)
{
ArrayResize(weak_learners, m_estimators); //Resizing the array to retain the number of base weak_learners
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
AdaBoost::~AdaBoost(void)
{
for (uint i=0; i<m_estimators; i++) //Delete the forest | all trees
if (CheckPointer(weak_learners[i]) != POINTER_INVALID)
delete(weak_learners[i]);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void AdaBoost::fit(matrix &x,vector &y)
{
m_alphas.Resize(m_estimators);
   classes_in_data = MatrixExtend::Unique(y); //Find the unique classes in the target variable
ulong m = x.Rows(), n = x.Cols();
vector weights(m); weights = weights.Fill(1.0) / m; //Initialize instance weights
vector preds(m);
vector misclassified(m);
//---
matrix data = MatrixExtend::concatenate(x, y);
matrix temp_data;
matrix x_subset;
vector y_subset;
double error = 0;
for (uint i=0; i<m_estimators; i++)
{
temp_data = data;
MatrixExtend::Randomize(temp_data, this.m_random_state, this.m_boostrapping);
if (!MatrixExtend::XandYSplitMatrices(temp_data, x_subset, y_subset)) //Get randomized subsets
{
ArrayRemove(weak_learners,i,1); //Delete the invalid weak_learner
printf("%s %d Failed to split data",__FUNCTION__,__LINE__);
continue;
}
//---
weak_learner = new CDecisionTreeClassifier(this.m_min_split, m_max_depth);
      weak_learner.fit(x_subset, y_subset); //fitting the randomized data to the i-th weak_learner
preds = weak_learner.predict(x_subset); //making predictions for the i-th weak_learner
for (ulong j=0; j<m; j++)
misclassified[j] = (preds[j] != y_subset[j]);
error = (misclassified * weights).Sum() / (double)weights.Sum();
//--- Calculate the weight of a weak learner in the final weak_learner
double alpha = 0.5 * log((1-error) / (error + 1e-10));
//--- Update instance weights
weights *= exp(-alpha * y_subset * preds);
weights /= weights.Sum();
//--- save a weak learner and its weight
this.m_alphas[i] = alpha;
this.weak_learners[i] = weak_learner;
printf("Building Estimator [%d/%d] Accuracy Score %.3f",i+1,m_estimators,Metrics::accuracy_score(y_subset,preds));
}
}
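//--- Hedged note: alpha above is the classic AdaBoost estimator weight,
//--- alpha = 0.5*ln((1-err)/err). For example err = 0.30 gives
//--- alpha = 0.5*ln(0.7/0.3) ~= 0.424, so more accurate learners get louder votes.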
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int AdaBoost::predict(vector &x)
{
// Combine weak learners using weighted sum
vector weak_preds(m_estimators),
final_preds(m_estimators);
for (uint i=0; i<this.m_estimators; i++)
weak_preds[i] = this.weak_learners[i].predict(x);
   return (int)weak_preds[(this.m_alphas*weak_preds).ArgMax()]; //weighted vote: prediction of the learner with the largest alpha-weighted score
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector AdaBoost::predict(matrix &x)
{
vector ret_v(x.Rows());
for (ulong i=0; i<ret_v.Size(); i++)
ret_v[i] = this.predict(x.Row(i));
return ret_v;
}
}
//+------------------------------------------------------------------+
//| Adaboost for Logistic Regression |
//+------------------------------------------------------------------+
namespace LogisticRegression
{
class AdaBoost
{
protected:
vector m_alphas;
vector classes_in_data;
int m_random_state;
bool m_boostrapping;
uint m_min_split, m_max_depth;
CLogisticRegression *weak_learners[]; //store weak_learner pointers for memory allocation tracking
CLogisticRegression *weak_learner;
uint m_estimators;
public:
AdaBoost(uint n_estimators=50, int random_state=42, bool bootstrapping=true);
~AdaBoost(void);
void fit(matrix &x, vector &y);
int predict(vector &x);
vector predict(matrix &x);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
AdaBoost::AdaBoost(uint n_estimators=50, int random_state=42, bool bootstrapping=true)
:m_estimators(n_estimators),
m_random_state(random_state),
m_boostrapping(bootstrapping)
{
ArrayResize(weak_learners, m_estimators); //Resizing the array to retain the number of base weak_learners
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
AdaBoost::~AdaBoost(void)
{
for (uint i=0; i<m_estimators; i++) //Delete the forest | all trees
if (CheckPointer(weak_learners[i]) != POINTER_INVALID)
delete(weak_learners[i]);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void AdaBoost::fit(matrix &x,vector &y)
{
m_alphas.Resize(m_estimators);
   classes_in_data = MatrixExtend::Unique(y); //Find the unique classes in the target variable
ulong m = x.Rows(), n = x.Cols();
vector weights(m); weights = weights.Fill(1.0) / m; //Initialize instance weights
vector preds(m);
vector misclassified(m);
//---
matrix data = MatrixExtend::concatenate(x, y);
matrix temp_data;
matrix x_subset;
vector y_subset;
double error = 0;
for (uint i=0; i<m_estimators; i++)
{
temp_data = data;
MatrixExtend::Randomize(temp_data, this.m_random_state, this.m_boostrapping);
if (!MatrixExtend::XandYSplitMatrices(temp_data, x_subset, y_subset)) //Get randomized subsets
{
ArrayRemove(weak_learners,i,1); //Delete the invalid weak_learner
printf("%s %d Failed to split data",__FUNCTION__,__LINE__);
continue;
}
//---
weak_learner = new CLogisticRegression();
      weak_learner.fit(x_subset, y_subset); //fitting the randomized data to the i-th weak_learner
preds = weak_learner.predict(x_subset); //making predictions for the i-th weak_learner
for (ulong j=0; j<m; j++)
misclassified[j] = (preds[j] != y_subset[j]);
error = (misclassified * weights).Sum() / (double)weights.Sum();
//--- Calculate the weight of a weak learner in the final weak_learner
double alpha = 0.5 * log((1-error) / (error + 1e-10));
//--- Update instance weights
weights *= exp(-alpha * y_subset * preds);
weights /= weights.Sum();
//--- save a weak learner and its weight
this.m_alphas[i] = alpha;
this.weak_learners[i] = weak_learner;
printf("Building Estimator [%d/%d] Accuracy Score %.3f",i+1,m_estimators,Metrics::accuracy_score(y_subset,preds));
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
int AdaBoost::predict(vector &x)
{
// Combine weak learners using weighted sum
vector weak_preds(m_estimators),
final_preds(m_estimators);
for (uint i=0; i<this.m_estimators; i++)
weak_preds[i] = this.weak_learners[i].predict(x);
   return (int)weak_preds[(this.m_alphas*weak_preds).ArgMax()]; //weighted vote: prediction of the learner with the largest alpha-weighted score
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector AdaBoost::predict(matrix &x)
{
vector ret_v(x.Rows());
for (ulong i=0; i<ret_v.Size(); i++)
ret_v[i] = this.predict(x.Row(i));
return ret_v;
}
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
|
MALE5/Ensemble/AdaBoost.mqh/0
|
{
"file_path": "MALE5/Ensemble/AdaBoost.mqh",
"repo_id": "MALE5",
"token_count": 5243
}
| 40 |
## Kohonen Maps (Self-Organizing Maps)
This documentation explains the `CKohonenMaps` class in MQL5, which implements **Kohonen Maps**, also known as **Self-Organizing Maps (SOM)**, for clustering and visualization tasks.
**I. Kohonen Maps Theory:**
Kohonen Maps are a type of **artificial neural network** used for unsupervised learning, specifically for **clustering** and **visualization** of high-dimensional data. They work by:
1. **Initializing a grid of neurons:** Each neuron is associated with a weight vector representing its position in the high-dimensional space.
2. **Iteratively presenting data points:**
   * For each data point:
     * Find the **winning neuron** (closest neuron in terms of distance) based on the weight vectors.
     * Update the weights of the winning neuron and its **neighborhood** towards the data point, with decreasing influence as the distance from the winning neuron increases.
3. **Convergence:** After a certain number of iterations (epochs), the weight vectors of the neurons become organized in a way that reflects the underlying structure of the data.
**II. CKohonenMaps Class:**
The `CKohonenMaps` class provides functionalities for implementing Kohonen Maps in MQL5:
**Public Functions:**
* **CKohonenMaps(uint clusters=2, double alpha=0.01, uint epochs=100, int random_state=42)** Constructor, allows setting hyperparameters:
* `clusters`: Number of clusters (default: 2).
* `alpha`: Learning rate (default: 0.01).
* `epochs`: Number of training epochs (default: 100).
* `random_state`: Random seed for reproducibility (default: 42).
* `~CKohonenMaps(void)` Destructor.
* `void fit(const matrix &x)` Trains the model on the provided data (`x`).
* `int predict(const vector &x)` Predicts the cluster label for a single data point (`x`).
* `vector predict(const matrix &x)` Predicts cluster labels for multiple data points (`x`).
**Internal Functions:**
* `Euclidean_distance(const vector &v1, const vector &v2)`: Calculates the Euclidean distance between two vectors.
* `CalcTimeElapsed(double seconds)`: Converts seconds to a human-readable format (not relevant for core functionality).
**III. Additional Notes:**
* The class internally uses the `CPlots` class (not documented here) for potential visualization purposes.
* The `c_matrix` and `w_matrix` member variables store the cluster assignments and weight matrix, respectively.
* Choosing the appropriate number of clusters is crucial for the quality of the results.
By understanding the theoretical foundation and functionalities of the `CKohonenMaps` class, MQL5 users can leverage Kohonen Maps for:
* **Clustering:** Group similar data points together based on their features.
* **Data visualization:** Project the high-dimensional data onto a lower-dimensional space (e.g., a 2D grid of neurons) for easier visualization, potentially using the `CPlots` class.
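For illustration, here is a minimal usage sketch. The include path and the toy data are assumptions made for the example; the constructor arguments follow the signature documented above.
```mql5
#include <MALE5\Clustering\KohonenMaps.mqh> // assumed include path

void OnStart()
  {
   matrix x_matrix = {{1.0, 2.0}, {1.5, 1.8}, {8.0, 8.0}, {9.0, 11.0}}; // toy feature matrix
   CKohonenMaps *maps = new CKohonenMaps(2, 0.01, 100, 42); // clusters, alpha, epochs, random_state
   maps.fit(x_matrix);                     // train the map
   vector labels = maps.predict(x_matrix); // one cluster label per row
   Print("cluster labels: ", labels);
   delete maps;
  }
```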
## Pattern Recognition Neural Network
This documentation explains the `CPatternNets` class in MQL5, which implements a **feed-forward neural network** for pattern recognition tasks.
**I. Neural Network Theory:**
A feed-forward neural network consists of interconnected layers of **neurons**. Each neuron receives input from the previous layer, applies an **activation function** to transform the signal, and outputs the result to the next layer.
**II. CPatternNets Class:**
The `CPatternNets` class provides functionalities for training and using a feed-forward neural network for pattern recognition in MQL5:
**Public Functions:**
* `CPatternNets(matrix &xmatrix, vector &yvector,vector &HL_NODES, activation ActivationFx, bool SoftMaxLyr=false)` Constructor:
* `xmatrix`: Input data matrix (rows: samples, columns: features).
* `yvector`: Target labels vector (corresponding labels for each sample in `xmatrix`).
* `HL_NODES`: Vector specifying the number of neurons in each hidden layer.
* `ActivationFx`: Activation function (enum specifying the type of activation function).
* `SoftMaxLyr`: Flag indicating whether to use a SoftMax layer in the output (default: False).
* `~CPatternNets(void)` Destructor.
* `int PatternNetFF(vector &in_vector)` Performs a forward pass on the network with a single input vector and returns the predicted class label.
* `vector PatternNetFF(matrix &xmatrix)` Performs a forward pass on the network for all rows in the input matrix and returns a vector of predicted class labels.
**Internal Functions:**
* `SoftMaxLayerFX(matrix<double> &mat)`: Applies the SoftMax function to a matrix (used for the output layer if `SoftMaxLyr` is True).
**III. Class Functionality:**
1. **Initialization:**
* The constructor validates data dimensions and parses user-defined hyperparameters.
* The network architecture (number of layers and neurons) is determined based on the provided configuration.
* Weights (connections between neurons) and biases (individual offsets for each neuron) are randomly initialized.
2. **Forward Pass:**
* The provided input vector is fed into the first layer.
* Each layer performs the following steps:
* Calculates the weighted sum of the previous layer's outputs.
* Adds the bias term to the weighted sum.
* Applies the chosen activation function to the result.
* This process continues through all layers until the final output layer is reached.
3. **SoftMax Layer (Optional)**
* If the `SoftMaxLyr` flag is True, the output layer uses the SoftMax function to ensure the output values sum to 1 and represent class probabilities.
4. **Prediction:**
* For single-sample prediction (`PatternNetFF(vector &in_vector)`), the class label with the **highest output value** is returned.
* For batch prediction (`PatternNetFF(matrix &xmatrix)`), a vector containing the predicted class label for each sample in the input matrix is returned.
**IV. Additional Notes:**
* The class provides several debug statements (disabled by default) to print intermediate calculations for debugging purposes.
* The code uses helper functions from the `MatrixExtend` class (not documented here) for matrix and vector operations.
* Choosing the appropriate network architecture, activation function, and learning approach (not implemented in this class) is crucial for optimal performance on specific tasks.
By understanding the theoretical foundation and functionalities of the `CPatternNets` class, MQL5 users can leverage neural networks for various pattern recognition tasks, including:
* **Classification:** Classifying data points into predefined categories based on their features.
* **Anomaly detection:** Identifying data points that deviate significantly from the expected patterns.
* **Feature learning:** Extracting hidden patterns or representations from the data.
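A hedged usage sketch of the class above (the include path and toy data are assumptions; `AF_RELU_` is the activation enum value named elsewhere in this documentation):
```mql5
#include <MALE5\Neural Networks\Pattern Nets.mqh> // assumed include path

void OnStart()
  {
   matrix x = {{0, 0}, {0, 1}, {1, 0}, {1, 1}}; // toy samples
   vector y = {0, 1, 1, 0};                     // class labels
   vector hl_nodes = {4};                       // one hidden layer, 4 neurons
   CPatternNets *net = new CPatternNets(x, y, hl_nodes, AF_RELU_, false);
   vector sample = {1, 0};
   int label = net.PatternNetFF(sample);        // forward pass -> predicted class
   // note: weights are randomly initialized; this class does not train, per the notes above
   Print("predicted class: ", label);
   delete net;
  }
```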
## Regression Neural Network
This documentation explains the `CRegressorNets` class in MQL5, which implements a **Multi-Layer Perceptron (MLP)** for regression tasks.
**I. Regression vs. Classification:**
* **Regression:** Predicts continuous output values.
* **Classification:** Assigns data points to predefined categories.
**II. MLP Neural Network:**
An MLP is a type of **feed-forward neural network** used for supervised learning tasks like regression. It consists of:
* **Input layer:** Receives the input data.
* **Hidden layers:** Process and transform the information.
* **Output layer:** Produces the final prediction (continuous value in regression).
**III. CRegressorNets Class:**
The `CRegressorNets` class provides functionalities for training and using an MLP for regression in MQL5:
**Public Functions:**
* `CRegressorNets(vector &HL_NODES, activation ActivationFX=AF_RELU_)` Constructor:
* `HL_NODES`: Vector specifying the number of neurons in each hidden layer.
* `ActivationFX`: Activation function (enum specifying the type of activation function).
* `~CRegressorNets(void)` Destructor.
* `void fit(matrix &x, vector &y)` Trains the model on the provided data (`x` - features, `y` - target values).
* `double predict(vector &x)` Predicts the output value for a single input vector.
* `vector predict(matrix &x)` Predicts output values for all rows in the input matrix.
**Internal Functions (not directly accessible)**
* `CalcTimeElapsed(double seconds)`: Calculates and returns a string representing the elapsed time in a human-readable format (not relevant for core functionality).
* `RegressorNetsBackProp(matrix& x, vector &y, uint epochs, double alpha, loss LossFx=LOSS_MSE_, optimizer OPTIMIZER=OPTIMIZER_ADAM)`: Performs backpropagation for training (details not provided but likely involve calculating gradients and updating weights and biases).
* Optimizer functions (e.g., `AdamOptimizerW`, `AdamOptimizerB`) Implement specific optimization algorithms like Adam for updating weights and biases during training.
**Other Class Members:**
* `mlp_struct mlp`: Stores information about the network architecture (inputs, hidden layers, and outputs).
* `CTensors*` pointers: Represent tensors holding weights, biases, and other internal calculations (specific implementation likely relies on a custom tensor library).
* `matrix` variables: Used for calculations during training and may hold temporary data (e.g., `W_MATRIX`, `B_MATRIX`).
* `vector` variables: Store network configuration details (e.g., `W_CONFIG`, `HL_CONFIG`).
* `bool isBackProp`: Flag indicating if backpropagation is being performed (private).
* `matrix` variables: Used for storing intermediate results during backpropagation (e.g., `ACTIVATIONS`, `Partial_Derivatives`).
**IV. Additional Notes:**
* The class provides various activation function options and supports different loss functions (e.g., Mean Squared Error, Mean Absolute Error) for selecting the appropriate evaluation metric during training.
* The class implements the Adam optimizer, one of several optimization algorithms used for efficient training of neural networks.
* Detailed implementation of the backpropagation algorithm is not provided but is likely the core functionality for training the network.
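A hedged usage sketch (the include path and toy data are assumptions; `fit` performs the backpropagation training described above):
```mql5
#include <MALE5\Neural Networks\Regressor Nets.mqh> // assumed include path

void OnStart()
  {
   matrix x = {{1}, {2}, {3}, {4}};  // single feature
   vector y = {2.1, 3.9, 6.2, 8.1};  // roughly y = 2x
   vector hl_nodes = {8, 4};         // two hidden layers
   CRegressorNets *reg = new CRegressorNets(hl_nodes, AF_RELU_);
   reg.fit(x, y);                    // trains via backpropagation
   vector preds = reg.predict(x);    // continuous predictions
   Print("predictions: ", preds);
   delete reg;
  }
```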
**Reference**
* [Data Science and Machine Learning (Part 12): Can Self-Training Neural Networks Help You Outsmart the Stock Market?](https://www.mql5.com/en/articles/12209)
* [Data Science and Machine Learning — Neural Network (Part 01): Feed Forward Neural Network demystified](https://www.mql5.com/en/articles/11275)
* [Data Science and Machine Learning — Neural Network (Part 02): Feed forward NN Architectures Design](https://www.mql5.com/en/articles/11334)
|
MALE5/Neural Networks/README.md/0
|
{
"file_path": "MALE5/Neural Networks/README.md",
"repo_id": "MALE5",
"token_count": 2777
}
| 41 |
//+------------------------------------------------------------------+
//| Matrix.mq5 |
//| Copyright 2022, Omega Joctan Ltd. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, MetaQuotes Ltd."
#property link "https://www.mql5.com/en/users/omegajoctan"
#property version "1.00"
#include "MatrixRegression.mqh";
#include "LinearRegressionLib.mqh";
CSimpleMatLinearRegression matlr;
CSimpleLinearRegression lr;
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
//double x[] = {651,762,856,1063,1190,1298,1421,1440,1518}; //stands for sales
//double y[] = {23,26,30,34,43,48,52,57,58}; //money spent on ads
//---
double x[], y[];
string file_name = "NASDAQ_DATA.csv", delimiter = ",";
lr.GetDataToArray(x,file_name,delimiter,1);
lr.GetDataToArray(y,file_name,delimiter,2);
//ArrayPrint(x);
//ArrayPrint(y);
matlr.Init(x,y);
matlr.LinearRegression();
}
//+------------------------------------------------------------------+
|
MatrixRegressionMQL5/MatrixRegTest.mq5/0
|
{
"file_path": "MatrixRegressionMQL5/MatrixRegTest.mq5",
"repo_id": "MatrixRegressionMQL5",
"token_count": 553
}
| 42 |
### About the Project
The following code was used in MQL5 articles about neural networks; for more details, see the links below:
https://www.mql5.com/en/articles/11334
https://www.mql5.com/en/articles/11275
### Support the project
Any thoughts and contributions are appreciated. Buy me a coffee if you'd like to support the effort: https://www.buymeacoffee.com/omegajoctan
### Hire me in MQL5
Commission custom work from me using this link https://www.mql5.com/en/job/new?prefered=omegajoctan for the following project types:
* Support Vector Machine
* Linear Regression
* Logistic Regression
* Neural Networks
* And many more machine learning aspects
|
NeuralNetworks-MQL5/Readme.md/0
|
{
"file_path": "NeuralNetworks-MQL5/Readme.md",
"repo_id": "NeuralNetworks-MQL5",
"token_count": 196
}
| 43 |
{% extends "base_template.html" %}
{% block content %}
<h1>Borrowed Books</h1>
{% if loaned_books %}
<ul>
{% for bookinst in loaned_books %}
<li class="{% if bookinst.is_overdue %}text-danger{% endif %}">
      <a href="{% url 'book-detail' bookinst.book.pk %}">{{ bookinst.book.title }} ({{ bookinst.book.author }})</a> {{ bookinst.due_back }}
</li>
{% endfor %}
</ul>
{% else %}
<p>There are no books borrowed by you.</p>
{% endif %}
{% endblock %}
|
Django-locallibrary/LocalLibrary/catalog/Templates/books_borrowed_by_user.html/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/catalog/Templates/books_borrowed_by_user.html",
"repo_id": "Django-locallibrary",
"token_count": 314
}
| 0 |
from django.apps import AppConfig
class CatalogConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'catalog'
|
Django-locallibrary/LocalLibrary/catalog/apps.py/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/catalog/apps.py",
"repo_id": "Django-locallibrary",
"token_count": 50
}
| 1 |
release: python manage.py migrate
web: gunicorn --pythonpath LocalLibrary LocalLibrary.wsgi
|
Django-locallibrary/Procfile/0
|
{
"file_path": "Django-locallibrary/Procfile",
"repo_id": "Django-locallibrary",
"token_count": 24
}
| 2 |
from __future__ import division
import itertools
import sys
from signal import SIGINT, default_int_handler, signal
from pip._vendor import six
from pip._vendor.progress.bar import Bar, FillingCirclesBar, IncrementalBar
from pip._vendor.progress.spinner import Spinner
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.logging import get_indentation
from pip._internal.utils.misc import format_size
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Dict, List
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
def _select_progress_class(preferred, fallback):
# type: (Bar, Bar) -> Bar
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", six.text_type()),
getattr(preferred, "fill", six.text_type()),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
six.text_type().join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar) # type: Any
class InterruptibleMixin(object):
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
"""
Save the original SIGINT handler for later.
"""
# https://github.com/python/mypy/issues/5887
super(InterruptibleMixin, self).__init__( # type: ignore
*args,
**kwargs
)
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
# type: () -> None
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish() # type: ignore
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame): # type: ignore
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class SilentBar(Bar):
def update(self):
# type: () -> None
pass
class BlueEmojiBar(IncrementalBar):
suffix = "%(percent)d%%"
bar_prefix = " "
bar_suffix = " "
phases = (u"\U0001F539", u"\U0001F537", u"\U0001F535") # type: Any
class DownloadProgressMixin(object):
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# https://github.com/python/mypy/issues/5887
super(DownloadProgressMixin, self).__init__( # type: ignore
*args,
**kwargs
)
self.message = (" " * (
get_indentation() + 2
)) + self.message # type: str
@property
def downloaded(self):
# type: () -> str
return format_size(self.index) # type: ignore
@property
def download_speed(self):
# type: () -> str
# Avoid zero division errors...
if self.avg == 0.0: # type: ignore
return "..."
return format_size(1 / self.avg) + "/s" # type: ignore
@property
def pretty_eta(self):
# type: () -> str
if self.eta: # type: ignore
return "eta {}".format(self.eta_td) # type: ignore
return ""
def iter(self, it): # type: ignore
for x in it:
yield x
# B305 is incorrectly raised here
# https://github.com/PyCQA/flake8-bugbear/issues/59
self.next(len(x)) # noqa: B305
self.finish()
class WindowsMixin(object):
def __init__(self, *args, **kwargs):
# type: (List[Any], Dict[Any, Any]) -> None
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor: # type: ignore
self.hide_cursor = False
# https://github.com/python/mypy/issues/5887
super(WindowsMixin, self).__init__(*args, **kwargs) # type: ignore
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file) # type: ignore
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class BaseDownloadProgressBar(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DefaultDownloadProgressBar(BaseDownloadProgressBar,
_BaseBar):
pass
class DownloadSilentBar(BaseDownloadProgressBar, SilentBar):
pass
class DownloadBar(BaseDownloadProgressBar,
Bar):
pass
class DownloadFillingCirclesBar(BaseDownloadProgressBar,
FillingCirclesBar):
pass
class DownloadBlueEmojiProgressBar(BaseDownloadProgressBar,
BlueEmojiBar):
pass
class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
# type: () -> str
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
# type: () -> None
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = ''.join([
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
])
self.writeln(line)
BAR_TYPES = {
"off": (DownloadSilentBar, DownloadSilentBar),
"on": (DefaultDownloadProgressBar, DownloadProgressSpinner),
"ascii": (DownloadBar, DownloadProgressSpinner),
"pretty": (DownloadFillingCirclesBar, DownloadProgressSpinner),
"emoji": (DownloadBlueEmojiProgressBar, DownloadProgressSpinner)
}
def DownloadProgressProvider(progress_bar, max=None): # type: ignore
if max is None or max == 0:
return BAR_TYPES[progress_bar][1]().iter
else:
return BAR_TYPES[progress_bar][0](max=max).iter
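# Usage sketch (hedged): pip wires this up from the --progress-bar option,
# roughly like
#   chunks = DownloadProgressProvider("on", max=total_length)(raw_chunks)
#   for chunk in chunks:
#       ...  # each chunk advances the bar by len(chunk)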
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/progress_bars.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/progress_bars.py",
"repo_id": "Django-locallibrary",
"token_count": 3620
}
| 3 |
from __future__ import absolute_import
import logging
import sys
import textwrap
from collections import OrderedDict
from pip._vendor import pkg_resources
from pip._vendor.packaging.version import parse as parse_version
# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is
# why we ignore the type on this import
from pip._vendor.six.moves import xmlrpc_client # type: ignore
from pip._internal.cli.base_command import Command
from pip._internal.cli.req_command import SessionCommandMixin
from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
from pip._internal.exceptions import CommandError
from pip._internal.models.index import PyPI
from pip._internal.network.xmlrpc import PipXmlrpcTransport
from pip._internal.utils.compat import get_terminal_size
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import get_distribution, write_output
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import List, Dict, Optional
from typing_extensions import TypedDict
TransformedHit = TypedDict(
'TransformedHit',
{'name': str, 'summary': str, 'versions': List[str]},
)
logger = logging.getLogger(__name__)
class SearchCommand(Command, SessionCommandMixin):
"""Search for PyPI packages whose name or summary contains <query>."""
usage = """
%prog [options] <query>"""
ignore_require_venv = True
def add_options(self):
# type: () -> None
self.cmd_opts.add_option(
'-i', '--index',
dest='index',
metavar='URL',
default=PyPI.pypi_url,
help='Base URL of Python Package Index (default %default)')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
# type: (Values, List[str]) -> int
if not args:
raise CommandError('Missing required argument (search query).')
query = args
pypi_hits = self.search(query, options)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
if pypi_hits:
return SUCCESS
return NO_MATCHES_FOUND
def search(self, query, options):
# type: (List[str], Values) -> List[Dict[str, str]]
index_url = options.index
session = self.get_default_session(options)
transport = PipXmlrpcTransport(index_url, session)
pypi = xmlrpc_client.ServerProxy(index_url, transport)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
# type: (List[Dict[str, str]]) -> List[TransformedHit]
"""
The list from pypi is really a list of versions. We want a list of
packages with the list of versions stored inline. This converts the
list from pypi into one we can use.
"""
packages = OrderedDict() # type: OrderedDict[str, TransformedHit]
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
if name not in packages.keys():
packages[name] = {
'name': name,
'summary': summary,
'versions': [version],
}
else:
packages[name]['versions'].append(version)
            # if this is the highest version, keep its summary as the displayed one
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
return list(packages.values())
def print_results(hits, name_column_width=None, terminal_width=None):
# type: (List[TransformedHit], Optional[int], Optional[int]) -> None
if not hits:
return
if name_column_width is None:
name_column_width = max([
len(hit['name']) + len(highest_version(hit.get('versions', ['-'])))
for hit in hits
]) + 4
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
latest = highest_version(hit.get('versions', ['-']))
if terminal_width is not None:
target_width = terminal_width - name_column_width - 5
if target_width > 10:
# wrap and indent summary to fit terminal
summary_lines = textwrap.wrap(summary, target_width)
summary = ('\n' + ' ' * (name_column_width + 3)).join(
summary_lines)
line = '{name_latest:{name_column_width}} - {summary}'.format(
name_latest='{name} ({latest})'.format(**locals()),
**locals())
try:
write_output(line)
if name in installed_packages:
dist = get_distribution(name)
assert dist is not None
with indent_log():
if dist.version == latest:
write_output('INSTALLED: %s (latest)', dist.version)
else:
write_output('INSTALLED: %s', dist.version)
if parse_version(latest).pre:
write_output('LATEST: %s (pre-release; install'
' with "pip install --pre")', latest)
else:
write_output('LATEST: %s', latest)
except UnicodeEncodeError:
pass
def highest_version(versions):
# type: (List[str]) -> str
return max(versions, key=parse_version)
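# e.g. highest_version(["1.0", "1.10", "1.2"]) -> "1.10"
# (PEP 440 ordering via parse_version, not lexicographic string order)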
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/search.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/search.py",
"repo_id": "Django-locallibrary",
"token_count": 2490
}
| 4 |
"""Index interaction code
"""
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/index/__init__.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/index/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 7
}
| 5 |
"""
A module that implements tooling to enable easy warnings about deprecations.
"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import warnings
from pip._vendor.packaging.version import parse
from pip import __version__ as current_version
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Optional
DEPRECATION_MSG_PREFIX = "DEPRECATION: "
class PipDeprecationWarning(Warning):
pass
_original_showwarning = None # type: Any
# Warnings <-> Logging Integration
def _showwarning(message, category, filename, lineno, file=None, line=None):
if file is not None:
if _original_showwarning is not None:
_original_showwarning(
message, category, filename, lineno, file, line,
)
elif issubclass(category, PipDeprecationWarning):
# We use a specially named logger which will handle all of the
# deprecation messages for pip.
logger = logging.getLogger("pip._internal.deprecations")
logger.warning(message)
else:
_original_showwarning(
message, category, filename, lineno, file, line,
)
def install_warning_logger():
# type: () -> None
# Enable our Deprecation Warnings
warnings.simplefilter("default", PipDeprecationWarning, append=True)
global _original_showwarning
if _original_showwarning is None:
_original_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
def deprecated(reason, replacement, gone_in, issue=None):
# type: (str, Optional[str], Optional[str], Optional[int]) -> None
"""Helper to deprecate existing functionality.
reason:
Textual reason shown to the user about why this functionality has
been deprecated.
replacement:
Textual suggestion shown to the user about what alternative
functionality they can use.
gone_in:
        The version of pip in which this functionality should be removed.
        Raises an error if pip's current version is greater than or equal
        to this.
issue:
Issue number on the tracker that would serve as a useful place for
users to find related discussion and provide feedback.
Always pass replacement, gone_in and issue as keyword arguments for clarity
at the call site.
"""
# Construct a nice message.
# This is eagerly formatted as we want it to get logged as if someone
# typed this entire message out.
sentences = [
(reason, DEPRECATION_MSG_PREFIX + "{}"),
(gone_in, "pip {} will remove support for this functionality."),
(replacement, "A possible replacement is {}."),
(issue, (
"You can find discussion regarding this at "
"https://github.com/pypa/pip/issues/{}."
)),
]
message = " ".join(
template.format(val) for val, template in sentences if val is not None
)
# Raise as an error if it has to be removed.
if gone_in is not None and parse(current_version) >= parse(gone_in):
raise PipDeprecationWarning(message)
warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
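# --- Illustrative usage (a hedged sketch, not part of the original module;
# the reason text and issue number below are hypothetical values chosen
# purely for illustration).
def _example_deprecated_call():
    install_warning_logger()  # route PipDeprecationWarning through logging
    deprecated(
        "Passing --foo is deprecated.",
        replacement="the --bar option",
        gone_in=None,  # None: warn only, never raise
        issue=12345,
    )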
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/deprecation.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/deprecation.py",
"repo_id": "Django-locallibrary",
"token_count": 1153
}
| 6 |
import sys
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Sequence
# Shim to wrap setup.py invocation with setuptools
#
# We set sys.argv[0] to the path to the underlying setup.py file so
# setuptools / distutils don't take the path to the setup.py to be "-c" when
# invoking via the shim. This avoids e.g. the following manifest_maker
# warning: "warning: manifest_maker: standard file '-c' not found".
_SETUPTOOLS_SHIM = (
"import sys, setuptools, tokenize; sys.argv[0] = {0!r}; __file__={0!r};"
"f=getattr(tokenize, 'open', open)(__file__);"
"code=f.read().replace('\\r\\n', '\\n');"
"f.close();"
"exec(compile(code, __file__, 'exec'))"
)
def make_setuptools_shim_args(
setup_py_path, # type: str
global_options=None, # type: Sequence[str]
no_user_config=False, # type: bool
unbuffered_output=False # type: bool
):
# type: (...) -> List[str]
"""
Get setuptools command arguments with shim wrapped setup file invocation.
:param setup_py_path: The path to setup.py to be wrapped.
:param global_options: Additional global options.
:param no_user_config: If True, disables personal user configuration.
:param unbuffered_output: If True, adds the unbuffered switch to the
argument list.
"""
args = [sys.executable]
if unbuffered_output:
args += ["-u"]
args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)]
if global_options:
args += global_options
if no_user_config:
args += ["--no-user-cfg"]
return args
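# --- Illustrative behaviour (a minimal sketch, not part of the original
# module; the setup.py path is hypothetical). The returned list starts
# with the current interpreter, followed by the -c shim.
def _example_shim_args():
    args = make_setuptools_shim_args(
        "/tmp/pkg/setup.py", unbuffered_output=True,
    )
    assert args[0] == sys.executable
    assert args[1:3] == ["-u", "-c"]
    return args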
def make_setuptools_bdist_wheel_args(
setup_py_path, # type: str
global_options, # type: Sequence[str]
build_options, # type: Sequence[str]
destination_dir, # type: str
):
# type: (...) -> List[str]
    # NOTE: Eventually, we'd also want to pass -S in the flags here, when
    # we're isolating. Currently, it breaks Python in virtualenvs, because
    # it relies on site.py to find parts of the standard library outside
    # the virtualenv.
args = make_setuptools_shim_args(
setup_py_path,
global_options=global_options,
unbuffered_output=True
)
args += ["bdist_wheel", "-d", destination_dir]
args += build_options
return args
def make_setuptools_clean_args(
setup_py_path, # type: str
global_options, # type: Sequence[str]
):
# type: (...) -> List[str]
args = make_setuptools_shim_args(
setup_py_path,
global_options=global_options,
unbuffered_output=True
)
args += ["clean", "--all"]
return args
def make_setuptools_develop_args(
setup_py_path, # type: str
global_options, # type: Sequence[str]
install_options, # type: Sequence[str]
no_user_config, # type: bool
prefix, # type: Optional[str]
home, # type: Optional[str]
use_user_site, # type: bool
):
# type: (...) -> List[str]
assert not (use_user_site and prefix)
args = make_setuptools_shim_args(
setup_py_path,
global_options=global_options,
no_user_config=no_user_config,
)
args += ["develop", "--no-deps"]
args += install_options
if prefix:
args += ["--prefix", prefix]
if home is not None:
args += ["--home", home]
if use_user_site:
args += ["--user", "--prefix="]
return args
def make_setuptools_egg_info_args(
setup_py_path, # type: str
egg_info_dir, # type: Optional[str]
no_user_config, # type: bool
):
# type: (...) -> List[str]
args = make_setuptools_shim_args(
setup_py_path, no_user_config=no_user_config
)
args += ["egg_info"]
if egg_info_dir:
args += ["--egg-base", egg_info_dir]
return args
def make_setuptools_install_args(
setup_py_path, # type: str
global_options, # type: Sequence[str]
install_options, # type: Sequence[str]
record_filename, # type: str
root, # type: Optional[str]
prefix, # type: Optional[str]
header_dir, # type: Optional[str]
home, # type: Optional[str]
use_user_site, # type: bool
no_user_config, # type: bool
pycompile # type: bool
):
# type: (...) -> List[str]
assert not (use_user_site and prefix)
assert not (use_user_site and root)
args = make_setuptools_shim_args(
setup_py_path,
global_options=global_options,
no_user_config=no_user_config,
unbuffered_output=True
)
args += ["install", "--record", record_filename]
args += ["--single-version-externally-managed"]
if root is not None:
args += ["--root", root]
if prefix is not None:
args += ["--prefix", prefix]
if home is not None:
args += ["--home", home]
if use_user_site:
args += ["--user", "--prefix="]
if pycompile:
args += ["--compile"]
else:
args += ["--no-compile"]
if header_dir:
args += ["--install-headers", header_dir]
args += install_options
return args
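# --- Illustrative usage (a hedged sketch, not part of the original module;
# every path below is hypothetical). This shows the argument vector pip
# would build for a legacy `setup.py install` invocation.
def _example_install_args():
    return make_setuptools_install_args(
        "/tmp/pkg/setup.py",
        global_options=[],
        install_options=[],
        record_filename="/tmp/record.txt",
        root=None,
        prefix=None,
        header_dir=None,
        home=None,
        use_user_site=False,
        no_user_config=False,
        pycompile=True,
    )
    # Expected shape (sketch): [<python>, '-u', '-c', <shim>, 'install',
    # '--record', '/tmp/record.txt',
    # '--single-version-externally-managed', '--compile']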
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/setuptools_build.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/setuptools_build.py",
"repo_id": "Django-locallibrary",
"token_count": 2069
}
| 7 |
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os.path
import re
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.exceptions import BadCommand, SubProcessError
from pip._internal.utils.misc import display_path, hide_url
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs.versioncontrol import (
RemoteNotFoundError,
VersionControl,
find_path_to_setup_from_repo_root,
vcs,
)
if MYPY_CHECK_RUNNING:
from typing import Optional, Tuple
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import AuthInfo, RevOptions
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
HASH_REGEX = re.compile('^[a-fA-F0-9]{40}$')
def looks_like_hash(sha):
return bool(HASH_REGEX.match(sha))
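# --- Illustrative behaviour (a minimal sketch, not part of the original
# module): only full 40-hex-digit SHA-1 strings count as hashes.
def _example_looks_like_hash():
    assert looks_like_hash('a' * 40)
    assert not looks_like_hash('v1.0')      # a tag name
    assert not looks_like_hash('abc1234')   # abbreviated hashes don't match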
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
# Prevent the user's environment variables from interfering with pip:
# https://github.com/pypa/pip/issues/1130
unset_environ = ('GIT_DIR', 'GIT_WORK_TREE')
default_arg_rev = 'HEAD'
@staticmethod
def get_base_rev_args(rev):
return [rev]
def is_immutable_rev_checkout(self, url, dest):
# type: (str, str) -> bool
_, rev_options = self.get_url_rev_options(hide_url(url))
if not rev_options.rev:
return False
if not self.is_commit_id_equal(dest, rev_options.rev):
            # the current commit is different from rev,
            # which means rev was something other than a commit hash
return False
# return False in the rare case rev is both a commit hash
# and a tag or a branch; we don't want to cache in that case
# because that branch/tag could point to something else in the future
is_tag_or_branch = bool(
self.get_revision_sha(dest, rev_options.rev)[0]
)
return not is_tag_or_branch
def get_git_version(self):
VERSION_PFX = 'git version '
version = self.run_command(['version'])
if version.startswith(VERSION_PFX):
version = version[len(VERSION_PFX):].split()[0]
else:
version = ''
        # Keep only the first 3 components of the git version, because on
        # Windows it is x.y.z.windows.t, and that parses as a LegacyVersion,
        # which always compares smaller than a Version.
        version = '.'.join(version.split('.')[:3])
return parse_version(version)
@classmethod
def get_current_branch(cls, location):
"""
Return the current branch, or None if HEAD isn't at a branch
(e.g. detached HEAD).
"""
# git-symbolic-ref exits with empty stdout if "HEAD" is a detached
# HEAD rather than a symbolic ref. In addition, the -q causes the
# command to exit with status code 1 instead of 128 in this case
# and to suppress the message to stderr.
args = ['symbolic-ref', '-q', 'HEAD']
output = cls.run_command(
args, extra_ok_returncodes=(1, ), cwd=location,
)
ref = output.strip()
if ref.startswith('refs/heads/'):
return ref[len('refs/heads/'):]
return None
def export(self, location, url):
# type: (str, HiddenText) -> None
"""Export the Git repository at the url to the destination location"""
if not location.endswith('/'):
location = location + '/'
with TempDirectory(kind="export") as temp_dir:
self.unpack(temp_dir.path, url=url)
self.run_command(
['checkout-index', '-a', '-f', '--prefix', location],
cwd=temp_dir.path
)
@classmethod
def get_revision_sha(cls, dest, rev):
"""
Return (sha_or_none, is_branch), where sha_or_none is a commit hash
if the revision names a remote branch or tag, otherwise None.
Args:
dest: the repository directory.
rev: the revision name.
"""
# Pass rev to pre-filter the list.
output = ''
try:
output = cls.run_command(['show-ref', rev], cwd=dest)
except SubProcessError:
pass
refs = {}
for line in output.strip().splitlines():
try:
sha, ref = line.split()
except ValueError:
# Include the offending line to simplify troubleshooting if
# this error ever occurs.
raise ValueError('unexpected show-ref line: {!r}'.format(line))
refs[ref] = sha
branch_ref = 'refs/remotes/origin/{}'.format(rev)
tag_ref = 'refs/tags/{}'.format(rev)
sha = refs.get(branch_ref)
if sha is not None:
return (sha, True)
sha = refs.get(tag_ref)
return (sha, False)
@classmethod
def resolve_revision(cls, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> RevOptions
"""
Resolve a revision to a new RevOptions object with the SHA1 of the
branch, tag, or ref if found.
Args:
rev_options: a RevOptions object.
"""
rev = rev_options.arg_rev
# The arg_rev property's implementation for Git ensures that the
# rev return value is always non-None.
assert rev is not None
sha, is_branch = cls.get_revision_sha(dest, rev)
if sha is not None:
rev_options = rev_options.make_new(sha)
rev_options.branch_name = rev if is_branch else None
return rev_options
# Do not show a warning for the common case of something that has
# the form of a Git commit hash.
if not looks_like_hash(rev):
logger.warning(
"Did not find branch or tag '%s', assuming revision or ref.",
rev,
)
if not rev.startswith('refs/'):
return rev_options
# If it looks like a ref, we have to fetch it explicitly.
cls.run_command(
make_command('fetch', '-q', url, rev_options.to_args()),
cwd=dest,
)
# Change the revision to the SHA of the ref we fetched
sha = cls.get_revision(dest, rev='FETCH_HEAD')
rev_options = rev_options.make_new(sha)
return rev_options
@classmethod
def is_commit_id_equal(cls, dest, name):
"""
Return whether the current commit hash equals the given name.
Args:
dest: the repository directory.
name: a string name.
"""
if not name:
# Then avoid an unnecessary subprocess call.
return False
return cls.get_revision(dest) == name
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info('Cloning %s%s to %s', url, rev_display, display_path(dest))
self.run_command(make_command('clone', '-q', url, dest))
if rev_options.rev:
# Then a specific revision was requested.
rev_options = self.resolve_revision(dest, url, rev_options)
branch_name = getattr(rev_options, 'branch_name', None)
if branch_name is None:
# Only do a checkout if the current commit id doesn't match
# the requested revision.
if not self.is_commit_id_equal(dest, rev_options.rev):
cmd_args = make_command(
'checkout', '-q', rev_options.to_args(),
)
self.run_command(cmd_args, cwd=dest)
elif self.get_current_branch(dest) != branch_name:
# Then a specific branch was requested, and that branch
# is not yet checked out.
track_branch = 'origin/{}'.format(branch_name)
cmd_args = [
'checkout', '-b', branch_name, '--track', track_branch,
]
self.run_command(cmd_args, cwd=dest)
#: repo may contain submodules
self.update_submodules(dest)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
self.run_command(
make_command('config', 'remote.origin.url', url),
cwd=dest,
)
cmd_args = make_command('checkout', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
self.update_submodules(dest)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
# First fetch changes from the default remote
if self.get_git_version() >= parse_version('1.9.0'):
# fetch tags in addition to everything else
self.run_command(['fetch', '-q', '--tags'], cwd=dest)
else:
self.run_command(['fetch', '-q'], cwd=dest)
# Then reset to wanted revision (maybe even origin/master)
rev_options = self.resolve_revision(dest, url, rev_options)
cmd_args = make_command('reset', '--hard', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
#: update submodules
self.update_submodules(dest)
@classmethod
def get_remote_url(cls, location):
"""
Return URL of the first remote encountered.
Raises RemoteNotFoundError if the repository does not have a remote
url configured.
"""
# We need to pass 1 for extra_ok_returncodes since the command
# exits with return code 1 if there are no matching lines.
stdout = cls.run_command(
['config', '--get-regexp', r'remote\..*\.url'],
extra_ok_returncodes=(1, ), cwd=location,
)
remotes = stdout.splitlines()
try:
found_remote = remotes[0]
except IndexError:
raise RemoteNotFoundError
for remote in remotes:
if remote.startswith('remote.origin.url '):
found_remote = remote
break
url = found_remote.split(' ')[1]
return url.strip()
@classmethod
def get_revision(cls, location, rev=None):
if rev is None:
rev = 'HEAD'
current_rev = cls.run_command(
['rev-parse', rev], cwd=location,
)
return current_rev.strip()
@classmethod
def get_subdirectory(cls, location):
"""
Return the path to setup.py, relative to the repo root.
Return None if setup.py is in the repo root.
"""
# find the repo root
git_dir = cls.run_command(
['rev-parse', '--git-dir'],
cwd=location).strip()
if not os.path.isabs(git_dir):
git_dir = os.path.join(location, git_dir)
repo_root = os.path.abspath(os.path.join(git_dir, '..'))
return find_path_to_setup_from_repo_root(location, repo_root)
@classmethod
def get_url_rev_and_auth(cls, url):
# type: (str) -> Tuple[str, Optional[str], AuthInfo]
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
That's required because although they use SSH they sometimes don't
work with a ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
# Works around an apparent Git bug
# (see https://article.gmane.org/gmane.comp.version-control.git/146500)
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
if '://' not in url:
assert 'file:' not in url
url = url.replace('git+', 'git+ssh://')
url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
url = url.replace('ssh://', '')
else:
url, rev, user_pass = super(Git, cls).get_url_rev_and_auth(url)
return url, rev, user_pass
@classmethod
def update_submodules(cls, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
cls.run_command(
['submodule', 'update', '--init', '--recursive', '-q'],
cwd=location,
)
@classmethod
def get_repository_root(cls, location):
loc = super(Git, cls).get_repository_root(location)
if loc:
return loc
try:
r = cls.run_command(
['rev-parse', '--show-toplevel'],
cwd=location,
log_failed_cmd=False,
)
except BadCommand:
logger.debug("could not determine if %s is under git control "
"because git is not available", location)
return None
except SubProcessError:
return None
return os.path.normpath(r.rstrip('\r\n'))
vcs.register(Git)
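# --- Illustrative behaviour (a hedged sketch, not part of the original
# module; the URL below is hypothetical). An scp-style stub URL gains an
# ssh:// scheme internally for parsing, which is stripped again before the
# result is returned.
def _example_stub_url():
    url, rev, user_pass = Git.get_url_rev_and_auth(
        'git+git@github.com:user/repo.git@v1.0'
    )
    # Expected shape (sketch): the '@v1.0' suffix is split off as the
    # revision, and the returned URL keeps its scheme-less stub form.
    return url, rev, user_pass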
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/vcs/git.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/vcs/git.py",
"repo_id": "Django-locallibrary",
"token_count": 6367
}
| 8 |
######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .compat import PY2, PY3
from .universaldetector import UniversalDetector
from .version import __version__, VERSION
def detect(byte_str):
"""
Detect the encoding of the given byte string.
:param byte_str: The byte sequence to examine.
:type byte_str: ``bytes`` or ``bytearray``
"""
if not isinstance(byte_str, bytearray):
if not isinstance(byte_str, bytes):
raise TypeError('Expected object of type bytes or bytearray, got: '
'{0}'.format(type(byte_str)))
else:
byte_str = bytearray(byte_str)
detector = UniversalDetector()
detector.feed(byte_str)
return detector.close()
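# --- Illustrative usage (a minimal sketch, not part of the original module).
def _example_detect():
    result = detect(b'h\xc3\xa9llo w\xc3\xb6rld')  # UTF-8 encoded bytes
    # `result` is a dict of the form
    # {'encoding': ..., 'confidence': ..., 'language': ...}; for this input
    # 'utf-8' is the expected encoding, though confidences vary.
    return result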
|
Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/__init__.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 499
}
| 9 |
#!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from pip._vendor.chardet import __version__
from pip._vendor.chardet.compat import PY2
from pip._vendor.chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
line = bytearray(line)
u.feed(line)
        # shortcut out of the loop to save reading further - particularly
        # useful if we read a BOM.
if u.done:
break
u.close()
result = u.result
if PY2:
name = name.decode(sys.getfilesystemencoding(), 'ignore')
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
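# --- Illustrative usage (a minimal sketch, not part of the original module;
# the file name is hypothetical). description_of accepts any iterable of
# byte lines, so an in-memory list works as well as a file object.
def _example_description_of():
    lines = [b'hello world\n', b'goodbye\n']
    return description_of(lines, name='example.txt')
    # -> e.g. 'example.txt: ascii with confidence 1.0'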
def main(argv=None):
"""
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
"""
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings")
parser.add_argument('input',
help='File whose encoding we would like to determine. \
(default: stdin)',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin if PY2 else sys.stdin.buffer])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
|
Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/cli/chardetect.py",
"repo_id": "Django-locallibrary",
"token_count": 1163
}
| 10 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# This is hiragana 2-char sequence table, the number in each cell represents its frequency category
jp2CharContext = (
(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
)
class JapaneseContextAnalysis(object):
NUM_OF_CATEGORY = 6
DONT_KNOW = -1
ENOUGH_REL_THRESHOLD = 100
MAX_REL_THRESHOLD = 1000
MINIMUM_DATA_THRESHOLD = 4
def __init__(self):
self._total_rel = None
self._rel_sample = None
self._need_to_skip_char_num = None
self._last_char_order = None
self._done = None
self.reset()
def reset(self):
self._total_rel = 0 # total sequence received
# category counters, each integer counts sequence in its category
self._rel_sample = [0] * self.NUM_OF_CATEGORY
# if last byte in current buffer is not the last byte of a character,
# we need to know how many bytes to skip in next buffer
self._need_to_skip_char_num = 0
self._last_char_order = -1 # The order of previous char
# If this flag is set to True, detection is done and conclusion has
# been made
self._done = False
def feed(self, byte_str, num_bytes):
if self._done:
return
        # The buffer we got is byte oriented, and a character may span more
        # than one buffer. In case the last one or two bytes in the previous
        # buffer were not complete, we record how many bytes are needed to
        # complete that character and skip those bytes here. We could record
        # those bytes and analyse the character once it is complete, but
        # since a single character will not make much difference, simply
        # skipping it simplifies our logic and improves performance.
i = self._need_to_skip_char_num
while i < num_bytes:
order, char_len = self.get_order(byte_str[i:i + 2])
i += char_len
if i > num_bytes:
self._need_to_skip_char_num = i - num_bytes
self._last_char_order = -1
else:
if (order != -1) and (self._last_char_order != -1):
self._total_rel += 1
if self._total_rel > self.MAX_REL_THRESHOLD:
self._done = True
break
self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
self._last_char_order = order
def got_enough_data(self):
return self._total_rel > self.ENOUGH_REL_THRESHOLD
def get_confidence(self):
# This is just one way to calculate confidence. It works well for me.
if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
return (self._total_rel - self._rel_sample[0]) / self._total_rel
else:
return self.DONT_KNOW
def get_order(self, byte_str):
return -1, 1
class SJISContextAnalysis(JapaneseContextAnalysis):
def __init__(self):
super(SJISContextAnalysis, self).__init__()
self._charset_name = "SHIFT_JIS"
@property
def charset_name(self):
return self._charset_name
def get_order(self, byte_str):
if not byte_str:
return -1, 1
# find out current char's byte length
first_char = byte_str[0]
if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
char_len = 2
if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
self._charset_name = "CP932"
else:
char_len = 1
# return its order if it is hiragana
if len(byte_str) > 1:
second_char = byte_str[1]
if (first_char == 202) and (0x9F <= second_char <= 0xF1):
return second_char - 0x9F, char_len
return -1, char_len
class EUCJPContextAnalysis(JapaneseContextAnalysis):
def get_order(self, byte_str):
if not byte_str:
return -1, 1
# find out current char's byte length
first_char = byte_str[0]
if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
char_len = 2
elif first_char == 0x8F:
char_len = 3
else:
char_len = 1
# return its order if it is hiragana
if len(byte_str) > 1:
second_char = byte_str[1]
if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
return second_char - 0xA1, char_len
return -1, char_len
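# --- Illustrative behaviour (a minimal sketch, not part of the original
# module): b'\xa4\xa2' is the EUC-JP encoding of a hiragana character, so
# its order is second_byte - 0xA1 == 1 and its length is two bytes.
def _example_eucjp_order():
    analyser = EUCJPContextAnalysis()
    assert analyser.get_order(bytearray(b'\xa4\xa2')) == (1, 2)
    assert analyser.get_order(bytearray(b'ab')) == (-1, 1)  # not hiragana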
|
Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/jpcntx.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/jpcntx.py",
"repo_id": "Django-locallibrary",
"token_count": 16274
}
| 11 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8_SM_MODEL
class UTF8Prober(CharSetProber):
ONE_CHAR_PROB = 0.5
def __init__(self):
super(UTF8Prober, self).__init__()
self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
self._num_mb_chars = None
self.reset()
def reset(self):
super(UTF8Prober, self).reset()
self.coding_sm.reset()
self._num_mb_chars = 0
@property
def charset_name(self):
return "utf-8"
@property
def language(self):
return ""
def feed(self, byte_str):
for c in byte_str:
coding_state = self.coding_sm.next_state(c)
if coding_state == MachineState.ERROR:
self._state = ProbingState.NOT_ME
break
elif coding_state == MachineState.ITS_ME:
self._state = ProbingState.FOUND_IT
break
elif coding_state == MachineState.START:
if self.coding_sm.get_current_charlen() >= 2:
self._num_mb_chars += 1
if self.state == ProbingState.DETECTING:
if self.get_confidence() > self.SHORTCUT_THRESHOLD:
self._state = ProbingState.FOUND_IT
return self.state
def get_confidence(self):
unlike = 0.99
if self._num_mb_chars < 6:
unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
return 1.0 - unlike
else:
return unlike
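# --- Illustrative usage (a minimal sketch, not part of the original module).
def _example_utf8_prober():
    prober = UTF8Prober()
    state = prober.feed(bytearray(b'h\xc3\xa9llo'))  # one 2-byte UTF-8 char
    # One multi-byte sequence pushes the confidence above 0.5
    # (sketch: 1 - 0.99 * 0.5 ** 1 == 0.505).
    return state, prober.get_confidence()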
|
Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/utf8prober.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/utf8prober.py",
"repo_id": "Django-locallibrary",
"token_count": 1072
}
| 12 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
'''
def enquote_executable(executable):
if ' ' in executable:
# make sure we quote only the executable in case of env
# for example /usr/bin/env "/dir with spaces/bin/jython"
# instead of "/usr/bin/env /dir with spaces/bin/jython"
        # otherwise the whole env invocation would be quoted
if executable.startswith('/usr/bin/env '):
env, _executable = executable.split(' ', 1)
if ' ' in _executable and not _executable.startswith('"'):
executable = '%s "%s"' % (env, _executable)
else:
if not executable.startswith('"'):
executable = '"%s"' % executable
return executable
# Keep the old name around (for now), as there is at least one project using it!
_enquote_executable = enquote_executable
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix') or (os.name == 'java' and
os._name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
self._is_nt = os.name == 'nt' or (
os.name == 'java' and os._name == 'nt')
self.version_info = sys.version_info
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and self._is_nt: # pragma: no cover
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'): # pragma: no cover
def _is_shell(self, executable):
"""
Determine if the specified executable is a script
(contains a #! line)
"""
try:
with open(executable) as fp:
return fp.read(2) == '#!'
except (OSError, IOError):
logger.warning('Failed to open %s', executable)
return False
def _fix_jython_executable(self, executable):
if self._is_shell(executable):
# Workaround for Jython is not needed on Linux systems.
import java
if java.lang.System.getProperty('os.name') == 'Linux':
return executable
elif executable.lower().endswith('jython.exe'):
# Use wrapper exe for Jython on Windows
return executable
return '/usr/bin/env %s' % executable
def _build_shebang(self, executable, post_interp):
"""
        Build a shebang line. In the simple case (on Windows, or a shebang
        line which is not too long and contains no spaces) use a simple
        formulation for the shebang. Otherwise, use /bin/sh as the
        executable, with a contrived
shebang which allows the script to run either under Python or sh, using
suitable quoting. Thanks to Harald Nordgren for his input.
See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
https://hg.mozilla.org/mozilla-central/file/tip/mach
"""
if os.name != 'posix':
simple_shebang = True
else:
# Add 3 for '#!' prefix and newline suffix.
shebang_length = len(executable) + len(post_interp) + 3
if sys.platform == 'darwin':
max_shebang_length = 512
else:
max_shebang_length = 127
simple_shebang = ((b' ' not in executable) and
(shebang_length <= max_shebang_length))
if simple_shebang:
result = b'#!' + executable + post_interp + b'\n'
else:
result = b'#!/bin/sh\n'
result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
result += b"' '''"
return result
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv(): # pragma: no cover
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else: # pragma: no cover
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
if sys.platform.startswith('java'): # pragma: no cover
executable = self._fix_jython_executable(executable)
# Normalise case for Windows - COMMENTED OUT
# executable = os.path.normcase(executable)
# N.B. The normalising operation above has been commented out: See
# issue #124. Although paths in Windows are generally case-insensitive,
# they aren't always. For example, a path containing a ẞ (which is a
# LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
        # LATIN SMALL LETTER SHARP S - U+00DF). The two are not considered by
# Windows as equivalent in path names.
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote:
executable = enquote_executable(executable)
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
and '-X:FullFrames' not in post_interp): # pragma: no cover
post_interp += b' -X:Frames'
shebang = self._build_shebang(executable, post_interp)
        # The Python parser reads a script as UTF-8 until it encounters a
        # #coding:xxx cookie. Since the shebang must be the first line of a
        # file, the #coding:xxx cookie cannot come before it, so the shebang
        # has to be decodable from UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
import_name=entry.suffix.split('.')[0],
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and self._is_nt
linesep = os.linesep.encode('utf-8')
if not shebang.endswith(linesep):
shebang += linesep
if not use_launcher:
script_bytes = shebang + script_bytes
else: # pragma: no cover
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher: # pragma: no cover
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, self.version_info[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s.%s' % (name, self.version_info[0],
self.version_info[1]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError: # pragma: no cover
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line: # pragma: no cover
logger.warning('%s: %s is an empty file (skipping)',
self.get_command_name(), script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line: # pragma: no cover
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
resource = finder(distlib_package).find(name)
if not resource:
msg = ('Unable to find resource %s in package %s' % (name,
distlib_package))
raise ValueError(msg)
return resource.bytes
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
def make_multiple(self, specifications, options=None):
"""
        Take a list of specifications and make scripts from them.
        :param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
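# --- Illustrative usage (a hedged sketch, not part of the original module;
# the target directory and the export entry are hypothetical).
def _example_script_maker():
    maker = ScriptMaker(source_dir=None, target_dir='/tmp/bin',
                        add_launchers=False, dry_run=True)
    # 'mytool = pkg.cli:main' is an export-entry specification; make()
    # generates a console script that imports pkg.cli and calls main(),
    # returning the list of file names it would have written.
    return maker.make('mytool = pkg.cli:main')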
|
Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/scripts.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_vendor/distlib/scripts.py",
"repo_id": "Django-locallibrary",
"token_count": 8175
}
| 13 |
# coding: utf-8
"""
Package resource API
--------------------
A resource is a logical file contained within a package, or a logical
subdirectory thereof. The package resource API expects resource names
to have their path parts separated with ``/``, *not* whatever the local
path separator is. Do not use os.path operations to manipulate resource
names being passed into the API.
The package resource API is designed to work with normal filesystem packages,
.egg files, and unpacked .egg files. It can also work in a limited way with
.zip files and with custom PEP 302 loaders that support the ``get_data()``
method.
"""
from __future__ import absolute_import
import sys
import os
import io
import time
import re
import types
import zipfile
import zipimport
import warnings
import stat
import functools
import pkgutil
import operator
import platform
import collections
import plistlib
import email.parser
import errno
import tempfile
import textwrap
import itertools
import inspect
import ntpath
import posixpath
from pkgutil import get_importer
try:
import _imp
except ImportError:
# Python 3.2 compatibility
import imp as _imp
try:
FileExistsError
except NameError:
FileExistsError = OSError
from pip._vendor import six
from pip._vendor.six.moves import urllib, map, filter
# capture these to bypass sandboxing
from os import utime
try:
from os import mkdir, rename, unlink
WRITE_SUPPORT = True
except ImportError:
# no write support, probably under GAE
WRITE_SUPPORT = False
from os import open as os_open
from os.path import isdir, split
try:
import importlib.machinery as importlib_machinery
# access attribute to force import under delayed import mechanisms.
importlib_machinery.__name__
except ImportError:
importlib_machinery = None
from . import py31compat
from pip._vendor import appdirs
from pip._vendor import packaging
__import__('pip._vendor.packaging.version')
__import__('pip._vendor.packaging.specifiers')
__import__('pip._vendor.packaging.requirements')
__import__('pip._vendor.packaging.markers')
__metaclass__ = type
if (3, 0) < sys.version_info < (3, 5):
raise RuntimeError("Python 3.5 or later is required")
if six.PY2:
# Those builtin exceptions are only defined in Python 3
PermissionError = None
NotADirectoryError = None
# declare some globals that will be defined later to
# satisfy the linters.
require = None
working_set = None
add_activation_listener = None
resources_stream = None
cleanup_resources = None
resource_dir = None
resource_stream = None
set_extraction_path = None
resource_isdir = None
resource_string = None
iter_entry_points = None
resource_listdir = None
resource_filename = None
resource_exists = None
_distribution_finders = None
_namespace_handlers = None
_namespace_packages = None
class PEP440Warning(RuntimeWarning):
"""
Used when there is an issue with a version or specifier not complying with
PEP 440.
"""
def parse_version(v):
try:
return packaging.version.Version(v)
except packaging.version.InvalidVersion:
return packaging.version.LegacyVersion(v)
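# Comment-only examples of parse_version(): PEP 440 ordering when the string
# parses, with LegacyVersion as a fallback instead of raising:
#
#     >>> parse_version('1.0a1') < parse_version('1.0')
#     True
#     >>> parse_version('not.a.pep440.version')   # yields a LegacyVersion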
_state_vars = {}
def _declare_state(vartype, **kw):
globals().update(kw)
_state_vars.update(dict.fromkeys(kw, vartype))
def __getstate__():
state = {}
g = globals()
for k, v in _state_vars.items():
state[k] = g['_sget_' + v](g[k])
return state
def __setstate__(state):
g = globals()
for k, v in state.items():
g['_sset_' + _state_vars[k]](k, g[k], v)
return state
def _sget_dict(val):
return val.copy()
def _sset_dict(key, ob, state):
ob.clear()
ob.update(state)
def _sget_object(val):
return val.__getstate__()
def _sset_object(key, ob, state):
ob.__setstate__(state)
_sget_none = _sset_none = lambda *args: None
def get_supported_platform():
"""Return this platform's maximum compatible version.
distutils.util.get_platform() normally reports the minimum version
of Mac OS X that would be required to *use* extensions produced by
distutils. But what we want when checking compatibility is to know the
version of Mac OS X that we are *running*. To allow usage of packages that
explicitly require a newer version of Mac OS X, we must also know the
current version of the OS.
If this condition occurs for any other platform with a version in its
platform strings, this function should be extended accordingly.
"""
plat = get_build_platform()
m = macosVersionString.match(plat)
if m is not None and sys.platform == "darwin":
try:
plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
except ValueError:
# not Mac OS X
pass
return plat
__all__ = [
# Basic resource access and distribution/entry point discovery
'require', 'run_script', 'get_provider', 'get_distribution',
'load_entry_point', 'get_entry_map', 'get_entry_info',
'iter_entry_points',
'resource_string', 'resource_stream', 'resource_filename',
'resource_listdir', 'resource_exists', 'resource_isdir',
# Environmental control
'declare_namespace', 'working_set', 'add_activation_listener',
'find_distributions', 'set_extraction_path', 'cleanup_resources',
'get_default_cache',
# Primary implementation classes
'Environment', 'WorkingSet', 'ResourceManager',
'Distribution', 'Requirement', 'EntryPoint',
# Exceptions
'ResolutionError', 'VersionConflict', 'DistributionNotFound',
'UnknownExtra', 'ExtractionError',
# Warnings
'PEP440Warning',
# Parsing functions and string utilities
'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
# filesystem utilities
'ensure_directory', 'normalize_path',
# Distribution "precedence" constants
'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
# "Provider" interfaces, implementations, and registration/lookup APIs
'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
'register_finder', 'register_namespace_handler', 'register_loader_type',
'fixup_namespace_packages', 'get_importer',
# Warnings
'PkgResourcesDeprecationWarning',
# Deprecated/backward compatibility only
'run_main', 'AvailableDistributions',
]
class ResolutionError(Exception):
"""Abstract base for dependency resolution errors"""
def __repr__(self):
return self.__class__.__name__ + repr(self.args)
class VersionConflict(ResolutionError):
"""
An already-installed version conflicts with the requested version.
Should be initialized with the installed Distribution and the requested
Requirement.
"""
_template = "{self.dist} is installed but {self.req} is required"
@property
def dist(self):
return self.args[0]
@property
def req(self):
return self.args[1]
def report(self):
return self._template.format(**locals())
def with_context(self, required_by):
"""
If required_by is non-empty, return a version of self that is a
ContextualVersionConflict.
"""
if not required_by:
return self
args = self.args + (required_by,)
return ContextualVersionConflict(*args)
class ContextualVersionConflict(VersionConflict):
"""
A VersionConflict that accepts a third parameter, the set of the
requirements that required the installed Distribution.
"""
_template = VersionConflict._template + ' by {self.required_by}'
@property
def required_by(self):
return self.args[2]
class DistributionNotFound(ResolutionError):
"""A requested distribution was not found"""
_template = ("The '{self.req}' distribution was not found "
"and is required by {self.requirers_str}")
@property
def req(self):
return self.args[0]
@property
def requirers(self):
return self.args[1]
@property
def requirers_str(self):
if not self.requirers:
return 'the application'
return ', '.join(self.requirers)
def report(self):
return self._template.format(**locals())
def __str__(self):
return self.report()
class UnknownExtra(ResolutionError):
"""Distribution doesn't have an "extra feature" of the given name"""
_provider_factories = {}
PY_MAJOR = '{}.{}'.format(*sys.version_info)
EGG_DIST = 3
BINARY_DIST = 2
SOURCE_DIST = 1
CHECKOUT_DIST = 0
DEVELOP_DIST = -1
def register_loader_type(loader_type, provider_factory):
"""Register `provider_factory` to make providers for `loader_type`
`loader_type` is the type or class of a PEP 302 ``module.__loader__``,
and `provider_factory` is a function that, passed a *module* object,
returns an ``IResourceProvider`` for that module.
"""
_provider_factories[loader_type] = provider_factory
def get_provider(moduleOrReq):
"""Return an IResourceProvider for the named module or requirement"""
if isinstance(moduleOrReq, Requirement):
return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
try:
module = sys.modules[moduleOrReq]
except KeyError:
__import__(moduleOrReq)
module = sys.modules[moduleOrReq]
loader = getattr(module, '__loader__', None)
return _find_adapter(_provider_factories, loader)(module)
def _macosx_vers(_cache=[]):
if not _cache:
version = platform.mac_ver()[0]
# fallback for MacPorts
if version == '':
plist = '/System/Library/CoreServices/SystemVersion.plist'
if os.path.exists(plist):
if hasattr(plistlib, 'readPlist'):
plist_content = plistlib.readPlist(plist)
if 'ProductVersion' in plist_content:
version = plist_content['ProductVersion']
_cache.append(version.split('.'))
return _cache[0]
def _macosx_arch(machine):
return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
def get_build_platform():
"""Return this platform's string for platform-specific distributions
XXX Currently this is the same as ``distutils.util.get_platform()``, but it
needs some hacks for Linux and Mac OS X.
"""
from sysconfig import get_platform
plat = get_platform()
if sys.platform == "darwin" and not plat.startswith('macosx-'):
try:
version = _macosx_vers()
machine = os.uname()[4].replace(" ", "_")
return "macosx-%d.%d-%s" % (
int(version[0]), int(version[1]),
_macosx_arch(machine),
)
except ValueError:
# if someone is running a non-Mac darwin system, this will fall
# through to the default implementation
pass
return plat
macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
# XXX backward compat
get_platform = get_build_platform
def compatible_platforms(provided, required):
"""Can code for the `provided` platform run on the `required` platform?
Returns true if either platform is ``None``, or the platforms are equal.
XXX Needs compatibility checks for Linux and other unixy OSes.
"""
if provided is None or required is None or provided == required:
# easy case
return True
# Mac OS X special cases
reqMac = macosVersionString.match(required)
if reqMac:
provMac = macosVersionString.match(provided)
# is this a Mac package?
if not provMac:
# this is backwards compatibility for packages built before
# setuptools 0.6. All packages built after this point will
# use the new macosx designation.
provDarwin = darwinVersionString.match(provided)
if provDarwin:
dversion = int(provDarwin.group(1))
macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
if dversion == 7 and macosversion >= "10.3" or \
dversion == 8 and macosversion >= "10.4":
return True
# egg isn't macosx or legacy darwin
return False
# are they the same major version and machine type?
if provMac.group(1) != reqMac.group(1) or \
provMac.group(3) != reqMac.group(3):
return False
# is the required OS major update >= the provided one?
if int(provMac.group(2)) > int(reqMac.group(2)):
return False
return True
# XXX Linux and other platforms' special cases should go here
return False
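# Comment-only example of the macOS rule above: eggs built for an older OS
# minor release are usable on a newer one, but not the reverse.
#
#     >>> compatible_platforms('macosx-10.9-x86_64', 'macosx-10.15-x86_64')
#     True
#     >>> compatible_platforms('macosx-10.15-x86_64', 'macosx-10.9-x86_64')
#     False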
def run_script(dist_spec, script_name):
"""Locate distribution `dist_spec` and run its `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
require(dist_spec)[0].run_script(script_name, ns)
# backward compatibility
run_main = run_script
def get_distribution(dist):
"""Return a current distribution object for a Requirement or string"""
if isinstance(dist, six.string_types):
dist = Requirement.parse(dist)
if isinstance(dist, Requirement):
dist = get_provider(dist)
if not isinstance(dist, Distribution):
raise TypeError("Expected string, Requirement, or Distribution", dist)
return dist
def load_entry_point(dist, group, name):
"""Return `name` entry point of `group` for `dist` or raise ImportError"""
return get_distribution(dist).load_entry_point(group, name)
def get_entry_map(dist, group=None):
"""Return the entry point map for `group`, or the full entry map"""
return get_distribution(dist).get_entry_map(group)
def get_entry_info(dist, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return get_distribution(dist).get_entry_info(group, name)
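# Comment-only sketch of the wrappers above; the distribution, group and
# entry-point names are hypothetical:
#
#     >>> ep = get_entry_info('mypkg', 'console_scripts', 'mycmd')
#     >>> main = load_entry_point('mypkg', 'console_scripts', 'mycmd')
#     >>> main is ep.load()   # the same underlying callable
#     True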
class IMetadataProvider:
def has_metadata(name):
"""Does the package's distribution contain the named metadata?"""
def get_metadata(name):
"""The named metadata resource as a string"""
def get_metadata_lines(name):
"""Yield named metadata resource as list of non-blank non-comment lines
Leading and trailing whitespace is stripped from each line, and lines
with ``#`` as the first non-blank character are omitted."""
def metadata_isdir(name):
"""Is the named metadata a directory? (like ``os.path.isdir()``)"""
def metadata_listdir(name):
"""List of metadata names in the directory (like ``os.listdir()``)"""
def run_script(script_name, namespace):
"""Execute the named script in the supplied namespace dictionary"""
class IResourceProvider(IMetadataProvider):
"""An object that provides access to package resources"""
def get_resource_filename(manager, resource_name):
"""Return a true filesystem path for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_stream(manager, resource_name):
"""Return a readable file-like object for `resource_name`
`manager` must be an ``IResourceManager``"""
def get_resource_string(manager, resource_name):
"""Return a string containing the contents of `resource_name`
`manager` must be an ``IResourceManager``"""
def has_resource(resource_name):
"""Does the package contain the named resource?"""
def resource_isdir(resource_name):
"""Is the named resource a directory? (like ``os.path.isdir()``)"""
def resource_listdir(resource_name):
"""List of resource names in the directory (like ``os.listdir()``)"""
class WorkingSet:
"""A collection of active distributions on sys.path (or a similar list)"""
def __init__(self, entries=None):
"""Create working set from list of path entries (default=sys.path)"""
self.entries = []
self.entry_keys = {}
self.by_key = {}
self.callbacks = []
if entries is None:
entries = sys.path
for entry in entries:
self.add_entry(entry)
@classmethod
def _build_master(cls):
"""
Prepare the master working set.
"""
ws = cls()
try:
from __main__ import __requires__
except ImportError:
# The main program does not list any requirements
return ws
# ensure the requirements are met
try:
ws.require(__requires__)
except VersionConflict:
return cls._build_from_requirements(__requires__)
return ws
@classmethod
def _build_from_requirements(cls, req_spec):
"""
Build a working set from a requirement spec. Rewrites sys.path.
"""
# try it without defaults already on sys.path
# by starting with an empty path
ws = cls([])
reqs = parse_requirements(req_spec)
dists = ws.resolve(reqs, Environment())
for dist in dists:
ws.add(dist)
# add any missing entries from sys.path
for entry in sys.path:
if entry not in ws.entries:
ws.add_entry(entry)
# then copy back to sys.path
sys.path[:] = ws.entries
return ws
def add_entry(self, entry):
"""Add a path item to ``.entries``, finding any distributions on it
``find_distributions(entry, True)`` is used to find distributions
corresponding to the path entry, and they are added. `entry` is
always appended to ``.entries``, even if it is already present.
(This is because ``sys.path`` can contain the same value more than
once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
equal ``sys.path``.)
"""
self.entry_keys.setdefault(entry, [])
self.entries.append(entry)
for dist in find_distributions(entry, True):
self.add(dist, entry, False)
def __contains__(self, dist):
"""True if `dist` is the active distribution for its project"""
return self.by_key.get(dist.key) == dist
def find(self, req):
"""Find a distribution matching requirement `req`
If there is an active distribution for the requested project, this
returns it as long as it meets the version requirement specified by
`req`. But, if there is an active distribution for the project and it
does *not* meet the `req` requirement, ``VersionConflict`` is raised.
If there is no active distribution for the requested project, ``None``
is returned.
"""
dist = self.by_key.get(req.key)
if dist is not None and dist not in req:
# XXX add more info
raise VersionConflict(dist, req)
return dist
def iter_entry_points(self, group, name=None):
"""Yield entry point objects from `group` matching `name`
If `name` is None, yields all entry points in `group` from all
distributions in the working set, otherwise only ones matching
both `group` and `name` are yielded (in distribution order).
"""
return (
entry
for dist in self
for entry in dist.get_entry_map(group).values()
if name is None or name == entry.name
)
def run_script(self, requires, script_name):
"""Locate distribution for `requires` and run `script_name` script"""
ns = sys._getframe(1).f_globals
name = ns['__name__']
ns.clear()
ns['__name__'] = name
self.require(requires)[0].run_script(script_name, ns)
def __iter__(self):
"""Yield distributions for non-duplicate projects in the working set
The yield order is the order in which the items' path entries were
added to the working set.
"""
seen = {}
for item in self.entries:
if item not in self.entry_keys:
# workaround a cache issue
continue
for key in self.entry_keys[item]:
if key not in seen:
seen[key] = 1
yield self.by_key[key]
def add(self, dist, entry=None, insert=True, replace=False):
"""Add `dist` to working set, associated with `entry`
If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
On exit from this routine, `entry` is added to the end of the working
set's ``.entries`` (if it wasn't already present).
`dist` is only added to the working set if it's for a project that
doesn't already have a distribution in the set, unless `replace=True`.
If it's added, any callbacks registered with the ``subscribe()`` method
will be called.
"""
if insert:
dist.insert_on(self.entries, entry, replace=replace)
if entry is None:
entry = dist.location
keys = self.entry_keys.setdefault(entry, [])
keys2 = self.entry_keys.setdefault(dist.location, [])
if not replace and dist.key in self.by_key:
# ignore hidden distros
return
self.by_key[dist.key] = dist
if dist.key not in keys:
keys.append(dist.key)
if dist.key not in keys2:
keys2.append(dist.key)
self._added_new(dist)
def resolve(self, requirements, env=None, installer=None,
replace_conflicting=False, extras=None):
"""List all distributions needed to (recursively) meet `requirements`
`requirements` must be a sequence of ``Requirement`` objects. `env`,
if supplied, should be an ``Environment`` instance. If
not supplied, it defaults to all distributions available within any
entry or distribution in the working set. `installer`, if supplied,
will be invoked with each requirement that cannot be met by an
already-installed distribution; it should return a ``Distribution`` or
``None``.
        Unless `replace_conflicting=True`, raises a VersionConflict exception
        if any requirements are found on the path that have the correct name
        but the wrong version.  Otherwise, if an `installer` is supplied it
        will be invoked to obtain the correct version of the requirement and
        activate it.
`extras` is a list of the extras to be used with these requirements.
This is important because extra requirements may look like `my_req;
extra = "my_extra"`, which would otherwise be interpreted as a purely
optional requirement. Instead, we want to be able to assert that these
requirements are truly required.
"""
# set up the stack
requirements = list(requirements)[::-1]
# set of processed requirements
processed = {}
# key -> dist
best = {}
to_activate = []
req_extras = _ReqExtras()
# Mapping of requirement to set of distributions that required it;
# useful for reporting info about conflicts.
required_by = collections.defaultdict(set)
while requirements:
# process dependencies breadth-first
req = requirements.pop(0)
if req in processed:
# Ignore cyclic or redundant dependencies
continue
if not req_extras.markers_pass(req, extras):
continue
dist = best.get(req.key)
if dist is None:
# Find the best distribution and add it to the map
dist = self.by_key.get(req.key)
if dist is None or (dist not in req and replace_conflicting):
ws = self
if env is None:
if dist is None:
env = Environment(self.entries)
else:
# Use an empty environment and workingset to avoid
# any further conflicts with the conflicting
# distribution
env = Environment([])
ws = WorkingSet([])
dist = best[req.key] = env.best_match(
req, ws, installer,
replace_conflicting=replace_conflicting
)
if dist is None:
requirers = required_by.get(req, None)
raise DistributionNotFound(req, requirers)
to_activate.append(dist)
if dist not in req:
# Oops, the "best" so far conflicts with a dependency
dependent_req = required_by[req]
raise VersionConflict(dist, req).with_context(dependent_req)
# push the new requirements onto the stack
new_requirements = dist.requires(req.extras)[::-1]
requirements.extend(new_requirements)
# Register the new requirements needed by req
for new_requirement in new_requirements:
required_by[new_requirement].add(req.project_name)
req_extras[new_requirement] = req.extras
processed[req] = True
# return list of distros to activate
return to_activate
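    # Comment-only sketch of resolve(): given parsed Requirement objects it
    # returns the distributions that must be activated, raising
    # DistributionNotFound or VersionConflict on failure. The project name
    # below is hypothetical:
    #
    #     reqs = list(parse_requirements('mypkg>=1.0'))
    #     for dist in working_set.resolve(reqs):
    #         working_set.add(dist)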
def find_plugins(
self, plugin_env, full_env=None, installer=None, fallback=True):
"""Find all activatable distributions in `plugin_env`
Example usage::
distributions, errors = working_set.find_plugins(
Environment(plugin_dirlist)
)
# add plugins+libs to sys.path
map(working_set.add, distributions)
# display errors
print('Could not load', errors)
The `plugin_env` should be an ``Environment`` instance that contains
only distributions that are in the project's "plugin directory" or
directories. The `full_env`, if supplied, should be an ``Environment``
        that contains all currently-available distributions. If `full_env` is not
supplied, one is created automatically from the ``WorkingSet`` this
method is called on, which will typically mean that every directory on
``sys.path`` will be scanned for distributions.
`installer` is a standard installer callback as used by the
``resolve()`` method. The `fallback` flag indicates whether we should
attempt to resolve older versions of a plugin if the newest version
cannot be resolved.
This method returns a 2-tuple: (`distributions`, `error_info`), where
`distributions` is a list of the distributions found in `plugin_env`
that were loadable, along with any other distributions that are needed
to resolve their dependencies. `error_info` is a dictionary mapping
unloadable plugin distributions to an exception instance describing the
error that occurred. Usually this will be a ``DistributionNotFound`` or
``VersionConflict`` instance.
"""
plugin_projects = list(plugin_env)
# scan project names in alphabetic order
plugin_projects.sort()
error_info = {}
distributions = {}
if full_env is None:
env = Environment(self.entries)
env += plugin_env
else:
env = full_env + plugin_env
shadow_set = self.__class__([])
# put all our entries in shadow_set
list(map(shadow_set.add, self))
for project_name in plugin_projects:
for dist in plugin_env[project_name]:
req = [dist.as_requirement()]
try:
resolvees = shadow_set.resolve(req, env, installer)
except ResolutionError as v:
# save error info
error_info[dist] = v
if fallback:
# try the next older version of project
continue
else:
# give up on this project, keep going
break
else:
list(map(shadow_set.add, resolvees))
distributions.update(dict.fromkeys(resolvees))
# success, no need to try any more versions of this project
break
distributions = list(distributions)
distributions.sort()
return distributions, error_info
def require(self, *requirements):
"""Ensure that distributions matching `requirements` are activated
`requirements` must be a string or a (possibly-nested) sequence
thereof, specifying the distributions and versions required. The
return value is a sequence of the distributions that needed to be
activated to fulfill the requirements; all relevant distributions are
included, even if they were already activated in this working set.
"""
needed = self.resolve(parse_requirements(requirements))
for dist in needed:
self.add(dist)
return needed
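    # For most callers require() is the one-shot form of the resolve/add loop
    # sketched above, e.g. (hypothetical project):
    #
    #     working_set.require('mypkg>=1.0')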
def subscribe(self, callback, existing=True):
"""Invoke `callback` for all distributions
        If `existing=True` (default), call on all existing ones, as well.
"""
if callback in self.callbacks:
return
self.callbacks.append(callback)
if not existing:
return
for dist in self:
callback(dist)
def _added_new(self, dist):
for callback in self.callbacks:
callback(dist)
def __getstate__(self):
return (
self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
self.callbacks[:]
)
def __setstate__(self, e_k_b_c):
entries, keys, by_key, callbacks = e_k_b_c
self.entries = entries[:]
self.entry_keys = keys.copy()
self.by_key = by_key.copy()
self.callbacks = callbacks[:]
class _ReqExtras(dict):
"""
Map each requirement to the extras that demanded it.
"""
def markers_pass(self, req, extras=None):
"""
Evaluate markers for req against each extra that
demanded it.
Return False if the req has a marker and fails
evaluation. Otherwise, return True.
"""
extra_evals = (
req.marker.evaluate({'extra': extra})
for extra in self.get(req, ()) + (extras or (None,))
)
return not req.marker or any(extra_evals)
class Environment:
"""Searchable snapshot of distributions on a search path"""
def __init__(
self, search_path=None, platform=get_supported_platform(),
python=PY_MAJOR):
"""Snapshot distributions available on a search path
Any distributions found on `search_path` are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used.
`platform` is an optional string specifying the name of the platform
that platform-specific distributions must be compatible with. If
unspecified, it defaults to the current platform. `python` is an
optional string naming the desired version of Python (e.g. ``'3.6'``);
it defaults to the current version.
You may explicitly set `platform` (and/or `python`) to ``None`` if you
wish to map *all* distributions, not just those compatible with the
running platform or Python version.
"""
self._distmap = {}
self.platform = platform
self.python = python
self.scan(search_path)
def can_add(self, dist):
"""Is distribution `dist` acceptable for this environment?
The distribution must match the platform and python version
requirements specified when this environment was created, or False
is returned.
"""
py_compat = (
self.python is None
or dist.py_version is None
or dist.py_version == self.python
)
return py_compat and compatible_platforms(dist.platform, self.platform)
def remove(self, dist):
"""Remove `dist` from the environment"""
self._distmap[dist.key].remove(dist)
def scan(self, search_path=None):
"""Scan `search_path` for distributions usable in this environment
Any distributions found are added to the environment.
`search_path` should be a sequence of ``sys.path`` items. If not
supplied, ``sys.path`` is used. Only distributions conforming to
the platform/python version defined at initialization are added.
"""
if search_path is None:
search_path = sys.path
for item in search_path:
for dist in find_distributions(item):
self.add(dist)
def __getitem__(self, project_name):
"""Return a newest-to-oldest list of distributions for `project_name`
Uses case-insensitive `project_name` comparison, assuming all the
project's distributions use their project's name converted to all
lowercase as their key.
"""
distribution_key = project_name.lower()
return self._distmap.get(distribution_key, [])
def add(self, dist):
"""Add `dist` if we ``can_add()`` it and it has not already been added
"""
if self.can_add(dist) and dist.has_version():
dists = self._distmap.setdefault(dist.key, [])
if dist not in dists:
dists.append(dist)
dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
def best_match(
self, req, working_set, installer=None, replace_conflicting=False):
"""Find distribution best matching `req` and usable on `working_set`
This calls the ``find(req)`` method of the `working_set` to see if a
suitable distribution is already active. (This may raise
``VersionConflict`` if an unsuitable version of the project is already
active in the specified `working_set`.) If a suitable distribution
isn't active, this method returns the newest distribution in the
environment that meets the ``Requirement`` in `req`. If no suitable
distribution is found, and `installer` is supplied, then the result of
calling the environment's ``obtain(req, installer)`` method will be
returned.
"""
try:
dist = working_set.find(req)
except VersionConflict:
if not replace_conflicting:
raise
dist = None
if dist is not None:
return dist
for dist in self[req.key]:
if dist in req:
return dist
# try to download/install
return self.obtain(req, installer)
def obtain(self, requirement, installer=None):
"""Obtain a distribution matching `requirement` (e.g. via download)
Obtain a distro that matches requirement (e.g. via download). In the
base ``Environment`` class, this routine just returns
``installer(requirement)``, unless `installer` is None, in which case
None is returned instead. This method is a hook that allows subclasses
to attempt other ways of obtaining a distribution before falling back
to the `installer` argument."""
if installer is not None:
return installer(requirement)
def __iter__(self):
"""Yield the unique project names of the available distributions"""
for key in self._distmap.keys():
if self[key]:
yield key
def __iadd__(self, other):
"""In-place addition of a distribution or environment"""
if isinstance(other, Distribution):
self.add(other)
elif isinstance(other, Environment):
for project in other:
for dist in other[project]:
self.add(dist)
else:
raise TypeError("Can't add %r to environment" % (other,))
return self
def __add__(self, other):
"""Add an environment or distribution to an environment"""
new = self.__class__([], platform=None, python=None)
for env in self, other:
new += env
return new
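# Comment-only sketch of Environment: snapshot a search path, then ask for
# the best match for a requirement (path and project name hypothetical):
#
#     env = Environment(['/path/to/plugins'])
#     req = next(iter(parse_requirements('mypkg')))
#     dist = env.best_match(req, working_set)   # may be None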
# XXX backward compatibility
AvailableDistributions = Environment
class ExtractionError(RuntimeError):
"""An error occurred extracting a resource
The following attributes are available from instances of this exception:
manager
The resource manager that raised this exception
cache_path
The base directory for resource extraction
original_error
The exception instance that caused extraction to fail
"""
class ResourceManager:
"""Manage resource extraction and packages"""
extraction_path = None
def __init__(self):
self.cached_files = {}
def resource_exists(self, package_or_requirement, resource_name):
"""Does the named resource exist?"""
return get_provider(package_or_requirement).has_resource(resource_name)
def resource_isdir(self, package_or_requirement, resource_name):
"""Is the named resource an existing directory?"""
return get_provider(package_or_requirement).resource_isdir(
resource_name
)
def resource_filename(self, package_or_requirement, resource_name):
"""Return a true filesystem path for specified resource"""
return get_provider(package_or_requirement).get_resource_filename(
self, resource_name
)
def resource_stream(self, package_or_requirement, resource_name):
"""Return a readable file-like object for specified resource"""
return get_provider(package_or_requirement).get_resource_stream(
self, resource_name
)
def resource_string(self, package_or_requirement, resource_name):
"""Return specified resource as a string"""
return get_provider(package_or_requirement).get_resource_string(
self, resource_name
)
def resource_listdir(self, package_or_requirement, resource_name):
"""List the contents of the named resource directory"""
return get_provider(package_or_requirement).resource_listdir(
resource_name
)
def extraction_error(self):
"""Give an error message for problems extracting file(s)"""
old_exc = sys.exc_info()[1]
cache_path = self.extraction_path or get_default_cache()
tmpl = textwrap.dedent("""
            Can't extract file(s) to egg cache

            The following error occurred while trying to extract file(s)
            to the Python egg cache:

              {old_exc}

            The Python egg cache directory is currently set to:

              {cache_path}

            Perhaps your account does not have write access to this directory?
            You can change the cache directory by setting the PYTHON_EGG_CACHE
            environment variable to point to an accessible directory.
            """).lstrip()
err = ExtractionError(tmpl.format(**locals()))
err.manager = self
err.cache_path = cache_path
err.original_error = old_exc
raise err
def get_cache_path(self, archive_name, names=()):
"""Return absolute location in cache for `archive_name` and `names`
The parent directory of the resulting path will be created if it does
not already exist. `archive_name` should be the base filename of the
enclosing egg (which may not be the name of the enclosing zipfile!),
including its ".egg" extension. `names`, if provided, should be a
sequence of path name parts "under" the egg's extraction location.
This method should only be called by resource providers that need to
obtain an extraction location, and only for names they intend to
extract, as it tracks the generated names for possible cleanup later.
"""
extract_path = self.extraction_path or get_default_cache()
target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
try:
_bypass_ensure_directory(target_path)
except Exception:
self.extraction_error()
self._warn_unsafe_extraction_path(extract_path)
self.cached_files[target_path] = 1
return target_path
@staticmethod
def _warn_unsafe_extraction_path(path):
"""
If the default extraction path is overridden and set to an insecure
location, such as /tmp, it opens up an opportunity for an attacker to
replace an extracted file with an unauthorized payload. Warn the user
if a known insecure location is used.
See Distribute #375 for more details.
"""
if os.name == 'nt' and not path.startswith(os.environ['windir']):
# On Windows, permissions are generally restrictive by default
# and temp directories are not writable by other users, so
# bypass the warning.
return
mode = os.stat(path).st_mode
if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
msg = (
"%s is writable by group/others and vulnerable to attack "
"when "
"used with get_resource_filename. Consider a more secure "
"location (set with .set_extraction_path or the "
"PYTHON_EGG_CACHE environment variable)." % path
)
warnings.warn(msg, UserWarning)
def postprocess(self, tempname, filename):
"""Perform any platform-specific postprocessing of `tempname`
This is where Mac header rewrites should be done; other platforms don't
have anything special they should do.
Resource providers should call this method ONLY after successfully
extracting a compressed resource. They must NOT call it on resources
that are already in the filesystem.
`tempname` is the current (temporary) name of the file, and `filename`
is the name it will be renamed to by the caller after this routine
returns.
"""
if os.name == 'posix':
# Make the resource executable
mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
os.chmod(tempname, mode)
def set_extraction_path(self, path):
"""Set the base path where resources will be extracted to, if needed.
If you do not call this routine before any extractions take place, the
path defaults to the return value of ``get_default_cache()``. (Which
is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
platform-specific fallbacks. See that routine's documentation for more
details.)
Resources are extracted to subdirectories of this path based upon
information given by the ``IResourceProvider``. You may set this to a
temporary directory, but then you must call ``cleanup_resources()`` to
delete the extracted files when done. There is no guarantee that
``cleanup_resources()`` will be able to remove all extracted files.
(Note: you may not change the extraction path for a given resource
manager once resources have been extracted, unless you first call
``cleanup_resources()``.)
"""
if self.cached_files:
raise ValueError(
"Can't change extraction path, files already extracted"
)
self.extraction_path = path
def cleanup_resources(self, force=False):
"""
Delete all extracted resource files and directories, returning a list
of the file and directory names that could not be successfully removed.
This function does not have any concurrency protection, so it should
generally only be called when the extraction path is a temporary
directory exclusive to a single process. This method is not
automatically called; you must call it explicitly or register it as an
``atexit`` function if you wish to ensure cleanup of a temporary
directory used for extractions.
"""
# XXX
def get_default_cache():
"""
Return the ``PYTHON_EGG_CACHE`` environment variable
or a platform-relevant user cache dir for an app
named "Python-Eggs".
"""
return (
os.environ.get('PYTHON_EGG_CACHE')
or appdirs.user_cache_dir(appname='Python-Eggs')
)
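# Comment-only note: the cache location can be redirected per process, e.g.
# running with PYTHON_EGG_CACHE=/tmp/eggs makes get_default_cache() return
# '/tmp/eggs' instead of the appdirs-derived default.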
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""
Convert an arbitrary string to a standard version string
"""
try:
# normalize the version
return str(packaging.version.Version(version))
except packaging.version.InvalidVersion:
version = version.replace(' ', '.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def safe_extra(extra):
"""Convert an arbitrary string to a standard 'extra' name
Any runs of non-alphanumeric characters are replaced with a single '_',
and the result is always lowercased.
"""
return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-', '_')
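# Comment-only examples of the normalization helpers above:
#
#     >>> safe_name('my.pkg!!name')
#     'my.pkg-name'
#     >>> safe_version('peanut butter')   # not PEP 440; falls back
#     'peanut.butter'
#     >>> to_filename('my-pkg')
#     'my_pkg'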
def invalid_marker(text):
"""
Validate text as a PEP 508 environment marker; return an exception
if invalid or False otherwise.
"""
try:
evaluate_marker(text)
except SyntaxError as e:
e.filename = None
e.lineno = None
return e
return False
def evaluate_marker(text, extra=None):
"""
Evaluate a PEP 508 environment marker.
Return a boolean indicating the marker result in this environment.
Raise SyntaxError if marker is invalid.
This implementation uses the 'pyparsing' module.
"""
try:
marker = packaging.markers.Marker(text)
return marker.evaluate()
except packaging.markers.InvalidMarker as e:
raise SyntaxError(e)
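# Comment-only example: evaluate_marker() returns a bool for the running
# interpreter, while invalid_marker() returns the SyntaxError (or False):
#
#     >>> evaluate_marker('python_version >= "2.7"')
#     True
#     >>> invalid_marker('os_name == "nt"')
#     False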
class NullProvider:
"""Try to implement resources and metadata for arbitrary PEP 302 loaders"""
egg_name = None
egg_info = None
loader = None
def __init__(self, module):
self.loader = getattr(module, '__loader__', None)
self.module_path = os.path.dirname(getattr(module, '__file__', ''))
def get_resource_filename(self, manager, resource_name):
return self._fn(self.module_path, resource_name)
def get_resource_stream(self, manager, resource_name):
return io.BytesIO(self.get_resource_string(manager, resource_name))
def get_resource_string(self, manager, resource_name):
return self._get(self._fn(self.module_path, resource_name))
def has_resource(self, resource_name):
return self._has(self._fn(self.module_path, resource_name))
def _get_metadata_path(self, name):
return self._fn(self.egg_info, name)
def has_metadata(self, name):
if not self.egg_info:
return self.egg_info
path = self._get_metadata_path(name)
return self._has(path)
def get_metadata(self, name):
if not self.egg_info:
return ""
path = self._get_metadata_path(name)
value = self._get(path)
if six.PY2:
return value
try:
return value.decode('utf-8')
except UnicodeDecodeError as exc:
# Include the path in the error message to simplify
# troubleshooting, and without changing the exception type.
exc.reason += ' in {} file at path: {}'.format(name, path)
raise
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
def resource_isdir(self, resource_name):
return self._isdir(self._fn(self.module_path, resource_name))
def metadata_isdir(self, name):
return self.egg_info and self._isdir(self._fn(self.egg_info, name))
def resource_listdir(self, resource_name):
return self._listdir(self._fn(self.module_path, resource_name))
def metadata_listdir(self, name):
if self.egg_info:
return self._listdir(self._fn(self.egg_info, name))
return []
def run_script(self, script_name, namespace):
script = 'scripts/' + script_name
if not self.has_metadata(script):
raise ResolutionError(
"Script {script!r} not found in metadata at {self.egg_info!r}"
.format(**locals()),
)
script_text = self.get_metadata(script).replace('\r\n', '\n')
script_text = script_text.replace('\r', '\n')
script_filename = self._fn(self.egg_info, script)
namespace['__file__'] = script_filename
if os.path.exists(script_filename):
source = open(script_filename).read()
code = compile(source, script_filename, 'exec')
exec(code, namespace, namespace)
else:
from linecache import cache
cache[script_filename] = (
len(script_text), 0, script_text.split('\n'), script_filename
)
script_code = compile(script_text, script_filename, 'exec')
exec(script_code, namespace, namespace)
def _has(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _isdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _listdir(self, path):
raise NotImplementedError(
"Can't perform this operation for unregistered loader type"
)
def _fn(self, base, resource_name):
self._validate_resource_path(resource_name)
if resource_name:
return os.path.join(base, *resource_name.split('/'))
return base
@staticmethod
def _validate_resource_path(path):
"""
Validate the resource paths according to the docs.
https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
>>> warned = getfixture('recwarn')
>>> warnings.simplefilter('always')
>>> vrp = NullProvider._validate_resource_path
>>> vrp('foo/bar.txt')
>>> bool(warned)
False
>>> vrp('../foo/bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('/foo/bar.txt')
>>> bool(warned)
True
>>> vrp('foo/../../bar.txt')
>>> bool(warned)
True
>>> warned.clear()
>>> vrp('foo/f../bar.txt')
>>> bool(warned)
False
Windows path separators are straight-up disallowed.
>>> vrp(r'\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
>>> vrp(r'C:\\foo/bar.txt')
Traceback (most recent call last):
...
ValueError: Use of .. or absolute path in a resource path \
is not allowed.
Blank values are allowed
>>> vrp('')
>>> bool(warned)
False
Non-string values are not.
>>> vrp(None)
Traceback (most recent call last):
...
AttributeError: ...
"""
invalid = (
os.path.pardir in path.split(posixpath.sep) or
posixpath.isabs(path) or
ntpath.isabs(path)
)
if not invalid:
return
msg = "Use of .. or absolute path in a resource path is not allowed."
# Aggressively disallow Windows absolute paths
if ntpath.isabs(path) and not posixpath.isabs(path):
raise ValueError(msg)
# for compatibility, warn; in future
# raise ValueError(msg)
warnings.warn(
msg[:-1] + " and will raise exceptions in a future release.",
DeprecationWarning,
stacklevel=4,
)
def _get(self, path):
if hasattr(self.loader, 'get_data'):
return self.loader.get_data(path)
raise NotImplementedError(
"Can't perform this operation for loaders without 'get_data()'"
)
register_loader_type(object, NullProvider)
class EggProvider(NullProvider):
"""Provider based on a virtual filesystem"""
def __init__(self, module):
NullProvider.__init__(self, module)
self._setup_prefix()
def _setup_prefix(self):
# we assume here that our metadata may be nested inside a "basket"
# of multiple eggs; that's why we use module_path instead of .archive
path = self.module_path
old = None
while path != old:
if _is_egg_path(path):
self.egg_name = os.path.basename(path)
self.egg_info = os.path.join(path, 'EGG-INFO')
self.egg_root = path
break
old = path
path, base = os.path.split(path)
class DefaultProvider(EggProvider):
"""Provides access to package resources in the filesystem"""
def _has(self, path):
return os.path.exists(path)
def _isdir(self, path):
return os.path.isdir(path)
def _listdir(self, path):
return os.listdir(path)
def get_resource_stream(self, manager, resource_name):
return open(self._fn(self.module_path, resource_name), 'rb')
def _get(self, path):
with open(path, 'rb') as stream:
return stream.read()
@classmethod
def _register(cls):
loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
for name in loader_names:
loader_cls = getattr(importlib_machinery, name, type(None))
register_loader_type(loader_cls, cls)
DefaultProvider._register()
class EmptyProvider(NullProvider):
"""Provider that returns nothing for all requests"""
module_path = None
_isdir = _has = lambda self, path: False
def _get(self, path):
return ''
def _listdir(self, path):
return []
def __init__(self):
pass
empty_provider = EmptyProvider()
class ZipManifests(dict):
"""
zip manifest builder
"""
@classmethod
def build(cls, path):
"""
Build a dictionary similar to the zipimport directory
caches, except instead of tuples, store ZipInfo objects.
Use a platform-specific path separator (os.sep) for the path keys
for compatibility with pypy on Windows.
"""
with zipfile.ZipFile(path) as zfile:
items = (
(
name.replace('/', os.sep),
zfile.getinfo(name),
)
for name in zfile.namelist()
)
return dict(items)
load = build
class MemoizedZipManifests(ZipManifests):
"""
Memoized zipfile manifests.
"""
manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
def load(self, path):
"""
Load a manifest at path or return a suitable manifest already loaded.
"""
path = os.path.normpath(path)
mtime = os.stat(path).st_mtime
if path not in self or self[path].mtime != mtime:
manifest = self.build(path)
self[path] = self.manifest_mod(manifest, mtime)
return self[path].manifest
class ZipProvider(EggProvider):
"""Resource support for zips and eggs"""
eagers = None
_zip_manifests = MemoizedZipManifests()
def __init__(self, module):
EggProvider.__init__(self, module)
self.zip_pre = self.loader.archive + os.sep
def _zipinfo_name(self, fspath):
# Convert a virtual filename (full path to file) into a zipfile subpath
# usable with the zipimport directory cache for our target archive
fspath = fspath.rstrip(os.sep)
if fspath == self.loader.archive:
return ''
if fspath.startswith(self.zip_pre):
return fspath[len(self.zip_pre):]
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.zip_pre)
)
def _parts(self, zip_path):
# Convert a zipfile subpath into an egg-relative path part list.
# pseudo-fs path
fspath = self.zip_pre + zip_path
if fspath.startswith(self.egg_root + os.sep):
return fspath[len(self.egg_root) + 1:].split(os.sep)
raise AssertionError(
"%s is not a subpath of %s" % (fspath, self.egg_root)
)
@property
def zipinfo(self):
return self._zip_manifests.load(self.loader.archive)
def get_resource_filename(self, manager, resource_name):
if not self.egg_name:
raise NotImplementedError(
"resource_filename() only supported for .egg, not .zip"
)
# no need to lock for extraction, since we use temp names
zip_path = self._resource_to_zip(resource_name)
eagers = self._get_eager_resources()
if '/'.join(self._parts(zip_path)) in eagers:
for name in eagers:
self._extract_resource(manager, self._eager_to_zip(name))
return self._extract_resource(manager, zip_path)
@staticmethod
def _get_date_and_size(zip_stat):
size = zip_stat.file_size
# ymdhms+wday, yday, dst
date_time = zip_stat.date_time + (0, 0, -1)
# 1980 offset already done
timestamp = time.mktime(date_time)
return timestamp, size
def _extract_resource(self, manager, zip_path):
if zip_path in self._index():
for name in self._index()[zip_path]:
last = self._extract_resource(
manager, os.path.join(zip_path, name)
)
# return the extracted directory name
return os.path.dirname(last)
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not WRITE_SUPPORT:
raise IOError('"os.rename" and "os.unlink" are not supported '
'on this platform')
try:
real_path = manager.get_cache_path(
self.egg_name, self._parts(zip_path)
)
if self._is_current(real_path, zip_path):
return real_path
outf, tmpnam = _mkstemp(
".$extract",
dir=os.path.dirname(real_path),
)
os.write(outf, self.loader.get_data(zip_path))
os.close(outf)
utime(tmpnam, (timestamp, timestamp))
manager.postprocess(tmpnam, real_path)
try:
rename(tmpnam, real_path)
except os.error:
if os.path.isfile(real_path):
if self._is_current(real_path, zip_path):
# the file became current since it was checked above,
# so proceed.
return real_path
# Windows, del old file and retry
elif os.name == 'nt':
unlink(real_path)
rename(tmpnam, real_path)
return real_path
raise
except os.error:
# report a user-friendly error
manager.extraction_error()
return real_path
def _is_current(self, file_path, zip_path):
"""
Return True if the file_path is current for this zip_path
"""
timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
if not os.path.isfile(file_path):
return False
stat = os.stat(file_path)
if stat.st_size != size or stat.st_mtime != timestamp:
return False
# check that the contents match
zip_contents = self.loader.get_data(zip_path)
with open(file_path, 'rb') as f:
file_contents = f.read()
return zip_contents == file_contents
def _get_eager_resources(self):
if self.eagers is None:
eagers = []
for name in ('native_libs.txt', 'eager_resources.txt'):
if self.has_metadata(name):
eagers.extend(self.get_metadata_lines(name))
self.eagers = eagers
return self.eagers
def _index(self):
try:
return self._dirindex
except AttributeError:
ind = {}
for path in self.zipinfo:
parts = path.split(os.sep)
while parts:
parent = os.sep.join(parts[:-1])
if parent in ind:
ind[parent].append(parts[-1])
break
else:
ind[parent] = [parts.pop()]
self._dirindex = ind
return ind
def _has(self, fspath):
zip_path = self._zipinfo_name(fspath)
return zip_path in self.zipinfo or zip_path in self._index()
def _isdir(self, fspath):
return self._zipinfo_name(fspath) in self._index()
def _listdir(self, fspath):
return list(self._index().get(self._zipinfo_name(fspath), ()))
def _eager_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.egg_root, resource_name))
def _resource_to_zip(self, resource_name):
return self._zipinfo_name(self._fn(self.module_path, resource_name))
register_loader_type(zipimport.zipimporter, ZipProvider)
class FileMetadata(EmptyProvider):
"""Metadata handler for standalone PKG-INFO files
Usage::
metadata = FileMetadata("/path/to/PKG-INFO")
This provider rejects all data and metadata requests except for PKG-INFO,
which is treated as existing, and will be the contents of the file at
the provided location.
"""
def __init__(self, path):
self.path = path
def _get_metadata_path(self, name):
return self.path
def has_metadata(self, name):
return name == 'PKG-INFO' and os.path.isfile(self.path)
def get_metadata(self, name):
if name != 'PKG-INFO':
raise KeyError("No metadata except PKG-INFO is available")
with io.open(self.path, encoding='utf-8', errors="replace") as f:
metadata = f.read()
self._warn_on_replacement(metadata)
return metadata
def _warn_on_replacement(self, metadata):
# Python 2.7 compat for: replacement_char = '�'
replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
if replacement_char in metadata:
tmpl = "{self.path} could not be properly decoded in UTF-8"
msg = tmpl.format(**locals())
warnings.warn(msg)
def get_metadata_lines(self, name):
return yield_lines(self.get_metadata(name))
class PathMetadata(DefaultProvider):
"""Metadata provider for egg directories
Usage::
# Development eggs:
egg_info = "/path/to/PackageName.egg-info"
base_dir = os.path.dirname(egg_info)
metadata = PathMetadata(base_dir, egg_info)
dist_name = os.path.splitext(os.path.basename(egg_info))[0]
dist = Distribution(basedir, project_name=dist_name, metadata=metadata)
# Unpacked egg directories:
egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
dist = Distribution.from_filename(egg_path, metadata=metadata)
"""
def __init__(self, path, egg_info):
self.module_path = path
self.egg_info = egg_info
class EggMetadata(ZipProvider):
"""Metadata provider for .egg files"""
def __init__(self, importer):
"""Create a metadata provider from a zipimporter"""
self.zip_pre = importer.archive + os.sep
self.loader = importer
if importer.prefix:
self.module_path = os.path.join(importer.archive, importer.prefix)
else:
self.module_path = importer.archive
self._setup_prefix()
_declare_state('dict', _distribution_finders={})
def register_finder(importer_type, distribution_finder):
"""Register `distribution_finder` to find distributions in sys.path items
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `distribution_finder` is a callable that, passed a path
item and the importer instance, yields ``Distribution`` instances found on
that path item. See ``pkg_resources.find_on_path`` for an example."""
_distribution_finders[importer_type] = distribution_finder
def find_distributions(path_item, only=False):
"""Yield distributions accessible via `path_item`"""
importer = get_importer(path_item)
finder = _find_adapter(_distribution_finders, importer)
return finder(importer, path_item, only)
def find_eggs_in_zip(importer, path_item, only=False):
"""
Find eggs in zip files; possibly multiple nested eggs.
"""
if importer.archive.endswith('.whl'):
# wheels are not supported with this finder
# they don't have PKG-INFO metadata, and won't ever contain eggs
return
metadata = EggMetadata(importer)
if metadata.has_metadata('PKG-INFO'):
yield Distribution.from_filename(path_item, metadata=metadata)
if only:
# don't yield nested distros
return
for subitem in metadata.resource_listdir(''):
if _is_egg_path(subitem):
subpath = os.path.join(path_item, subitem)
dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
for dist in dists:
yield dist
elif subitem.lower().endswith('.dist-info'):
subpath = os.path.join(path_item, subitem)
submeta = EggMetadata(zipimport.zipimporter(subpath))
submeta.egg_info = subpath
yield Distribution.from_location(path_item, subitem, submeta)
register_finder(zipimport.zipimporter, find_eggs_in_zip)
def find_nothing(importer, path_item, only=False):
return ()
register_finder(object, find_nothing)
def _by_version_descending(names):
"""
Given a list of filenames, return them in descending order
by version number.
>>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
>>> _by_version_descending(names)
['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
>>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
>>> _by_version_descending(names)
['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
"""
def _by_version(name):
"""
Parse each component of the filename
"""
name, ext = os.path.splitext(name)
parts = itertools.chain(name.split('-'), [ext])
return [packaging.version.parse(part) for part in parts]
return sorted(names, key=_by_version, reverse=True)
def find_on_path(importer, path_item, only=False):
"""Yield distributions accessible on a sys.path directory"""
path_item = _normalize_cached(path_item)
if _is_unpacked_egg(path_item):
yield Distribution.from_filename(
path_item, metadata=PathMetadata(
path_item, os.path.join(path_item, 'EGG-INFO')
)
)
return
entries = safe_listdir(path_item)
# for performance, before sorting by version,
# screen entries for only those that will yield
# distributions
filtered = (
entry
for entry in entries
if dist_factory(path_item, entry, only)
)
# scan for .egg and .egg-info in directory
path_item_entries = _by_version_descending(filtered)
for entry in path_item_entries:
fullpath = os.path.join(path_item, entry)
factory = dist_factory(path_item, entry, only)
for dist in factory(fullpath):
yield dist
def dist_factory(path_item, entry, only):
"""
Return a dist_factory for a path_item and entry
"""
lower = entry.lower()
is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
return (
distributions_from_metadata
if is_meta else
find_distributions
if not only and _is_egg_path(entry) else
resolve_egg_link
if not only and lower.endswith('.egg-link') else
NoDists()
)
class NoDists:
"""
>>> bool(NoDists())
False
>>> list(NoDists()('anything'))
[]
"""
def __bool__(self):
return False
if six.PY2:
__nonzero__ = __bool__
def __call__(self, fullpath):
return iter(())
def safe_listdir(path):
"""
Attempt to list contents of path, but suppress some exceptions.
"""
try:
return os.listdir(path)
except (PermissionError, NotADirectoryError):
pass
except OSError as e:
# Ignore the directory if does not exist, not a directory or
# permission denied
ignorable = (
e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
# Python 2 on Windows needs to be handled this way :(
or getattr(e, "winerror", None) == 267
)
if not ignorable:
raise
return ()
def distributions_from_metadata(path):
root = os.path.dirname(path)
if os.path.isdir(path):
if len(os.listdir(path)) == 0:
# empty metadata dir; skip
return
metadata = PathMetadata(root, path)
else:
metadata = FileMetadata(path)
entry = os.path.basename(path)
yield Distribution.from_location(
root, entry, metadata, precedence=DEVELOP_DIST,
)
def non_empty_lines(path):
"""
Yield non-empty lines from file at path
"""
with open(path) as f:
for line in f:
line = line.strip()
if line:
yield line
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref)
for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ())
register_finder(pkgutil.ImpImporter, find_on_path)
if hasattr(importlib_machinery, 'FileFinder'):
register_finder(importlib_machinery.FileFinder, find_on_path)
_declare_state('dict', _namespace_handlers={})
_declare_state('dict', _namespace_packages={})
def register_namespace_handler(importer_type, namespace_handler):
"""Register `namespace_handler` to declare namespace packages
`importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
handler), and `namespace_handler` is a callable like this::
def namespace_handler(importer, path_entry, moduleName, module):
# return a path_entry to use for child packages
Namespace handlers are only called if the importer object has already
agreed that it can handle the relevant path item, and they should only
return a subpath if the module __path__ does not already contain an
equivalent subpath. For an example namespace handler, see
``pkg_resources.file_ns_handler``.
"""
_namespace_handlers[importer_type] = namespace_handler
def _handle_ns(packageName, path_item):
"""Ensure that named package includes a subpath of path_item (if needed)"""
importer = get_importer(path_item)
if importer is None:
return None
# capture warnings due to #1111
with warnings.catch_warnings():
warnings.simplefilter("ignore")
loader = importer.find_module(packageName)
if loader is None:
return None
module = sys.modules.get(packageName)
if module is None:
module = sys.modules[packageName] = types.ModuleType(packageName)
module.__path__ = []
_set_parent_ns(packageName)
elif not hasattr(module, '__path__'):
raise TypeError("Not a package:", packageName)
handler = _find_adapter(_namespace_handlers, importer)
subpath = handler(importer, path_item, packageName, module)
if subpath is not None:
path = module.__path__
path.append(subpath)
loader.load_module(packageName)
_rebuild_mod_path(path, packageName, module)
return subpath
def _rebuild_mod_path(orig_path, package_name, module):
"""
Rebuild module.__path__ ensuring that all entries are ordered
corresponding to their sys.path order
"""
sys_path = [_normalize_cached(p) for p in sys.path]
def safe_sys_path_index(entry):
"""
Workaround for #520 and #513.
"""
try:
return sys_path.index(entry)
except ValueError:
return float('inf')
def position_in_sys_path(path):
"""
Return the ordinal of the path based on its position in sys.path
"""
path_parts = path.split(os.sep)
module_parts = package_name.count('.') + 1
parts = path_parts[:-module_parts]
return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
new_path = sorted(orig_path, key=position_in_sys_path)
new_path = [_normalize_cached(p) for p in new_path]
if isinstance(module.__path__, list):
module.__path__[:] = new_path
else:
module.__path__ = new_path
def declare_namespace(packageName):
"""Declare that package 'packageName' is a namespace package"""
_imp.acquire_lock()
try:
if packageName in _namespace_packages:
return
path = sys.path
parent, _, _ = packageName.rpartition('.')
if parent:
declare_namespace(parent)
if parent not in _namespace_packages:
__import__(parent)
try:
path = sys.modules[parent].__path__
except AttributeError:
raise TypeError("Not a package:", parent)
# Track what packages are namespaces, so when new path items are added,
# they can be updated
_namespace_packages.setdefault(parent or None, []).append(packageName)
_namespace_packages.setdefault(packageName, [])
for path_item in path:
# Ensure all the parent's path items are reflected in the child,
# if they apply
_handle_ns(packageName, path_item)
finally:
_imp.release_lock()
def fixup_namespace_packages(path_item, parent=None):
"""Ensure that previously-declared namespace packages include path_item"""
_imp.acquire_lock()
try:
for package in _namespace_packages.get(parent, ()):
subpath = _handle_ns(package, path_item)
if subpath:
fixup_namespace_packages(subpath, package)
finally:
_imp.release_lock()
def file_ns_handler(importer, path_item, packageName, module):
"""Compute an ns-package subpath for a filesystem or zipfile importer"""
subpath = os.path.join(path_item, packageName.split('.')[-1])
normalized = _normalize_cached(subpath)
for item in module.__path__:
if _normalize_cached(item) == normalized:
break
else:
# Only return the path if it's not already there
return subpath
register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
register_namespace_handler(zipimport.zipimporter, file_ns_handler)
if hasattr(importlib_machinery, 'FileFinder'):
register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
def null_ns_handler(importer, path_item, packageName, module):
return None
register_namespace_handler(object, null_ns_handler)
def normalize_path(filename):
"""Normalize a file/dir name for comparison purposes"""
return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
def _cygwin_patch(filename): # pragma: nocover
"""
    Contrary to POSIX 2008, on Cygwin, getcwd(3) contains
    symlink components. Using os.path.abspath() works around
    this limitation. A fix in os.getcwd() would probably be
    better, in Cygwin even more so, except that this seems
    to be by design...
"""
return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
def _normalize_cached(filename, _cache={}):
try:
return _cache[filename]
except KeyError:
_cache[filename] = result = normalize_path(filename)
return result
def _is_egg_path(path):
"""
Determine if given path appears to be an egg.
"""
return path.lower().endswith('.egg')
def _is_unpacked_egg(path):
"""
Determine if given path appears to be an unpacked egg.
"""
return (
_is_egg_path(path) and
os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
)
def _set_parent_ns(packageName):
parts = packageName.split('.')
name = parts.pop()
if parts:
parent = '.'.join(parts)
setattr(sys.modules[parent], name, sys.modules[packageName])
def yield_lines(strs):
"""Yield non-empty/non-comment lines of a string or sequence"""
if isinstance(strs, six.string_types):
for s in strs.splitlines():
s = s.strip()
# skip blank lines/comments
if s and not s.startswith('#'):
yield s
else:
for ss in strs:
for s in yield_lines(ss):
yield s
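# Hedged usage sketch for yield_lines (inputs are illustrative):
#
#   list(yield_lines("a\n# comment\n\nb"))  # -> ['a', 'b']
#   list(yield_lines(["a\nb", "c"]))        # -> ['a', 'b', 'c']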
MODULE = re.compile(r"\w+(\.\w+)*$").match
EGG_NAME = re.compile(
r"""
(?P<name>[^-]+) (
-(?P<ver>[^-]+) (
-py(?P<pyver>[^-]+) (
-(?P<plat>.+)
)?
)?
)?
""",
re.VERBOSE | re.IGNORECASE,
).match
class EntryPoint:
"""Object representing an advertised importable object"""
def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
if not MODULE(module_name):
raise ValueError("Invalid module name", module_name)
self.name = name
self.module_name = module_name
self.attrs = tuple(attrs)
self.extras = tuple(extras)
self.dist = dist
def __str__(self):
s = "%s = %s" % (self.name, self.module_name)
if self.attrs:
s += ':' + '.'.join(self.attrs)
if self.extras:
s += ' [%s]' % ','.join(self.extras)
return s
def __repr__(self):
return "EntryPoint.parse(%r)" % str(self)
def load(self, require=True, *args, **kwargs):
"""
Require packages for this EntryPoint, then resolve it.
"""
if not require or args or kwargs:
warnings.warn(
"Parameters to load are deprecated. Call .resolve and "
".require separately.",
PkgResourcesDeprecationWarning,
stacklevel=2,
)
if require:
self.require(*args, **kwargs)
return self.resolve()
def resolve(self):
"""
Resolve the entry point from its module and attrs.
"""
module = __import__(self.module_name, fromlist=['__name__'], level=0)
try:
return functools.reduce(getattr, self.attrs, module)
except AttributeError as exc:
raise ImportError(str(exc))
def require(self, env=None, installer=None):
if self.extras and not self.dist:
raise UnknownExtra("Can't require() without a distribution", self)
# Get the requirements for this entry point with all its extras and
# then resolve them. We have to pass `extras` along when resolving so
# that the working set knows what extras we want. Otherwise, for
# dist-info distributions, the working set will assume that the
# requirements for that extra are purely optional and skip over them.
reqs = self.dist.requires(self.extras)
items = working_set.resolve(reqs, env, installer, extras=self.extras)
list(map(working_set.add, items))
pattern = re.compile(
r'\s*'
r'(?P<name>.+?)\s*'
r'=\s*'
r'(?P<module>[\w.]+)\s*'
r'(:\s*(?P<attr>[\w.]+))?\s*'
r'(?P<extras>\[.*\])?\s*$'
)
@classmethod
def parse(cls, src, dist=None):
"""Parse a single entry point from string `src`
Entry point syntax follows the form::
name = some.module:some.attr [extra1, extra2]
The entry name and module name are required, but the ``:attrs`` and
``[extras]`` parts are optional
"""
m = cls.pattern.match(src)
if not m:
msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
raise ValueError(msg, src)
res = m.groupdict()
extras = cls._parse_extras(res['extras'])
attrs = res['attr'].split('.') if res['attr'] else ()
return cls(res['name'], res['module'], attrs, extras, dist)
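    # Hedged usage sketch; 'mypkg' and its attributes are hypothetical:
    #
    #   ep = EntryPoint.parse('console = mypkg.cli:main [extra1]')
    #   str(ep)    # -> 'console = mypkg.cli:main [extra1]'
    #   ep.attrs   # -> ('main',)
    #   ep.extras  # -> ('extra1',)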
@classmethod
def _parse_extras(cls, extras_spec):
if not extras_spec:
return ()
req = Requirement.parse('x' + extras_spec)
if req.specs:
raise ValueError()
return req.extras
@classmethod
def parse_group(cls, group, lines, dist=None):
"""Parse an entry point group"""
if not MODULE(group):
raise ValueError("Invalid group name", group)
this = {}
for line in yield_lines(lines):
ep = cls.parse(line, dist)
if ep.name in this:
raise ValueError("Duplicate entry point", group, ep.name)
this[ep.name] = ep
return this
@classmethod
def parse_map(cls, data, dist=None):
"""Parse a map of entry point groups"""
if isinstance(data, dict):
data = data.items()
else:
data = split_sections(data)
maps = {}
for group, lines in data:
if group is None:
if not lines:
continue
raise ValueError("Entry points must be listed in groups")
group = group.strip()
if group in maps:
raise ValueError("Duplicate group name", group)
maps[group] = cls.parse_group(group, lines, dist)
return maps
def _remove_md5_fragment(location):
if not location:
return ''
parsed = urllib.parse.urlparse(location)
if parsed[-1].startswith('md5='):
return urllib.parse.urlunparse(parsed[:-1] + ('',))
return location
def _version_from_file(lines):
"""
Given an iterable of lines from a Metadata file, return
the value of the Version field, if present, or None otherwise.
"""
def is_version_line(line):
return line.lower().startswith('version:')
version_lines = filter(is_version_line, lines)
line = next(iter(version_lines), '')
_, _, value = line.partition(':')
return safe_version(value.strip()) or None
class Distribution:
"""Wrap an actual or potential sys.path entry w/metadata"""
PKG_INFO = 'PKG-INFO'
def __init__(
self, location=None, metadata=None, project_name=None,
version=None, py_version=PY_MAJOR, platform=None,
precedence=EGG_DIST):
self.project_name = safe_name(project_name or 'Unknown')
if version is not None:
self._version = safe_version(version)
self.py_version = py_version
self.platform = platform
self.location = location
self.precedence = precedence
self._provider = metadata or empty_provider
@classmethod
def from_location(cls, location, basename, metadata=None, **kw):
project_name, version, py_version, platform = [None] * 4
basename, ext = os.path.splitext(basename)
if ext.lower() in _distributionImpl:
cls = _distributionImpl[ext.lower()]
match = EGG_NAME(basename)
if match:
project_name, version, py_version, platform = match.group(
'name', 'ver', 'pyver', 'plat'
)
return cls(
location, metadata, project_name=project_name, version=version,
py_version=py_version, platform=platform, **kw
)._reload_version()
def _reload_version(self):
return self
@property
def hashcmp(self):
return (
self.parsed_version,
self.precedence,
self.key,
_remove_md5_fragment(self.location),
self.py_version or '',
self.platform or '',
)
def __hash__(self):
return hash(self.hashcmp)
def __lt__(self, other):
return self.hashcmp < other.hashcmp
def __le__(self, other):
return self.hashcmp <= other.hashcmp
def __gt__(self, other):
return self.hashcmp > other.hashcmp
def __ge__(self, other):
return self.hashcmp >= other.hashcmp
def __eq__(self, other):
if not isinstance(other, self.__class__):
# It's not a Distribution, so they are not equal
return False
return self.hashcmp == other.hashcmp
def __ne__(self, other):
return not self == other
# These properties have to be lazy so that we don't have to load any
# metadata until/unless it's actually needed. (i.e., some distributions
# may not know their name or version without loading PKG-INFO)
@property
def key(self):
try:
return self._key
except AttributeError:
self._key = key = self.project_name.lower()
return key
@property
def parsed_version(self):
if not hasattr(self, "_parsed_version"):
self._parsed_version = parse_version(self.version)
return self._parsed_version
def _warn_legacy_version(self):
LV = packaging.version.LegacyVersion
is_legacy = isinstance(self._parsed_version, LV)
if not is_legacy:
return
# While an empty version is technically a legacy version and
# is not a valid PEP 440 version, it's also unlikely to
# actually come from someone and instead it is more likely that
# it comes from setuptools attempting to parse a filename and
# including it in the list. So for that we'll gate this warning
# on if the version is anything at all or not.
if not self.version:
return
tmpl = textwrap.dedent("""
'{project_name} ({version})' is being parsed as a legacy,
non PEP 440,
version. You may find odd behavior and sort order.
In particular it will be sorted as less than 0.0. It
is recommended to migrate to PEP 440 compatible
versions.
""").strip().replace('\n', ' ')
warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
@property
def version(self):
try:
return self._version
except AttributeError:
version = self._get_version()
if version is None:
path = self._get_metadata_path_for_display(self.PKG_INFO)
msg = (
"Missing 'Version:' header and/or {} file at path: {}"
).format(self.PKG_INFO, path)
raise ValueError(msg, self)
return version
@property
def _dep_map(self):
"""
A map of extra to its list of (direct) requirements
for this distribution, including the null extra.
"""
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._filter_extras(self._build_dep_map())
return self.__dep_map
@staticmethod
def _filter_extras(dm):
"""
Given a mapping of extras to dependencies, strip off
environment markers and filter out any dependencies
not matching the markers.
"""
for extra in list(filter(None, dm)):
new_extra = extra
reqs = dm.pop(extra)
new_extra, _, marker = extra.partition(':')
fails_marker = marker and (
invalid_marker(marker)
or not evaluate_marker(marker)
)
if fails_marker:
reqs = []
new_extra = safe_extra(new_extra) or None
dm.setdefault(new_extra, []).extend(reqs)
return dm
def _build_dep_map(self):
dm = {}
for name in 'requires.txt', 'depends.txt':
for extra, reqs in split_sections(self._get_metadata(name)):
dm.setdefault(extra, []).extend(parse_requirements(reqs))
return dm
def requires(self, extras=()):
"""List of Requirements needed for this distro if `extras` are used"""
dm = self._dep_map
deps = []
deps.extend(dm.get(None, ()))
for ext in extras:
try:
deps.extend(dm[safe_extra(ext)])
except KeyError:
raise UnknownExtra(
"%s has no such extra feature %r" % (self, ext)
)
return deps
def _get_metadata_path_for_display(self, name):
"""
Return the path to the given metadata file, if available.
"""
try:
# We need to access _get_metadata_path() on the provider object
# directly rather than through this class's __getattr__()
# since _get_metadata_path() is marked private.
path = self._provider._get_metadata_path(name)
# Handle exceptions e.g. in case the distribution's metadata
# provider doesn't support _get_metadata_path().
except Exception:
return '[could not detect]'
return path
def _get_metadata(self, name):
if self.has_metadata(name):
for line in self.get_metadata_lines(name):
yield line
def _get_version(self):
lines = self._get_metadata(self.PKG_INFO)
version = _version_from_file(lines)
return version
def activate(self, path=None, replace=False):
"""Ensure distribution is importable on `path` (default=sys.path)"""
if path is None:
path = sys.path
self.insert_on(path, replace=replace)
if path is sys.path:
fixup_namespace_packages(self.location)
for pkg in self._get_metadata('namespace_packages.txt'):
if pkg in sys.modules:
declare_namespace(pkg)
def egg_name(self):
"""Return what this distribution's standard .egg filename should be"""
filename = "%s-%s-py%s" % (
to_filename(self.project_name), to_filename(self.version),
self.py_version or PY_MAJOR
)
if self.platform:
filename += '-' + self.platform
return filename
def __repr__(self):
if self.location:
return "%s (%s)" % (self, self.location)
else:
return str(self)
def __str__(self):
try:
version = getattr(self, 'version', None)
except ValueError:
version = None
version = version or "[unknown version]"
return "%s %s" % (self.project_name, version)
def __getattr__(self, attr):
"""Delegate all unrecognized public attributes to .metadata provider"""
if attr.startswith('_'):
raise AttributeError(attr)
return getattr(self._provider, attr)
def __dir__(self):
return list(
set(super(Distribution, self).__dir__())
| set(
attr for attr in self._provider.__dir__()
if not attr.startswith('_')
)
)
if not hasattr(object, '__dir__'):
# python 2.7 not supported
del __dir__
@classmethod
def from_filename(cls, filename, metadata=None, **kw):
return cls.from_location(
_normalize_cached(filename), os.path.basename(filename), metadata,
**kw
)
def as_requirement(self):
"""Return a ``Requirement`` that matches this distribution exactly"""
if isinstance(self.parsed_version, packaging.version.Version):
spec = "%s==%s" % (self.project_name, self.parsed_version)
else:
spec = "%s===%s" % (self.project_name, self.parsed_version)
return Requirement.parse(spec)
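    # Hedged sketch; the project name and version are hypothetical:
    #
    #   Distribution(project_name='Foo', version='1.0').as_requirement()
    #   # -> Requirement.parse('Foo==1.0')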
def load_entry_point(self, group, name):
"""Return the `name` entry point of `group` or raise ImportError"""
ep = self.get_entry_info(group, name)
if ep is None:
raise ImportError("Entry point %r not found" % ((group, name),))
return ep.load()
def get_entry_map(self, group=None):
"""Return the entry point map for `group`, or the full entry map"""
try:
ep_map = self._ep_map
except AttributeError:
ep_map = self._ep_map = EntryPoint.parse_map(
self._get_metadata('entry_points.txt'), self
)
if group is not None:
return ep_map.get(group, {})
return ep_map
def get_entry_info(self, group, name):
"""Return the EntryPoint object for `group`+`name`, or ``None``"""
return self.get_entry_map(group).get(name)
def insert_on(self, path, loc=None, replace=False):
"""Ensure self.location is on path
If replace=False (default):
- If location is already in path anywhere, do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent.
- Else: add to the end of path.
If replace=True:
- If location is already on path anywhere (not eggs)
or higher priority than its parent (eggs)
do nothing.
- Else:
- If it's an egg and its parent directory is on path,
insert just ahead of the parent,
removing any lower-priority entries.
- Else: add it to the front of path.
"""
loc = loc or self.location
if not loc:
return
nloc = _normalize_cached(loc)
bdir = os.path.dirname(nloc)
npath = [(p and _normalize_cached(p) or p) for p in path]
for p, item in enumerate(npath):
if item == nloc:
if replace:
break
else:
# don't modify path (even removing duplicates) if
# found and not replace
return
elif item == bdir and self.precedence == EGG_DIST:
# if it's an .egg, give it precedence over its directory
# UNLESS it's already been added to sys.path and replace=False
if (not replace) and nloc in npath[p:]:
return
if path is sys.path:
self.check_version_conflict()
path.insert(p, loc)
npath.insert(p, nloc)
break
else:
if path is sys.path:
self.check_version_conflict()
if replace:
path.insert(0, loc)
else:
path.append(loc)
return
# p is the spot where we found or inserted loc; now remove duplicates
while True:
try:
np = npath.index(nloc, p + 1)
except ValueError:
break
else:
del npath[np], path[np]
# ha!
p = np
return
def check_version_conflict(self):
if self.key == 'setuptools':
# ignore the inevitable setuptools self-conflicts :(
return
nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
loc = normalize_path(self.location)
for modname in self._get_metadata('top_level.txt'):
if (modname not in sys.modules or modname in nsp
or modname in _namespace_packages):
continue
if modname in ('pkg_resources', 'setuptools', 'site'):
continue
fn = getattr(sys.modules[modname], '__file__', None)
if fn and (normalize_path(fn).startswith(loc) or
fn.startswith(self.location)):
continue
issue_warning(
"Module %s was already imported from %s, but %s is being added"
" to sys.path" % (modname, fn, self.location),
)
def has_version(self):
try:
self.version
except ValueError:
issue_warning("Unbuilt egg for " + repr(self))
return False
return True
def clone(self, **kw):
"""Copy this distribution, substituting in any changed keyword args"""
names = 'project_name version py_version platform location precedence'
for attr in names.split():
kw.setdefault(attr, getattr(self, attr, None))
kw.setdefault('metadata', self._provider)
return self.__class__(**kw)
@property
def extras(self):
return [dep for dep in self._dep_map if dep]
class EggInfoDistribution(Distribution):
def _reload_version(self):
"""
        Packages installed by distutils (e.g. numpy or scipy) use
        an old safe_version, so their version numbers can get
        mangled when converted to filenames (e.g., 1.11.0.dev0+2329eae
        becomes 1.11.0.dev0_2329eae). Such distributions will not be
        parsed properly downstream by Distribution and safe_version,
        so take the extra step of reading the version from the
        metadata file itself instead of the filename.
"""
md_version = self._get_version()
if md_version:
self._version = md_version
return self
class DistInfoDistribution(Distribution):
"""
Wrap an actual or potential sys.path entry
w/metadata, .dist-info style.
"""
PKG_INFO = 'METADATA'
EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
@property
def _parsed_pkg_info(self):
"""Parse and cache metadata"""
try:
return self._pkg_info
except AttributeError:
metadata = self.get_metadata(self.PKG_INFO)
self._pkg_info = email.parser.Parser().parsestr(metadata)
return self._pkg_info
@property
def _dep_map(self):
try:
return self.__dep_map
except AttributeError:
self.__dep_map = self._compute_dependencies()
return self.__dep_map
def _compute_dependencies(self):
"""Recompute this distribution's dependencies."""
dm = self.__dep_map = {None: []}
reqs = []
# Including any condition expressions
for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
reqs.extend(parse_requirements(req))
def reqs_for_extra(extra):
for req in reqs:
if not req.marker or req.marker.evaluate({'extra': extra}):
yield req
common = frozenset(reqs_for_extra(None))
dm[None].extend(common)
for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
s_extra = safe_extra(extra.strip())
dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
return dm
_distributionImpl = {
'.egg': Distribution,
'.egg-info': EggInfoDistribution,
'.dist-info': DistInfoDistribution,
}
def issue_warning(*args, **kw):
level = 1
g = globals()
try:
# find the first stack frame that is *not* code in
# the pkg_resources module, to use for the warning
while sys._getframe(level).f_globals is g:
level += 1
except ValueError:
pass
warnings.warn(stacklevel=level + 1, *args, **kw)
class RequirementParseError(ValueError):
def __str__(self):
return ' '.join(self.args)
def parse_requirements(strs):
"""Yield ``Requirement`` objects for each specification in `strs`
`strs` must be a string, or a (possibly-nested) iterable thereof.
"""
# create a steppable iterator, so we can handle \-continuations
lines = iter(yield_lines(strs))
for line in lines:
# Drop comments -- a hash without a space may be in a URL.
if ' #' in line:
line = line[:line.find(' #')]
# If there is a line continuation, drop it, and append the next line.
if line.endswith('\\'):
line = line[:-2].strip()
try:
line += next(lines)
except StopIteration:
return
yield Requirement(line)
class Requirement(packaging.requirements.Requirement):
def __init__(self, requirement_string):
"""DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
try:
super(Requirement, self).__init__(requirement_string)
except packaging.requirements.InvalidRequirement as e:
raise RequirementParseError(str(e))
self.unsafe_name = self.name
project_name = safe_name(self.name)
self.project_name, self.key = project_name, project_name.lower()
self.specs = [
(spec.operator, spec.version) for spec in self.specifier]
self.extras = tuple(map(safe_extra, self.extras))
self.hashCmp = (
self.key,
self.url,
self.specifier,
frozenset(self.extras),
str(self.marker) if self.marker else None,
)
self.__hash = hash(self.hashCmp)
def __eq__(self, other):
return (
isinstance(other, Requirement) and
self.hashCmp == other.hashCmp
)
def __ne__(self, other):
return not self == other
def __contains__(self, item):
if isinstance(item, Distribution):
if item.key != self.key:
return False
item = item.version
# Allow prereleases always in order to match the previous behavior of
# this method. In the future this should be smarter and follow PEP 440
# more accurately.
return self.specifier.contains(item, prereleases=True)
def __hash__(self):
return self.__hash
def __repr__(self):
return "Requirement.parse(%r)" % str(self)
@staticmethod
def parse(s):
req, = parse_requirements(s)
return req
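    # Hedged usage sketch; the requirement string is illustrative:
    #
    #   req = Requirement.parse('requests[security]>=2.0,<3')
    #   req.key     # -> 'requests'
    #   req.extras  # -> ('security',)
    #   req.specs   # -> [('>=', '2.0'), ('<', '3')] (order may vary)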
def _always_object(classes):
"""
Ensure object appears in the mro even
for old-style classes.
"""
if object not in classes:
return classes + (object,)
return classes
def _find_adapter(registry, ob):
"""Return an adapter factory for `ob` from `registry`"""
types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
for t in types:
if t in registry:
return registry[t]
def ensure_directory(path):
"""Ensure that the parent directory of `path` exists"""
dirname = os.path.dirname(path)
py31compat.makedirs(dirname, exist_ok=True)
def _bypass_ensure_directory(path):
"""Sandbox-bypassing version of ensure_directory()"""
if not WRITE_SUPPORT:
raise IOError('"os.mkdir" not supported on this platform.')
dirname, filename = split(path)
if dirname and filename and not isdir(dirname):
_bypass_ensure_directory(dirname)
try:
mkdir(dirname, 0o755)
except FileExistsError:
pass
def split_sections(s):
"""Split a string or iterable thereof into (section, content) pairs
Each ``section`` is a stripped version of the section header ("[section]")
and each ``content`` is a list of stripped lines excluding blank lines and
comment-only lines. If there are any such lines before the first section
header, they're returned in a first ``section`` of ``None``.
"""
section = None
content = []
for line in yield_lines(s):
if line.startswith("["):
if line.endswith("]"):
if section or content:
yield section, content
section = line[1:-1].strip()
content = []
else:
raise ValueError("Invalid section heading", line)
else:
content.append(line)
# wrap up last segment
yield section, content
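# Hedged usage sketch for split_sections:
#
#   list(split_sections("a\n[sec]\nb"))
#   # -> [(None, ['a']), ('sec', ['b'])]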
def _mkstemp(*args, **kw):
old_open = os.open
try:
# temporarily bypass sandboxing
os.open = os_open
return tempfile.mkstemp(*args, **kw)
finally:
# and then put it back
os.open = old_open
# Silence the PEP440Warning by default, so that end users don't get hit by it
# randomly just because they use pkg_resources. We want to append the rule
# because we want earlier uses of filterwarnings to take precedence over this
# one.
warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
# from jaraco.functools 1.3
def _call_aside(f, *args, **kwargs):
f(*args, **kwargs)
return f
@_call_aside
def _initialize(g=globals()):
"Set up global resource manager (deliberately not state-saved)"
manager = ResourceManager()
g['_manager'] = manager
g.update(
(name, getattr(manager, name))
for name in dir(manager)
if not name.startswith('_')
)
@_call_aside
def _initialize_master_working_set():
"""
Prepare the master working set and make the ``require()``
API available.
This function has explicit effects on the global state
of pkg_resources. It is intended to be invoked once at
the initialization of this module.
Invocation by other packages is unsupported and done
at their own risk.
"""
working_set = WorkingSet._build_master()
_declare_state('object', working_set=working_set)
require = working_set.require
iter_entry_points = working_set.iter_entry_points
add_activation_listener = working_set.subscribe
run_script = working_set.run_script
# backward compatibility
run_main = run_script
# Activate all distributions already on sys.path with replace=False and
# ensure that all distributions added to the working set in the future
# (e.g. by calling ``require()``) will get activated as well,
# with higher priority (replace=True).
tuple(
dist.activate(replace=False)
for dist in working_set
)
add_activation_listener(
lambda dist: dist.activate(replace=True),
existing=False,
)
working_set.entries = []
# match order
list(map(working_set.add_entry, sys.path))
globals().update(locals())
class PkgResourcesDeprecationWarning(Warning):
"""
Base class for warning about deprecations in ``pkg_resources``
This class is not derived from ``DeprecationWarning``, and as such is
visible by default.
"""
# End of Django-locallibrary/env/Lib/site-packages/pip/_vendor/pkg_resources/__init__.py
# .-. .-. .-. . . .-. .-. .-. .-.
# |( |- |.| | | |- `-. | `-.
# ' ' `-' `-`.`-' `-' `-' ' `-'
__title__ = 'requests'
__description__ = 'Python HTTP for Humans.'
__url__ = 'https://requests.readthedocs.io'
__version__ = '2.24.0'
__build__ = 0x022400
__author__ = 'Kenneth Reitz'
__author_email__ = 'me@kennethreitz.org'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2020 Kenneth Reitz'
__cake__ = u'\u2728 \U0001f370 \u2728'
# End of Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/__version__.py
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import codecs
import contextlib
import io
import os
import re
import socket
import struct
import sys
import tempfile
import warnings
import zipfile
from collections import OrderedDict
from .__version__ import __version__
from . import certs
# to_native_string is unused here, but imported for backwards compatibility
from ._internal_utils import to_native_string
from .compat import parse_http_list as _parse_list_header
from .compat import (
quote, urlparse, bytes, str, unquote, getproxies,
proxy_bypass, urlunparse, basestring, integer_types, is_py3,
proxy_bypass_environment, getproxies_environment, Mapping)
from .cookies import cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import (
InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
DEFAULT_PORTS = {'http': 80, 'https': 443}
if sys.platform == 'win32':
# provide a proxy_bypass version on Windows without DNS lookups
def proxy_bypass_registry(host):
try:
if is_py3:
import winreg
else:
import _winreg as winreg
except ImportError:
return False
try:
internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
# ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
proxyEnable = int(winreg.QueryValueEx(internetSettings,
'ProxyEnable')[0])
# ProxyOverride is almost always a string
proxyOverride = winreg.QueryValueEx(internetSettings,
'ProxyOverride')[0]
except OSError:
return False
if not proxyEnable or not proxyOverride:
return False
# make a check value list from the registry entry: replace the
# '<local>' string by the localhost entry and the corresponding
# canonical entry.
proxyOverride = proxyOverride.split(';')
# now check if we match one of the registry values.
for test in proxyOverride:
if test == '<local>':
if '.' not in host:
return True
test = test.replace(".", r"\.") # mask dots
test = test.replace("*", r".*") # change glob sequence
test = test.replace("?", r".") # change glob char
if re.match(test, host, re.I):
return True
return False
def proxy_bypass(host): # noqa
"""Return True, if the host should be bypassed.
Checks proxy settings gathered from the environment, if specified,
or the registry.
"""
if getproxies_environment():
return proxy_bypass_environment(host)
else:
return proxy_bypass_registry(host)
def dict_to_sequence(d):
"""Returns an internal sequence dictionary update."""
if hasattr(d, 'items'):
d = d.items()
return d
def super_len(o):
total_length = None
current_position = 0
if hasattr(o, '__len__'):
total_length = len(o)
elif hasattr(o, 'len'):
total_length = o.len
elif hasattr(o, 'fileno'):
try:
fileno = o.fileno()
except io.UnsupportedOperation:
pass
else:
total_length = os.fstat(fileno).st_size
# Having used fstat to determine the file length, we need to
# confirm that this file was opened up in binary mode.
if 'b' not in o.mode:
warnings.warn((
"Requests has determined the content-length for this "
"request using the binary size of the file: however, the "
"file has been opened in text mode (i.e. without the 'b' "
"flag in the mode). This may lead to an incorrect "
"content-length. In Requests 3.0, support will be removed "
"for files in text mode."),
FileModeWarning
)
if hasattr(o, 'tell'):
try:
current_position = o.tell()
except (OSError, IOError):
# This can happen in some weird situations, such as when the file
# is actually a special file descriptor like stdin. In this
# instance, we don't know what the length is, so set it to zero and
# let requests chunk it instead.
if total_length is not None:
current_position = total_length
else:
if hasattr(o, 'seek') and total_length is None:
# StringIO and BytesIO have seek but no useable fileno
try:
# seek to end of file
o.seek(0, 2)
total_length = o.tell()
# seek back to current position to support
# partially read file-like objects
o.seek(current_position or 0)
except (OSError, IOError):
total_length = 0
if total_length is None:
total_length = 0
return max(0, total_length - current_position)
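# Hedged usage sketch for super_len; it reports the length remaining
# from the current read position:
#
#   import io
#   buf = io.BytesIO(b'abcdef')
#   buf.read(2)
#   super_len(buf)  # -> 4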
def get_netrc_auth(url, raise_errors=False):
"""Returns the Requests tuple auth for a given url from netrc."""
try:
from netrc import netrc, NetrcParseError
netrc_path = None
for f in NETRC_FILES:
try:
loc = os.path.expanduser('~/{}'.format(f))
except KeyError:
# os.path.expanduser can fail when $HOME is undefined and
# getpwuid fails. See https://bugs.python.org/issue20164 &
# https://github.com/psf/requests/issues/1846
return
if os.path.exists(loc):
netrc_path = loc
break
# Abort early if there isn't one.
if netrc_path is None:
return
ri = urlparse(url)
        # Strip port numbers from netloc. This weird `if...encode` dance is
        # used for Python 3.2, which doesn't support unicode literals.
splitstr = b':'
if isinstance(url, str):
splitstr = splitstr.decode('ascii')
host = ri.netloc.split(splitstr)[0]
try:
_netrc = netrc(netrc_path).authenticators(host)
if _netrc:
# Return with login / password
login_i = (0 if _netrc[0] else 1)
return (_netrc[login_i], _netrc[2])
except (NetrcParseError, IOError):
# If there was a parsing error or a permissions issue reading the file,
# we'll just skip netrc auth unless explicitly asked to raise errors.
if raise_errors:
raise
# AppEngine hackiness.
except (ImportError, AttributeError):
pass
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
if (name and isinstance(name, basestring) and name[0] != '<' and
name[-1] != '>'):
return os.path.basename(name)
def extract_zipped_paths(path):
"""Replace nonexistent paths that look like they refer to a member of a zip
archive with the location of an extracted copy of the target, or else
just return the provided path unchanged.
"""
if os.path.exists(path):
# this is already a valid path, no need to do anything further
return path
# find the first valid part of the provided path and treat that as a zip archive
# assume the rest of the path is the name of a member in the archive
archive, member = os.path.split(path)
while archive and not os.path.exists(archive):
archive, prefix = os.path.split(archive)
member = '/'.join([prefix, member])
if not zipfile.is_zipfile(archive):
return path
zip_file = zipfile.ZipFile(archive)
if member not in zip_file.namelist():
return path
# we have a valid zip archive and a valid member of that archive
tmp = tempfile.gettempdir()
extracted_path = os.path.join(tmp, *member.split('/'))
if not os.path.exists(extracted_path):
extracted_path = zip_file.extract(member, path=tmp)
return extracted_path
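# Hedged sketch; '/tmp/bundle.zip' and its member are hypothetical:
#
#   extract_zipped_paths('/tmp/bundle.zip/pkg/data.txt')
#   # -> path of 'pkg/data.txt' extracted under tempfile.gettempdir(),
#   #    or the input unchanged if '/tmp/bundle.zip' is not a zip file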
def from_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. Unless it can not be represented as such, return an
OrderedDict, e.g.,
::
>>> from_key_val_list([('key', 'val')])
OrderedDict([('key', 'val')])
>>> from_key_val_list('string')
Traceback (most recent call last):
...
ValueError: cannot encode objects that are not 2-tuples
>>> from_key_val_list({'key': 'val'})
OrderedDict([('key', 'val')])
:rtype: OrderedDict
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
return OrderedDict(value)
def to_key_val_list(value):
"""Take an object and test to see if it can be represented as a
dictionary. If it can be, return a list of tuples, e.g.,
::
>>> to_key_val_list([('key', 'val')])
[('key', 'val')]
>>> to_key_val_list({'key': 'val'})
[('key', 'val')]
>>> to_key_val_list('string')
Traceback (most recent call last):
...
ValueError: cannot encode objects that are not 2-tuples
:rtype: list
"""
if value is None:
return None
if isinstance(value, (str, bytes, bool, int)):
raise ValueError('cannot encode objects that are not 2-tuples')
if isinstance(value, Mapping):
value = value.items()
return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Quotes are removed automatically after parsing.
It basically works like :func:`parse_set_header` just that items
may appear multiple times and case sensitivity is preserved.
The return value is a standard :class:`list`:
>>> parse_list_header('token, "quoted value"')
['token', 'quoted value']
To create a header from the :class:`list` again, use the
:func:`dump_header` function.
:param value: a string with a list header.
:return: :class:`list`
:rtype: list
"""
result = []
for item in _parse_list_header(value):
if item[:1] == item[-1:] == '"':
item = unquote_header_value(item[1:-1])
result.append(item)
return result
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
"""Parse lists of key, value pairs as described by RFC 2068 Section 2 and
convert them into a python dict:
>>> d = parse_dict_header('foo="is a fish", bar="as well"')
>>> type(d) is dict
True
>>> sorted(d.items())
[('bar', 'as well'), ('foo', 'is a fish')]
If there is no value for a key it will be `None`:
>>> parse_dict_header('key_without_value')
{'key_without_value': None}
To create a header from the :class:`dict` again, use the
:func:`dump_header` function.
:param value: a string with a dict header.
:return: :class:`dict`
:rtype: dict
"""
result = {}
for item in _parse_list_header(value):
if '=' not in item:
result[item] = None
continue
name, value = item.split('=', 1)
if value[:1] == value[-1:] == '"':
value = unquote_header_value(value[1:-1])
result[name] = value
return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
This does not use the real unquoting but what browsers are actually
using for quoting.
:param value: the header value to unquote.
:rtype: str
"""
if value and value[0] == value[-1] == '"':
# this is not the real unquoting, but fixing this so that the
# RFC is met will result in bugs with internet explorer and
# probably some other browsers as well. IE for example is
# uploading files with "C:\foo\bar.txt" as filename
value = value[1:-1]
# if this is a filename and the starting characters look like
# a UNC path, then just return the value without quotes. Using the
# replace sequence below on a UNC path has the effect of turning
# the leading double slash into a single slash and then
# _fix_ie_filename() doesn't work correctly. See #458.
if not is_filename or value[:2] != '\\\\':
return value.replace('\\\\', '\\').replace('\\"', '"')
return value
def dict_from_cookiejar(cj):
"""Returns a key/value dictionary from a CookieJar.
:param cj: CookieJar object to extract cookies from.
:rtype: dict
"""
cookie_dict = {}
for cookie in cj:
cookie_dict[cookie.name] = cookie.value
return cookie_dict
def add_dict_to_cookiejar(cj, cookie_dict):
"""Returns a CookieJar from a key/value dictionary.
:param cj: CookieJar to insert cookies into.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:rtype: CookieJar
"""
return cookiejar_from_dict(cookie_dict, cj)
def get_encodings_from_content(content):
"""Returns encodings from given content string.
:param content: bytestring to extract encodings from.
"""
warnings.warn((
'In requests 3.0, get_encodings_from_content will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
return (charset_re.findall(content) +
pragma_re.findall(content) +
xml_re.findall(content))
def _parse_content_type_header(header):
"""Returns content type and parameters from given header
:param header: string
:return: tuple containing content type and dictionary of
parameters
"""
tokens = header.split(';')
content_type, params = tokens[0].strip(), tokens[1:]
params_dict = {}
items_to_strip = "\"' "
for param in params:
param = param.strip()
if param:
key, value = param, True
index_of_equals = param.find("=")
if index_of_equals != -1:
key = param[:index_of_equals].strip(items_to_strip)
value = param[index_of_equals + 1:].strip(items_to_strip)
params_dict[key.lower()] = value
return content_type, params_dict
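# Hedged usage sketch:
#
#   _parse_content_type_header('text/html; charset="UTF-8"')
#   # -> ('text/html', {'charset': 'UTF-8'})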
def get_encoding_from_headers(headers):
"""Returns encodings from given HTTP Header Dict.
:param headers: dictionary to extract encoding from.
:rtype: str
"""
content_type = headers.get('content-type')
if not content_type:
return None
content_type, params = _parse_content_type_header(content_type)
if 'charset' in params:
return params['charset'].strip("'\"")
if 'text' in content_type:
return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
"""Stream decodes a iterator."""
if r.encoding is None:
for item in iterator:
yield item
return
decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
for chunk in iterator:
rv = decoder.decode(chunk)
if rv:
yield rv
rv = decoder.decode(b'', final=True)
if rv:
yield rv
def iter_slices(string, slice_length):
"""Iterate over slices of a string."""
pos = 0
if slice_length is None or slice_length <= 0:
slice_length = len(string)
while pos < len(string):
yield string[pos:pos + slice_length]
pos += slice_length
def get_unicode_from_response(r):
"""Returns the requested content back in unicode.
:param r: Response object to get unicode content from.
Tried:
1. charset from content-type
2. fall back and replace all unicode characters
:rtype: str
"""
warnings.warn((
'In requests 3.0, get_unicode_from_response will be removed. For '
'more information, please see the discussion on issue #2266. (This'
' warning should only appear once.)'),
DeprecationWarning)
tried_encodings = []
# Try charset from content-type
encoding = get_encoding_from_headers(r.headers)
if encoding:
try:
return str(r.content, encoding)
except UnicodeError:
tried_encodings.append(encoding)
# Fall back:
try:
return str(r.content, encoding, errors='replace')
except TypeError:
return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")
def unquote_unreserved(uri):
"""Un-escape any percent-escape sequences in a URI that are unreserved
characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
:rtype: str
"""
parts = uri.split('%')
for i in range(1, len(parts)):
h = parts[i][0:2]
if len(h) == 2 and h.isalnum():
try:
c = chr(int(h, 16))
except ValueError:
raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
if c in UNRESERVED_SET:
parts[i] = c + parts[i][2:]
else:
parts[i] = '%' + parts[i]
else:
parts[i] = '%' + parts[i]
return ''.join(parts)
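# Hedged usage sketch: only unreserved characters are unescaped.
#
#   unquote_unreserved('foo%2Dbar%2Fbaz')  # -> 'foo-bar%2Fbaz'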
def requote_uri(uri):
"""Re-quote the given URI.
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
:rtype: str
"""
safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
safe_without_percent = "!#$&'()*+,/:;=?@[]~"
try:
# Unquote only the unreserved characters
# Then quote only illegal characters (do not quote reserved,
# unreserved, or '%')
return quote(unquote_unreserved(uri), safe=safe_with_percent)
except InvalidURL:
# We couldn't unquote the given URI, so let's try quoting it, but
# there may be unquoted '%'s in the URI. We need to make sure they're
# properly quoted so they do not cause issues elsewhere.
return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
"""This function allows you to check if an IP belongs to a network subnet
Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
:rtype: bool
"""
ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
netaddr, bits = net.split('/')
netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
return (ipaddr & netmask) == (network & netmask)
def dotted_netmask(mask):
"""Converts mask from /xx format to xxx.xxx.xxx.xxx
Example: if mask is 24 function returns 255.255.255.0
:rtype: str
"""
bits = 0xffffffff ^ (1 << 32 - mask) - 1
return socket.inet_ntoa(struct.pack('>I', bits))
def is_ipv4_address(string_ip):
"""
:rtype: bool
"""
try:
socket.inet_aton(string_ip)
except socket.error:
return False
return True
def is_valid_cidr(string_network):
"""
Very simple check of the cidr format in no_proxy variable.
:rtype: bool
"""
if string_network.count('/') == 1:
try:
mask = int(string_network.split('/')[1])
except ValueError:
return False
if mask < 1 or mask > 32:
return False
try:
socket.inet_aton(string_network.split('/')[0])
except socket.error:
return False
else:
return False
return True
@contextlib.contextmanager
def set_environ(env_name, value):
"""Set the environment variable 'env_name' to 'value'
Save previous value, yield, and then restore the previous value stored in
the environment variable 'env_name'.
If 'value' is None, do nothing"""
value_changed = value is not None
if value_changed:
old_value = os.environ.get(env_name)
os.environ[env_name] = value
try:
yield
finally:
if value_changed:
if old_value is None:
del os.environ[env_name]
else:
os.environ[env_name] = old_value
def should_bypass_proxies(url, no_proxy):
"""
Returns whether we should bypass proxies or not.
:rtype: bool
"""
# Prioritize lowercase environment variables over uppercase
# to keep a consistent behaviour with other http projects (curl, wget).
get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
# First check whether no_proxy is defined. If it is, check that the URL
# we're getting isn't in the no_proxy list.
no_proxy_arg = no_proxy
if no_proxy is None:
no_proxy = get_proxy('no_proxy')
parsed = urlparse(url)
if parsed.hostname is None:
# URLs don't always have hostnames, e.g. file:/// urls.
return True
if no_proxy:
# We need to check whether we match here. We need to see if we match
# the end of the hostname, both with and without the port.
no_proxy = (
host for host in no_proxy.replace(' ', '').split(',') if host
)
if is_ipv4_address(parsed.hostname):
for proxy_ip in no_proxy:
if is_valid_cidr(proxy_ip):
if address_in_network(parsed.hostname, proxy_ip):
return True
elif parsed.hostname == proxy_ip:
# If no_proxy ip was defined in plain IP notation instead of cidr notation &
# matches the IP of the index
return True
else:
host_with_port = parsed.hostname
if parsed.port:
host_with_port += ':{}'.format(parsed.port)
for host in no_proxy:
if parsed.hostname.endswith(host) or host_with_port.endswith(host):
# The URL does match something in no_proxy, so we don't want
# to apply the proxies on this URL.
return True
with set_environ('no_proxy', no_proxy_arg):
# parsed.hostname can be `None` in cases such as a file URI.
try:
bypass = proxy_bypass(parsed.hostname)
except (TypeError, socket.gaierror):
bypass = False
if bypass:
return True
return False
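# Hedged usage sketch; the address and CIDR range are illustrative:
#
#   should_bypass_proxies('http://192.168.1.5/', no_proxy='192.168.1.0/24')
#   # -> True (the host falls inside the no_proxy CIDR range)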
def get_environ_proxies(url, no_proxy=None):
"""
Return a dict of environment proxies.
:rtype: dict
"""
if should_bypass_proxies(url, no_proxy=no_proxy):
return {}
else:
return getproxies()
def select_proxy(url, proxies):
"""Select a proxy for the url, if applicable.
    :param url: The url being used for the request
:param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
"""
proxies = proxies or {}
urlparts = urlparse(url)
if urlparts.hostname is None:
return proxies.get(urlparts.scheme, proxies.get('all'))
proxy_keys = [
urlparts.scheme + '://' + urlparts.hostname,
urlparts.scheme,
'all://' + urlparts.hostname,
'all',
]
proxy = None
for proxy_key in proxy_keys:
if proxy_key in proxies:
proxy = proxies[proxy_key]
break
return proxy
def default_user_agent(name="python-requests"):
"""
Return a string representing the default user agent.
:rtype: str
"""
return '%s/%s' % (name, __version__)
def default_headers():
"""
:rtype: requests.structures.CaseInsensitiveDict
"""
return CaseInsensitiveDict({
'User-Agent': default_user_agent(),
'Accept-Encoding': ', '.join(('gzip', 'deflate')),
'Accept': '*/*',
'Connection': 'keep-alive',
})
def parse_header_links(value):
"""Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links
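# Hedged usage sketch; the URL is illustrative:
#
#   parse_header_links('<http://example.com/a>; rel="next"')
#   # -> [{'url': 'http://example.com/a', 'rel': 'next'}]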
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3
def guess_json_utf(data):
"""
:rtype: str
"""
    # JSON always starts with two ASCII characters, so detection is as
    # easy as counting the nulls; their location and count determine
    # the encoding. Also detect a BOM, if present.
sample = data[:4]
if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
return 'utf-32' # BOM included
if sample[:3] == codecs.BOM_UTF8:
return 'utf-8-sig' # BOM included, MS style (discouraged)
if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
return 'utf-16' # BOM included
nullcount = sample.count(_null)
if nullcount == 0:
return 'utf-8'
if nullcount == 2:
if sample[::2] == _null2: # 1st and 3rd are null
return 'utf-16-be'
if sample[1::2] == _null2: # 2nd and 4th are null
return 'utf-16-le'
# Did not detect 2 valid UTF-16 ascii-range characters
if nullcount == 3:
if sample[:3] == _null3:
return 'utf-32-be'
if sample[1:] == _null3:
return 'utf-32-le'
# Did not detect a valid UTF-32 ascii-range character
return None
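# Hedged usage sketch:
#
#   guess_json_utf(b'{"a": 1}')                  # -> 'utf-8'
#   guess_json_utf('{"a": 1}'.encode('utf-16'))  # -> 'utf-16'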
def prepend_scheme_if_needed(url, new_scheme):
"""Given a URL that may or may not have a scheme, prepend the given scheme.
Does not replace a present scheme with the one provided as an argument.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
# urlparse is a finicky beast, and sometimes decides that there isn't a
# netloc present. Assume that it's being over-cautious, and switch netloc
# and path if urlparse decided there was no netloc.
if not netloc:
netloc, path = path, netloc
return urlunparse((scheme, netloc, path, params, query, fragment))
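# Hedged usage sketch:
#
#   prepend_scheme_if_needed('example.com/path', 'http')
#   # -> 'http://example.com/path'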
def get_auth_from_url(url):
"""Given a url with authentication components, extract them into a tuple of
    username, password.
:rtype: (str,str)
"""
parsed = urlparse(url)
try:
auth = (unquote(parsed.username), unquote(parsed.password))
except (AttributeError, TypeError):
auth = ('', '')
return auth
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
def check_header_validity(header):
"""Verifies that header value is a string which doesn't contain
leading whitespace or return characters. This prevents unintended
header injection.
:param header: tuple, in the format (name, value).
"""
name, value = header
if isinstance(value, bytes):
pat = _CLEAN_HEADER_REGEX_BYTE
else:
pat = _CLEAN_HEADER_REGEX_STR
try:
if not pat.match(value):
raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
except TypeError:
raise InvalidHeader("Value for header {%s: %s} must be of type str or "
"bytes, not %s" % (name, value, type(value)))
def urldefragauth(url):
"""
Given a url remove the fragment and the authentication part.
:rtype: str
"""
scheme, netloc, path, params, query, fragment = urlparse(url)
# see func:`prepend_scheme_if_needed`
if not netloc:
netloc, path = path, netloc
netloc = netloc.rsplit('@', 1)[-1]
return urlunparse((scheme, netloc, path, params, query, ''))
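# Hedged usage sketch; credentials and fragment are illustrative:
#
#   urldefragauth('http://user:pass@example.com/p#frag')
#   # -> 'http://example.com/p'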
def rewind_body(prepared_request):
"""Move file pointer back to its recorded starting position
so it can be read again on redirect.
"""
body_seek = getattr(prepared_request.body, 'seek', None)
if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
try:
body_seek(prepared_request._body_position)
except (IOError, OSError):
raise UnrewindableBodyError("An error occurred when rewinding request "
"body for redirect.")
else:
raise UnrewindableBodyError("Unable to rewind request body for redirect.")
# End of Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/utils.py
from __future__ import absolute_import
import email.utils
import mimetypes
import re
from .packages import six
def guess_content_type(filename, default="application/octet-stream"):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param_rfc2231(name, value):
"""
Helper function to format and quote a single header parameter using the
strategy defined in RFC 2231.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
        The value of the parameter, provided as ``bytes`` or ``str``.
:ret:
An RFC-2231-formatted unicode string.
"""
if isinstance(value, six.binary_type):
value = value.decode("utf-8")
if not any(ch in value for ch in '"\\\r\n'):
result = u'%s="%s"' % (name, value)
try:
result.encode("ascii")
except (UnicodeEncodeError, UnicodeDecodeError):
pass
else:
return result
if six.PY2: # Python 2:
value = value.encode("utf-8")
# encode_rfc2231 accepts an encoded string and returns an ascii-encoded
# string in Python 2 but accepts and returns unicode strings in Python 3
value = email.utils.encode_rfc2231(value, "utf-8")
value = "%s*=%s" % (name, value)
if six.PY2: # Python 2:
value = value.decode("utf-8")
return value
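# Editor's note: a sketch of the two paths above (ASCII-safe vs. RFC 2231
# encoding); file names are made up, output shown for Python 3.
# >>> format_header_param_rfc2231('filename', 'notes.txt')
# 'filename="notes.txt"'
# >>> format_header_param_rfc2231('filename', u'caf\u00e9.txt')
# "filename*=utf-8''caf%C3%A9.txt"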
_HTML5_REPLACEMENTS = {
    # Replace '"' with '%22'.
    u"\u0022": u"%22",
    # Replace "\" with "\\".
    u"\u005C": u"\u005C\u005C",
}
# All control characters from 0x00 to 0x1F *except* 0x1B.
_HTML5_REPLACEMENTS.update(
{
six.unichr(cc): u"%{:02X}".format(cc)
for cc in range(0x00, 0x1F + 1)
if cc not in (0x1B,)
}
)
def _replace_multiple(value, needles_and_replacements):
def replacer(match):
return needles_and_replacements[match.group(0)]
pattern = re.compile(
r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()])
)
result = pattern.sub(replacer, value)
return result
def format_header_param_html5(name, value):
"""
Helper function to format and quote a single header parameter using the
HTML5 strategy.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows the `HTML5 Working Draft
Section 4.10.22.7`_ and matches the behavior of curl and modern browsers.
.. _HTML5 Working Draft Section 4.10.22.7:
https://w3c.github.io/html/sec-forms.html#multipart-form-data
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
        The value of the parameter, provided as ``bytes`` or ``str``.
:ret:
A unicode string, stripped of troublesome characters.
"""
if isinstance(value, six.binary_type):
value = value.decode("utf-8")
value = _replace_multiple(value, _HTML5_REPLACEMENTS)
return u'%s="%s"' % (name, value)
# For backwards-compatibility.
format_header_param = format_header_param_html5
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field. Must be unicode.
:param data:
The data/value body.
:param filename:
An optional filename of the request field. Must be unicode.
:param headers:
An optional dict-like object of headers to initially use for the field.
:param header_formatter:
An optional callable that is used to encode and format the headers. By
default, this is :func:`format_header_param_html5`.
"""
def __init__(
self,
name,
data,
filename=None,
headers=None,
header_formatter=format_header_param_html5,
):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
self.header_formatter = header_formatter
@classmethod
def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
        parameters of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(
fieldname, data, filename=filename, header_formatter=header_formatter
)
request_param.make_multipart(content_type=content_type)
return request_param
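    # Editor's note: an illustrative round-trip (field and file names are
    # made up); the Content-Type falls back to mimetypes-based guessing.
    # >>> field = RequestField.from_tuples('upload', ('notes.txt', 'hello'))
    # >>> field.headers['Content-Disposition']
    # 'form-data; name="upload"; filename="notes.txt"'
    # >>> field.headers['Content-Type']
    # 'text/plain'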
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter. By
default, this calls ``self.header_formatter``.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return self.header_formatter(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value is not None:
parts.append(self._render_part(name, value))
return u"; ".join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"]
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append(u"%s: %s" % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append(u"%s: %s" % (header_name, header_value))
lines.append(u"\r\n")
return u"\r\n".join(lines)
def make_multipart(
self, content_disposition=None, content_type=None, content_location=None
):
"""
Makes this request field into a multipart request field.
        This method adds the "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter, overriding any
        values already set.
        :param content_disposition:
            The 'Content-Disposition' of the request body,
            defaults to 'form-data'.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers["Content-Disposition"] = content_disposition or u"form-data"
self.headers["Content-Disposition"] += u"; ".join(
[
u"",
self._render_parts(
((u"name", self._name), (u"filename", self._filename))
),
]
)
self.headers["Content-Type"] = content_type
self.headers["Content-Location"] = content_location
# ---- end of Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/fields.py ----
from __future__ import absolute_import
from contextlib import contextmanager
import zlib
import io
import logging
from socket import timeout as SocketTimeout
from socket import error as SocketError
try:
import brotli
except ImportError:
brotli = None
from ._collections import HTTPHeaderDict
from .exceptions import (
BodyNotHttplibCompatible,
ProtocolError,
DecodeError,
ReadTimeoutError,
ResponseNotChunked,
IncompleteRead,
InvalidHeader,
HTTPError,
)
from .packages.six import string_types as basestring, PY3
from .packages.six.moves import http_client as httplib
from .connection import HTTPException, BaseSSLError
from .util.response import is_fp_closed, is_response_to_head
log = logging.getLogger(__name__)
class DeflateDecoder(object):
def __init__(self):
self._first_try = True
self._data = b""
self._obj = zlib.decompressobj()
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
if not data:
return data
if not self._first_try:
return self._obj.decompress(data)
self._data += data
try:
decompressed = self._obj.decompress(data)
if decompressed:
self._first_try = False
self._data = None
return decompressed
except zlib.error:
self._first_try = False
self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
try:
return self.decompress(self._data)
finally:
self._data = None
class GzipDecoderState(object):
FIRST_MEMBER = 0
OTHER_MEMBERS = 1
SWALLOW_DATA = 2
class GzipDecoder(object):
def __init__(self):
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
self._state = GzipDecoderState.FIRST_MEMBER
def __getattr__(self, name):
return getattr(self._obj, name)
def decompress(self, data):
ret = bytearray()
if self._state == GzipDecoderState.SWALLOW_DATA or not data:
return bytes(ret)
while True:
try:
ret += self._obj.decompress(data)
except zlib.error:
previous_state = self._state
# Ignore data after the first error
self._state = GzipDecoderState.SWALLOW_DATA
if previous_state == GzipDecoderState.OTHER_MEMBERS:
# Allow trailing garbage acceptable in other gzip clients
return bytes(ret)
raise
data = self._obj.unused_data
if not data:
return bytes(ret)
self._state = GzipDecoderState.OTHER_MEMBERS
self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
if brotli is not None:
class BrotliDecoder(object):
# Supports both 'brotlipy' and 'Brotli' packages
# since they share an import name. The top branches
# are for 'brotlipy' and bottom branches for 'Brotli'
def __init__(self):
self._obj = brotli.Decompressor()
def decompress(self, data):
if hasattr(self._obj, "decompress"):
return self._obj.decompress(data)
return self._obj.process(data)
def flush(self):
if hasattr(self._obj, "flush"):
return self._obj.flush()
return b""
class MultiDecoder(object):
"""
From RFC7231:
If one or more encodings have been applied to a representation, the
sender that applied the encodings MUST generate a Content-Encoding
header field that lists the content codings in the order in which
they were applied.
"""
def __init__(self, modes):
self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
def flush(self):
return self._decoders[0].flush()
def decompress(self, data):
for d in reversed(self._decoders):
data = d.decompress(data)
return data
def _get_decoder(mode):
if "," in mode:
return MultiDecoder(mode)
if mode == "gzip":
return GzipDecoder()
if brotli is not None and mode == "br":
return BrotliDecoder()
return DeflateDecoder()
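# Editor's note: a quick illustration of the dispatch above, using stdlib
# compressors to produce the payloads (Python 3; gzip.compress is stdlib).
# >>> import gzip, zlib
# >>> _get_decoder('gzip').decompress(gzip.compress(b'payload'))
# b'payload'
# >>> _get_decoder('deflate').decompress(zlib.compress(b'payload'))
# b'payload'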
class HTTPResponse(io.IOBase):
"""
HTTP Response container.
Backwards-compatible to httplib's HTTPResponse but the response ``body`` is
loaded and decoded on-demand when the ``data`` property is accessed. This
class is also compatible with the Python standard library's :mod:`io`
module, and can hence be treated as a readable object in the context of that
framework.
Extra parameters for behaviour not present in httplib.HTTPResponse:
:param preload_content:
If True, the response's body will be preloaded during construction.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param original_response:
When this HTTPResponse wrapper is generated from an httplib.HTTPResponse
object, it's convenient to include the original for debug purposes. It's
otherwise unused.
:param retries:
The retries contains the last :class:`~urllib3.util.retry.Retry` that
was used during the request.
:param enforce_content_length:
Enforce content length checking. Body returned by server must match
value of Content-Length header, if present. Otherwise, raise error.
"""
CONTENT_DECODERS = ["gzip", "deflate"]
if brotli is not None:
CONTENT_DECODERS += ["br"]
REDIRECT_STATUSES = [301, 302, 303, 307, 308]
def __init__(
self,
body="",
headers=None,
status=0,
version=0,
reason=None,
strict=0,
preload_content=True,
decode_content=True,
original_response=None,
pool=None,
connection=None,
msg=None,
retries=None,
enforce_content_length=False,
request_method=None,
request_url=None,
auto_close=True,
):
if isinstance(headers, HTTPHeaderDict):
self.headers = headers
else:
self.headers = HTTPHeaderDict(headers)
self.status = status
self.version = version
self.reason = reason
self.strict = strict
self.decode_content = decode_content
self.retries = retries
self.enforce_content_length = enforce_content_length
self.auto_close = auto_close
self._decoder = None
self._body = None
self._fp = None
self._original_response = original_response
self._fp_bytes_read = 0
self.msg = msg
self._request_url = request_url
if body and isinstance(body, (basestring, bytes)):
self._body = body
self._pool = pool
self._connection = connection
if hasattr(body, "read"):
self._fp = body
# Are we using the chunked-style of transfer encoding?
self.chunked = False
self.chunk_left = None
tr_enc = self.headers.get("transfer-encoding", "").lower()
# Don't incur the penalty of creating a list and then discarding it
encodings = (enc.strip() for enc in tr_enc.split(","))
if "chunked" in encodings:
self.chunked = True
# Determine length of response
self.length_remaining = self._init_length(request_method)
# If requested, preload the body.
if preload_content and not self._body:
self._body = self.read(decode_content=decode_content)
def get_redirect_location(self):
"""
Should we redirect and where to?
:returns: Truthy redirect location string if we got a redirect status
code and valid location. ``None`` if redirect status and no
location. ``False`` if not a redirect status code.
"""
if self.status in self.REDIRECT_STATUSES:
return self.headers.get("location")
return False
def release_conn(self):
if not self._pool or not self._connection:
return
self._pool._put_conn(self._connection)
self._connection = None
def drain_conn(self):
"""
Read and discard any remaining HTTP response data in the response connection.
Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
"""
try:
self.read()
except (HTTPError, SocketError, BaseSSLError, HTTPException):
pass
@property
def data(self):
        # For backwards-compat with urllib3 0.4 and earlier.
if self._body:
return self._body
if self._fp:
return self.read(cache_content=True)
@property
def connection(self):
return self._connection
def isclosed(self):
return is_fp_closed(self._fp)
def tell(self):
"""
        Obtain the number of bytes pulled over the wire so far. May differ from
        the amount of content returned by :meth:`HTTPResponse.read` if bytes
        are encoded on the wire (e.g., compressed).
"""
return self._fp_bytes_read
def _init_length(self, request_method):
"""
Set initial length value for Response content if available.
"""
length = self.headers.get("content-length")
if length is not None:
if self.chunked:
# This Response will fail with an IncompleteRead if it can't be
# received as chunked. This method falls back to attempt reading
# the response before raising an exception.
log.warning(
"Received response with both Content-Length and "
"Transfer-Encoding set. This is expressly forbidden "
"by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
"attempting to process response as Transfer-Encoding: "
"chunked."
)
return None
try:
# RFC 7230 section 3.3.2 specifies multiple content lengths can
# be sent in a single Content-Length header
# (e.g. Content-Length: 42, 42). This line ensures the values
# are all valid ints and that as long as the `set` length is 1,
# all values are the same. Otherwise, the header is invalid.
lengths = set([int(val) for val in length.split(",")])
if len(lengths) > 1:
raise InvalidHeader(
"Content-Length contained multiple "
"unmatching values (%s)" % length
)
length = lengths.pop()
except ValueError:
length = None
else:
if length < 0:
length = None
# Convert status to int for comparison
# In some cases, httplib returns a status of "_UNKNOWN"
try:
status = int(self.status)
except ValueError:
status = 0
# Check for responses that shouldn't include a body
if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
length = 0
return length
def _init_decoder(self):
"""
Set-up the _decoder attribute if necessary.
"""
# Note: content-encoding value should be case-insensitive, per RFC 7230
# Section 3.2
content_encoding = self.headers.get("content-encoding", "").lower()
if self._decoder is None:
if content_encoding in self.CONTENT_DECODERS:
self._decoder = _get_decoder(content_encoding)
elif "," in content_encoding:
encodings = [
e.strip()
for e in content_encoding.split(",")
if e.strip() in self.CONTENT_DECODERS
]
if len(encodings):
self._decoder = _get_decoder(content_encoding)
DECODER_ERROR_CLASSES = (IOError, zlib.error)
if brotli is not None:
DECODER_ERROR_CLASSES += (brotli.error,)
def _decode(self, data, decode_content, flush_decoder):
"""
Decode the data passed in and potentially flush the decoder.
"""
if not decode_content:
return data
try:
if self._decoder:
data = self._decoder.decompress(data)
except self.DECODER_ERROR_CLASSES as e:
content_encoding = self.headers.get("content-encoding", "").lower()
raise DecodeError(
"Received response with content-encoding: %s, but "
"failed to decode it." % content_encoding,
e,
)
if flush_decoder:
data += self._flush_decoder()
return data
def _flush_decoder(self):
"""
Flushes the decoder. Should only be called if the decoder is actually
being used.
"""
if self._decoder:
buf = self._decoder.decompress(b"")
return buf + self._decoder.flush()
return b""
@contextmanager
def _error_catcher(self):
"""
Catch low-level python exceptions, instead re-raising urllib3
variants, so that low-level exceptions are not leaked in the
high-level api.
On exit, release the connection back to the pool.
"""
clean_exit = False
try:
try:
yield
except SocketTimeout:
# FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
# there is yet no clean way to get at it from this context.
raise ReadTimeoutError(self._pool, None, "Read timed out.")
except BaseSSLError as e:
# FIXME: Is there a better way to differentiate between SSLErrors?
if "read operation timed out" not in str(e): # Defensive:
# This shouldn't happen but just in case we're missing an edge
# case, let's avoid swallowing SSL errors.
raise
raise ReadTimeoutError(self._pool, None, "Read timed out.")
except (HTTPException, SocketError) as e:
# This includes IncompleteRead.
raise ProtocolError("Connection broken: %r" % e, e)
# If no exception is thrown, we should avoid cleaning up
# unnecessarily.
clean_exit = True
finally:
# If we didn't terminate cleanly, we need to throw away our
# connection.
if not clean_exit:
# The response may not be closed but we're not going to use it
# anymore so close it now to ensure that the connection is
# released back to the pool.
if self._original_response:
self._original_response.close()
# Closing the response may not actually be sufficient to close
# everything, so if we have a hold of the connection close that
# too.
if self._connection:
self._connection.close()
# If we hold the original response but it's closed now, we should
# return the connection back to the pool.
if self._original_response and self._original_response.isclosed():
self.release_conn()
def read(self, amt=None, decode_content=None, cache_content=False):
"""
Similar to :meth:`httplib.HTTPResponse.read`, but with two additional
parameters: ``decode_content`` and ``cache_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
:param cache_content:
If True, will save the returned data such that the same result is
            returned regardless of the state of the underlying file object. This
is useful if you want the ``.data`` property to continue working
after having ``.read()`` the file object. (Overridden if ``amt`` is
set.)
"""
self._init_decoder()
if decode_content is None:
decode_content = self.decode_content
if self._fp is None:
return
flush_decoder = False
fp_closed = getattr(self._fp, "closed", False)
with self._error_catcher():
if amt is None:
# cStringIO doesn't like amt=None
data = self._fp.read() if not fp_closed else b""
flush_decoder = True
else:
cache_content = False
data = self._fp.read(amt) if not fp_closed else b""
if (
amt != 0 and not data
): # Platform-specific: Buggy versions of Python.
# Close the connection when no data is returned
#
# This is redundant to what httplib/http.client _should_
# already do. However, versions of python released before
# December 15, 2012 (http://bugs.python.org/issue16298) do
# not properly close the connection in all cases. There is
# no harm in redundantly calling close.
self._fp.close()
flush_decoder = True
if self.enforce_content_length and self.length_remaining not in (
0,
None,
):
# This is an edge case that httplib failed to cover due
# to concerns of backward compatibility. We're
# addressing it here to make sure IncompleteRead is
# raised during streaming, so all calls with incorrect
# Content-Length are caught.
raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
if data:
self._fp_bytes_read += len(data)
if self.length_remaining is not None:
self.length_remaining -= len(data)
data = self._decode(data, decode_content, flush_decoder)
if cache_content:
self._body = data
return data
def stream(self, amt=2 ** 16, decode_content=None):
"""
A generator wrapper for the read() method. A call will block until
``amt`` bytes have been read from the connection or until the
connection is closed.
:param amt:
            How much of the content to read. The generator will return up to
            ``amt`` bytes of data per iteration, but may return less. This is
            particularly likely when using compressed data. However, the empty
            string will never be returned.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
if self.chunked and self.supports_chunked_reads():
for line in self.read_chunked(amt, decode_content=decode_content):
yield line
else:
while not is_fp_closed(self._fp):
data = self.read(amt=amt, decode_content=decode_content)
if data:
yield data
@classmethod
def from_httplib(ResponseCls, r, **response_kw):
"""
Given an :class:`httplib.HTTPResponse` instance ``r``, return a
corresponding :class:`urllib3.response.HTTPResponse` object.
Remaining parameters are passed to the HTTPResponse constructor, along
with ``original_response=r``.
"""
headers = r.msg
if not isinstance(headers, HTTPHeaderDict):
if PY3:
headers = HTTPHeaderDict(headers.items())
else:
# Python 2.7
headers = HTTPHeaderDict.from_httplib(headers)
# HTTPResponse objects in Python 3 don't have a .strict attribute
strict = getattr(r, "strict", 0)
resp = ResponseCls(
body=r,
headers=headers,
status=r.status,
version=r.version,
reason=r.reason,
strict=strict,
original_response=r,
**response_kw
)
return resp
# Backwards-compatibility methods for httplib.HTTPResponse
def getheaders(self):
return self.headers
def getheader(self, name, default=None):
return self.headers.get(name, default)
# Backwards compatibility for http.cookiejar
def info(self):
return self.headers
# Overrides from io.IOBase
def close(self):
if not self.closed:
self._fp.close()
if self._connection:
self._connection.close()
if not self.auto_close:
io.IOBase.close(self)
@property
def closed(self):
if not self.auto_close:
return io.IOBase.closed.__get__(self)
elif self._fp is None:
return True
elif hasattr(self._fp, "isclosed"):
return self._fp.isclosed()
elif hasattr(self._fp, "closed"):
return self._fp.closed
else:
return True
def fileno(self):
if self._fp is None:
raise IOError("HTTPResponse has no file to get a fileno from")
elif hasattr(self._fp, "fileno"):
return self._fp.fileno()
else:
raise IOError(
"The file-like object this HTTPResponse is wrapped "
"around has no file descriptor"
)
def flush(self):
if (
self._fp is not None
and hasattr(self._fp, "flush")
and not getattr(self._fp, "closed", False)
):
return self._fp.flush()
def readable(self):
# This method is required for `io` module compatibility.
return True
def readinto(self, b):
# This method is required for `io` module compatibility.
temp = self.read(len(b))
if len(temp) == 0:
return 0
else:
b[: len(temp)] = temp
return len(temp)
def supports_chunked_reads(self):
"""
Checks if the underlying file-like object looks like a
httplib.HTTPResponse object. We do this by testing for the fp
attribute. If it is present we assume it returns raw chunks as
processed by read_chunked().
"""
return hasattr(self._fp, "fp")
def _update_chunk_length(self):
# First, we'll figure out length of a chunk and then
# we'll try to read it from socket.
if self.chunk_left is not None:
return
line = self._fp.fp.readline()
line = line.split(b";", 1)[0]
try:
self.chunk_left = int(line, 16)
except ValueError:
# Invalid chunked protocol response, abort.
self.close()
raise httplib.IncompleteRead(line)
def _handle_chunk(self, amt):
returned_chunk = None
if amt is None:
chunk = self._fp._safe_read(self.chunk_left)
returned_chunk = chunk
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
elif amt < self.chunk_left:
value = self._fp._safe_read(amt)
self.chunk_left = self.chunk_left - amt
returned_chunk = value
elif amt == self.chunk_left:
value = self._fp._safe_read(amt)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
returned_chunk = value
else: # amt > self.chunk_left
returned_chunk = self._fp._safe_read(self.chunk_left)
self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
self.chunk_left = None
return returned_chunk
def read_chunked(self, amt=None, decode_content=None):
"""
Similar to :meth:`HTTPResponse.read`, but with an additional
parameter: ``decode_content``.
:param amt:
How much of the content to read. If specified, caching is skipped
because it doesn't make sense to cache partial content as the full
response.
:param decode_content:
If True, will attempt to decode the body based on the
'content-encoding' header.
"""
self._init_decoder()
# FIXME: Rewrite this method and make it a class with a better structured logic.
if not self.chunked:
raise ResponseNotChunked(
"Response is not chunked. "
"Header 'transfer-encoding: chunked' is missing."
)
if not self.supports_chunked_reads():
raise BodyNotHttplibCompatible(
"Body should be httplib.HTTPResponse like. "
"It should have have an fp attribute which returns raw chunks."
)
with self._error_catcher():
# Don't bother reading the body of a HEAD request.
if self._original_response and is_response_to_head(self._original_response):
self._original_response.close()
return
# If a response is already read and closed
# then return immediately.
if self._fp.fp is None:
return
while True:
self._update_chunk_length()
if self.chunk_left == 0:
break
chunk = self._handle_chunk(amt)
decoded = self._decode(
chunk, decode_content=decode_content, flush_decoder=False
)
if decoded:
yield decoded
if decode_content:
# On CPython and PyPy, we should never need to flush the
# decoder. However, on Jython we *might* need to, so
# lets defensively do it anyway.
decoded = self._flush_decoder()
if decoded: # Platform-specific: Jython.
yield decoded
# Chunk content ends with \r\n: discard it.
while True:
line = self._fp.fp.readline()
if not line:
# Some sites may not end with '\r\n'.
break
if line == b"\r\n":
break
# We read everything; close the "file".
if self._original_response:
self._original_response.close()
def geturl(self):
"""
Returns the URL that was the source of this response.
If the request that generated this response redirected, this method
will return the final redirect location.
"""
if self.retries is not None and len(self.retries.history):
return self.retries.history[-1].redirect_location
else:
return self._request_url
def __iter__(self):
buffer = []
for chunk in self.stream(decode_content=True):
if b"\n" in chunk:
chunk = chunk.split(b"\n")
yield b"".join(buffer) + chunk[0] + b"\n"
for x in chunk[1:-1]:
yield x + b"\n"
if chunk[-1]:
buffer = [chunk[-1]]
else:
buffer = []
else:
buffer.append(chunk)
if buffer:
yield b"".join(buffer)
# ---- end of Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/response.py ----
from __future__ import absolute_import
import time
import logging
from collections import namedtuple
from itertools import takewhile
import email
import re
from ..exceptions import (
ConnectTimeoutError,
MaxRetryError,
ProtocolError,
ReadTimeoutError,
ResponseError,
InvalidHeader,
ProxyError,
)
from ..packages import six
log = logging.getLogger(__name__)
# Data structure for representing the metadata of requests that result in a retry.
RequestHistory = namedtuple(
"RequestHistory", ["method", "url", "error", "status", "redirect_location"]
)
class Retry(object):
""" Retry configuration.
Each retry attempt will create a new Retry object with updated values, so
they can be safely reused.
Retries can be defined as a default for a pool::
retries = Retry(connect=5, read=2, redirect=5)
http = PoolManager(retries=retries)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', retries=Retry(10))
Retries can be disabled by passing ``False``::
response = http.request('GET', 'http://example.com/', retries=False)
Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
retries are disabled, in which case the causing exception will be raised.
:param int total:
Total number of retries to allow. Takes precedence over other counts.
Set to ``None`` to remove this constraint and fall back on other
counts. It's a good idea to set this to some sensibly-high value to
account for unexpected edge cases and avoid infinite retry loops.
Set to ``0`` to fail on the first retry.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int connect:
How many connection-related errors to retry on.
These are errors raised before the request is sent to the remote server,
which we assume has not triggered the server to process the request.
Set to ``0`` to fail on the first retry of this type.
:param int read:
How many times to retry on read errors.
These errors are raised after the request was sent to the server, so the
request may have side-effects.
Set to ``0`` to fail on the first retry of this type.
:param int redirect:
How many redirects to perform. Limit this to avoid infinite redirect
loops.
        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
308.
Set to ``0`` to fail on the first retry of this type.
Set to ``False`` to disable and imply ``raise_on_redirect=False``.
:param int status:
How many times to retry on bad status codes.
These are retries made on responses, where status code matches
``status_forcelist``.
Set to ``0`` to fail on the first retry of this type.
:param iterable method_whitelist:
Set of uppercased HTTP method verbs that we should retry on.
By default, we only retry on methods which are considered to be
idempotent (multiple requests with the same parameters end with the
same state). See :attr:`Retry.DEFAULT_METHOD_WHITELIST`.
Set to a ``False`` value to retry on any verb.
:param iterable status_forcelist:
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ``method_whitelist``
and the response status code is in ``status_forcelist``.
By default, this is disabled with ``None``.
:param float backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
By default, backoff is disabled (set to 0).
:param bool raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
:param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
:param tuple history: The history of the request encountered during
each call to :meth:`~Retry.increment`. The list is in the order
the requests occurred. Each list item is of class :class:`RequestHistory`.
:param bool respect_retry_after_header:
Whether to respect Retry-After header on status codes defined as
:attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
:param iterable remove_headers_on_redirect:
Sequence of headers to remove from the request when a response
indicating a redirect is returned before firing off the redirected
request.
"""
DEFAULT_METHOD_WHITELIST = frozenset(
["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
)
RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
DEFAULT_REDIRECT_HEADERS_BLACKLIST = frozenset(["Authorization"])
#: Maximum backoff time.
BACKOFF_MAX = 120
def __init__(
self,
total=10,
connect=None,
read=None,
redirect=None,
status=None,
method_whitelist=DEFAULT_METHOD_WHITELIST,
status_forcelist=None,
backoff_factor=0,
raise_on_redirect=True,
raise_on_status=True,
history=None,
respect_retry_after_header=True,
remove_headers_on_redirect=DEFAULT_REDIRECT_HEADERS_BLACKLIST,
):
self.total = total
self.connect = connect
self.read = read
self.status = status
if redirect is False or total is False:
redirect = 0
raise_on_redirect = False
self.redirect = redirect
self.status_forcelist = status_forcelist or set()
self.method_whitelist = method_whitelist
self.backoff_factor = backoff_factor
self.raise_on_redirect = raise_on_redirect
self.raise_on_status = raise_on_status
self.history = history or tuple()
self.respect_retry_after_header = respect_retry_after_header
self.remove_headers_on_redirect = frozenset(
[h.lower() for h in remove_headers_on_redirect]
)
def new(self, **kw):
params = dict(
total=self.total,
connect=self.connect,
read=self.read,
redirect=self.redirect,
status=self.status,
method_whitelist=self.method_whitelist,
status_forcelist=self.status_forcelist,
backoff_factor=self.backoff_factor,
raise_on_redirect=self.raise_on_redirect,
raise_on_status=self.raise_on_status,
history=self.history,
remove_headers_on_redirect=self.remove_headers_on_redirect,
respect_retry_after_header=self.respect_retry_after_header,
)
params.update(kw)
return type(self)(**params)
@classmethod
def from_int(cls, retries, redirect=True, default=None):
""" Backwards-compatibility for the old retries format."""
if retries is None:
retries = default if default is not None else cls.DEFAULT
if isinstance(retries, Retry):
return retries
redirect = bool(redirect) and None
new_retries = cls(retries, redirect=redirect)
log.debug("Converted retries value: %r -> %r", retries, new_retries)
return new_retries
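    # Editor's note: illustrative conversions. A truthy ``redirect`` maps to
    # None (no separate redirect limit), a falsy one to False (disabled).
    # >>> Retry.from_int(None) is Retry.DEFAULT
    # True
    # >>> Retry.from_int(5).total
    # 5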
def get_backoff_time(self):
""" Formula for computing the current backoff
:rtype: float
"""
# We want to consider only the last consecutive errors sequence (Ignore redirects).
consecutive_errors_len = len(
list(
takewhile(lambda x: x.redirect_location is None, reversed(self.history))
)
)
if consecutive_errors_len <= 1:
return 0
backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
return min(self.BACKOFF_MAX, backoff_value)
def parse_retry_after(self, retry_after):
# Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
if re.match(r"^\s*[0-9]+\s*$", retry_after):
seconds = int(retry_after)
else:
retry_date_tuple = email.utils.parsedate(retry_after)
if retry_date_tuple is None:
raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
retry_date = time.mktime(retry_date_tuple)
seconds = retry_date - time.time()
if seconds < 0:
seconds = 0
return seconds
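    # Editor's note: illustrative value; an HTTP-date string is also accepted
    # and converted to a non-negative number of seconds from now.
    # >>> Retry().parse_retry_after('120')
    # 120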
def get_retry_after(self, response):
""" Get the value of Retry-After in seconds. """
retry_after = response.getheader("Retry-After")
if retry_after is None:
return None
return self.parse_retry_after(retry_after)
def sleep_for_retry(self, response=None):
retry_after = self.get_retry_after(response)
if retry_after:
time.sleep(retry_after)
return True
return False
def _sleep_backoff(self):
backoff = self.get_backoff_time()
if backoff <= 0:
return
time.sleep(backoff)
def sleep(self, response=None):
""" Sleep between retry attempts.
This method will respect a server's ``Retry-After`` response header
and sleep the duration of the time requested. If that is not present, it
will use an exponential backoff. By default, the backoff factor is 0 and
this method will return immediately.
"""
if self.respect_retry_after_header and response:
slept = self.sleep_for_retry(response)
if slept:
return
self._sleep_backoff()
def _is_connection_error(self, err):
""" Errors when we're fairly sure that the server did not receive the
request, so it should be safe to retry.
"""
if isinstance(err, ProxyError):
err = err.original_error
return isinstance(err, ConnectTimeoutError)
def _is_read_error(self, err):
""" Errors that occur after the request has been started, so we should
assume that the server began processing it.
"""
return isinstance(err, (ReadTimeoutError, ProtocolError))
def _is_method_retryable(self, method):
""" Checks if a given HTTP method should be retried upon, depending if
it is included on the method whitelist.
"""
if self.method_whitelist and method.upper() not in self.method_whitelist:
return False
return True
def is_retry(self, method, status_code, has_retry_after=False):
""" Is this method/status code retryable? (Based on whitelists and control
variables such as the number of total retries to allow, whether to
respect the Retry-After header, whether this header is present, and
whether the returned status code is on the list of status codes to
be retried upon on the presence of the aforementioned header)
"""
if not self._is_method_retryable(method):
return False
if self.status_forcelist and status_code in self.status_forcelist:
return True
return (
self.total
and self.respect_retry_after_header
and has_retry_after
and (status_code in self.RETRY_AFTER_STATUS_CODES)
)
def is_exhausted(self):
""" Are we out of retries? """
retry_counts = (self.total, self.connect, self.read, self.redirect, self.status)
retry_counts = list(filter(None, retry_counts))
if not retry_counts:
return False
return min(retry_counts) < 0
def increment(
self,
method=None,
url=None,
response=None,
error=None,
_pool=None,
_stacktrace=None,
):
""" Return a new Retry object with incremented retry counters.
:param response: A response object, or None, if the server did not
return a response.
:type response: :class:`~urllib3.response.HTTPResponse`
:param Exception error: An error encountered during the request, or
None if the response was received successfully.
:return: A new ``Retry`` object.
"""
if self.total is False and error:
# Disabled, indicate to re-raise the error.
raise six.reraise(type(error), error, _stacktrace)
total = self.total
if total is not None:
total -= 1
connect = self.connect
read = self.read
redirect = self.redirect
status_count = self.status
cause = "unknown"
status = None
redirect_location = None
if error and self._is_connection_error(error):
# Connect retry?
if connect is False:
raise six.reraise(type(error), error, _stacktrace)
elif connect is not None:
connect -= 1
elif error and self._is_read_error(error):
# Read retry?
if read is False or not self._is_method_retryable(method):
raise six.reraise(type(error), error, _stacktrace)
elif read is not None:
read -= 1
elif response and response.get_redirect_location():
# Redirect retry?
if redirect is not None:
redirect -= 1
cause = "too many redirects"
redirect_location = response.get_redirect_location()
status = response.status
else:
# Incrementing because of a server error like a 500 in
            # status_forcelist and the given method is in the whitelist
cause = ResponseError.GENERIC_ERROR
if response and response.status:
if status_count is not None:
status_count -= 1
cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
status = response.status
history = self.history + (
RequestHistory(method, url, error, status, redirect_location),
)
new_retry = self.new(
total=total,
connect=connect,
read=read,
redirect=redirect,
status=status_count,
history=history,
)
if new_retry.is_exhausted():
raise MaxRetryError(_pool, url, error or ResponseError(cause))
log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
return new_retry
def __repr__(self):
return (
"{cls.__name__}(total={self.total}, connect={self.connect}, "
"read={self.read}, redirect={self.redirect}, status={self.status})"
).format(cls=type(self), self=self)
# For backwards compatibility (equivalent to pre-v1.9):
Retry.DEFAULT = Retry(3)
# ---- end of Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/retry.py ----
pip
# ---- end of Django-locallibrary/env/Lib/site-packages/setuptools-49.2.1.dist-info/INSTALLER ----
"""distutils.command.config
Implements the Distutils 'config' command, a (mostly) empty command class
that exists mainly to be sub-classed by specific module distributions and
applications. The idea is that while every "config" command is different,
at least they're all named the same, and users always see "config" in the
list of standard commands. Also, this is a good place to put common
configure-like tasks: "try to compile this C code", or "figure out where
this header file lives".
"""
import os, re
from distutils.core import Command
from distutils.errors import DistutilsExecError
from distutils.sysconfig import customize_compiler
from distutils import log
LANG_EXT = {"c": ".c", "c++": ".cxx"}
class config(Command):
description = "prepare to build"
user_options = [
('compiler=', None,
"specify the compiler type"),
('cc=', None,
"specify the compiler executable"),
('include-dirs=', 'I',
"list of directories to search for header files"),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries"),
('noisy', None,
"show every action (compile, link, run, ...) taken"),
('dump-source', None,
"dump generated source files before attempting to compile them"),
]
# The three standard command methods: since the "config" command
# does nothing by default, these are empty.
def initialize_options(self):
self.compiler = None
self.cc = None
self.include_dirs = None
self.libraries = None
self.library_dirs = None
# maximal output for now
self.noisy = 1
self.dump_source = 1
# list of temporary files generated along-the-way that we have
# to clean at some point
self.temp_files = []
def finalize_options(self):
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
elif isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
if self.libraries is None:
self.libraries = []
elif isinstance(self.libraries, str):
self.libraries = [self.libraries]
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
def run(self):
pass
# Utility methods for actual "config" commands. The interfaces are
# loosely based on Autoconf macros of similar names. Sub-classes
# may use these freely.
def _check_compiler(self):
"""Check that 'self.compiler' really is a CCompiler object;
if not, make it one.
"""
# We do this late, and only on-demand, because this is an expensive
# import.
from distutils.ccompiler import CCompiler, new_compiler
if not isinstance(self.compiler, CCompiler):
self.compiler = new_compiler(compiler=self.compiler,
dry_run=self.dry_run, force=1)
customize_compiler(self.compiler)
if self.include_dirs:
self.compiler.set_include_dirs(self.include_dirs)
if self.libraries:
self.compiler.set_libraries(self.libraries)
if self.library_dirs:
self.compiler.set_library_dirs(self.library_dirs)
def _gen_temp_sourcefile(self, body, headers, lang):
filename = "_configtest" + LANG_EXT[lang]
with open(filename, "w") as file:
if headers:
for header in headers:
file.write("#include <%s>\n" % header)
file.write("\n")
file.write(body)
if body[-1] != "\n":
file.write("\n")
return filename
def _preprocess(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
out = "_configtest.i"
self.temp_files.extend([src, out])
self.compiler.preprocess(src, out, include_dirs=include_dirs)
return (src, out)
def _compile(self, body, headers, include_dirs, lang):
src = self._gen_temp_sourcefile(body, headers, lang)
if self.dump_source:
dump_file(src, "compiling '%s':" % src)
(obj,) = self.compiler.object_filenames([src])
self.temp_files.extend([src, obj])
self.compiler.compile([src], include_dirs=include_dirs)
return (src, obj)
def _link(self, body, headers, include_dirs, libraries, library_dirs,
lang):
(src, obj) = self._compile(body, headers, include_dirs, lang)
prog = os.path.splitext(os.path.basename(src))[0]
self.compiler.link_executable([obj], prog,
libraries=libraries,
library_dirs=library_dirs,
target_lang=lang)
if self.compiler.exe_extension is not None:
prog = prog + self.compiler.exe_extension
self.temp_files.append(prog)
return (src, obj, prog)
def _clean(self, *filenames):
if not filenames:
filenames = self.temp_files
self.temp_files = []
log.info("removing: %s", ' '.join(filenames))
for filename in filenames:
try:
os.remove(filename)
except OSError:
pass
# XXX these ignore the dry-run flag: what to do, what to do? even if
# you want a dry-run build, you still need some sort of configuration
# info. My inclination is to make it up to the real config command to
# consult 'dry_run', and assume a default (minimal) configuration if
# true. The problem with trying to do it here is that you'd have to
# return either true or false from all the 'try' methods, neither of
# which is correct.
# XXX need access to the header search path and maybe default macros.
def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
"""Construct a source file from 'body' (a string containing lines
of C/C++ code) and 'headers' (a list of header files to include)
and run it through the preprocessor. Return true if the
preprocessor succeeded, false if there were any errors.
('body' probably isn't of much use, but what the heck.)
"""
from distutils.ccompiler import CompileError
self._check_compiler()
ok = True
try:
self._preprocess(body, headers, include_dirs, lang)
except CompileError:
ok = False
self._clean()
return ok
def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
lang="c"):
"""Construct a source file (just like 'try_cpp()'), run it through
the preprocessor, and return true if any line of the output matches
'pattern'. 'pattern' should either be a compiled regex object or a
string containing a regex. If both 'body' and 'headers' are None,
preprocesses an empty file -- which can be useful to determine the
symbols the preprocessor and compiler set by default.
"""
self._check_compiler()
src, out = self._preprocess(body, headers, include_dirs, lang)
if isinstance(pattern, str):
pattern = re.compile(pattern)
with open(out) as file:
match = False
while True:
line = file.readline()
if line == '':
break
if pattern.search(line):
match = True
break
self._clean()
return match
def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
"""Try to compile a source file built from 'body' and 'headers'.
Return true on success, false otherwise.
"""
from distutils.ccompiler import CompileError
self._check_compiler()
try:
self._compile(body, headers, include_dirs, lang)
ok = True
except CompileError:
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_link(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile and link a source file, built from 'body' and
'headers', to executable form. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
ok = True
except (CompileError, LinkError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
def try_run(self, body, headers=None, include_dirs=None, libraries=None,
library_dirs=None, lang="c"):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Return true on success, false
otherwise.
"""
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
self.spawn([exe])
ok = True
except (CompileError, LinkError, DistutilsExecError):
ok = False
log.info(ok and "success!" or "failure.")
self._clean()
return ok
# -- High-level methods --------------------------------------------
# (these are the ones that are actually likely to be useful
# when implementing a real-world config command!)
def check_func(self, func, headers=None, include_dirs=None,
libraries=None, library_dirs=None, decl=0, call=0):
"""Determine if function 'func' is available by constructing a
source file that refers to 'func', and compiles and links it.
If everything succeeds, returns true; otherwise returns false.
The constructed source file starts out by including the header
files listed in 'headers'. If 'decl' is true, it then declares
'func' (as "int func()"); you probably shouldn't supply 'headers'
and set 'decl' true in the same call, or you might get errors about
        conflicting declarations for 'func'. Finally, the constructed
'main()' function either references 'func' or (if 'call' is true)
calls it. 'libraries' and 'library_dirs' are used when
linking.
"""
self._check_compiler()
body = []
if decl:
body.append("int %s ();" % func)
body.append("int main () {")
if call:
body.append(" %s();" % func)
else:
body.append(" %s;" % func)
body.append("}")
body = "\n".join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_lib(self, library, library_dirs=None, headers=None,
include_dirs=None, other_libraries=[]):
"""Determine if 'library' is available to be linked against,
without actually checking that any particular symbols are provided
by it. 'headers' will be used in constructing the source file to
be compiled, but the only effect of this is to check if all the
header files listed are available. Any libraries listed in
'other_libraries' will be included in the link, in case 'library'
has symbols that depend on other libraries.
"""
self._check_compiler()
return self.try_link("int main (void) { }", headers, include_dirs,
[library] + other_libraries, library_dirs)
def check_header(self, header, include_dirs=None, library_dirs=None,
lang="c"):
"""Determine if the system header file named by 'header_file'
exists and can be found by the preprocessor; return true if so,
false otherwise.
"""
return self.try_cpp(body="/* No body */", headers=[header],
include_dirs=include_dirs)
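# Editor's note: a hypothetical subclass, sketching how a setup script might
# build on the helpers above; 'my_config' and 'zlib.h' are examples only.
# class my_config(config):
#     def run(self):
#         if not self.check_header('zlib.h'):
#             log.warn("zlib.h not found; building without compression")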
def dump_file(filename, head=None):
"""Dumps a file content into log.info.
If head is not None, will be dumped before the file content.
"""
if head is None:
log.info('%s', filename)
else:
log.info(head)
file = open(filename)
try:
log.info(file.read())
finally:
file.close()
# ---- end of Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/config.py ----
"""distutils.dist
Provides the Distribution class, which represents the module distribution
being built/installed/distributed.
"""
import sys
import os
import re
from email import message_from_file
try:
import warnings
except ImportError:
warnings = None
from distutils.errors import *
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
# Regex to define acceptable Distutils command names. This is not *quite*
# the same as a Python NAME -- I don't allow leading underscores. The fact
# that they're very similar is no coincidence; the default naming scheme is
# to look for a Python module named after the command.
command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
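# Editor's note: illustrative matches for the pattern above.
# >>> bool(command_re.match('build_ext'))
# True
# >>> bool(command_re.match('_private'))   # leading underscore rejected
# False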
def _ensure_list(value, fieldname):
if isinstance(value, str):
# a string containing comma separated values is okay. It will
# be converted to a list by Distribution.finalize_options().
pass
elif not isinstance(value, list):
# passing a tuple or an iterator perhaps, warn and convert
typename = type(value).__name__
msg = "Warning: '{fieldname}' should be a list, got type '{typename}'"
msg = msg.format(**locals())
log.log(log.WARN, msg)
value = list(value)
return value
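# Editor's note: illustrative behaviour; the tuple case also emits a warning
# through distutils.log before being coerced.
# >>> _ensure_list('alpha,beta', 'keywords')   # comma string passes through
# 'alpha,beta'
# >>> _ensure_list(('linux', 'win32'), 'platforms')
# ['linux', 'win32']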
class Distribution:
"""The core of the Distutils. Most of the work hiding behind 'setup'
is really done within a Distribution instance, which farms the work out
to the Distutils commands specified on the command line.
Setup scripts will almost never instantiate Distribution directly,
unless the 'setup()' function is totally inadequate to their needs.
However, it is conceivable that a setup script might wish to subclass
Distribution for some specialized purpose, and then pass the subclass
to 'setup()' as the 'distclass' keyword argument. If so, it is
necessary to respect the expectations that 'setup' has of Distribution.
See the code for 'setup()', in core.py, for details.
"""
# 'global_options' describes the command-line options that may be
# supplied to the setup script prior to any actual commands.
# Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
# these global options. This list should be kept to a bare minimum,
# since every global option is also valid as a command option -- and we
# don't want to pollute the commands with too many options that they
# have minimal control over.
# The fourth entry for verbose means that it can be repeated.
global_options = [
('verbose', 'v', "run verbosely (default)", 1),
('quiet', 'q', "run quietly (turns verbosity off)"),
('dry-run', 'n', "don't actually do anything"),
('help', 'h', "show detailed help message"),
('no-user-cfg', None,
'ignore pydistutils.cfg in your home directory'),
]
# 'common_usage' is a short (2-3 line) string describing the common
# usage of the setup script.
common_usage = """\
Common commands: (see '--help-commands' for more)
setup.py build will build the package underneath 'build/'
setup.py install will install the package
"""
# options that are not propagated to the commands
display_options = [
('help-commands', None,
"list all available commands"),
('name', None,
"print package name"),
('version', 'V',
"print package version"),
('fullname', None,
"print <package name>-<version>"),
('author', None,
"print the author's name"),
('author-email', None,
"print the author's email address"),
('maintainer', None,
"print the maintainer's name"),
('maintainer-email', None,
"print the maintainer's email address"),
('contact', None,
"print the maintainer's name if known, else the author's"),
('contact-email', None,
"print the maintainer's email address if known, else the author's"),
('url', None,
"print the URL for this package"),
('license', None,
"print the license of the package"),
('licence', None,
"alias for --license"),
('description', None,
"print the package description"),
('long-description', None,
"print the long package description"),
('platforms', None,
"print the list of platforms"),
('classifiers', None,
"print the list of classifiers"),
('keywords', None,
"print the list of keywords"),
('provides', None,
"print the list of packages/modules provided"),
('requires', None,
"print the list of packages/modules required"),
('obsoletes', None,
"print the list of packages/modules made obsolete")
]
display_option_names = [translate_longopt(x[0]) for x in display_options]
# negative options are options that exclude other options
negative_opt = {'quiet': 'verbose'}
# -- Creation/initialization methods -------------------------------
def __init__(self, attrs=None):
"""Construct a new Distribution instance: initialize all the
attributes of a Distribution, and then use 'attrs' (a dictionary
mapping attribute names to values) to assign some of those
attributes their "real" values. (Any attributes not mentioned in
'attrs' will be assigned to some null value: 0, None, an empty list
or dictionary, etc.) Most importantly, initialize the
'command_obj' attribute to the empty dictionary; this will be
filled in with real command objects by 'parse_command_line()'.
"""
# Default values for our command-line options
self.verbose = 1
self.dry_run = 0
self.help = 0
for attr in self.display_option_names:
setattr(self, attr, 0)
# Store the distribution meta-data (name, version, author, and so
# forth) in a separate object -- we're getting to have enough
# information here (and enough command-line options) that it's
# worth it. Also delegate 'get_XXX()' methods to the 'metadata'
# object in a sneaky and underhanded (but efficient!) way.
self.metadata = DistributionMetadata()
for basename in self.metadata._METHOD_BASENAMES:
method_name = "get_" + basename
setattr(self, method_name, getattr(self.metadata, method_name))
# 'cmdclass' maps command names to class objects, so we
# can 1) quickly figure out which class to instantiate when
# we need to create a new command object, and 2) have a way
# for the setup script to override command classes
self.cmdclass = {}
# 'command_packages' is a list of packages in which commands
# are searched for. The factory for command 'foo' is expected
# to be named 'foo' in the module 'foo' in one of the packages
# named here. This list is searched from the left; an error
# is raised if no named package provides the command being
# searched for. (Always access using get_command_packages().)
self.command_packages = None
# 'script_name' and 'script_args' are usually set to sys.argv[0]
# and sys.argv[1:], but they can be overridden when the caller is
# not necessarily a setup script run from the command-line.
self.script_name = None
self.script_args = None
# 'command_options' is where we store command options between
# parsing them (from config files, the command-line, etc.) and when
# they are actually needed -- ie. when the command in question is
# instantiated. It is a dictionary of dictionaries of 2-tuples:
# command_options = { command_name : { option : (source, value) } }
self.command_options = {}
# 'dist_files' is the list of (command, pyversion, file) that
# have been created by any dist commands run so far. This is
# filled regardless of whether the run is dry or not. pyversion
# gives sysconfig.get_python_version() if the dist file is
# specific to a Python version, 'any' if it is good for all
# Python versions on the target platform, and '' for a source
# file. pyversion should not be used to specify minimum or
# maximum required Python versions; use the metainfo for that
# instead.
self.dist_files = []
# These options are really the business of various commands, rather
# than of the Distribution itself. We provide aliases for them in
# Distribution as a convenience to the developer.
self.packages = None
self.package_data = {}
self.package_dir = None
self.py_modules = None
self.libraries = None
self.headers = None
self.ext_modules = None
self.ext_package = None
self.include_dirs = None
self.extra_path = None
self.scripts = None
self.data_files = None
self.password = ''
# And now initialize bookkeeping stuff that can't be supplied by
# the caller at all. 'command_obj' maps command names to
# Command instances -- that's how we enforce that every command
# class is a singleton.
self.command_obj = {}
# 'have_run' maps command names to boolean values; it keeps track
# of whether we have actually run a particular command, to make it
# cheap to "run" a command whenever we think we might need to -- if
# it's already been done, no need for expensive filesystem
# operations, we just check the 'have_run' dictionary and carry on.
# It's only safe to query 'have_run' for a command class that has
# been instantiated -- a false value will be inserted when the
# command object is created, and replaced with a true value when
# the command is successfully run. Thus it's probably best to use
# '.get()' rather than a straight lookup.
self.have_run = {}
# Now we'll use the attrs dictionary (ultimately, keyword args from
# the setup script) to possibly override any or all of these
# distribution options.
if attrs:
# Pull out the set of command options and work on them
# specifically. Note that this order guarantees that aliased
# command options will override any supplied redundantly
# through the general options dictionary.
options = attrs.get('options')
if options is not None:
del attrs['options']
for (command, cmd_options) in options.items():
opt_dict = self.get_option_dict(command)
for (opt, val) in cmd_options.items():
opt_dict[opt] = ("setup script", val)
if 'licence' in attrs:
attrs['license'] = attrs['licence']
del attrs['licence']
msg = "'licence' distribution option is deprecated; use 'license'"
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
# Now work on the rest of the attributes. Any attribute that's
# not already defined is invalid!
for (key, val) in attrs.items():
if hasattr(self.metadata, "set_" + key):
getattr(self.metadata, "set_" + key)(val)
elif hasattr(self.metadata, key):
setattr(self.metadata, key, val)
elif hasattr(self, key):
setattr(self, key, val)
else:
msg = "Unknown distribution option: %s" % repr(key)
warnings.warn(msg)
# no-user-cfg is handled before other command line args
# because other args override the config files, and this
# one is needed before we can load the config files.
# If attrs['script_args'] wasn't passed, assume false.
#
        # This also makes sure we just look at the global options
self.want_user_cfg = True
if self.script_args is not None:
for arg in self.script_args:
if not arg.startswith('-'):
break
if arg == '--no-user-cfg':
self.want_user_cfg = False
break
self.finalize_options()
def get_option_dict(self, command):
"""Get the option dictionary for a given command. If that
command's option dictionary hasn't been created yet, then create it
and return the new dictionary; otherwise, return the existing
option dictionary.
"""
dict = self.command_options.get(command)
if dict is None:
dict = self.command_options[command] = {}
return dict
def dump_option_dicts(self, header=None, commands=None, indent=""):
from pprint import pformat
if commands is None: # dump all command option dicts
commands = sorted(self.command_options.keys())
if header is not None:
self.announce(indent + header)
indent = indent + " "
if not commands:
self.announce(indent + "no commands known yet")
return
for cmd_name in commands:
opt_dict = self.command_options.get(cmd_name)
if opt_dict is None:
self.announce(indent +
"no option dict for '%s' command" % cmd_name)
else:
self.announce(indent +
"option dict for '%s' command:" % cmd_name)
out = pformat(opt_dict)
for line in out.split('\n'):
self.announce(indent + " " + line)
# -- Config file finding/parsing methods ---------------------------
def find_config_files(self):
"""Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __inst__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac; and setup.cfg in the current directory.
The file in the user's home directory can be disabled with the
--no-user-cfg option.
"""
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
if self.want_user_cfg:
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
if DEBUG:
self.announce("using config files: %s" % ', '.join(files))
return files
def parse_config_files(self, filenames=None):
from configparser import ConfigParser
# Ignore install directory options if we have a venv
if sys.prefix != sys.base_prefix:
ignore_options = [
'install-base', 'install-platbase', 'install-lib',
'install-platlib', 'install-purelib', 'install-headers',
'install-scripts', 'install-data', 'prefix', 'exec-prefix',
'home', 'user', 'root']
else:
ignore_options = []
ignore_options = frozenset(ignore_options)
if filenames is None:
filenames = self.find_config_files()
if DEBUG:
self.announce("Distribution.parse_config_files():")
parser = ConfigParser()
for filename in filenames:
if DEBUG:
self.announce(" reading %s" % filename)
parser.read(filename)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt != '__name__' and opt not in ignore_options:
val = parser.get(section,opt)
opt = opt.replace('-', '_')
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
# If there was a "global" section in the config file, use it
# to set Distribution options.
if 'global' in self.command_options:
for (opt, (src, val)) in self.command_options['global'].items():
alias = self.negative_opt.get(opt)
try:
if alias:
setattr(self, alias, not strtobool(val))
elif opt in ('verbose', 'dry_run'): # ugh!
setattr(self, opt, strtobool(val))
else:
setattr(self, opt, val)
except ValueError as msg:
raise DistutilsOptionError(msg)
# -- Command-line parsing methods ----------------------------------
def parse_command_line(self):
"""Parse the setup script's command line, taken from the
'script_args' instance attribute (which defaults to 'sys.argv[1:]'
-- see 'setup()' in core.py). This list is first processed for
"global options" -- options that set attributes of the Distribution
instance. Then, it is alternately scanned for Distutils commands
and options for that command. Each new command terminates the
options for the previous command. The allowed options for a
command are determined by the 'user_options' attribute of the
command class -- thus, we have to be able to load command classes
in order to parse the command line. Any error in that 'options'
attribute raises DistutilsGetoptError; any error on the
command-line raises DistutilsArgError. If no Distutils commands
were found on the command line, raises DistutilsArgError. Return
true if command-line was successfully parsed and we should carry
on with executing commands; false if no errors but we shouldn't
execute commands (currently, this only happens if user asks for
help).
"""
#
# We now have enough information to show the Macintosh dialog
# that allows the user to interactively specify the "command line".
#
toplevel_options = self._get_toplevel_options()
# We have to parse the command line a bit at a time -- global
# options, then the first command, then its options, and so on --
# because each command will be handled by a different class, and
# the options that are valid for a particular class aren't known
# until we have loaded the command class, which doesn't happen
# until we know what the command is.
self.commands = []
parser = FancyGetopt(toplevel_options + self.display_options)
parser.set_negative_aliases(self.negative_opt)
parser.set_aliases({'licence': 'license'})
args = parser.getopt(args=self.script_args, object=self)
option_order = parser.get_option_order()
log.set_verbosity(self.verbose)
# for display options we return immediately
if self.handle_display_options(option_order):
return
while args:
args = self._parse_command_opts(parser, args)
if args is None: # user asked for help (and got it)
return
# Handle the cases of --help as a "global" option, ie.
# "setup.py --help" and "setup.py --help command ...". For the
# former, we show global options (--verbose, --dry-run, etc.)
# and display-only options (--name, --version, etc.); for the
# latter, we omit the display-only options and show help for
# each command listed on the command line.
if self.help:
self._show_help(parser,
display_options=len(self.commands) == 0,
commands=self.commands)
return
# Oops, no commands found -- an end-user error
if not self.commands:
raise DistutilsArgError("no commands supplied")
# All is well: return true
return True
def _get_toplevel_options(self):
"""Return the non-display options recognized at the top level.
This includes options that are recognized *only* at the top
level as well as options recognized for commands.
"""
return self.global_options + [
("command-packages=", None,
"list of packages that provide distutils commands"),
]
def _parse_command_opts(self, parser, args):
"""Parse the command-line options for a single command.
'parser' must be a FancyGetopt instance; 'args' must be the list
of arguments, starting with the current command (whose options
we are about to parse). Returns a new version of 'args' with
the next command at the front of the list; will be the empty
list if there are no more commands on the command line. Returns
None if the user asked for help on this command.
"""
# late import because of mutual dependence between these modules
from distutils.cmd import Command
# Pull the current command from the head of the command line
command = args[0]
if not command_re.match(command):
raise SystemExit("invalid command name '%s'" % command)
self.commands.append(command)
# Dig up the command class that implements this command, so we
# 1) know that it's a valid command, and 2) know which options
# it takes.
try:
cmd_class = self.get_command_class(command)
except DistutilsModuleError as msg:
raise DistutilsArgError(msg)
# Require that the command class be derived from Command -- want
# to be sure that the basic "command" interface is implemented.
if not issubclass(cmd_class, Command):
raise DistutilsClassError(
"command class %s must subclass Command" % cmd_class)
# Also make sure that the command object provides a list of its
# known options.
if not (hasattr(cmd_class, 'user_options') and
isinstance(cmd_class.user_options, list)):
msg = ("command class %s must provide "
"'user_options' attribute (a list of tuples)")
raise DistutilsClassError(msg % cmd_class)
# If the command class has a list of negative alias options,
# merge it in with the global negative aliases.
negative_opt = self.negative_opt
if hasattr(cmd_class, 'negative_opt'):
negative_opt = negative_opt.copy()
negative_opt.update(cmd_class.negative_opt)
# Check for help_options in command class. They have a different
# format (tuple of four) so we need to preprocess them here.
if (hasattr(cmd_class, 'help_options') and
isinstance(cmd_class.help_options, list)):
help_options = fix_help_options(cmd_class.help_options)
else:
help_options = []
# All commands support the global options too, just by adding
# in 'global_options'.
parser.set_option_table(self.global_options +
cmd_class.user_options +
help_options)
parser.set_negative_aliases(negative_opt)
(args, opts) = parser.getopt(args[1:])
if hasattr(opts, 'help') and opts.help:
self._show_help(parser, display_options=0, commands=[cmd_class])
return
if (hasattr(cmd_class, 'help_options') and
isinstance(cmd_class.help_options, list)):
help_option_found=0
for (help_option, short, desc, func) in cmd_class.help_options:
if hasattr(opts, parser.get_attr_name(help_option)):
help_option_found=1
if callable(func):
func()
else:
raise DistutilsClassError(
"invalid help function %r for help option '%s': "
"must be a callable object (function, etc.)"
% (func, help_option))
if help_option_found:
return
# Put the options from the command-line into their official
# holding pen, the 'command_options' dictionary.
opt_dict = self.get_option_dict(command)
for (name, value) in vars(opts).items():
opt_dict[name] = ("command line", value)
return args
def finalize_options(self):
"""Set final values for all the options on the Distribution
instance, analogous to the .finalize_options() method of Command
objects.
"""
for attr in ('keywords', 'platforms'):
value = getattr(self.metadata, attr)
if value is None:
continue
if isinstance(value, str):
value = [elm.strip() for elm in value.split(',')]
setattr(self.metadata, attr, value)
def _show_help(self, parser, global_options=1, display_options=1,
commands=[]):
"""Show help for the setup script command-line in the form of
several lists of command-line options. 'parser' should be a
FancyGetopt instance; do not expect it to be returned in the
same state, as its option table will be reset to make it
generate the correct help text.
If 'global_options' is true, lists the global options:
--verbose, --dry-run, etc. If 'display_options' is true, lists
the "display-only" options: --name, --version, etc. Finally,
lists per-command help for every command name or command class
in 'commands'.
"""
# late import because of mutual dependence between these modules
from distutils.core import gen_usage
from distutils.cmd import Command
if global_options:
if display_options:
options = self._get_toplevel_options()
else:
options = self.global_options
parser.set_option_table(options)
parser.print_help(self.common_usage + "\nGlobal options:")
print('')
if display_options:
parser.set_option_table(self.display_options)
parser.print_help(
"Information display options (just display " +
"information, ignore any commands)")
print('')
for command in self.commands:
if isinstance(command, type) and issubclass(command, Command):
klass = command
else:
klass = self.get_command_class(command)
if (hasattr(klass, 'help_options') and
isinstance(klass.help_options, list)):
parser.set_option_table(klass.user_options +
fix_help_options(klass.help_options))
else:
parser.set_option_table(klass.user_options)
parser.print_help("Options for '%s' command:" % klass.__name__)
print('')
print(gen_usage(self.script_name))
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
from distutils.core import gen_usage
# User just wants a list of commands -- we'll print it out and stop
# processing now (ie. if they ran "setup --help-commands foo bar",
# we ignore "foo bar").
if self.help_commands:
self.print_commands()
print('')
print(gen_usage(self.script_name))
return 1
# If user supplied any of the "display metadata" options, then
# display that metadata in the order in which the user supplied the
# metadata options.
any_display_options = 0
is_display_option = {}
for option in self.display_options:
is_display_option[option[0]] = 1
for (opt, val) in option_order:
if val and is_display_option.get(opt):
opt = translate_longopt(opt)
value = getattr(self.metadata, "get_"+opt)()
if opt in ['keywords', 'platforms']:
print(','.join(value))
elif opt in ('classifiers', 'provides', 'requires',
'obsoletes'):
print('\n'.join(value))
else:
print(value)
any_display_options = 1
return any_display_options
def print_command_list(self, commands, header, max_length):
"""Print a subset of the list of all commands -- used by
'print_commands()'.
"""
print(header + ":")
for cmd in commands:
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
print(" %-*s %s" % (max_length, cmd, description))
def print_commands(self):
"""Print out a help message listing all available commands with a
description of each. The list is divided into "standard commands"
(listed in distutils.command.__all__) and "extra commands"
(mentioned in self.cmdclass, but not a standard command). The
descriptions come from the command class attribute
'description'.
"""
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
max_length = 0
for cmd in (std_commands + extra_commands):
if len(cmd) > max_length:
max_length = len(cmd)
self.print_command_list(std_commands,
"Standard commands",
max_length)
if extra_commands:
print()
self.print_command_list(extra_commands,
"Extra commands",
max_length)
def get_command_list(self):
"""Get a list of (command, description) tuples.
The list is divided into "standard commands" (listed in
distutils.command.__all__) and "extra commands" (mentioned in
self.cmdclass, but not a standard command). The descriptions come
from the command class attribute 'description'.
"""
# Currently this is only used on Mac OS, for the Mac-only GUI
# Distutils interface (by Jack Jansen)
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
rv = []
for cmd in (std_commands + extra_commands):
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
rv.append((cmd, description))
return rv
# -- Command class/object methods ----------------------------------
def get_command_packages(self):
"""Return a list of packages from which commands are loaded."""
pkgs = self.command_packages
if not isinstance(pkgs, list):
if pkgs is None:
pkgs = ''
pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
if "distutils.command" not in pkgs:
pkgs.insert(0, "distutils.command")
self.command_packages = pkgs
return pkgs
def get_command_class(self, command):
"""Return the class that implements the Distutils command named by
'command'. First we check the 'cmdclass' dictionary; if the
command is mentioned there, we fetch the class object from the
dictionary and return it. Otherwise we load the command module
("distutils.command." + command) and fetch the command class from
the module. The loaded class is also stored in 'cmdclass'
to speed future calls to 'get_command_class()'.
Raises DistutilsModuleError if the expected module could not be
found, or if that module does not define the expected class.
"""
klass = self.cmdclass.get(command)
if klass:
return klass
for pkgname in self.get_command_packages():
module_name = "%s.%s" % (pkgname, command)
klass_name = command
try:
__import__(module_name)
module = sys.modules[module_name]
except ImportError:
continue
try:
klass = getattr(module, klass_name)
except AttributeError:
raise DistutilsModuleError(
"invalid command '%s' (no class '%s' in module '%s')"
% (command, klass_name, module_name))
self.cmdclass[command] = klass
return klass
raise DistutilsModuleError("invalid command '%s'" % command)
def get_command_obj(self, command, create=1):
"""Return the command object for 'command'. Normally this object
is cached on a previous call to 'get_command_obj()'; if no command
object for 'command' is in the cache, then we either create and
return it (if 'create' is true) or return None.
"""
cmd_obj = self.command_obj.get(command)
if not cmd_obj and create:
if DEBUG:
self.announce("Distribution.get_command_obj(): "
"creating '%s' command object" % command)
klass = self.get_command_class(command)
cmd_obj = self.command_obj[command] = klass(self)
self.have_run[command] = 0
# Set any options that were supplied in config files
# or on the command line. (NB. support for error
# reporting is lame here: any errors aren't reported
# until 'finalize_options()' is called, which means
# we won't report the source of the error.)
options = self.command_options.get(command)
if options:
self._set_command_options(cmd_obj, options)
return cmd_obj
def _set_command_options(self, command_obj, option_dict=None):
"""Set the options for 'command_obj' from 'option_dict'. Basically
this means copying elements of a dictionary ('option_dict') to
attributes of an instance ('command').
'command_obj' must be a Command instance. If 'option_dict' is not
supplied, uses the standard option dictionary for this command
(from 'self.command_options').
"""
command_name = command_obj.get_command_name()
if option_dict is None:
option_dict = self.get_option_dict(command_name)
if DEBUG:
self.announce(" setting options for '%s' command:" % command_name)
for (option, (source, value)) in option_dict.items():
if DEBUG:
self.announce(" %s = %s (from %s)" % (option, value,
source))
try:
bool_opts = [translate_longopt(o)
for o in command_obj.boolean_options]
except AttributeError:
bool_opts = []
try:
neg_opt = command_obj.negative_opt
except AttributeError:
neg_opt = {}
try:
is_string = isinstance(value, str)
if option in neg_opt and is_string:
setattr(command_obj, neg_opt[option], not strtobool(value))
elif option in bool_opts and is_string:
setattr(command_obj, option, strtobool(value))
elif hasattr(command_obj, option):
setattr(command_obj, option, value)
else:
raise DistutilsOptionError(
"error in %s: command '%s' has no such option '%s'"
% (source, command_name, option))
except ValueError as msg:
raise DistutilsOptionError(msg)
def reinitialize_command(self, command, reinit_subcommands=0):
"""Reinitializes a command to the state it was in when first
returned by 'get_command_obj()': ie., initialized but not yet
finalized. This provides the opportunity to sneak option
values in programmatically, overriding or supplementing
user-supplied values from the config files and command line.
You'll have to re-finalize the command object (by calling
'finalize_options()' or 'ensure_finalized()') before using it for
real.
'command' should be a command name (string) or command object. If
'reinit_subcommands' is true, also reinitializes the command's
sub-commands, as declared by the 'sub_commands' class attribute (if
it has one). See the "install" command for an example. Only
reinitializes the sub-commands that actually matter, ie. those
whose test predicates return true.
Returns the reinitialized command object.
"""
from distutils.cmd import Command
if not isinstance(command, Command):
command_name = command
command = self.get_command_obj(command_name)
else:
command_name = command.get_command_name()
if not command.finalized:
return command
command.initialize_options()
command.finalized = 0
self.have_run[command_name] = 0
self._set_command_options(command)
if reinit_subcommands:
for sub in command.get_sub_commands():
self.reinitialize_command(sub, reinit_subcommands)
return command
# -- Methods that operate on the Distribution ----------------------
def announce(self, msg, level=log.INFO):
log.log(level, msg)
def run_commands(self):
"""Run each command that was seen on the setup script command line.
Uses the list of commands found and cache of command objects
created by 'get_command_obj()'.
"""
for cmd in self.commands:
self.run_command(cmd)
# -- Methods that operate on its Commands --------------------------
def run_command(self, command):
"""Do whatever it takes to run a command (including nothing at all,
if the command has already been run). Specifically: if we have
already created and run the command named by 'command', return
silently without doing anything. If the command named by 'command'
doesn't even have a command object yet, create one. Then invoke
'run()' on that command object (or an existing one).
"""
# Already been here, done that? then return silently.
if self.have_run.get(command):
return
log.info("running %s", command)
cmd_obj = self.get_command_obj(command)
cmd_obj.ensure_finalized()
cmd_obj.run()
self.have_run[command] = 1
# -- Distribution query methods ------------------------------------
def has_pure_modules(self):
return len(self.packages or self.py_modules or []) > 0
def has_ext_modules(self):
return self.ext_modules and len(self.ext_modules) > 0
def has_c_libraries(self):
return self.libraries and len(self.libraries) > 0
def has_modules(self):
return self.has_pure_modules() or self.has_ext_modules()
def has_headers(self):
return self.headers and len(self.headers) > 0
def has_scripts(self):
return self.scripts and len(self.scripts) > 0
def has_data_files(self):
return self.data_files and len(self.data_files) > 0
def is_pure(self):
return (self.has_pure_modules() and
not self.has_ext_modules() and
not self.has_c_libraries())
# -- Metadata query methods ----------------------------------------
# If you're looking for 'get_name()', 'get_version()', and so forth,
# they are defined in a sneaky way: the constructor binds self.get_XXX
# to self.metadata.get_XXX. The actual code is in the
# DistributionMetadata class, below.
class DistributionMetadata:
"""Dummy class to hold the distribution meta-data: name, version,
author, and so forth.
"""
_METHOD_BASENAMES = ("name", "version", "author", "author_email",
"maintainer", "maintainer_email", "url",
"license", "description", "long_description",
"keywords", "platforms", "fullname", "contact",
"contact_email", "classifiers", "download_url",
# PEP 314
"provides", "requires", "obsoletes",
)
def __init__(self, path=None):
if path is not None:
self.read_pkg_file(open(path))
else:
self.name = None
self.version = None
self.author = None
self.author_email = None
self.maintainer = None
self.maintainer_email = None
self.url = None
self.license = None
self.description = None
self.long_description = None
self.keywords = None
self.platforms = None
self.classifiers = None
self.download_url = None
# PEP 314
self.provides = None
self.requires = None
self.obsoletes = None
def read_pkg_file(self, file):
"""Reads the metadata values from a file object."""
msg = message_from_file(file)
def _read_field(name):
value = msg[name]
if value == 'UNKNOWN':
return None
return value
def _read_list(name):
values = msg.get_all(name, None)
if values == []:
return None
return values
metadata_version = msg['metadata-version']
self.name = _read_field('name')
self.version = _read_field('version')
self.description = _read_field('summary')
# we are filling author only.
self.author = _read_field('author')
self.maintainer = None
self.author_email = _read_field('author-email')
self.maintainer_email = None
self.url = _read_field('home-page')
self.license = _read_field('license')
if 'download-url' in msg:
self.download_url = _read_field('download-url')
else:
self.download_url = None
self.long_description = _read_field('description')
self.description = _read_field('summary')
if 'keywords' in msg:
self.keywords = _read_field('keywords').split(',')
self.platforms = _read_list('platform')
self.classifiers = _read_list('classifier')
# PEP 314 - these fields only exist in 1.1
if metadata_version == '1.1':
self.requires = _read_list('requires')
self.provides = _read_list('provides')
self.obsoletes = _read_list('obsoletes')
else:
self.requires = None
self.provides = None
self.obsoletes = None
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
encoding='UTF-8') as pkg_info:
self.write_pkg_file(pkg_info)
def write_pkg_file(self, file):
"""Write the PKG-INFO format data to a file object.
"""
version = '1.0'
if (self.provides or self.requires or self.obsoletes or
self.classifiers or self.download_url):
version = '1.1'
file.write('Metadata-Version: %s\n' % version)
file.write('Name: %s\n' % self.get_name())
file.write('Version: %s\n' % self.get_version())
file.write('Summary: %s\n' % self.get_description())
file.write('Home-page: %s\n' % self.get_url())
file.write('Author: %s\n' % self.get_contact())
file.write('Author-email: %s\n' % self.get_contact_email())
file.write('License: %s\n' % self.get_license())
if self.download_url:
file.write('Download-URL: %s\n' % self.download_url)
long_desc = rfc822_escape(self.get_long_description())
file.write('Description: %s\n' % long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
file.write('Keywords: %s\n' % keywords)
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
# PEP 314
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
def _write_list(self, file, name, values):
for value in values:
file.write('%s: %s\n' % (name, value))
# -- Metadata query methods ----------------------------------------
def get_name(self):
return self.name or "UNKNOWN"
def get_version(self):
return self.version or "0.0.0"
def get_fullname(self):
return "%s-%s" % (self.get_name(), self.get_version())
def get_author(self):
return self.author or "UNKNOWN"
def get_author_email(self):
return self.author_email or "UNKNOWN"
def get_maintainer(self):
return self.maintainer or "UNKNOWN"
def get_maintainer_email(self):
return self.maintainer_email or "UNKNOWN"
def get_contact(self):
return self.maintainer or self.author or "UNKNOWN"
def get_contact_email(self):
return self.maintainer_email or self.author_email or "UNKNOWN"
def get_url(self):
return self.url or "UNKNOWN"
def get_license(self):
return self.license or "UNKNOWN"
get_licence = get_license
def get_description(self):
return self.description or "UNKNOWN"
def get_long_description(self):
return self.long_description or "UNKNOWN"
def get_keywords(self):
return self.keywords or []
def set_keywords(self, value):
self.keywords = _ensure_list(value, 'keywords')
def get_platforms(self):
return self.platforms or ["UNKNOWN"]
def set_platforms(self, value):
self.platforms = _ensure_list(value, 'platforms')
def get_classifiers(self):
return self.classifiers or []
def set_classifiers(self, value):
self.classifiers = _ensure_list(value, 'classifiers')
def get_download_url(self):
return self.download_url or "UNKNOWN"
# PEP 314
def get_requires(self):
return self.requires or []
def set_requires(self, value):
import distutils.versionpredicate
for v in value:
distutils.versionpredicate.VersionPredicate(v)
self.requires = list(value)
def get_provides(self):
return self.provides or []
def set_provides(self, value):
value = [v.strip() for v in value]
for v in value:
import distutils.versionpredicate
distutils.versionpredicate.split_provision(v)
self.provides = value
def get_obsoletes(self):
return self.obsoletes or []
def set_obsoletes(self, value):
import distutils.versionpredicate
for v in value:
distutils.versionpredicate.VersionPredicate(v)
self.obsoletes = list(value)
def fix_help_options(options):
"""Convert a 4-tuple 'help_options' list as found in various command
classes to the 3-tuple form required by FancyGetopt.
"""
new_options = []
for help_tuple in options:
new_options.append(help_tuple[0:3])
return new_options
|
Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/dist.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/dist.py",
"repo_id": "Django-locallibrary",
"token_count": 21364
}
| 22 |
"""
Re-implementation of find_module and get_frozen_object
from the deprecated imp module.
"""
import os
import importlib.util
import importlib.machinery
from .py34compat import module_from_spec
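# Module-kind constants carried over from the deprecated imp module
# (they keep imp's numeric values, e.g. imp.PY_SOURCE == 1, imp.PY_FROZEN == 7).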
PY_SOURCE = 1
PY_COMPILED = 2
C_EXTENSION = 3
C_BUILTIN = 6
PY_FROZEN = 7
def find_spec(module, paths):
finder = (
importlib.machinery.PathFinder().find_spec
if isinstance(paths, list) else
importlib.util.find_spec
)
return finder(module, paths)
def find_module(module, paths=None):
"""Just like 'imp.find_module()', but with package support"""
spec = find_spec(module, paths)
if spec is None:
raise ImportError("Can't find %s" % module)
if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
spec = importlib.util.spec_from_loader('__init__.py', spec.loader)
kind = -1
file = None
static = isinstance(spec.loader, type)
if spec.origin == 'frozen' or static and issubclass(
spec.loader, importlib.machinery.FrozenImporter):
kind = PY_FROZEN
        path = None  # imp compatibility
        suffix = mode = ''  # imp compatibility
elif spec.origin == 'built-in' or static and issubclass(
spec.loader, importlib.machinery.BuiltinImporter):
kind = C_BUILTIN
        path = None  # imp compatibility
        suffix = mode = ''  # imp compatibility
elif spec.has_location:
path = spec.origin
suffix = os.path.splitext(path)[1]
mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'
if suffix in importlib.machinery.SOURCE_SUFFIXES:
kind = PY_SOURCE
elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
kind = PY_COMPILED
elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
kind = C_EXTENSION
if kind in {PY_SOURCE, PY_COMPILED}:
file = open(path, mode)
else:
path = None
suffix = mode = ''
return file, path, (suffix, mode, kind)
def get_frozen_object(module, paths=None):
spec = find_spec(module, paths)
if not spec:
raise ImportError("Can't find %s" % module)
return spec.loader.get_code(module)
def get_module(module, paths, info):
spec = find_spec(module, paths)
if not spec:
raise ImportError("Can't find %s" % module)
return module_from_spec(spec)
|
Django-locallibrary/env/Lib/site-packages/setuptools/_imp.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_imp.py",
"repo_id": "Django-locallibrary",
"token_count": 1017
}
| 23 |
import distutils.command.build_clib as orig
from distutils.errors import DistutilsSetupError
from distutils import log
from setuptools.dep_util import newer_pairwise_group
class build_clib(orig.build_clib):
"""
Override the default build_clib behaviour to do the following:
1. Implement a rudimentary timestamp-based dependency system
so 'compile()' doesn't run every time.
2. Add more keys to the 'build_info' dictionary:
       * obj_deps - specify dependencies for each object compiled.
                    this should be a dictionary mapping each source
                    filename to a list of its dependencies. Use an
                    empty string as the key for global dependencies.
* cflags - specify a list of additional flags to pass to
the compiler.
"""
def build_libraries(self, libraries):
for (lib_name, build_info) in libraries:
sources = build_info.get('sources')
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % lib_name)
sources = list(sources)
log.info("building '%s' library", lib_name)
# Make sure everything is the correct type.
# obj_deps should be a dictionary of keys as sources
# and a list/tuple of files that are its dependencies.
obj_deps = build_info.get('obj_deps', dict())
if not isinstance(obj_deps, dict):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
dependencies = []
# Get the global dependencies that are specified by the '' key.
# These will go into every source's dependency list.
global_deps = obj_deps.get('', list())
if not isinstance(global_deps, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
# Build the list to be used by newer_pairwise_group
# each source will be auto-added to its dependencies.
for source in sources:
src_deps = [source]
src_deps.extend(global_deps)
extra_deps = obj_deps.get(source, list())
if not isinstance(extra_deps, (list, tuple)):
raise DistutilsSetupError(
"in 'libraries' option (library '%s'), "
"'obj_deps' must be a dictionary of "
"type 'source: list'" % lib_name)
src_deps.extend(extra_deps)
dependencies.append(src_deps)
expected_objects = self.compiler.object_filenames(
sources,
output_dir=self.build_temp,
)
if (
newer_pairwise_group(dependencies, expected_objects)
!= ([], [])
):
# First, compile the source code to object files in the library
# directory. (This should probably change to putting object
# files in a temporary build directory.)
macros = build_info.get('macros')
include_dirs = build_info.get('include_dirs')
cflags = build_info.get('cflags')
self.compiler.compile(
sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=include_dirs,
extra_postargs=cflags,
debug=self.debug
)
# Now "link" the object files together into a static library.
# (On Unix at least, this isn't really linking -- it just
# builds an archive. Whatever.)
self.compiler.create_static_lib(
expected_objects,
lib_name,
output_dir=self.build_clib,
debug=self.debug
)
|
Django-locallibrary/env/Lib/site-packages/setuptools/command/build_clib.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/build_clib.py",
"repo_id": "Django-locallibrary",
"token_count": 2159
}
| 24 |
from distutils import log
import distutils.command.sdist as orig
import os
import sys
import io
import contextlib
from setuptools.extern import six, ordered_set
from .py36compat import sdist_add_defaults
import pkg_resources
_default_revctrl = list
def walk_revctrl(dirname=''):
"""Find all files under revision control"""
for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
for item in ep.load()(dirname):
yield item
class sdist(sdist_add_defaults, orig.sdist):
"""Smart sdist that finds anything supported by revision control"""
user_options = [
('formats=', None,
"formats for source distribution (comma-separated list)"),
('keep-temp', 'k',
"keep the distribution tree around after creating " +
"archive file(s)"),
('dist-dir=', 'd',
"directory to put the source distribution archive(s) in "
"[default: dist]"),
]
negative_opt = {}
README_EXTENSIONS = ['', '.rst', '.txt', '.md']
READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
def run(self):
self.run_command('egg_info')
ei_cmd = self.get_finalized_command('egg_info')
self.filelist = ei_cmd.filelist
self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
self.check_readme()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
self.make_distribution()
dist_files = getattr(self.distribution, 'dist_files', [])
for file in self.archive_files:
data = ('sdist', '', file)
if data not in dist_files:
dist_files.append(data)
def initialize_options(self):
orig.sdist.initialize_options(self)
self._default_to_gztar()
def _default_to_gztar(self):
# only needed on Python prior to 3.6.
if sys.version_info >= (3, 6, 0, 'beta', 1):
return
self.formats = ['gztar']
def make_distribution(self):
"""
Workaround for #516
"""
with self._remove_os_link():
orig.sdist.make_distribution(self)
@staticmethod
@contextlib.contextmanager
def _remove_os_link():
"""
In a context, remove and restore os.link if it exists
"""
class NoValue:
pass
orig_val = getattr(os, 'link', NoValue)
try:
del os.link
except Exception:
pass
try:
yield
finally:
if orig_val is not NoValue:
setattr(os, 'link', orig_val)
def __read_template_hack(self):
# This grody hack closes the template file (MANIFEST.in) if an
# exception occurs during read_template.
# Doing so prevents an error when easy_install attempts to delete the
# file.
try:
orig.sdist.read_template(self)
except Exception:
_, _, tb = sys.exc_info()
tb.tb_next.tb_frame.f_locals['template'].close()
raise
# Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle
# has been fixed, so only override the method if we're using an earlier
# Python.
has_leaky_handle = (
sys.version_info < (2, 7, 2)
or (3, 0) <= sys.version_info < (3, 1, 4)
or (3, 2) <= sys.version_info < (3, 2, 1)
)
if has_leaky_handle:
read_template = __read_template_hack
def _add_defaults_optional(self):
if six.PY2:
sdist_add_defaults._add_defaults_optional(self)
else:
super()._add_defaults_optional()
if os.path.isfile('pyproject.toml'):
self.filelist.append('pyproject.toml')
def _add_defaults_python(self):
"""getting python files"""
if self.distribution.has_pure_modules():
build_py = self.get_finalized_command('build_py')
self.filelist.extend(build_py.get_source_files())
self._add_data_files(self._safe_data_files(build_py))
def _safe_data_files(self, build_py):
"""
Extracting data_files from build_py is known to cause
infinite recursion errors when `include_package_data`
is enabled, so suppress it in that case.
"""
if self.distribution.include_package_data:
return ()
return build_py.data_files
def _add_data_files(self, data_files):
"""
Add data files as found in build_py.data_files.
"""
self.filelist.extend(
os.path.join(src_dir, name)
for _, src_dir, _, filenames in data_files
for name in filenames
)
def _add_defaults_data_files(self):
try:
if six.PY2:
sdist_add_defaults._add_defaults_data_files(self)
else:
super()._add_defaults_data_files()
except TypeError:
log.warn("data_files contains unexpected objects")
def check_readme(self):
for f in self.READMES:
if os.path.exists(f):
return
else:
self.warn(
"standard file not found: should have one of " +
', '.join(self.READMES)
)
def make_release_tree(self, base_dir, files):
orig.sdist.make_release_tree(self, base_dir, files)
# Save any egg_info command line options used to create this sdist
dest = os.path.join(base_dir, 'setup.cfg')
if hasattr(os, 'link') and os.path.exists(dest):
# unlink and re-copy, since it might be hard-linked, and
# we don't want to change the source version
os.unlink(dest)
self.copy_file('setup.cfg', dest)
self.get_finalized_command('egg_info').save_version_info(dest)
def _manifest_is_not_generated(self):
# check for special comment used in 2.7.1 and higher
if not os.path.isfile(self.manifest):
return False
with io.open(self.manifest, 'rb') as fp:
first_line = fp.readline()
return (first_line !=
'# file GENERATED by distutils, do NOT edit\n'.encode())
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest, 'rb')
for line in manifest:
# The manifest must contain UTF-8. See #303.
if not six.PY2:
try:
line = line.decode('UTF-8')
except UnicodeDecodeError:
log.warn("%r not UTF-8 decodable -- skipping" % line)
continue
# ignore comments and blank lines
line = line.strip()
if line.startswith('#') or not line:
continue
self.filelist.append(line)
manifest.close()
def check_license(self):
"""Checks if license_file' or 'license_files' is configured and adds any
valid paths to 'self.filelist'.
"""
files = ordered_set.OrderedSet()
opts = self.distribution.get_option_dict('metadata')
# ignore the source of the value
_, license_file = opts.get('license_file', (None, None))
if license_file is None:
log.debug("'license_file' option was not specified")
else:
files.add(license_file)
try:
files.update(self.distribution.metadata.license_files)
except TypeError:
log.warn("warning: 'license_files' option is malformed")
for f in files:
if not os.path.exists(f):
log.warn(
"warning: Failed to find the configured license file '%s'",
f)
files.remove(f)
self.filelist.extend(files)
|
Django-locallibrary/env/Lib/site-packages/setuptools/command/sdist.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/sdist.py",
"repo_id": "Django-locallibrary",
"token_count": 3776
}
| 25 |
import os
import socket
import atexit
import re
import functools
from setuptools.extern.six.moves import urllib, http_client, map, filter
from pkg_resources import ResolutionError, ExtractionError
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
/usr/local/share/certs/ca-root-nss.crt
/etc/ssl/ca-bundle.pem
""".strip().split()
try:
HTTPSHandler = urllib.request.HTTPSHandler
HTTPSConnection = http_client.HTTPSConnection
except AttributeError:
HTTPSHandler = HTTPSConnection = object
is_available = ssl is not None and object not in (
HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
https://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a
# presented identifier in which the wildcard
# character comprises a label other than the
# left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError(
"hostname %r doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError(
"hostname %r doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError(
"no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw),
req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
# change self.host to mean the proxy server host when tunneling is
# being used. Adapt, since we are interested in the destination
# host for the match_hostname() comparison.
actual_host = self._tunnel_host
else:
actual_host = self.host
if hasattr(ssl, 'create_default_context'):
ctx = ssl.create_default_context(cafile=self.ca_bundle)
self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
else:
# This is for python < 2.7.9 and < 3.4?
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), actual_host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib.request.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
# from jaraco.functools
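# 'once' caches the wrapped function's first return value on the function
# object itself, so later calls skip the (potentially expensive) body.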
def once(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(func, 'always_returns'):
func.always_returns = func(*args, **kwargs)
return func.always_returns
return wrapper
@once
def get_win_certfile():
try:
import wincertstore
except ImportError:
return None
class CertFile(wincertstore.CertFile):
def __init__(self):
super(CertFile, self).__init__()
atexit.register(self.close)
def close(self):
try:
super(CertFile, self).close()
except OSError:
pass
_wincerts = CertFile()
_wincerts.addstore('CA')
_wincerts.addstore('ROOT')
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
extant_cert_paths = filter(os.path.isfile, cert_paths)
return (
get_win_certfile()
or next(extant_cert_paths, None)
or _certifi_where()
)
def _certifi_where():
try:
return __import__('certifi').where()
except (ImportError, ResolutionError, ExtractionError):
pass
|
Django-locallibrary/env/Lib/site-packages/setuptools/ssl_support.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/ssl_support.py",
"repo_id": "Django-locallibrary",
"token_count": 3861
}
| 26 |
home = C:\Users\Omega Joctan\AppData\Local\Programs\Python\Python39
include-system-site-packages = false
version = 3.9.4
|
Django-locallibrary/env/pyvenv.cfg/0
|
{
"file_path": "Django-locallibrary/env/pyvenv.cfg",
"repo_id": "Django-locallibrary",
"token_count": 42
}
| 27 |
//+------------------------------------------------------------------+
//| GradientDescent.mqh |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
enum costFx
{
   MSE, //Mean Squared Error
BCE, //binary cross entropy
CUSTOM, //for debugging purposes
};
enum Beta
{
Slope,
Intercept
};
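// Sanity-check macro: raises an Alert whenever a value drifts above DBL_MAX
// (overflow) or below DBL_MIN while still positive (underflow) during training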
#define DBL_MAX_MIN(val) if (val>DBL_MAX) Alert("Function ",__FUNCTION__,"\n Maximum Double value for ",#val," reached"); if (val<DBL_MIN && val>0) Alert("Function ",__FUNCTION__,"\n MInimum Double value for ",#val," reached")
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CGradientDescent
{
private:
double e;
double m_learning_rate;
int m_iterations;
double m_XMatrix[];
double Y[];
int m_rows;
int m_cols;
bool m_debug;
protected:
double Mse(double Bo, double Bi,Beta wrt);
double Bce(double Bo,double B1,Beta wrt);
double CustomCostFunction(double x);
void MatrixColumn(double &Matrix[],double &ColMatrix[],int column);
double Mean(double &data[]);
double std(double &data[]); //Standard deviation
double Sigmoid(double t);
public:
CGradientDescent(int cols, double learning_rate=0.01,int iterations=1000, bool debugInfo=false);
~CGradientDescent(void);
void GradientDescentFunction(double &XMatrix[], double &YMatrix[],costFx costFunction);
void MinMaxScaler(double &Array[]);
void ReadCsvCol(string filename,int col, double &Array[]);
};
//+------------------------------------------------------------------+
CGradientDescent::CGradientDescent(int cols, double learning_rate=0.01,int iterations=1000, bool debugInfo=false)
{
e = 2.718281828;
m_learning_rate = learning_rate;
m_cols = cols;
m_iterations = iterations;
m_debug = debugInfo;
}
//+------------------------------------------------------------------+
CGradientDescent::~CGradientDescent(void)
{
ArrayFree(m_XMatrix);
ArrayFree(Y);
}
//+------------------------------------------------------------------+
void CGradientDescent::GradientDescentFunction(double &XMatrix[], double &YMatrix[],costFx costFunction)
{
ArrayCopy(m_XMatrix,XMatrix);
ArrayCopy(Y,YMatrix);
m_rows = ArraySize(Y);
   if (ArraySize(XMatrix) != m_cols*m_rows) Print("There is an imbalance between the size of the X matrix and the number of Y values");
//---
Print("Gradient Descent CostFunction ",EnumToString(costFunction));
double x0 = 0, x1=0;
double b0=0, b1=0;
int iters =0;
if (costFunction == CUSTOM)
{
        for (int i=0; i<m_iterations; i++, iters++)
{
x1 = x0 - m_learning_rate * CustomCostFunction(x0);
if (m_debug) printf("%d x0 = %.10f x1 = %.10f CostFunction = %.10f",iters,x0,x1,CustomCostFunction(x0));
            if (NormalizeDouble(CustomCostFunction(x0),8) == 0) { Print("Local minimum found =",x0); break; }
x0 = x1;
}
}
//---
double cost_B0=0, cost_B1=0;
if (costFunction == MSE)
{
int iterations=0;
for (int i=0; i<m_iterations; i++, iterations++)
{
cost_B0 = Mse(b0,b1,Intercept);
cost_B1 = Mse(b0,b1,Slope);
b0 = b0 - m_learning_rate * cost_B0;
b1 = b1 - m_learning_rate * cost_B1;
if (m_debug) printf("%d b0 = %.8f cost_B0 = %.8f B1 = %.8f cost_B1 = %.8f",iterations,b0,cost_B0,b1,cost_B1);
DBL_MAX_MIN(b0); DBL_MAX_MIN(cost_B0); DBL_MAX_MIN(cost_B1);
if (NormalizeDouble(cost_B0,8) == 0 && NormalizeDouble(cost_B1,8) == 0) break;
}
printf("%d Iterations Local Minima are\nB0(Intercept) = %.5f || B1(Coefficient) = %.5f",iterations,b0,b1);
}
//---
if (costFunction == BCE)
{
int iterations=0;
for (int i=0; i<m_iterations; i++, iterations++)
{
cost_B0 = Bce(b0,b1,Intercept);
cost_B1 = Bce(b0,b1,Slope);
b0 = b0 - m_learning_rate * cost_B0;
b1 = b1 - m_learning_rate * cost_B1;
if (m_debug) printf("%d b0 = %.8f cost_B0 = %.8f B1 = %.8f cost_B1 = %.8f",iterations,b0,cost_B0,b1,cost_B1);
DBL_MAX_MIN(b0); DBL_MAX_MIN(cost_B0); DBL_MAX_MIN(cost_B1);
if (NormalizeDouble(cost_B0,8) == 0 && NormalizeDouble(cost_B1,8) == 0) break;
}
printf("%d Iterations Local Minima are\nB0(Intercept) = %.5f || B1(Coefficient) = %.5f",iterations,b0,b1);
}
}
//+------------------------------------------------------------------+
double CGradientDescent::CustomCostFunction(double x)
{
return(2 * ( x + 5 ));
}
//+------------------------------------------------------------------+
double CGradientDescent::Mse(double Bo, double Bi, Beta wrt)
{
double sum_sqr=0;
double m = ArraySize(Y);
double x[];
MatrixColumn(m_XMatrix,x,2);
//--- dcost wrt B0
if (wrt == Intercept)
{
for (int i=0; i<ArraySize(Y); i++)
{
//printf("y[%d] = %.5f x[%d] = %.5f ",i,Y[i],i,x[i]);
sum_sqr += Y[i] - (Bo + Bi*x[i]);
}
//Print("sum ",sum_sqr," cost ",(-2/m)*sum_sqr);
return((-2/m) * sum_sqr);
}
//--- dcost wrt Bi
if (wrt == Slope)
{
for (int i=0; i<ArraySize(Y); i++)
{
//printf("y[%d] = %.5f x[%d] = %.5f ",i,Y[i],i,x[i]);
sum_sqr += x[i] * (Y[i] - (Bo + Bi*x[i]));
}
return((-2/m) * sum_sqr);
}
return(0);
}
//+------------------------------------------------------------------+
double CGradientDescent::Bce(double Bo,double B1,Beta wrt)
{
double sum_sqr=0;
double m = ArraySize(Y);
double x[];
MatrixColumn(m_XMatrix,x,2);
if (wrt == Slope)
for (int i=0; i<ArraySize(Y); i++)
{
double Yp = Sigmoid(Bo+B1*x[i]);
sum_sqr += (Y[i] - Yp) * x[i];
}
if (wrt == Intercept)
for (int i=0; i<ArraySize(Y); i++)
{
double Yp = Sigmoid(Bo+B1*x[i]);
sum_sqr += (Y[i] - Yp);
}
return((-1/m)*sum_sqr);
}
//+------------------------------------------------------------------+
double CGradientDescent::Sigmoid(double t)
{
return(1.0/(1+MathPow(e,-t)));
}
//+------------------------------------------------------------------+
void CGradientDescent::MinMaxScaler(double &Array[])
{
   double max,min;
double Norm[];
ArrayResize(Norm,ArraySize(Array));
max = Array[ArrayMaximum(Array)]; min = Array[ArrayMinimum(Array)];
for (int i=0; i<ArraySize(Array); i++)
Norm[i] = (Array[i] - min) / (max - min);
printf("Scaled data Mean = %.5f Std = %.5f",Mean(Norm),std(Norm));
ArrayFree(Array);
ArrayCopy(Array,Norm);
}
//+------------------------------------------------------------------+
double CGradientDescent::Mean(double &data[])
{
double sum=0;
for (int i=0; i<ArraySize(data); i++)
sum += data[i]; // all values summation
return(sum/ArraySize(data)); //total value after summation divided by total number of elements
}
//+------------------------------------------------------------------+
double CGradientDescent::std(double &data[])
{
double mean = Mean(data);
double sum = 0;
for (int i=0; i<ArraySize(data); i++)
sum += MathPow(data[i] - mean,2);
return(MathSqrt(sum/ArraySize(data)));
}
//+------------------------------------------------------------------+
void CGradientDescent::MatrixColumn(double &Matrix[],double &ColMatrix[],int column)
{
int cols = m_cols, rows = m_rows;
int start = 0;
ArrayResize(ColMatrix,m_rows);
for (int k=0; k<cols+1; k++)
{
if (k+1 != column) continue;
else
{
if (m_cols > 1) start = column-1;
for (int i=0; i<rows; i++)
{
ColMatrix[i] = Matrix[start];
if (m_cols == 1) start++;
else start+=(rows-1);
}
}
}
}
//+------------------------------------------------------------------+
void CGradientDescent::ReadCsvCol(string filename,int col,double &Array[])
{
int counter=0;
int column = 0, rows=0;
int from_column_number = col;
int handle = FileOpen(filename,FILE_READ|FILE_CSV|FILE_ANSI,",",CP_UTF8);
if (handle == INVALID_HANDLE) Print("Invalid csv handle Err =",GetLastError());
else
while (!FileIsEnding(handle))
{
double data = (double)FileReadString(handle);
column++;
//---
if (column==from_column_number)
{
            if (rows>=1) //Skip the first row, which contains the column headers
{
counter++;
ArrayResize(Array,counter);
Array[counter-1]=data;
}
}
//---
if (FileIsLineEnding(handle))
{
rows++;
column=0;
}
}
FileClose(handle);
}
//+------------------------------------------------------------------+
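//+------------------------------------------------------------------+
//| Usage sketch (illustrative only, not part of the original file). |
//| Assumes a CSV with Y in column 1 and X in column 2; the file     |
//| name and column layout are assumptions for demonstration.        |
//+------------------------------------------------------------------+
/*
void OnStart()
  {
   double x[], y[];
   CGradientDescent *grad = new CGradientDescent(1, 0.01, 1000, true);
   grad.ReadCsvCol("NASDAQ_DATA.csv", 2, x);   // independent variable
   grad.ReadCsvCol("NASDAQ_DATA.csv", 1, y);   // dependent variable
   grad.MinMaxScaler(x);                       // scale inputs to [0,1]
   grad.MinMaxScaler(y);
   grad.GradientDescentFunction(x, y, MSE);    // fit a simple linear model
   delete grad;
  }
*/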
|
Gradient-Descent-MQL5/GradientDescent.mqh/0
|
{
"file_path": "Gradient-Descent-MQL5/GradientDescent.mqh",
"repo_id": "Gradient-Descent-MQL5",
"token_count": 5377
}
| 28 |
<jupyter_start><jupyter_code>import pandas as pd
import numpy as np
import sklearn
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
%matplotlib inline
datapath = r"NASDAQ_DATA.csv"
data = pd.read_csv(datapath)
print(data.shape)
data.head()
import seaborn as sns
datasets = data.iloc[:, 0:]
sns.pairplot(datasets,diag_kind="kde")<jupyter_output><empty_output><jupyter_text>The data has to be converted to a NumPy array before it can be used to compute the correlation coefficient<jupyter_code>y = np.array(data["NASDAQ"])
x = np.array(data["S&P500"])
np.corrcoef(x,y)<jupyter_output><empty_output><jupyter_text>Data has to be reshaped into one column with an unknown number of rows (-1,1)<jupyter_code>x = data["S&P500"].values.reshape(-1,1)
y = data["NASDAQ"].values.reshape(-1,1)
reg = LinearRegression()
reg = reg.fit(x,y)
r2 = reg.score(x,y)
print(f"model is y = {reg.coef_[0][0]}x + {reg.intercept_[0]} \n The Rsquared is {reg.score(x,y)}")
predictions = reg.predict(x)
plt.figure(figsize=(10,7))
plt.xlabel("S&P500")
plt.ylabel("NASDAQ")
plt.plot(x,predictions,c='Orange',label='predictions',linewidth=3)
plt.scatter(x,y, c="dodgerblue")
plt.title("Linear Regression model predictions")
plt.legend()<jupyter_output><empty_output>
|
Linear-Regression-python-and-MQL5/LinearRegression.ipynb/0
|
{
"file_path": "Linear-Regression-python-and-MQL5/LinearRegression.ipynb",
"repo_id": "Linear-Regression-python-and-MQL5",
"token_count": 521
}
| 29 |
//+------------------------------------------------------------------+
//| KMeans.mqh |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
#include <Graphics\Graphic.mqh>
CGraphic graph;
#include <MALE5\matrix_utils.mqh>
//+------------------------------------------------------------------+
enum errors
{
KM_ERR001, //clusters not matching in size Error
KM_ERR002
};
//+------------------------------------------------------------------+
class CKMeans
{
CMatrixutils matrix_utils;
private:
ulong n; //number of samples
uint m_clusters;
ulong m_cols;
matrix InitialCentroids;
vector<double> cluster_assign;
protected:
matrix Matrix;
bool ErrMsg(errors err);
bool ScatterCurvePlots(
string obj_name,
double &x[],
double &y[],
double &curveArr[],
string legend,
string x_axis_label = "x-axis",
string y_axis_label = "y-axis",
color clr = clrDodgerBlue,
bool points_fill = true
);
public:
CKMeans(const matrix &_Matrix, int clusters=3);
~CKMeans(void);
void KMeansClustering(matrix &clustered_matrix, matrix ¢roids, int iterations = 1, bool rand_cluster =false);
void ElbowMethod(const int initial_k=1, int total_k=10, bool showPlot = true);
void FilterZero(vector &vec);
void matrixtoArray(matrix &mat, double &Array[]);
};
//+------------------------------------------------------------------+
CKMeans::CKMeans(const matrix &_Matrix, int clusters=3)
{
m_clusters = clusters;
Matrix.Copy(_Matrix);
m_cols = Matrix.Cols();
n = Matrix.Rows(); //number of elements | Matrix Rows
}
//+------------------------------------------------------------------+
CKMeans::~CKMeans(void)
{
ZeroMemory(m_clusters);
ZeroMemory(InitialCentroids);
ZeroMemory(cluster_assign);
}
//+------------------------------------------------------------------+
void CKMeans::KMeansClustering(matrix &clustered_matrix, matrix ¢roids, int iterations = 1, bool rand_cluster =false)
{
InitialCentroids.Resize(m_clusters, m_cols);
cluster_assign.Resize(n);
clustered_matrix.Resize(m_clusters, m_cols*n);
clustered_matrix.Fill(NULL);
vector cluster_comb_v = {};
matrix cluster_comb_m = {};
vector rand_v = {};
ulong rand_ = 0;
for(ulong i=0; i<m_clusters; i++)
{
rand_ = rand_cluster ? (ulong)MathFloor(i*(n/m_clusters)) : i;
rand_v = Matrix.Row(rand_);
InitialCentroids.Row(rand_v, i);
}
//---
vector v_row;
matrix rect_distance = {}; //matrix to store rectilinear distances
rect_distance.Reshape(n, m_clusters);
vector v_matrix = {}, v_centroid = {};
double output = 0;
//---
for(int iter=0; iter<iterations; iter++)
{
for(ulong i=0; i<rect_distance.Rows(); i++)
for(ulong j=0; j<rect_distance.Cols(); j++)
{
v_matrix = Matrix.Row(i);
v_centroid = InitialCentroids.Row(j);
ZeroMemory(output);
for(ulong k=0; k<v_matrix.Size(); k++)
output += MathAbs(v_matrix[k] - v_centroid[k]); //Rectilinear distance
rect_distance[i][j] = output;
}
//--- Assigning the Clusters
matrix cluster_cent = {}; //cluster centroids
ulong cluster = 0;
for(ulong i=0; i<rect_distance.Rows(); i++)
{
v_row = rect_distance.Row(i);
cluster = v_row.ArgMin();
cluster_assign[i] = (double)cluster;
}
vector temp_cluster_assign = cluster_assign;
//--- Combining the clusters
for(ulong i=0, index =0; i<cluster_assign.Size(); i++)
{
ZeroMemory(cluster_cent);
bool classified = false;
for(ulong j=0, count = 0; j<temp_cluster_assign.Size(); j++)
{
if(cluster_assign[i] == temp_cluster_assign[j])
{
classified = true;
count++;
cluster_comb_m.Resize(count, m_cols);
cluster_comb_m.Row(Matrix.Row(j), count-1);
cluster_cent.Resize(count, m_cols);
// New centroids
cluster_cent.Row(Matrix.Row(j), count-1);
temp_cluster_assign[j] = -100; //modify the cluster item so it can no longer be found
}
else
continue;
}
//---
if(classified == true)
{
            // solving for new clusters and updating the old ones
cluster_comb_v = matrix_utils.MatrixToVector(cluster_comb_m);
if(iter == iterations-1)
clustered_matrix.Row(cluster_comb_v, index);
index++;
//---
vector x_y_z = {0, 0};
ZeroMemory(rand_v);
for(ulong k=0; k<cluster_cent.Cols(); k++)
{
x_y_z.Resize(cluster_cent.Cols());
rand_v = cluster_cent.Col(k);
x_y_z[k] = rand_v.Mean();
}
InitialCentroids.Row(x_y_z, i);
}
if(index >= m_clusters)
break;
}
} //end of iterations
ZeroMemory(centroids);
centroids.Copy(InitialCentroids);
}
//+------------------------------------------------------------------+
bool CKMeans::ErrMsg(errors err)
{
switch(err)
{
case KM_ERR001:
printf("%s Clusters not matching in Size ", EnumToString(KM_ERR001));
break;
default:
break;
}
return(true);
}
//+------------------------------------------------------------------+
void CKMeans::ElbowMethod(const int initial_k=1, int total_k=10, bool showPlot = true)
{
matrix clustered_mat, _centroids = {};
if(total_k > (int)n)
total_k = (int)n; //k should always be less than n
vector centroid_v= {}, x_y_z= {};
vector short_v = {}; //vector for each point
vector minus_v = {}; //vector to store the minus operation output
double wcss = 0;
double WCSS[];
ArrayResize(WCSS, total_k);
double kArray[];
ArrayResize(kArray, total_k);
for(int k=initial_k, count_k=0; k<ArraySize(WCSS)+initial_k; k++, count_k++)
{
wcss = 0;
m_clusters = k;
KMeansClustering(clustered_mat, _centroids);
for(ulong i=0; i<_centroids.Rows(); i++)
{
centroid_v = _centroids.Row(i);
x_y_z = clustered_mat.Row(i);
FilterZero(x_y_z);
for(ulong j=0; j<x_y_z.Size()/m_cols; j++)
{
matrix_utils.Copy(x_y_z, short_v, uint(j*m_cols), (uint)m_cols);
            //--- WCSS (within-cluster sum of squares)
minus_v = (short_v - centroid_v);
minus_v = MathPow(minus_v, 2);
wcss += minus_v.Sum();
}
}
WCSS[count_k] = wcss;
kArray[count_k] = k;
}
Print("WCSS");
ArrayPrint(WCSS);
Print("kArray");
ArrayPrint(kArray);
//--- Plotting the Elbow on the graph
if(showPlot)
{
ObjectDelete(0, "elbow");
ScatterCurvePlots("elbow", kArray, WCSS, WCSS, "Elbow line", "k", "WCSS");
}
}
//+------------------------------------------------------------------+
void CKMeans::FilterZero(vector &vec)
{
vector new_vec = {};
for(ulong i=0, count =0; i<vec.Size(); i++)
{
if(vec[i] != NULL)
{
count++;
new_vec.Resize(count);
new_vec[count-1] = vec[i];
}
else
continue;
}
vec.Copy(new_vec);
}
//+------------------------------------------------------------------+
//+------------------------------------------------------------------+
bool CKMeans::ScatterCurvePlots(
string obj_name,
double &x[],
double &y[],
double &curveArr[],
string legend,
string x_axis_label = "x-axis",
string y_axis_label = "y-axis",
color clr = clrDodgerBlue,
bool points_fill = true
)
{
if(!graph.Create(0, obj_name, 0, 30, 70, 800, 640))
{
printf("Failed to Create graphical object on the Main chart Err = %d", GetLastError());
return(false);
}
ChartSetInteger(0, CHART_SHOW, true);
//---
//graph.CurveAdd(x,y,clrBlack,CURVE_POINTS,y_axis_label);
graph.CurveAdd(x, curveArr, clr, CURVE_POINTS_AND_LINES, legend);
graph.XAxis().Name(x_axis_label);
graph.XAxis().NameSize(13);
graph.YAxis().Name(y_axis_label);
graph.YAxis().NameSize(13);
graph.FontSet("Lucida Console", 13);
graph.CurvePlotAll();
graph.Update();
return(true);
}
//+------------------------------------------------------------------+
void CKMeans::matrixtoArray(matrix &mat, double &Array[])
{
ArrayFree(Array);
ArrayResize(Array, int(mat.Rows()*mat.Cols()));
int index = 0;
for(ulong i=0; i<mat.Rows(); i++)
for(ulong j=0; j<mat.Cols(); j++, index++)
{
Array[index] = mat[i][j];
}
}
//+------------------------------------------------------------------+
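//+------------------------------------------------------------------+
//| Usage sketch (illustrative only, not part of the original file). |
//| The sample matrix and cluster count are assumptions chosen for   |
//| demonstration purposes.                                          |
//+------------------------------------------------------------------+
/*
void OnStart()
  {
   matrix data = {{1,2},{1.5,1.8},{5,8},{8,8},{1,0.6},{9,11}};
   CKMeans *kmeans = new CKMeans(data, 2);            // 2 clusters
   matrix clusters = {}, centroids = {};
   kmeans.KMeansClustering(clusters, centroids, 10);  // 10 iterations
   Print("centroids\n", centroids);
   kmeans.ElbowMethod(1, 5, true);                    // probe k = 1..5
   delete kmeans;
  }
*/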
|
MALE5/Clustering/KMeans.mqh/0
|
{
"file_path": "MALE5/Clustering/KMeans.mqh",
"repo_id": "MALE5",
"token_count": 4735
}
| 30 |
//+------------------------------------------------------------------+
//| Polynomial Regression.mqh |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/en/users/omegajoctan"
//+---------------------------------------------------------------------------+
//| Polynomial/Quadratic curve fitting regression model library |
//| Capable of handling as many degrees as possible, thanks to matrices        |
//| the polynomial degree depends on the number of independent variables:      |
//| one independent variable gives a second-degree model, and so forth         |
//+---------------------------------------------------------------------------+
#include <MALE5\MatrixExtend.mqh>
#include <MALE5\metrics.mqh>
class CPolynomialRegression
{
private:
ulong m_degree; //depends on independent vars
matrix Betas;
                     vector Betas_v; //coefficients of the model stored in a vector
public:
CPolynomialRegression(int degree=2);
~CPolynomialRegression(void);
                     void BIC(ulong k, vector &bic, int &best_degree); //Bayesian Information Criterion
void fit(matrix &x, vector &y);
double predict(vector &x);
vector predict(matrix &x);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CPolynomialRegression::CPolynomialRegression(int degree=2)
:m_degree(degree)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
CPolynomialRegression::~CPolynomialRegression(void)
{
   ZeroMemory(Betas_v);
}
//+------------------------------------------------------------------+
//| |
//| BIC is the Function used to find which degree polynomial fits |
//| best to a given dataset, k is the number of degrees that the      |
//| Algorithm will run to try to figure out the best model |
//| This is somehow the learning algorithm of polynomial regression |
//| |
//+------------------------------------------------------------------+
void CPolynomialRegression::BIC(ulong k, vector &bic, int &best_degree)
{
vector Pred;
bic.Resize(k-2);
best_degree = 0;
for (ulong i=2, counter = 0; i<k; i++)
{
      // NOTE: assumes a fit overload taking (degree, predictions) that trains a
      // degree-i model and fills Pred; n is the number of training samples
      fit(i, Pred);
bic[counter] = ( n * log(metrics.rss(Pred)) ) + (i * log(n));
counter++;
}
//---
bool positive = false;
for (ulong i=0; i<bic.Size(); i++)
if (bic[i] > 0) { positive = true; break; }
double low_bic = DBL_MAX;
if (positive == true)
for (ulong i=0; i<bic.Size(); i++)
{
if (bic[i] < low_bic && bic[i] > 0) low_bic = bic[i];
}
else low_bic = bic.Min(); //bic[ best_degree = ArrayMinimum(bic) ];
printf("Best Polynomial Degree(s) is = %d with BIC = %.5f",best_degree = best_degree+2,low_bic);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CPolynomialRegression::fit(matrix &x, vector &y)
{
   ulong order_size = m_degree+1;
matrix PolyNomialsXMatrix(order_size,order_size);
matrix PolynomialsYMatrix(order_size,1);
vector c;
vector x_pow;
for (ulong i=0; i<PolynomialsYMatrix.Rows(); i++)
for (ulong j=0; j<PolynomialsYMatrix.Cols(); j++)
{
if (i+j == 0) PolynomialsYMatrix[i][j] = y.Sum();
else
{
x_pow = MathPow(x,i); c = y*x_pow;
PolynomialsYMatrix[i][j] = c.Sum();
}
}
#ifdef DEBUG_MODE
Print("Polynomials y vector \n",PolynomialsYMatrix);
#endif
//---
PolyNomialsXMatrix.Resize(order_size, order_size);
double power = 0;
ZeroMemory(x_pow);
for (ulong i=0,index = 0; i<PolyNomialsXMatrix.Rows(); i++)
for (ulong j=0; j<PolyNomialsXMatrix.Cols(); j++, index++)
{
power = (double)i+j;
         if (power == 0) PolyNomialsXMatrix[i][j] = (double)y.Size(); // n = number of samples
else
{
x_pow = MathPow(x,power);
PolyNomialsXMatrix[i][j] = x_pow.Sum();
}
}
//---
#ifdef DEBUG_MODE
Print("Polynomial x matrix\n",PolyNomialsXMatrix);
#endif
//---
PolyNomialsXMatrix = PolyNomialsXMatrix.Inv(); //find the inverse of the matrix
Betas = PolyNomialsXMatrix.MatMul(PolynomialsYMatrix);
Betas_v = MatrixExtend::MatrixToVector(Betas);
#ifdef DEBUG_MODE
Print("Betas \n",Betas);
#endif
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
vector CPolynomialRegression::predict(matrix &x)
{
ulong order_size = m_degree+1;
vector preds(x.Rows());
    for (ulong i=0; i<x.Rows(); i++)
{
double sum = 0;
for (ulong j=0; j<order_size; j++)
{
if (j == 0) sum += Betas_v[j];
           else sum += Betas_v[j] * MathPow(x[i][0],j); // assumes a single-column x matrix
}
preds[i] = sum;
}
return preds;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
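//+------------------------------------------------------------------+
//| Usage sketch (illustrative only, not part of the original file). |
//| Assumes x is supplied as a single-column design matrix and y as  |
//| the matching target vector; the data values are made up.         |
//+------------------------------------------------------------------+
/*
void OnStart()
  {
   matrix x = {{1},{2},{3},{4},{5}};
   vector y = {2.1, 4.3, 9.2, 16.4, 24.9};   // roughly quadratic data
   CPolynomialRegression poly_reg(2);        // fit a degree-2 polynomial
   poly_reg.fit(x, y);
   vector preds = poly_reg.predict(x);
   Print("predictions: ", preds);
  }
*/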
|
MALE5/Linear Models/Polynomial Regression.mqh/0
|
{
"file_path": "MALE5/Linear Models/Polynomial Regression.mqh",
"repo_id": "MALE5",
"token_count": 3132
}
| 31 |
## Linear Support Vector Machine (SVM)
This documentation explains the `CLinearSVM` class in MQL5, which implements a **linear support vector machine (SVM)** for binary classification tasks.
**I. SVM Theory:**
SVMs are a type of **supervised learning** algorithm used for classification. They aim to find a **hyperplane** that best separates data points belonging to different classes.
**II. CLinearSVM Class:**
The `CLinearSVM` class provides functionalities for training and using a linear SVM classifier in MQL5:
**Public Functions:**
* **CLinearSVM(uint batch_size=32, double alpha=0.001, uint epochs= 1000,double lambda=0.1):** Constructor:
* `batch_size`: Size of mini-batches used during training (default: 32).
* `alpha`: Learning rate for updating weights (default: 0.001).
* `epochs`: Number of training epochs (default: 1000).
* `lambda`: Regularization parameter to control overfitting (default: 0.1).
* **~CLinearSVM(void):** Destructor.
* **void fit(matrix &x, vector &y):** Trains the model on the provided data (`x` - features, `y` - binary target labels).
* **int Predict(vector &x):** Predicts the class label (+1 or -1) for a single input vector.
* **vector Predict(matrix &x):** Predicts class labels (+1 or -1) for all rows in the input matrix.
**Internal Variables:**
* `W`: Vector storing the weight coefficients of the hyperplane.
* `B`: Bias term of the hyperplane.
* `is_fitted_already`: Flag indicating if the model is trained.
* `during_training`: Flag indicating if the training process is ongoing.
* `config`: Structure containing hyperparameters for training (batch size, learning rate, epochs, regularization).
**Private Functions:**
* `hyperplane(vector &x)`: Calculates the dot product of the input vector with the weight vector and adds the bias term, representing the hyperplane equation.
**III. Class Functionality:**
1. **Initialization:**
* The constructor sets default hyperparameter values and initializes internal flags.
2. **Training (fit function):**
* Checks if the number of samples in the data (`x`) matches the size of the target label vector (`y`).
* Initializes the weight vector and bias term to zero.
* Iterates through training epochs:
* For each epoch, loops through mini-batches of data:
 * Updates the weight and bias terms using the hinge loss function and gradient descent with **stochastic** updates (updating based on a mini-batch), as sketched in the code example after this list.
* Sets the `is_fitted_already` flag to True.
* Prints training progress and performance metrics (loss and accuracy) for each epoch during training (debug mode only).
3. **Prediction:**
* Checks if the model is trained before making predictions.
* For a single input vector:
* Calculates the hyperplane output using the weight vector and bias term.
* Uses the sign of the output to predict the class label (+1 for positive, -1 for negative).
* For a matrix of input vectors:
* Iterates through each row and calls the single-sample prediction function.
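A minimal sketch of that per-sample hinge-loss update with L2 regularization is shown below; the helper name `update_sample` and the exact form of the update are illustrative assumptions, not the verbatim `CLinearSVM` internals:
```
// Illustrative hinge-loss SGD update for one sample (assumed form,
// not the verbatim CLinearSVM internals).
void update_sample(vector &W, double &B, const vector &x, double y,
                   double alpha, double lambda)
  {
   double margin = y * (W.Dot(x) + B);       // distance from the decision boundary
   if(margin >= 1)
      W -= alpha * (2 * lambda * W);         // correct side: regularization only
   else
     {
      W -= alpha * (2 * lambda * W - x * y); // hinge gradient + regularization
      B -= alpha * (-y);
     }
  }
```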
**IV. Additional Notes:**
* The class implements a linear SVM, which is only effective for linearly separable data.
* The chosen hyperparameters (learning rate, epochs, etc.) can significantly impact the performance of the model. Experimentation and tuning might be needed for optimal results.
* The class uses mini-batch training for improved efficiency on large datasets.
* During training, the `during_training` flag is set to True to prevent making predictions while the model is being updated.
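**V. Example Usage:**
A minimal sketch based on the public API documented above; the include path, toy data, and variable names are illustrative assumptions:
```
#include <MALE5\SVM\svm.mqh>  // assumed include path

void OnStart()
  {
   matrix x = {{1.0,2.0},{2.0,3.0},{3.0,1.0},{6.0,5.0},{7.0,8.0},{8.0,6.0}};
   vector y = {-1,-1,-1,1,1,1};            // binary labels in {-1,+1}
   CLinearSVM svm(32, 0.001, 1000, 0.1);   // batch_size, alpha, epochs, lambda
   svm.fit(x, y);
   vector preds = svm.Predict(x);          // in-sample predictions
   Print("predictions: ", preds);
  }
```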
**Reference**
* [Data Science and Machine Learning (Part 15): SVM, A Must-Have Tool in Every Trader's Toolbox](https://www.mql5.com/en/articles/13395)
|
MALE5/SVM/README.md/0
|
{
"file_path": "MALE5/SVM/README.md",
"repo_id": "MALE5",
"token_count": 1039
}
| 32 |
//+------------------------------------------------------------------+
//| multipleMatRegTestScript.mq5 |
//| Copyright 2022, Omega Joctan. |
//| https://www.mql5.com/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2022, Omega Joctan."
#property link "https://www.mql5.com/users/omegajoctan"
#property version "1.00"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
#include "multipleMatLinearReg.mqh";
CMultipleMatLinearReg matreg;
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
//---
string filename= "NASDAQ_DATA.csv";
matreg.Init(2,"1,3,4",filename);
matreg.MultipleMatLinearRegMain();
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
|
MatrixRegressionMQL5/multipleMatRegTestScript.mq5/0
|
{
"file_path": "MatrixRegressionMQL5/multipleMatRegTestScript.mq5",
"repo_id": "MatrixRegressionMQL5",
"token_count": 508
}
| 33 |
No licence; a free-to-use/copy open-source library.
All the information about the library and the code used in this repo can be found in my article about decision trees in MQL5, linked
here: https://www.mql5.com/en/articles/11061/113599
### To start using the library, add these lines of code to your script
```
#include "decisiontree_2.mqh";
CDecisionTree *tree;
//+------------------------------------------------------------------+
//| Script program start function |
//+------------------------------------------------------------------+
void OnStart()
{
//---
tree = new CDecisionTree();
string file_name = "PlayTennis vs wheather.csv";
tree.Init(6,"2,3,4,5",file_name);
tree.BuildTree();
delete (tree);
}
```
### I welcome any thoughts
Any contribution to the library will be appreciated.
# Support the Project
This is a free and open-source project that has cost me time to figure things out and to present in an easy-to-use, friendly manner. If you appreciate the effort, kindly donate to the project via this link: https://www.buymeacoffee.com/omegajoctan
# Hire me for your next big project on Machine Learning
Hire me to create trading robots, indicators, or scripts based on:
***
* Neural Networks
* Linear Regressions
* Support Vector Machine
* Logistic Regressions
* Classification trees
* and many more aspects of machine learning
using this link: https://www.mql5.com/en/job/new?prefered=omegajoctan
|
DecisionTree-Classification-tree-MQL5/Readme.md/0
|
{
"file_path": "DecisionTree-Classification-tree-MQL5/Readme.md",
"repo_id": "DecisionTree-Classification-tree-MQL5",
"token_count": 463
}
| 0 |
{% extends 'base_template.html' %}
{% block content %}
<h1>LocalLibrary Home</h1>
<p>Welcome to LocalLibrary. This website has been developed by <em>Omega Joctan</em></p>
<h1>Library Records</h1>
<p>Library has the following record counts</p>
    <div style="margin-left: 15px;">
        <ul>
            <li><b>Books: </b>{{ num_books }}</li>
            <li><b>Authors: </b>{{ num_authors }}</li>
            <li><b>Book Instances: </b>{{ num_instance }}</li>
            <li><b>Books Available: </b>{{ num_instance_available }}</li>
            <li><b>Genres Total: </b>{{ genres_total }}</li>
        </ul>
    </div>
<p>You have visited this page {{num_visits }} time{{num_visits|pluralize}}.</p>
{% endblock %}
|
Django-locallibrary/LocalLibrary/catalog/Templates/index.html/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/catalog/Templates/index.html",
"repo_id": "Django-locallibrary",
"token_count": 327
}
| 1 |
import datetime
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
class RenewBookForm(forms.Form):
renewal_date = forms.DateField(help_text="Enter a date between now and 4 weeks (default is 3)")
    def clean_renewal_date(self):
data = self.cleaned_data['renewal_date']
if data < datetime.date.today():
raise ValidationError(_("Invalid date - renewal in the past"))
if data > datetime.date.today() + datetime.timedelta(weeks=4):
raise ValidationError(_("Invalid date - renewal more than 4 weeks ahead"))
return data
|
Django-locallibrary/LocalLibrary/catalog/forms.py/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/catalog/forms.py",
"repo_id": "Django-locallibrary",
"token_count": 228
}
| 2 |
<svg width="16" height="32" viewBox="0 0 1792 3584" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<g id="icon">
<path d="M1024 544v448q0 14-9 23t-23 9h-320q-14 0-23-9t-9-23v-64q0-14 9-23t23-9h224v-352q0-14 9-23t23-9h64q14 0 23 9t9 23zm416 352q0-148-73-273t-198-198-273-73-273 73-198 198-73 273 73 273 198 198 273 73 273-73 198-198 73-273zm224 0q0 209-103 385.5t-279.5 279.5-385.5 103-385.5-103-279.5-279.5-103-385.5 103-385.5 279.5-279.5 385.5-103 385.5 103 279.5 279.5 103 385.5z"/>
</g>
</defs>
<use xlink:href="#icon" x="0" y="0" fill="#447e9b" />
<use xlink:href="#icon" x="0" y="1792" fill="#003366" />
</svg>
|
Django-locallibrary/LocalLibrary/staticfiles/admin/img/icon-clock.e1d4dfac3f2b.svg/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/img/icon-clock.e1d4dfac3f2b.svg",
"repo_id": "Django-locallibrary",
"token_count": 352
}
| 3 |
<svg width="13" height="13" viewBox="0 0 1792 1792" xmlns="http://www.w3.org/2000/svg">
<path fill="#ffffff" d="M1363 877l-742 742q-19 19-45 19t-45-19l-166-166q-19-19-19-45t19-45l531-531-531-531q-19-19-19-45t19-45l166-166q19-19 45-19t45 19l742 742q19 19 19 45t-19 45z"/>
</svg>
|
Django-locallibrary/LocalLibrary/staticfiles/admin/img/tooltag-arrowright.bbfb788a849e.svg/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/img/tooltag-arrowright.bbfb788a849e.svg",
"repo_id": "Django-locallibrary",
"token_count": 151
}
| 4 |
/*global Calendar, findPosX, findPosY, get_format, gettext, gettext_noop, interpolate, ngettext, quickElement*/
// Inserts shortcut buttons after all of the following:
// <input type="text" class="vDateField">
// <input type="text" class="vTimeField">
'use strict';
{
const DateTimeShortcuts = {
calendars: [],
calendarInputs: [],
clockInputs: [],
clockHours: {
default_: [
[gettext_noop('Now'), -1],
[gettext_noop('Midnight'), 0],
[gettext_noop('6 a.m.'), 6],
[gettext_noop('Noon'), 12],
[gettext_noop('6 p.m.'), 18]
]
},
dismissClockFunc: [],
dismissCalendarFunc: [],
calendarDivName1: 'calendarbox', // name of calendar <div> that gets toggled
calendarDivName2: 'calendarin', // name of <div> that contains calendar
calendarLinkName: 'calendarlink', // name of the link that is used to toggle
clockDivName: 'clockbox', // name of clock <div> that gets toggled
clockLinkName: 'clocklink', // name of the link that is used to toggle
shortCutsClass: 'datetimeshortcuts', // class of the clock and cal shortcuts
timezoneWarningClass: 'timezonewarning', // class of the warning for timezone mismatch
timezoneOffset: 0,
init: function() {
const serverOffset = document.body.dataset.adminUtcOffset;
if (serverOffset) {
const localOffset = new Date().getTimezoneOffset() * -60;
DateTimeShortcuts.timezoneOffset = localOffset - serverOffset;
}
for (const inp of document.getElementsByTagName('input')) {
if (inp.type === 'text' && inp.classList.contains('vTimeField')) {
DateTimeShortcuts.addClock(inp);
DateTimeShortcuts.addTimezoneWarning(inp);
}
else if (inp.type === 'text' && inp.classList.contains('vDateField')) {
DateTimeShortcuts.addCalendar(inp);
DateTimeShortcuts.addTimezoneWarning(inp);
}
}
},
// Return the current time while accounting for the server timezone.
now: function() {
const serverOffset = document.body.dataset.adminUtcOffset;
if (serverOffset) {
const localNow = new Date();
const localOffset = localNow.getTimezoneOffset() * -60;
localNow.setTime(localNow.getTime() + 1000 * (serverOffset - localOffset));
return localNow;
} else {
return new Date();
}
},
// Add a warning when the time zone in the browser and backend do not match.
addTimezoneWarning: function(inp) {
const warningClass = DateTimeShortcuts.timezoneWarningClass;
let timezoneOffset = DateTimeShortcuts.timezoneOffset / 3600;
// Only warn if there is a time zone mismatch.
if (!timezoneOffset) {
return;
}
// Check if warning is already there.
if (inp.parentNode.querySelectorAll('.' + warningClass).length) {
return;
}
let message;
if (timezoneOffset > 0) {
message = ngettext(
'Note: You are %s hour ahead of server time.',
'Note: You are %s hours ahead of server time.',
timezoneOffset
);
}
else {
timezoneOffset *= -1;
message = ngettext(
'Note: You are %s hour behind server time.',
'Note: You are %s hours behind server time.',
timezoneOffset
);
}
message = interpolate(message, [timezoneOffset]);
const warning = document.createElement('span');
warning.className = warningClass;
warning.textContent = message;
inp.parentNode.appendChild(document.createElement('br'));
inp.parentNode.appendChild(warning);
},
// Add clock widget to a given field
addClock: function(inp) {
const num = DateTimeShortcuts.clockInputs.length;
DateTimeShortcuts.clockInputs[num] = inp;
DateTimeShortcuts.dismissClockFunc[num] = function() { DateTimeShortcuts.dismissClock(num); return true; };
// Shortcut links (clock icon and "Now" link)
const shortcuts_span = document.createElement('span');
shortcuts_span.className = DateTimeShortcuts.shortCutsClass;
inp.parentNode.insertBefore(shortcuts_span, inp.nextSibling);
const now_link = document.createElement('a');
now_link.href = "#";
now_link.textContent = gettext('Now');
now_link.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.handleClockQuicklink(num, -1);
});
const clock_link = document.createElement('a');
clock_link.href = '#';
clock_link.id = DateTimeShortcuts.clockLinkName + num;
clock_link.addEventListener('click', function(e) {
e.preventDefault();
// avoid triggering the document click handler to dismiss the clock
e.stopPropagation();
DateTimeShortcuts.openClock(num);
});
quickElement(
'span', clock_link, '',
'class', 'clock-icon',
'title', gettext('Choose a Time')
);
shortcuts_span.appendChild(document.createTextNode('\u00A0'));
shortcuts_span.appendChild(now_link);
shortcuts_span.appendChild(document.createTextNode('\u00A0|\u00A0'));
shortcuts_span.appendChild(clock_link);
// Create clock link div
//
// Markup looks like:
// <div id="clockbox1" class="clockbox module">
// <h2>Choose a time</h2>
// <ul class="timelist">
// <li><a href="#">Now</a></li>
// <li><a href="#">Midnight</a></li>
// <li><a href="#">6 a.m.</a></li>
// <li><a href="#">Noon</a></li>
// <li><a href="#">6 p.m.</a></li>
// </ul>
// <p class="calendar-cancel"><a href="#">Cancel</a></p>
// </div>
const clock_box = document.createElement('div');
clock_box.style.display = 'none';
clock_box.style.position = 'absolute';
clock_box.className = 'clockbox module';
clock_box.id = DateTimeShortcuts.clockDivName + num;
document.body.appendChild(clock_box);
clock_box.addEventListener('click', function(e) { e.stopPropagation(); });
quickElement('h2', clock_box, gettext('Choose a time'));
const time_list = quickElement('ul', clock_box);
time_list.className = 'timelist';
// The list of choices can be overridden in JavaScript like this:
// DateTimeShortcuts.clockHours.name = [['3 a.m.', 3]];
// where name is the name attribute of the <input>.
const name = typeof DateTimeShortcuts.clockHours[inp.name] === 'undefined' ? 'default_' : inp.name;
DateTimeShortcuts.clockHours[name].forEach(function(element) {
const time_link = quickElement('a', quickElement('li', time_list), gettext(element[0]), 'href', '#');
time_link.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.handleClockQuicklink(num, element[1]);
});
});
const cancel_p = quickElement('p', clock_box);
cancel_p.className = 'calendar-cancel';
const cancel_link = quickElement('a', cancel_p, gettext('Cancel'), 'href', '#');
cancel_link.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.dismissClock(num);
});
document.addEventListener('keyup', function(event) {
if (event.which === 27) {
// ESC key closes popup
DateTimeShortcuts.dismissClock(num);
event.preventDefault();
}
});
},
openClock: function(num) {
const clock_box = document.getElementById(DateTimeShortcuts.clockDivName + num);
const clock_link = document.getElementById(DateTimeShortcuts.clockLinkName + num);
// Recalculate the clockbox position
// is it left-to-right or right-to-left layout ?
if (window.getComputedStyle(document.body).direction !== 'rtl') {
clock_box.style.left = findPosX(clock_link) + 17 + 'px';
}
else {
// since style's width is in em, it'd be tough to calculate
// px value of it. let's use an estimated px for now
clock_box.style.left = findPosX(clock_link) - 110 + 'px';
}
clock_box.style.top = Math.max(0, findPosY(clock_link) - 30) + 'px';
// Show the clock box
clock_box.style.display = 'block';
document.addEventListener('click', DateTimeShortcuts.dismissClockFunc[num]);
},
dismissClock: function(num) {
document.getElementById(DateTimeShortcuts.clockDivName + num).style.display = 'none';
document.removeEventListener('click', DateTimeShortcuts.dismissClockFunc[num]);
},
handleClockQuicklink: function(num, val) {
let d;
if (val === -1) {
d = DateTimeShortcuts.now();
}
else {
d = new Date(1970, 1, 1, val, 0, 0, 0);
}
DateTimeShortcuts.clockInputs[num].value = d.strftime(get_format('TIME_INPUT_FORMATS')[0]);
DateTimeShortcuts.clockInputs[num].focus();
DateTimeShortcuts.dismissClock(num);
},
// Add calendar widget to a given field.
addCalendar: function(inp) {
const num = DateTimeShortcuts.calendars.length;
DateTimeShortcuts.calendarInputs[num] = inp;
DateTimeShortcuts.dismissCalendarFunc[num] = function() { DateTimeShortcuts.dismissCalendar(num); return true; };
// Shortcut links (calendar icon and "Today" link)
const shortcuts_span = document.createElement('span');
shortcuts_span.className = DateTimeShortcuts.shortCutsClass;
inp.parentNode.insertBefore(shortcuts_span, inp.nextSibling);
const today_link = document.createElement('a');
today_link.href = '#';
today_link.appendChild(document.createTextNode(gettext('Today')));
today_link.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.handleCalendarQuickLink(num, 0);
});
const cal_link = document.createElement('a');
cal_link.href = '#';
cal_link.id = DateTimeShortcuts.calendarLinkName + num;
cal_link.addEventListener('click', function(e) {
e.preventDefault();
// avoid triggering the document click handler to dismiss the calendar
e.stopPropagation();
DateTimeShortcuts.openCalendar(num);
});
quickElement(
'span', cal_link, '',
'class', 'date-icon',
'title', gettext('Choose a Date')
);
shortcuts_span.appendChild(document.createTextNode('\u00A0'));
shortcuts_span.appendChild(today_link);
shortcuts_span.appendChild(document.createTextNode('\u00A0|\u00A0'));
shortcuts_span.appendChild(cal_link);
// Create calendarbox div.
//
// Markup looks like:
//
// <div id="calendarbox3" class="calendarbox module">
// <h2>
// <a href="#" class="link-previous">‹</a>
// <a href="#" class="link-next">›</a> February 2003
// </h2>
// <div class="calendar" id="calendarin3">
// <!-- (cal) -->
// </div>
// <div class="calendar-shortcuts">
// <a href="#">Yesterday</a> | <a href="#">Today</a> | <a href="#">Tomorrow</a>
// </div>
// <p class="calendar-cancel"><a href="#">Cancel</a></p>
// </div>
const cal_box = document.createElement('div');
cal_box.style.display = 'none';
cal_box.style.position = 'absolute';
cal_box.className = 'calendarbox module';
cal_box.id = DateTimeShortcuts.calendarDivName1 + num;
document.body.appendChild(cal_box);
cal_box.addEventListener('click', function(e) { e.stopPropagation(); });
// next-prev links
const cal_nav = quickElement('div', cal_box);
const cal_nav_prev = quickElement('a', cal_nav, '<', 'href', '#');
cal_nav_prev.className = 'calendarnav-previous';
cal_nav_prev.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.drawPrev(num);
});
const cal_nav_next = quickElement('a', cal_nav, '>', 'href', '#');
cal_nav_next.className = 'calendarnav-next';
cal_nav_next.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.drawNext(num);
});
// main box
const cal_main = quickElement('div', cal_box, '', 'id', DateTimeShortcuts.calendarDivName2 + num);
cal_main.className = 'calendar';
DateTimeShortcuts.calendars[num] = new Calendar(DateTimeShortcuts.calendarDivName2 + num, DateTimeShortcuts.handleCalendarCallback(num));
DateTimeShortcuts.calendars[num].drawCurrent();
// calendar shortcuts
const shortcuts = quickElement('div', cal_box);
shortcuts.className = 'calendar-shortcuts';
let day_link = quickElement('a', shortcuts, gettext('Yesterday'), 'href', '#');
day_link.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.handleCalendarQuickLink(num, -1);
});
shortcuts.appendChild(document.createTextNode('\u00A0|\u00A0'));
day_link = quickElement('a', shortcuts, gettext('Today'), 'href', '#');
day_link.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.handleCalendarQuickLink(num, 0);
});
shortcuts.appendChild(document.createTextNode('\u00A0|\u00A0'));
day_link = quickElement('a', shortcuts, gettext('Tomorrow'), 'href', '#');
day_link.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.handleCalendarQuickLink(num, +1);
});
// cancel bar
const cancel_p = quickElement('p', cal_box);
cancel_p.className = 'calendar-cancel';
const cancel_link = quickElement('a', cancel_p, gettext('Cancel'), 'href', '#');
cancel_link.addEventListener('click', function(e) {
e.preventDefault();
DateTimeShortcuts.dismissCalendar(num);
});
document.addEventListener('keyup', function(event) {
if (event.which === 27) {
// ESC key closes popup
DateTimeShortcuts.dismissCalendar(num);
event.preventDefault();
}
});
},
openCalendar: function(num) {
const cal_box = document.getElementById(DateTimeShortcuts.calendarDivName1 + num);
const cal_link = document.getElementById(DateTimeShortcuts.calendarLinkName + num);
const inp = DateTimeShortcuts.calendarInputs[num];
// Determine if the current value in the input has a valid date.
// If so, draw the calendar with that date's year and month.
if (inp.value) {
const format = get_format('DATE_INPUT_FORMATS')[0];
const selected = inp.value.strptime(format);
const year = selected.getUTCFullYear();
const month = selected.getUTCMonth() + 1;
const re = /\d{4}/;
if (re.test(year.toString()) && month >= 1 && month <= 12) {
DateTimeShortcuts.calendars[num].drawDate(month, year, selected);
}
}
// Recalculate the clockbox position
// is it left-to-right or right-to-left layout ?
if (window.getComputedStyle(document.body).direction !== 'rtl') {
cal_box.style.left = findPosX(cal_link) + 17 + 'px';
}
else {
// since style's width is in em, it'd be tough to calculate
// px value of it. let's use an estimated px for now
cal_box.style.left = findPosX(cal_link) - 180 + 'px';
}
cal_box.style.top = Math.max(0, findPosY(cal_link) - 75) + 'px';
cal_box.style.display = 'block';
document.addEventListener('click', DateTimeShortcuts.dismissCalendarFunc[num]);
},
dismissCalendar: function(num) {
document.getElementById(DateTimeShortcuts.calendarDivName1 + num).style.display = 'none';
document.removeEventListener('click', DateTimeShortcuts.dismissCalendarFunc[num]);
},
drawPrev: function(num) {
DateTimeShortcuts.calendars[num].drawPreviousMonth();
},
drawNext: function(num) {
DateTimeShortcuts.calendars[num].drawNextMonth();
},
handleCalendarCallback: function(num) {
let format = get_format('DATE_INPUT_FORMATS')[0];
// the format needs to be escaped a little
format = format.replace('\\', '\\\\')
.replace('\r', '\\r')
.replace('\n', '\\n')
.replace('\t', '\\t')
.replace("'", "\\'");
return function(y, m, d) {
DateTimeShortcuts.calendarInputs[num].value = new Date(y, m - 1, d).strftime(format);
DateTimeShortcuts.calendarInputs[num].focus();
document.getElementById(DateTimeShortcuts.calendarDivName1 + num).style.display = 'none';
};
},
handleCalendarQuickLink: function(num, offset) {
const d = DateTimeShortcuts.now();
d.setDate(d.getDate() + offset);
DateTimeShortcuts.calendarInputs[num].value = d.strftime(get_format('DATE_INPUT_FORMATS')[0]);
DateTimeShortcuts.calendarInputs[num].focus();
DateTimeShortcuts.dismissCalendar(num);
}
};
window.addEventListener('load', DateTimeShortcuts.init);
window.DateTimeShortcuts = DateTimeShortcuts;
}
|
Django-locallibrary/LocalLibrary/staticfiles/admin/js/admin/DateTimeShortcuts.5548f99471bf.js/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/admin/DateTimeShortcuts.5548f99471bf.js",
"repo_id": "Django-locallibrary",
"token_count": 9480
}
| 5 |
'use strict';
{
// Call function fn when the DOM is loaded and ready. If it is already
// loaded, call the function now.
// http://youmightnotneedjquery.com/#ready
function ready(fn) {
if (document.readyState !== 'loading') {
fn();
} else {
document.addEventListener('DOMContentLoaded', fn);
}
}
ready(function() {
function handleClick(event) {
event.preventDefault();
const params = new URLSearchParams(window.location.search);
if (params.has('_popup')) {
window.close(); // Close the popup.
} else {
window.history.back(); // Otherwise, go back.
}
}
document.querySelectorAll('.cancel-link').forEach(function(el) {
el.addEventListener('click', handleClick);
});
});
}
|
Django-locallibrary/LocalLibrary/staticfiles/admin/js/cancel.ecc4c5ca7b32.js/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/cancel.ecc4c5ca7b32.js",
"repo_id": "Django-locallibrary",
"token_count": 402
}
| 6 |
/*global DateTimeShortcuts, SelectFilter*/
/**
* Django admin inlines
*
* Based on jQuery Formset 1.1
* @author Stanislaus Madueke (stan DOT madueke AT gmail DOT com)
* @requires jQuery 1.2.6 or later
*
* Copyright (c) 2009, Stanislaus Madueke
* All rights reserved.
*
* Spiced up with Code from Zain Memon's GSoC project 2009
* and modified for Django by Jannis Leidel, Travis Swicegood and Julien Phalip.
*
* Licensed under the New BSD License
* See: https://opensource.org/licenses/bsd-license.php
*/
'use strict';
{
const $ = django.jQuery;
$.fn.formset = function(opts) {
const options = $.extend({}, $.fn.formset.defaults, opts);
const $this = $(this);
const $parent = $this.parent();
const updateElementIndex = function(el, prefix, ndx) {
const id_regex = new RegExp("(" + prefix + "-(\\d+|__prefix__))");
const replacement = prefix + "-" + ndx;
if ($(el).prop("for")) {
$(el).prop("for", $(el).prop("for").replace(id_regex, replacement));
}
if (el.id) {
el.id = el.id.replace(id_regex, replacement);
}
if (el.name) {
el.name = el.name.replace(id_regex, replacement);
}
};
const totalForms = $("#id_" + options.prefix + "-TOTAL_FORMS").prop("autocomplete", "off");
let nextIndex = parseInt(totalForms.val(), 10);
const maxForms = $("#id_" + options.prefix + "-MAX_NUM_FORMS").prop("autocomplete", "off");
const minForms = $("#id_" + options.prefix + "-MIN_NUM_FORMS").prop("autocomplete", "off");
let addButton;
/**
* The "Add another MyModel" button below the inline forms.
*/
const addInlineAddButton = function() {
if (addButton === null) {
if ($this.prop("tagName") === "TR") {
// If forms are laid out as table rows, insert the
// "add" button in a new table row:
const numCols = $this.eq(-1).children().length;
$parent.append('<tr class="' + options.addCssClass + '"><td colspan="' + numCols + '"><a href="#">' + options.addText + "</a></tr>");
addButton = $parent.find("tr:last a");
} else {
// Otherwise, insert it immediately after the last form:
$this.filter(":last").after('<div class="' + options.addCssClass + '"><a href="#">' + options.addText + "</a></div>");
addButton = $this.filter(":last").next().find("a");
}
}
addButton.on('click', addInlineClickHandler);
};
const addInlineClickHandler = function(e) {
e.preventDefault();
const template = $("#" + options.prefix + "-empty");
const row = template.clone(true);
row.removeClass(options.emptyCssClass)
.addClass(options.formCssClass)
.attr("id", options.prefix + "-" + nextIndex);
addInlineDeleteButton(row);
row.find("*").each(function() {
updateElementIndex(this, options.prefix, totalForms.val());
});
// Insert the new form when it has been fully edited.
row.insertBefore($(template));
// Update number of total forms.
$(totalForms).val(parseInt(totalForms.val(), 10) + 1);
nextIndex += 1;
// Hide the add button if there's a limit and it's been reached.
if ((maxForms.val() !== '') && (maxForms.val() - totalForms.val()) <= 0) {
addButton.parent().hide();
}
// Show the remove buttons if there are more than min_num.
toggleDeleteButtonVisibility(row.closest('.inline-group'));
// Pass the new form to the post-add callback, if provided.
if (options.added) {
options.added(row);
}
$(document).trigger('formset:added', [row, options.prefix]);
};
/**
* The "X" button that is part of every unsaved inline.
* (When saved, it is replaced with a "Delete" checkbox.)
*/
const addInlineDeleteButton = function(row) {
if (row.is("tr")) {
// If the forms are laid out in table rows, insert
// the remove button into the last table cell:
row.children(":last").append('<div><a class="' + options.deleteCssClass + '" href="#">' + options.deleteText + "</a></div>");
} else if (row.is("ul") || row.is("ol")) {
// If they're laid out as an ordered/unordered list,
// insert an <li> after the last list item:
row.append('<li><a class="' + options.deleteCssClass + '" href="#">' + options.deleteText + "</a></li>");
} else {
// Otherwise, just insert the remove button as the
// last child element of the form's container:
row.children(":first").append('<span><a class="' + options.deleteCssClass + '" href="#">' + options.deleteText + "</a></span>");
}
// Add delete handler for each row.
row.find("a." + options.deleteCssClass).on('click', inlineDeleteHandler.bind(this));
};
const inlineDeleteHandler = function(e1) {
e1.preventDefault();
const deleteButton = $(e1.target);
const row = deleteButton.closest('.' + options.formCssClass);
const inlineGroup = row.closest('.inline-group');
// Remove the parent form containing this button,
// and also remove the relevant row with non-field errors:
const prevRow = row.prev();
if (prevRow.length && prevRow.hasClass('row-form-errors')) {
prevRow.remove();
}
row.remove();
nextIndex -= 1;
// Pass the deleted form to the post-delete callback, if provided.
if (options.removed) {
options.removed(row);
}
$(document).trigger('formset:removed', [row, options.prefix]);
// Update the TOTAL_FORMS form count.
const forms = $("." + options.formCssClass);
$("#id_" + options.prefix + "-TOTAL_FORMS").val(forms.length);
// Show add button again once below maximum number.
if ((maxForms.val() === '') || (maxForms.val() - forms.length) > 0) {
addButton.parent().show();
}
// Hide the remove buttons if at min_num.
toggleDeleteButtonVisibility(inlineGroup);
// Also, update names and ids for all remaining form controls so
// they remain in sequence:
let i, formCount;
const updateElementCallback = function() {
updateElementIndex(this, options.prefix, i);
};
for (i = 0, formCount = forms.length; i < formCount; i++) {
updateElementIndex($(forms).get(i), options.prefix, i);
$(forms.get(i)).find("*").each(updateElementCallback);
}
};
const toggleDeleteButtonVisibility = function(inlineGroup) {
if ((minForms.val() !== '') && (minForms.val() - totalForms.val()) >= 0) {
inlineGroup.find('.inline-deletelink').hide();
} else {
inlineGroup.find('.inline-deletelink').show();
}
};
$this.each(function(i) {
$(this).not("." + options.emptyCssClass).addClass(options.formCssClass);
});
// Create the delete buttons for all unsaved inlines:
$this.filter('.' + options.formCssClass + ':not(.has_original):not(.' + options.emptyCssClass + ')').each(function() {
addInlineDeleteButton($(this));
});
toggleDeleteButtonVisibility($this);
// Create the add button, initially hidden.
addButton = options.addButton;
addInlineAddButton();
// Show the add button if allowed to add more items.
// Note that max_num = None translates to a blank string.
const showAddButton = maxForms.val() === '' || (maxForms.val() - totalForms.val()) > 0;
if ($this.length && showAddButton) {
addButton.parent().show();
} else {
addButton.parent().hide();
}
return this;
};
/* Setup plugin defaults */
$.fn.formset.defaults = {
prefix: "form", // The form prefix for your django formset
addText: "add another", // Text for the add link
deleteText: "remove", // Text for the delete link
addCssClass: "add-row", // CSS class applied to the add link
deleteCssClass: "delete-row", // CSS class applied to the delete link
emptyCssClass: "empty-row", // CSS class applied to the empty row
formCssClass: "dynamic-form", // CSS class applied to each form in a formset
added: null, // Function called each time a new form is added
removed: null, // Function called each time a form is deleted
addButton: null // Existing add button to use
};
// Tabular inlines ---------------------------------------------------------
$.fn.tabularFormset = function(selector, options) {
const $rows = $(this);
const reinitDateTimeShortCuts = function() {
// Reinitialize the calendar and clock widgets by force
if (typeof DateTimeShortcuts !== "undefined") {
$(".datetimeshortcuts").remove();
DateTimeShortcuts.init();
}
};
const updateSelectFilter = function() {
// If any SelectFilter widgets are a part of the new form,
// instantiate a new SelectFilter instance for it.
if (typeof SelectFilter !== 'undefined') {
$('.selectfilter').each(function(index, value) {
const namearr = value.name.split('-');
SelectFilter.init(value.id, namearr[namearr.length - 1], false);
});
$('.selectfilterstacked').each(function(index, value) {
const namearr = value.name.split('-');
SelectFilter.init(value.id, namearr[namearr.length - 1], true);
});
}
};
const initPrepopulatedFields = function(row) {
row.find('.prepopulated_field').each(function() {
const field = $(this),
input = field.find('input, select, textarea'),
dependency_list = input.data('dependency_list') || [],
dependencies = [];
$.each(dependency_list, function(i, field_name) {
dependencies.push('#' + row.find('.field-' + field_name).find('input, select, textarea').attr('id'));
});
if (dependencies.length) {
input.prepopulate(dependencies, input.attr('maxlength'));
}
});
};
$rows.formset({
prefix: options.prefix,
addText: options.addText,
formCssClass: "dynamic-" + options.prefix,
deleteCssClass: "inline-deletelink",
deleteText: options.deleteText,
emptyCssClass: "empty-form",
added: function(row) {
initPrepopulatedFields(row);
reinitDateTimeShortCuts();
updateSelectFilter();
},
addButton: options.addButton
});
return $rows;
};
// Stacked inlines ---------------------------------------------------------
$.fn.stackedFormset = function(selector, options) {
const $rows = $(this);
const updateInlineLabel = function(row) {
$(selector).find(".inline_label").each(function(i) {
const count = i + 1;
$(this).html($(this).html().replace(/(#\d+)/g, "#" + count));
});
};
const reinitDateTimeShortCuts = function() {
// Reinitialize the calendar and clock widgets by force, yuck.
if (typeof DateTimeShortcuts !== "undefined") {
$(".datetimeshortcuts").remove();
DateTimeShortcuts.init();
}
};
const updateSelectFilter = function() {
// If any SelectFilter widgets were added, instantiate a new instance.
if (typeof SelectFilter !== "undefined") {
$(".selectfilter").each(function(index, value) {
const namearr = value.name.split('-');
SelectFilter.init(value.id, namearr[namearr.length - 1], false);
});
$(".selectfilterstacked").each(function(index, value) {
const namearr = value.name.split('-');
SelectFilter.init(value.id, namearr[namearr.length - 1], true);
});
}
};
const initPrepopulatedFields = function(row) {
row.find('.prepopulated_field').each(function() {
const field = $(this),
input = field.find('input, select, textarea'),
dependency_list = input.data('dependency_list') || [],
dependencies = [];
$.each(dependency_list, function(i, field_name) {
dependencies.push('#' + row.find('.form-row .field-' + field_name).find('input, select, textarea').attr('id'));
});
if (dependencies.length) {
input.prepopulate(dependencies, input.attr('maxlength'));
}
});
};
$rows.formset({
prefix: options.prefix,
addText: options.addText,
formCssClass: "dynamic-" + options.prefix,
deleteCssClass: "inline-deletelink",
deleteText: options.deleteText,
emptyCssClass: "empty-form",
removed: updateInlineLabel,
added: function(row) {
initPrepopulatedFields(row);
reinitDateTimeShortCuts();
updateSelectFilter();
updateInlineLabel(row);
},
addButton: options.addButton
});
return $rows;
};
$(document).ready(function() {
$(".js-inline-admin-formset").each(function() {
const data = $(this).data(),
inlineOptions = data.inlineFormset;
let selector;
switch(data.inlineType) {
case "stacked":
selector = inlineOptions.name + "-group .inline-related";
$(selector).stackedFormset(selector, inlineOptions.options);
break;
case "tabular":
selector = inlineOptions.name + "-group .tabular.inline-related tbody:first > tr.form-row";
$(selector).tabularFormset(selector, inlineOptions.options);
break;
}
});
});
}
|
Django-locallibrary/LocalLibrary/staticfiles/admin/js/inlines.7596b7fd289e.js/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/staticfiles/admin/js/inlines.7596b7fd289e.js",
"repo_id": "Django-locallibrary",
"token_count": 7095
}
| 7 |
/*global URLify*/
'use strict';
{
const $ = django.jQuery;
$.fn.prepopulate = function(dependencies, maxLength, allowUnicode) {
/*
Depends on urlify.js
Populates a selected field with the values of the dependent fields,
URLifies and shortens the string.
dependencies - array of dependent fields ids
maxLength - maximum length of the URLify'd string
allowUnicode - Unicode support of the URLify'd string
*/
return this.each(function() {
const prepopulatedField = $(this);
const populate = function() {
// Bail if the field's value has been changed by the user
if (prepopulatedField.data('_changed')) {
return;
}
const values = [];
$.each(dependencies, function(i, field) {
field = $(field);
if (field.val().length > 0) {
values.push(field.val());
}
});
prepopulatedField.val(URLify(values.join(' '), maxLength, allowUnicode));
};
prepopulatedField.data('_changed', false);
prepopulatedField.on('change', function() {
prepopulatedField.data('_changed', true);
});
if (!prepopulatedField.val()) {
$(dependencies.join(',')).on('keyup change focus', populate);
}
});
};
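    // Usage sketch (illustrative; the element ids are hypothetical). This is
    // roughly how the admin's prepopulated-fields machinery invokes the plugin:
    //
    //   $("#id_slug").prepopulate(["#id_title"], 50, false);
    //
    // While #id_slug is untouched by the user, typing into #id_title keeps it
    // filled with URLify(title, 50).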
}
// ==== End of file: Django-locallibrary/LocalLibrary/staticfiles/admin/js/prepopulate.bd2361dfd64d.js ====
/*!
* jQuery JavaScript Library v3.5.1
* https://jquery.com/
*
* Includes Sizzle.js
* https://sizzlejs.com/
*
* Copyright JS Foundation and other contributors
* Released under the MIT license
* https://jquery.org/license
*
* Date: 2020-05-04T22:49Z
*/
( function( global, factory ) {
"use strict";
if ( typeof module === "object" && typeof module.exports === "object" ) {
// For CommonJS and CommonJS-like environments where a proper `window`
// is present, execute the factory and get jQuery.
// For environments that do not have a `window` with a `document`
// (such as Node.js), expose a factory as module.exports.
// This accentuates the need for the creation of a real `window`.
// e.g. var jQuery = require("jquery")(window);
// See ticket #14549 for more info.
module.exports = global.document ?
factory( global, true ) :
function( w ) {
if ( !w.document ) {
throw new Error( "jQuery requires a window with a document" );
}
return factory( w );
};
} else {
factory( global );
}
// Pass this if window is not defined yet
} )( typeof window !== "undefined" ? window : this, function( window, noGlobal ) {
// Edge <= 12 - 13+, Firefox <=18 - 45+, IE 10 - 11, Safari 5.1 - 9+, iOS 6 - 9.1
// throw exceptions when non-strict code (e.g., ASP.NET 4.5) accesses strict mode
// arguments.callee.caller (trac-13335). But as of jQuery 3.0 (2016), strict mode should be common
// enough that all such attempts are guarded in a try block.
"use strict";
var arr = [];
var getProto = Object.getPrototypeOf;
var slice = arr.slice;
var flat = arr.flat ? function( array ) {
return arr.flat.call( array );
} : function( array ) {
return arr.concat.apply( [], array );
};
var push = arr.push;
var indexOf = arr.indexOf;
var class2type = {};
var toString = class2type.toString;
var hasOwn = class2type.hasOwnProperty;
var fnToString = hasOwn.toString;
var ObjectFunctionString = fnToString.call( Object );
var support = {};
var isFunction = function isFunction( obj ) {
// Support: Chrome <=57, Firefox <=52
// In some browsers, typeof returns "function" for HTML <object> elements
// (i.e., `typeof document.createElement( "object" ) === "function"`).
// We don't want to classify *any* DOM node as a function.
return typeof obj === "function" && typeof obj.nodeType !== "number";
};
var isWindow = function isWindow( obj ) {
return obj != null && obj === obj.window;
};
var document = window.document;
var preservedScriptAttributes = {
type: true,
src: true,
nonce: true,
noModule: true
};
function DOMEval( code, node, doc ) {
doc = doc || document;
var i, val,
script = doc.createElement( "script" );
script.text = code;
if ( node ) {
for ( i in preservedScriptAttributes ) {
// Support: Firefox 64+, Edge 18+
// Some browsers don't support the "nonce" property on scripts.
// On the other hand, just using `getAttribute` is not enough as
// the `nonce` attribute is reset to an empty string whenever it
// becomes browsing-context connected.
// See https://github.com/whatwg/html/issues/2369
// See https://html.spec.whatwg.org/#nonce-attributes
// The `node.getAttribute` check was added for the sake of
// `jQuery.globalEval` so that it can fake a nonce-containing node
// via an object.
val = node[ i ] || node.getAttribute && node.getAttribute( i );
if ( val ) {
script.setAttribute( i, val );
}
}
}
doc.head.appendChild( script ).parentNode.removeChild( script );
}
function toType( obj ) {
if ( obj == null ) {
return obj + "";
}
// Support: Android <=2.3 only (functionish RegExp)
return typeof obj === "object" || typeof obj === "function" ?
class2type[ toString.call( obj ) ] || "object" :
typeof obj;
}
/* global Symbol */
// Defining this global in .eslintrc.json would create a danger of using the global
// unguarded in another place, it seems safer to define global only for this module
var
version = "3.5.1",
// Define a local copy of jQuery
jQuery = function( selector, context ) {
// The jQuery object is actually just the init constructor 'enhanced'
// Need init if jQuery is called (just allow error to be thrown if not included)
return new jQuery.fn.init( selector, context );
};
jQuery.fn = jQuery.prototype = {
// The current version of jQuery being used
jquery: version,
constructor: jQuery,
// The default length of a jQuery object is 0
length: 0,
toArray: function() {
return slice.call( this );
},
// Get the Nth element in the matched element set OR
// Get the whole matched element set as a clean array
get: function( num ) {
// Return all the elements in a clean array
if ( num == null ) {
return slice.call( this );
}
// Return just the one element from the set
return num < 0 ? this[ num + this.length ] : this[ num ];
},
// Take an array of elements and push it onto the stack
// (returning the new matched element set)
pushStack: function( elems ) {
// Build a new jQuery matched element set
var ret = jQuery.merge( this.constructor(), elems );
// Add the old object onto the stack (as a reference)
ret.prevObject = this;
// Return the newly-formed element set
return ret;
},
// Execute a callback for every element in the matched set.
each: function( callback ) {
return jQuery.each( this, callback );
},
map: function( callback ) {
return this.pushStack( jQuery.map( this, function( elem, i ) {
return callback.call( elem, i, elem );
} ) );
},
slice: function() {
return this.pushStack( slice.apply( this, arguments ) );
},
first: function() {
return this.eq( 0 );
},
last: function() {
return this.eq( -1 );
},
even: function() {
return this.pushStack( jQuery.grep( this, function( _elem, i ) {
return ( i + 1 ) % 2;
} ) );
},
odd: function() {
return this.pushStack( jQuery.grep( this, function( _elem, i ) {
return i % 2;
} ) );
},
eq: function( i ) {
var len = this.length,
j = +i + ( i < 0 ? len : 0 );
return this.pushStack( j >= 0 && j < len ? [ this[ j ] ] : [] );
},
end: function() {
return this.prevObject || this.constructor();
},
// For internal use only.
// Behaves like an Array's method, not like a jQuery method.
push: push,
sort: arr.sort,
splice: arr.splice
};
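// Illustrative chaining sketch (not part of the original source): pushStack
// records prevObject, so traversal methods can be undone with .end() —
//
//   jQuery( "li" ).slice( 1 ).end()   // back to the full "li" set
//   jQuery( "li" ).eq( -1 )           // new set holding only the last <li>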
jQuery.extend = jQuery.fn.extend = function() {
var options, name, src, copy, copyIsArray, clone,
target = arguments[ 0 ] || {},
i = 1,
length = arguments.length,
deep = false;
// Handle a deep copy situation
if ( typeof target === "boolean" ) {
deep = target;
// Skip the boolean and the target
target = arguments[ i ] || {};
i++;
}
// Handle case when target is a string or something (possible in deep copy)
if ( typeof target !== "object" && !isFunction( target ) ) {
target = {};
}
// Extend jQuery itself if only one argument is passed
if ( i === length ) {
target = this;
i--;
}
for ( ; i < length; i++ ) {
// Only deal with non-null/undefined values
if ( ( options = arguments[ i ] ) != null ) {
// Extend the base object
for ( name in options ) {
copy = options[ name ];
// Prevent Object.prototype pollution
// Prevent never-ending loop
if ( name === "__proto__" || target === copy ) {
continue;
}
// Recurse if we're merging plain objects or arrays
if ( deep && copy && ( jQuery.isPlainObject( copy ) ||
( copyIsArray = Array.isArray( copy ) ) ) ) {
src = target[ name ];
// Ensure proper type for the source value
if ( copyIsArray && !Array.isArray( src ) ) {
clone = [];
} else if ( !copyIsArray && !jQuery.isPlainObject( src ) ) {
clone = {};
} else {
clone = src;
}
copyIsArray = false;
// Never move original objects, clone them
target[ name ] = jQuery.extend( deep, clone, copy );
// Don't bring in undefined values
} else if ( copy !== undefined ) {
target[ name ] = copy;
}
}
}
}
// Return the modified object
return target;
};
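// Illustrative sketch (not part of the original source): with deep = true,
// plain objects and arrays are merged recursively rather than overwritten —
//
//   jQuery.extend( true, {}, { a: { b: 1 } }, { a: { c: 2 } } )
//   // => { a: { b: 1, c: 2 } }
//
// Note the guard above: a "__proto__" key is skipped entirely to prevent
// prototype pollution.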
jQuery.extend( {
// Unique for each copy of jQuery on the page
expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ),
// Assume jQuery is ready without the ready module
isReady: true,
error: function( msg ) {
throw new Error( msg );
},
noop: function() {},
isPlainObject: function( obj ) {
var proto, Ctor;
// Detect obvious negatives
// Use toString instead of jQuery.type to catch host objects
if ( !obj || toString.call( obj ) !== "[object Object]" ) {
return false;
}
proto = getProto( obj );
// Objects with no prototype (e.g., `Object.create( null )`) are plain
if ( !proto ) {
return true;
}
// Objects with prototype are plain iff they were constructed by a global Object function
Ctor = hasOwn.call( proto, "constructor" ) && proto.constructor;
return typeof Ctor === "function" && fnToString.call( Ctor ) === ObjectFunctionString;
},
isEmptyObject: function( obj ) {
var name;
for ( name in obj ) {
return false;
}
return true;
},
// Evaluates a script in a provided context; falls back to the global one
// if not specified.
globalEval: function( code, options, doc ) {
DOMEval( code, { nonce: options && options.nonce }, doc );
},
each: function( obj, callback ) {
var length, i = 0;
if ( isArrayLike( obj ) ) {
length = obj.length;
for ( ; i < length; i++ ) {
if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
break;
}
}
} else {
for ( i in obj ) {
if ( callback.call( obj[ i ], i, obj[ i ] ) === false ) {
break;
}
}
}
return obj;
},
// results is for internal usage only
makeArray: function( arr, results ) {
var ret = results || [];
if ( arr != null ) {
if ( isArrayLike( Object( arr ) ) ) {
jQuery.merge( ret,
typeof arr === "string" ?
[ arr ] : arr
);
} else {
push.call( ret, arr );
}
}
return ret;
},
inArray: function( elem, arr, i ) {
return arr == null ? -1 : indexOf.call( arr, elem, i );
},
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
merge: function( first, second ) {
var len = +second.length,
j = 0,
i = first.length;
for ( ; j < len; j++ ) {
first[ i++ ] = second[ j ];
}
first.length = i;
return first;
},
grep: function( elems, callback, invert ) {
var callbackInverse,
matches = [],
i = 0,
length = elems.length,
callbackExpect = !invert;
// Go through the array, only saving the items
// that pass the validator function
for ( ; i < length; i++ ) {
callbackInverse = !callback( elems[ i ], i );
if ( callbackInverse !== callbackExpect ) {
matches.push( elems[ i ] );
}
}
return matches;
},
// arg is for internal usage only
map: function( elems, callback, arg ) {
var length, value,
i = 0,
ret = [];
// Go through the array, translating each of the items to their new values
if ( isArrayLike( elems ) ) {
length = elems.length;
for ( ; i < length; i++ ) {
value = callback( elems[ i ], i, arg );
if ( value != null ) {
ret.push( value );
}
}
// Go through every key on the object,
} else {
for ( i in elems ) {
value = callback( elems[ i ], i, arg );
if ( value != null ) {
ret.push( value );
}
}
}
// Flatten any nested arrays
return flat( ret );
},
// A global GUID counter for objects
guid: 1,
// jQuery.support is not used in Core but other projects attach their
// properties to it so it needs to exist.
support: support
} );
if ( typeof Symbol === "function" ) {
jQuery.fn[ Symbol.iterator ] = arr[ Symbol.iterator ];
}
// Populate the class2type map
jQuery.each( "Boolean Number String Function Array Date RegExp Object Error Symbol".split( " " ),
function( _i, name ) {
class2type[ "[object " + name + "]" ] = name.toLowerCase();
} );
function isArrayLike( obj ) {
// Support: real iOS 8.2 only (not reproducible in simulator)
// `in` check used to prevent JIT error (gh-2145)
// hasOwn isn't used here due to false negatives
// regarding Nodelist length in IE
var length = !!obj && "length" in obj && obj.length,
type = toType( obj );
if ( isFunction( obj ) || isWindow( obj ) ) {
return false;
}
return type === "array" || length === 0 ||
typeof length === "number" && length > 0 && ( length - 1 ) in obj;
}
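// Illustrative results (not part of the original source):
//
//   isArrayLike( [ 1, 2 ] )        // true  (real array)
//   isArrayLike( jQuery( "div" ) ) // true  (numeric length, indexed entries)
//   isArrayLike( { length: 3 } )   // false (no property "2")
//   isArrayLike( window )          // false (rejected by the isWindow check)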
var Sizzle =
/*!
* Sizzle CSS Selector Engine v2.3.5
* https://sizzlejs.com/
*
* Copyright JS Foundation and other contributors
* Released under the MIT license
* https://js.foundation/
*
* Date: 2020-03-14
*/
( function( window ) {
var i,
support,
Expr,
getText,
isXML,
tokenize,
compile,
select,
outermostContext,
sortInput,
hasDuplicate,
// Local document vars
setDocument,
document,
docElem,
documentIsHTML,
rbuggyQSA,
rbuggyMatches,
matches,
contains,
// Instance-specific data
expando = "sizzle" + 1 * new Date(),
preferredDoc = window.document,
dirruns = 0,
done = 0,
classCache = createCache(),
tokenCache = createCache(),
compilerCache = createCache(),
nonnativeSelectorCache = createCache(),
sortOrder = function( a, b ) {
if ( a === b ) {
hasDuplicate = true;
}
return 0;
},
// Instance methods
hasOwn = ( {} ).hasOwnProperty,
arr = [],
pop = arr.pop,
pushNative = arr.push,
push = arr.push,
slice = arr.slice,
// Use a stripped-down indexOf as it's faster than native
// https://jsperf.com/thor-indexof-vs-for/5
indexOf = function( list, elem ) {
var i = 0,
len = list.length;
for ( ; i < len; i++ ) {
if ( list[ i ] === elem ) {
return i;
}
}
return -1;
},
booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|" +
"ismap|loop|multiple|open|readonly|required|scoped",
// Regular expressions
// http://www.w3.org/TR/css3-selectors/#whitespace
whitespace = "[\\x20\\t\\r\\n\\f]",
// https://www.w3.org/TR/css-syntax-3/#ident-token-diagram
identifier = "(?:\\\\[\\da-fA-F]{1,6}" + whitespace +
"?|\\\\[^\\r\\n\\f]|[\\w-]|[^\0-\\x7f])+",
// Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors
attributes = "\\[" + whitespace + "*(" + identifier + ")(?:" + whitespace +
// Operator (capture 2)
"*([*^$|!~]?=)" + whitespace +
// "Attribute values must be CSS identifiers [capture 5]
// or strings [capture 3 or capture 4]"
"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" +
whitespace + "*\\]",
pseudos = ":(" + identifier + ")(?:\\((" +
// To reduce the number of selectors needing tokenize in the preFilter, prefer arguments:
// 1. quoted (capture 3; capture 4 or capture 5)
"('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" +
// 2. simple (capture 6)
"((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" +
// 3. anything else (capture 2)
".*" +
")\\)|)",
// Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter
rwhitespace = new RegExp( whitespace + "+", "g" ),
rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" +
whitespace + "+$", "g" ),
rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ),
rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace +
"*" ),
rdescend = new RegExp( whitespace + "|>" ),
rpseudo = new RegExp( pseudos ),
ridentifier = new RegExp( "^" + identifier + "$" ),
matchExpr = {
"ID": new RegExp( "^#(" + identifier + ")" ),
"CLASS": new RegExp( "^\\.(" + identifier + ")" ),
"TAG": new RegExp( "^(" + identifier + "|[*])" ),
"ATTR": new RegExp( "^" + attributes ),
"PSEUDO": new RegExp( "^" + pseudos ),
"CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" +
whitespace + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" +
whitespace + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ),
"bool": new RegExp( "^(?:" + booleans + ")$", "i" ),
// For use in libraries implementing .is()
// We use this for POS matching in `select`
"needsContext": new RegExp( "^" + whitespace +
"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace +
"*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" )
},
rhtml = /HTML$/i,
rinputs = /^(?:input|select|textarea|button)$/i,
rheader = /^h\d$/i,
rnative = /^[^{]+\{\s*\[native \w/,
// Easily-parseable/retrievable ID or TAG or CLASS selectors
rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,
rsibling = /[+~]/,
// CSS escapes
// http://www.w3.org/TR/CSS21/syndata.html#escaped-characters
runescape = new RegExp( "\\\\[\\da-fA-F]{1,6}" + whitespace + "?|\\\\([^\\r\\n\\f])", "g" ),
funescape = function( escape, nonHex ) {
var high = "0x" + escape.slice( 1 ) - 0x10000;
return nonHex ?
// Strip the backslash prefix from a non-hex escape sequence
nonHex :
// Replace a hexadecimal escape sequence with the encoded Unicode code point
// Support: IE <=11+
// For values outside the Basic Multilingual Plane (BMP), manually construct a
// surrogate pair
high < 0 ?
String.fromCharCode( high + 0x10000 ) :
String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 );
},
// CSS string/identifier serialization
// https://drafts.csswg.org/cssom/#common-serializing-idioms
rcssescape = /([\0-\x1f\x7f]|^-?\d)|^-$|[^\0-\x1f\x7f-\uFFFF\w-]/g,
fcssescape = function( ch, asCodePoint ) {
if ( asCodePoint ) {
// U+0000 NULL becomes U+FFFD REPLACEMENT CHARACTER
if ( ch === "\0" ) {
return "\uFFFD";
}
// Control characters and (dependent upon position) numbers get escaped as code points
return ch.slice( 0, -1 ) + "\\" +
ch.charCodeAt( ch.length - 1 ).toString( 16 ) + " ";
}
// Other potentially-special ASCII characters get backslash-escaped
return "\\" + ch;
},
// Used for iframes
// See setDocument()
// Removing the function wrapper causes a "Permission Denied"
// error in IE
unloadHandler = function() {
setDocument();
},
inDisabledFieldset = addCombinator(
function( elem ) {
return elem.disabled === true && elem.nodeName.toLowerCase() === "fieldset";
},
{ dir: "parentNode", next: "legend" }
);
// Optimize for push.apply( _, NodeList )
try {
push.apply(
( arr = slice.call( preferredDoc.childNodes ) ),
preferredDoc.childNodes
);
// Support: Android<4.0
// Detect silently failing push.apply
// eslint-disable-next-line no-unused-expressions
arr[ preferredDoc.childNodes.length ].nodeType;
} catch ( e ) {
push = { apply: arr.length ?
// Leverage slice if possible
function( target, els ) {
pushNative.apply( target, slice.call( els ) );
} :
// Support: IE<9
// Otherwise append directly
function( target, els ) {
var j = target.length,
i = 0;
// Can't trust NodeList.length
while ( ( target[ j++ ] = els[ i++ ] ) ) {}
target.length = j - 1;
}
};
}
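// Illustrative consequence (not part of the original source): after this
// detection, internal calls such as
//
//   push.apply( results, newContext.querySelectorAll( newSelector ) );
//
// are safe even on engines whose native push.apply chokes on NodeLists.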
function Sizzle( selector, context, results, seed ) {
var m, i, elem, nid, match, groups, newSelector,
newContext = context && context.ownerDocument,
// nodeType defaults to 9, since context defaults to document
nodeType = context ? context.nodeType : 9;
results = results || [];
// Return early from calls with invalid selector or context
if ( typeof selector !== "string" || !selector ||
nodeType !== 1 && nodeType !== 9 && nodeType !== 11 ) {
return results;
}
// Try to shortcut find operations (as opposed to filters) in HTML documents
if ( !seed ) {
setDocument( context );
context = context || document;
if ( documentIsHTML ) {
// If the selector is sufficiently simple, try using a "get*By*" DOM method
// (excepting DocumentFragment context, where the methods don't exist)
if ( nodeType !== 11 && ( match = rquickExpr.exec( selector ) ) ) {
// ID selector
if ( ( m = match[ 1 ] ) ) {
// Document context
if ( nodeType === 9 ) {
if ( ( elem = context.getElementById( m ) ) ) {
// Support: IE, Opera, Webkit
// TODO: identify versions
// getElementById can match elements by name instead of ID
if ( elem.id === m ) {
results.push( elem );
return results;
}
} else {
return results;
}
// Element context
} else {
// Support: IE, Opera, Webkit
// TODO: identify versions
// getElementById can match elements by name instead of ID
if ( newContext && ( elem = newContext.getElementById( m ) ) &&
contains( context, elem ) &&
elem.id === m ) {
results.push( elem );
return results;
}
}
// Type selector
} else if ( match[ 2 ] ) {
push.apply( results, context.getElementsByTagName( selector ) );
return results;
// Class selector
} else if ( ( m = match[ 3 ] ) && support.getElementsByClassName &&
context.getElementsByClassName ) {
push.apply( results, context.getElementsByClassName( m ) );
return results;
}
}
// Take advantage of querySelectorAll
if ( support.qsa &&
!nonnativeSelectorCache[ selector + " " ] &&
( !rbuggyQSA || !rbuggyQSA.test( selector ) ) &&
// Support: IE 8 only
// Exclude object elements
( nodeType !== 1 || context.nodeName.toLowerCase() !== "object" ) ) {
newSelector = selector;
newContext = context;
// qSA considers elements outside a scoping root when evaluating child or
// descendant combinators, which is not what we want.
// In such cases, we work around the behavior by prefixing every selector in the
// list with an ID selector referencing the scope context.
// The technique has to be used as well when a leading combinator is used
// as such selectors are not recognized by querySelectorAll.
// Thanks to Andrew Dupont for this technique.
if ( nodeType === 1 &&
( rdescend.test( selector ) || rcombinators.test( selector ) ) ) {
// Expand context for sibling selectors
newContext = rsibling.test( selector ) && testContext( context.parentNode ) ||
context;
// We can use :scope instead of the ID hack if the browser
// supports it & if we're not changing the context.
if ( newContext !== context || !support.scope ) {
// Capture the context ID, setting it first if necessary
if ( ( nid = context.getAttribute( "id" ) ) ) {
nid = nid.replace( rcssescape, fcssescape );
} else {
context.setAttribute( "id", ( nid = expando ) );
}
}
// Prefix every selector in the list
groups = tokenize( selector );
i = groups.length;
while ( i-- ) {
groups[ i ] = ( nid ? "#" + nid : ":scope" ) + " " +
toSelector( groups[ i ] );
}
newSelector = groups.join( "," );
}
try {
push.apply( results,
newContext.querySelectorAll( newSelector )
);
return results;
} catch ( qsaError ) {
nonnativeSelectorCache( selector, true );
} finally {
if ( nid === expando ) {
context.removeAttribute( "id" );
}
}
}
}
}
// All others
return select( selector.replace( rtrim, "$1" ), context, results, seed );
}
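// Illustrative calls (not part of the original source):
//
//   Sizzle( ".note" )          // fast path: getElementsByClassName
//   Sizzle( "> p", someDiv )   // leading combinator: selector is rewritten to
//                              // "#<temp-id> > p" (or ":scope > p") before
//                              // being handed to querySelectorAll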
/**
* Create key-value caches of limited size
* @returns {function(string, object)} Returns the Object data after storing it on itself with
* property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength)
* deleting the oldest entry
*/
function createCache() {
var keys = [];
function cache( key, value ) {
// Use (key + " ") to avoid collision with native prototype properties (see Issue #157)
if ( keys.push( key + " " ) > Expr.cacheLength ) {
// Only keep the most recent entries
delete cache[ keys.shift() ];
}
return ( cache[ key + " " ] = value );
}
return cache;
}
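// Illustrative use (not part of the original source), mirroring how
// tokenize() works with tokenCache below:
//
//   var cache = createCache();
//   cache( "div > p", tokens );   // stored under the space-suffixed key
//   cache[ "div > p" + " " ];     // later lookup
//
// Once Expr.cacheLength keys accumulate, the oldest entry is dropped.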
/**
* Mark a function for special use by Sizzle
* @param {Function} fn The function to mark
*/
function markFunction( fn ) {
fn[ expando ] = true;
return fn;
}
/**
* Support testing using an element
* @param {Function} fn Passed the created element and returns a boolean result
*/
function assert( fn ) {
var el = document.createElement( "fieldset" );
try {
return !!fn( el );
} catch ( e ) {
return false;
} finally {
// Remove from its parent by default
if ( el.parentNode ) {
el.parentNode.removeChild( el );
}
// release memory in IE
el = null;
}
}
/**
* Adds the same handler for all of the specified attrs
* @param {String} attrs Pipe-separated list of attributes
* @param {Function} handler The method that will be applied
*/
function addHandle( attrs, handler ) {
var arr = attrs.split( "|" ),
i = arr.length;
while ( i-- ) {
Expr.attrHandle[ arr[ i ] ] = handler;
}
}
/**
* Checks document order of two siblings
* @param {Element} a
* @param {Element} b
* @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b
*/
function siblingCheck( a, b ) {
var cur = b && a,
diff = cur && a.nodeType === 1 && b.nodeType === 1 &&
a.sourceIndex - b.sourceIndex;
// Use IE sourceIndex if available on both nodes
if ( diff ) {
return diff;
}
// Check if b follows a
if ( cur ) {
while ( ( cur = cur.nextSibling ) ) {
if ( cur === b ) {
return -1;
}
}
}
return a ? 1 : -1;
}
/**
* Returns a function to use in pseudos for input types
* @param {String} type
*/
function createInputPseudo( type ) {
return function( elem ) {
var name = elem.nodeName.toLowerCase();
return name === "input" && elem.type === type;
};
}
/**
* Returns a function to use in pseudos for buttons
* @param {String} type
*/
function createButtonPseudo( type ) {
return function( elem ) {
var name = elem.nodeName.toLowerCase();
return ( name === "input" || name === "button" ) && elem.type === type;
};
}
/**
* Returns a function to use in pseudos for :enabled/:disabled
* @param {Boolean} disabled true for :disabled; false for :enabled
*/
function createDisabledPseudo( disabled ) {
// Known :disabled false positives: fieldset[disabled] > legend:nth-of-type(n+2) :can-disable
return function( elem ) {
// Only certain elements can match :enabled or :disabled
// https://html.spec.whatwg.org/multipage/scripting.html#selector-enabled
// https://html.spec.whatwg.org/multipage/scripting.html#selector-disabled
if ( "form" in elem ) {
// Check for inherited disabledness on relevant non-disabled elements:
// * listed form-associated elements in a disabled fieldset
// https://html.spec.whatwg.org/multipage/forms.html#category-listed
// https://html.spec.whatwg.org/multipage/forms.html#concept-fe-disabled
// * option elements in a disabled optgroup
// https://html.spec.whatwg.org/multipage/forms.html#concept-option-disabled
// All such elements have a "form" property.
if ( elem.parentNode && elem.disabled === false ) {
// Option elements defer to a parent optgroup if present
if ( "label" in elem ) {
if ( "label" in elem.parentNode ) {
return elem.parentNode.disabled === disabled;
} else {
return elem.disabled === disabled;
}
}
// Support: IE 6 - 11
// Use the isDisabled shortcut property to check for disabled fieldset ancestors
return elem.isDisabled === disabled ||
// Where there is no isDisabled, check manually
/* jshint -W018 */
elem.isDisabled !== !disabled &&
inDisabledFieldset( elem ) === disabled;
}
return elem.disabled === disabled;
// Try to winnow out elements that can't be disabled before trusting the disabled property.
// Some victims get caught in our net (label, legend, menu, track), but it shouldn't
// even exist on them, let alone have a boolean value.
} else if ( "label" in elem ) {
return elem.disabled === disabled;
}
// Remaining elements are neither :enabled nor :disabled
return false;
};
}
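// Illustrative consequence (not part of the original source): given
//
//   <fieldset disabled><input name="q"></fieldset>
//
// the <input> matches ":disabled" via inDisabledFieldset even though its own
// .disabled property is false, while an <option> defers to its <optgroup>.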
/**
* Returns a function to use in pseudos for positionals
* @param {Function} fn
*/
function createPositionalPseudo( fn ) {
return markFunction( function( argument ) {
argument = +argument;
return markFunction( function( seed, matches ) {
var j,
matchIndexes = fn( [], seed.length, argument ),
i = matchIndexes.length;
// Match elements found at the specified indexes
while ( i-- ) {
if ( seed[ ( j = matchIndexes[ i ] ) ] ) {
seed[ j ] = !( matches[ j ] = seed[ j ] );
}
}
} );
} );
}
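// Illustrative expansion (not part of the original source): ":eq(2)" resolves
// via Expr.pseudos[ "eq" ]( "2" ), whose returned marked function receives the
// candidate seed and moves only seed[ 2 ] into the matches array.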
/**
* Checks a node for validity as a Sizzle context
* @param {Element|Object=} context
* @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value
*/
function testContext( context ) {
return context && typeof context.getElementsByTagName !== "undefined" && context;
}
// Expose support vars for convenience
support = Sizzle.support = {};
/**
* Detects XML nodes
* @param {Element|Object} elem An element or a document
* @returns {Boolean} True iff elem is a non-HTML XML node
*/
isXML = Sizzle.isXML = function( elem ) {
var namespace = elem.namespaceURI,
docElem = ( elem.ownerDocument || elem ).documentElement;
// Support: IE <=8
// Assume HTML when documentElement doesn't yet exist, such as inside loading iframes
// https://bugs.jquery.com/ticket/4833
return !rhtml.test( namespace || docElem && docElem.nodeName || "HTML" );
};
/**
* Sets document-related variables once based on the current document
* @param {Element|Object} [doc] An element or document object to use to set the document
* @returns {Object} Returns the current document
*/
setDocument = Sizzle.setDocument = function( node ) {
var hasCompare, subWindow,
doc = node ? node.ownerDocument || node : preferredDoc;
// Return early if doc is invalid or already selected
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
if ( doc == document || doc.nodeType !== 9 || !doc.documentElement ) {
return document;
}
// Update global variables
document = doc;
docElem = document.documentElement;
documentIsHTML = !isXML( document );
// Support: IE 9 - 11+, Edge 12 - 18+
// Accessing iframe documents after unload throws "permission denied" errors (jQuery #13936)
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
if ( preferredDoc != document &&
( subWindow = document.defaultView ) && subWindow.top !== subWindow ) {
// Support: IE 11, Edge
if ( subWindow.addEventListener ) {
subWindow.addEventListener( "unload", unloadHandler, false );
// Support: IE 9 - 10 only
} else if ( subWindow.attachEvent ) {
subWindow.attachEvent( "onunload", unloadHandler );
}
}
// Support: IE 8 - 11+, Edge 12 - 18+, Chrome <=16 - 25 only, Firefox <=3.6 - 31 only,
// Safari 4 - 5 only, Opera <=11.6 - 12.x only
// IE/Edge & older browsers don't support the :scope pseudo-class.
// Support: Safari 6.0 only
// Safari 6.0 supports :scope but it's an alias of :root there.
support.scope = assert( function( el ) {
docElem.appendChild( el ).appendChild( document.createElement( "div" ) );
return typeof el.querySelectorAll !== "undefined" &&
!el.querySelectorAll( ":scope fieldset div" ).length;
} );
/* Attributes
---------------------------------------------------------------------- */
// Support: IE<8
// Verify that getAttribute really returns attributes and not properties
// (excepting IE8 booleans)
support.attributes = assert( function( el ) {
el.className = "i";
return !el.getAttribute( "className" );
} );
/* getElement(s)By*
---------------------------------------------------------------------- */
// Check if getElementsByTagName("*") returns only elements
support.getElementsByTagName = assert( function( el ) {
el.appendChild( document.createComment( "" ) );
return !el.getElementsByTagName( "*" ).length;
} );
// Support: IE<9
support.getElementsByClassName = rnative.test( document.getElementsByClassName );
// Support: IE<10
// Check if getElementById returns elements by name
// The broken getElementById methods don't pick up programmatically-set names,
// so use a roundabout getElementsByName test
support.getById = assert( function( el ) {
docElem.appendChild( el ).id = expando;
return !document.getElementsByName || !document.getElementsByName( expando ).length;
} );
// ID filter and find
if ( support.getById ) {
Expr.filter[ "ID" ] = function( id ) {
var attrId = id.replace( runescape, funescape );
return function( elem ) {
return elem.getAttribute( "id" ) === attrId;
};
};
Expr.find[ "ID" ] = function( id, context ) {
if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
var elem = context.getElementById( id );
return elem ? [ elem ] : [];
}
};
} else {
Expr.filter[ "ID" ] = function( id ) {
var attrId = id.replace( runescape, funescape );
return function( elem ) {
var node = typeof elem.getAttributeNode !== "undefined" &&
elem.getAttributeNode( "id" );
return node && node.value === attrId;
};
};
// Support: IE 6 - 7 only
// getElementById is not reliable as a find shortcut
Expr.find[ "ID" ] = function( id, context ) {
if ( typeof context.getElementById !== "undefined" && documentIsHTML ) {
var node, i, elems,
elem = context.getElementById( id );
if ( elem ) {
// Verify the id attribute
node = elem.getAttributeNode( "id" );
if ( node && node.value === id ) {
return [ elem ];
}
// Fall back on getElementsByName
elems = context.getElementsByName( id );
i = 0;
while ( ( elem = elems[ i++ ] ) ) {
node = elem.getAttributeNode( "id" );
if ( node && node.value === id ) {
return [ elem ];
}
}
}
return [];
}
};
}
// Tag
Expr.find[ "TAG" ] = support.getElementsByTagName ?
function( tag, context ) {
if ( typeof context.getElementsByTagName !== "undefined" ) {
return context.getElementsByTagName( tag );
// DocumentFragment nodes don't have gEBTN
} else if ( support.qsa ) {
return context.querySelectorAll( tag );
}
} :
function( tag, context ) {
var elem,
tmp = [],
i = 0,
// By happy coincidence, a (broken) gEBTN appears on DocumentFragment nodes too
results = context.getElementsByTagName( tag );
// Filter out possible comments
if ( tag === "*" ) {
while ( ( elem = results[ i++ ] ) ) {
if ( elem.nodeType === 1 ) {
tmp.push( elem );
}
}
return tmp;
}
return results;
};
// Class
Expr.find[ "CLASS" ] = support.getElementsByClassName && function( className, context ) {
if ( typeof context.getElementsByClassName !== "undefined" && documentIsHTML ) {
return context.getElementsByClassName( className );
}
};
/* QSA/matchesSelector
---------------------------------------------------------------------- */
// QSA and matchesSelector support
// matchesSelector(:active) reports false when true (IE9/Opera 11.5)
rbuggyMatches = [];
// qSa(:focus) reports false when true (Chrome 21)
// We allow this because of a bug in IE8/9 that throws an error
// whenever `document.activeElement` is accessed on an iframe
// So, we allow :focus to pass through QSA all the time to avoid the IE error
// See https://bugs.jquery.com/ticket/13378
rbuggyQSA = [];
if ( ( support.qsa = rnative.test( document.querySelectorAll ) ) ) {
// Build QSA regex
// Regex strategy adopted from Diego Perini
assert( function( el ) {
var input;
// Select is set to empty string on purpose
// This is to test IE's treatment of not explicitly
// setting a boolean content attribute,
// since its presence should be enough
// https://bugs.jquery.com/ticket/12359
docElem.appendChild( el ).innerHTML = "<a id='" + expando + "'></a>" +
"<select id='" + expando + "-\r\\' msallowcapture=''>" +
"<option selected=''></option></select>";
// Support: IE8, Opera 11-12.16
// Nothing should be selected when empty strings follow ^= or $= or *=
// The test attribute must be unknown in Opera but "safe" for WinRT
// https://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section
if ( el.querySelectorAll( "[msallowcapture^='']" ).length ) {
rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" );
}
// Support: IE8
// Boolean attributes and "value" are not treated correctly
if ( !el.querySelectorAll( "[selected]" ).length ) {
rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" );
}
// Support: Chrome<29, Android<4.4, Safari<7.0+, iOS<7.0+, PhantomJS<1.9.8+
if ( !el.querySelectorAll( "[id~=" + expando + "-]" ).length ) {
rbuggyQSA.push( "~=" );
}
// Support: IE 11+, Edge 15 - 18+
// IE 11/Edge don't find elements on a `[name='']` query in some cases.
// Adding a temporary attribute to the document before the selection works
// around the issue.
// Interestingly, IE 10 & older don't seem to have the issue.
input = document.createElement( "input" );
input.setAttribute( "name", "" );
el.appendChild( input );
if ( !el.querySelectorAll( "[name='']" ).length ) {
rbuggyQSA.push( "\\[" + whitespace + "*name" + whitespace + "*=" +
whitespace + "*(?:''|\"\")" );
}
// Webkit/Opera - :checked should return selected option elements
// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
// IE8 throws error here and will not see later tests
if ( !el.querySelectorAll( ":checked" ).length ) {
rbuggyQSA.push( ":checked" );
}
// Support: Safari 8+, iOS 8+
// https://bugs.webkit.org/show_bug.cgi?id=136851
// In-page `selector#id sibling-combinator selector` fails
if ( !el.querySelectorAll( "a#" + expando + "+*" ).length ) {
rbuggyQSA.push( ".#.+[+~]" );
}
// Support: Firefox <=3.6 - 5 only
// Old Firefox doesn't throw on a badly-escaped identifier.
el.querySelectorAll( "\\\f" );
rbuggyQSA.push( "[\\r\\n\\f]" );
} );
assert( function( el ) {
el.innerHTML = "<a href='' disabled='disabled'></a>" +
"<select disabled='disabled'><option/></select>";
// Support: Windows 8 Native Apps
// The type and name attributes are restricted during .innerHTML assignment
var input = document.createElement( "input" );
input.setAttribute( "type", "hidden" );
el.appendChild( input ).setAttribute( "name", "D" );
// Support: IE8
// Enforce case-sensitivity of name attribute
if ( el.querySelectorAll( "[name=d]" ).length ) {
rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" );
}
// FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled)
// IE8 throws error here and will not see later tests
if ( el.querySelectorAll( ":enabled" ).length !== 2 ) {
rbuggyQSA.push( ":enabled", ":disabled" );
}
// Support: IE9-11+
// IE's :disabled selector does not pick up the children of disabled fieldsets
docElem.appendChild( el ).disabled = true;
if ( el.querySelectorAll( ":disabled" ).length !== 2 ) {
rbuggyQSA.push( ":enabled", ":disabled" );
}
// Support: Opera 10 - 11 only
// Opera 10-11 does not throw on post-comma invalid pseudos
el.querySelectorAll( "*,:x" );
rbuggyQSA.push( ",.*:" );
} );
}
if ( ( support.matchesSelector = rnative.test( ( matches = docElem.matches ||
docElem.webkitMatchesSelector ||
docElem.mozMatchesSelector ||
docElem.oMatchesSelector ||
docElem.msMatchesSelector ) ) ) ) {
assert( function( el ) {
// Check to see if it's possible to do matchesSelector
// on a disconnected node (IE 9)
support.disconnectedMatch = matches.call( el, "*" );
// This should fail with an exception
// Gecko does not error, returns false instead
matches.call( el, "[s!='']:x" );
rbuggyMatches.push( "!=", pseudos );
} );
}
rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join( "|" ) );
rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join( "|" ) );
/* Contains
---------------------------------------------------------------------- */
hasCompare = rnative.test( docElem.compareDocumentPosition );
// Element contains another
// Purposefully self-exclusive
// As in, an element does not contain itself
contains = hasCompare || rnative.test( docElem.contains ) ?
function( a, b ) {
var adown = a.nodeType === 9 ? a.documentElement : a,
bup = b && b.parentNode;
return a === bup || !!( bup && bup.nodeType === 1 && (
adown.contains ?
adown.contains( bup ) :
a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16
) );
} :
function( a, b ) {
if ( b ) {
while ( ( b = b.parentNode ) ) {
if ( b === a ) {
return true;
}
}
}
return false;
};
/* Sorting
---------------------------------------------------------------------- */
// Document order sorting
sortOrder = hasCompare ?
function( a, b ) {
// Flag for duplicate removal
if ( a === b ) {
hasDuplicate = true;
return 0;
}
// Sort on method existence if only one input has compareDocumentPosition
var compare = !a.compareDocumentPosition - !b.compareDocumentPosition;
if ( compare ) {
return compare;
}
// Calculate position if both inputs belong to the same document
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
compare = ( a.ownerDocument || a ) == ( b.ownerDocument || b ) ?
a.compareDocumentPosition( b ) :
// Otherwise we know they are disconnected
1;
// Disconnected nodes
if ( compare & 1 ||
( !support.sortDetached && b.compareDocumentPosition( a ) === compare ) ) {
// Choose the first element that is related to our preferred document
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
if ( a == document || a.ownerDocument == preferredDoc &&
contains( preferredDoc, a ) ) {
return -1;
}
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
if ( b == document || b.ownerDocument == preferredDoc &&
contains( preferredDoc, b ) ) {
return 1;
}
// Maintain original order
return sortInput ?
( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
0;
}
return compare & 4 ? -1 : 1;
} :
function( a, b ) {
// Exit early if the nodes are identical
if ( a === b ) {
hasDuplicate = true;
return 0;
}
var cur,
i = 0,
aup = a.parentNode,
bup = b.parentNode,
ap = [ a ],
bp = [ b ];
// Parentless nodes are either documents or disconnected
if ( !aup || !bup ) {
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
/* eslint-disable eqeqeq */
return a == document ? -1 :
b == document ? 1 :
/* eslint-enable eqeqeq */
aup ? -1 :
bup ? 1 :
sortInput ?
( indexOf( sortInput, a ) - indexOf( sortInput, b ) ) :
0;
// If the nodes are siblings, we can do a quick check
} else if ( aup === bup ) {
return siblingCheck( a, b );
}
// Otherwise we need full lists of their ancestors for comparison
cur = a;
while ( ( cur = cur.parentNode ) ) {
ap.unshift( cur );
}
cur = b;
while ( ( cur = cur.parentNode ) ) {
bp.unshift( cur );
}
// Walk down the tree looking for a discrepancy
while ( ap[ i ] === bp[ i ] ) {
i++;
}
return i ?
// Do a sibling check if the nodes have a common ancestor
siblingCheck( ap[ i ], bp[ i ] ) :
// Otherwise nodes in our document sort first
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
/* eslint-disable eqeqeq */
ap[ i ] == preferredDoc ? -1 :
bp[ i ] == preferredDoc ? 1 :
/* eslint-enable eqeqeq */
0;
};
return document;
};
Sizzle.matches = function( expr, elements ) {
return Sizzle( expr, null, null, elements );
};
Sizzle.matchesSelector = function( elem, expr ) {
setDocument( elem );
if ( support.matchesSelector && documentIsHTML &&
!nonnativeSelectorCache[ expr + " " ] &&
( !rbuggyMatches || !rbuggyMatches.test( expr ) ) &&
( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) {
try {
var ret = matches.call( elem, expr );
// IE 9's matchesSelector returns false on disconnected nodes
if ( ret || support.disconnectedMatch ||
// As well, disconnected nodes are said to be in a document
// fragment in IE 9
elem.document && elem.document.nodeType !== 11 ) {
return ret;
}
} catch ( e ) {
nonnativeSelectorCache( expr, true );
}
}
return Sizzle( expr, document, null, [ elem ] ).length > 0;
};
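// Illustrative call (not part of the original source):
//
//   Sizzle.matchesSelector( elem, "li.active" )
//
// tries the native matches() first and falls back to a full Sizzle run with
// the element as the seed whenever the selector looks buggy for this browser.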
Sizzle.contains = function( context, elem ) {
// Set document vars if needed
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
if ( ( context.ownerDocument || context ) != document ) {
setDocument( context );
}
return contains( context, elem );
};
Sizzle.attr = function( elem, name ) {
// Set document vars if needed
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
if ( ( elem.ownerDocument || elem ) != document ) {
setDocument( elem );
}
var fn = Expr.attrHandle[ name.toLowerCase() ],
// Don't get fooled by Object.prototype properties (jQuery #13807)
val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ?
fn( elem, name, !documentIsHTML ) :
undefined;
return val !== undefined ?
val :
support.attributes || !documentIsHTML ?
elem.getAttribute( name ) :
( val = elem.getAttributeNode( name ) ) && val.specified ?
val.value :
null;
};
Sizzle.escape = function( sel ) {
return ( sel + "" ).replace( rcssescape, fcssescape );
};
Sizzle.error = function( msg ) {
throw new Error( "Syntax error, unrecognized expression: " + msg );
};
/**
* Document sorting and removing duplicates
* @param {ArrayLike} results
*/
Sizzle.uniqueSort = function( results ) {
var elem,
duplicates = [],
j = 0,
i = 0;
// Unless we *know* we can detect duplicates, assume their presence
hasDuplicate = !support.detectDuplicates;
sortInput = !support.sortStable && results.slice( 0 );
results.sort( sortOrder );
if ( hasDuplicate ) {
while ( ( elem = results[ i++ ] ) ) {
if ( elem === results[ i ] ) {
j = duplicates.push( i );
}
}
while ( j-- ) {
results.splice( duplicates[ j ], 1 );
}
}
// Clear input after sorting to release objects
// See https://github.com/jquery/sizzle/pull/225
sortInput = null;
return results;
};
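// Illustrative call (not part of the original source): given elements a and b
// in document order,
//
//   Sizzle.uniqueSort( [ b, a, a ] )   // => [ a, b ]
//
// i.e. results end up in document order with duplicates spliced out.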
/**
* Utility function for retrieving the text value of an array of DOM nodes
* @param {Array|Element} elem
*/
getText = Sizzle.getText = function( elem ) {
var node,
ret = "",
i = 0,
nodeType = elem.nodeType;
if ( !nodeType ) {
// If no nodeType, this is expected to be an array
while ( ( node = elem[ i++ ] ) ) {
// Do not traverse comment nodes
ret += getText( node );
}
} else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) {
// Use textContent for elements
// innerText usage removed for consistency of new lines (jQuery #11153)
if ( typeof elem.textContent === "string" ) {
return elem.textContent;
} else {
// Traverse its children
for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
ret += getText( elem );
}
}
} else if ( nodeType === 3 || nodeType === 4 ) {
return elem.nodeValue;
}
// Do not include comment or processing instruction nodes
return ret;
};
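// Illustrative result (not part of the original source): for
//
//   <p>Hi <b>there</b><!-- ignored --></p>
//
// getText( p ) === "Hi there" — comment and processing-instruction nodes
// contribute nothing.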
Expr = Sizzle.selectors = {
// Can be adjusted by the user
cacheLength: 50,
createPseudo: markFunction,
match: matchExpr,
attrHandle: {},
find: {},
relative: {
">": { dir: "parentNode", first: true },
" ": { dir: "parentNode" },
"+": { dir: "previousSibling", first: true },
"~": { dir: "previousSibling" }
},
preFilter: {
"ATTR": function( match ) {
match[ 1 ] = match[ 1 ].replace( runescape, funescape );
// Move the given value to match[3] whether quoted or unquoted
match[ 3 ] = ( match[ 3 ] || match[ 4 ] ||
match[ 5 ] || "" ).replace( runescape, funescape );
if ( match[ 2 ] === "~=" ) {
match[ 3 ] = " " + match[ 3 ] + " ";
}
return match.slice( 0, 4 );
},
"CHILD": function( match ) {
/* matches from matchExpr["CHILD"]
1 type (only|nth|...)
2 what (child|of-type)
3 argument (even|odd|\d*|\d*n([+-]\d+)?|...)
4 xn-component of xn+y argument ([+-]?\d*n|)
5 sign of xn-component
6 x of xn-component
7 sign of y-component
8 y of y-component
*/
match[ 1 ] = match[ 1 ].toLowerCase();
if ( match[ 1 ].slice( 0, 3 ) === "nth" ) {
// nth-* requires argument
if ( !match[ 3 ] ) {
Sizzle.error( match[ 0 ] );
}
// numeric x and y parameters for Expr.filter.CHILD
// remember that false/true cast respectively to 0/1
match[ 4 ] = +( match[ 4 ] ?
match[ 5 ] + ( match[ 6 ] || 1 ) :
2 * ( match[ 3 ] === "even" || match[ 3 ] === "odd" ) );
match[ 5 ] = +( ( match[ 7 ] + match[ 8 ] ) || match[ 3 ] === "odd" );
// other types prohibit arguments
} else if ( match[ 3 ] ) {
Sizzle.error( match[ 0 ] );
}
return match;
},
"PSEUDO": function( match ) {
var excess,
unquoted = !match[ 6 ] && match[ 2 ];
if ( matchExpr[ "CHILD" ].test( match[ 0 ] ) ) {
return null;
}
// Accept quoted arguments as-is
if ( match[ 3 ] ) {
match[ 2 ] = match[ 4 ] || match[ 5 ] || "";
// Strip excess characters from unquoted arguments
} else if ( unquoted && rpseudo.test( unquoted ) &&
// Get excess from tokenize (recursively)
( excess = tokenize( unquoted, true ) ) &&
// advance to the next closing parenthesis
( excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length ) ) {
// excess is a negative index
match[ 0 ] = match[ 0 ].slice( 0, excess );
match[ 2 ] = unquoted.slice( 0, excess );
}
// Return only captures needed by the pseudo filter method (type and argument)
return match.slice( 0, 3 );
}
},
filter: {
"TAG": function( nodeNameSelector ) {
var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase();
return nodeNameSelector === "*" ?
function() {
return true;
} :
function( elem ) {
return elem.nodeName && elem.nodeName.toLowerCase() === nodeName;
};
},
"CLASS": function( className ) {
var pattern = classCache[ className + " " ];
return pattern ||
( pattern = new RegExp( "(^|" + whitespace +
")" + className + "(" + whitespace + "|$)" ) ) && classCache(
className, function( elem ) {
return pattern.test(
typeof elem.className === "string" && elem.className ||
typeof elem.getAttribute !== "undefined" &&
elem.getAttribute( "class" ) ||
""
);
} );
},
"ATTR": function( name, operator, check ) {
return function( elem ) {
var result = Sizzle.attr( elem, name );
if ( result == null ) {
return operator === "!=";
}
if ( !operator ) {
return true;
}
result += "";
/* eslint-disable max-len */
return operator === "=" ? result === check :
operator === "!=" ? result !== check :
operator === "^=" ? check && result.indexOf( check ) === 0 :
operator === "*=" ? check && result.indexOf( check ) > -1 :
operator === "$=" ? check && result.slice( -check.length ) === check :
operator === "~=" ? ( " " + result.replace( rwhitespace, " " ) + " " ).indexOf( check ) > -1 :
operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" :
false;
/* eslint-enable max-len */
};
},
"CHILD": function( type, what, _argument, first, last ) {
var simple = type.slice( 0, 3 ) !== "nth",
forward = type.slice( -4 ) !== "last",
ofType = what === "of-type";
return first === 1 && last === 0 ?
// Shortcut for :nth-*(n)
function( elem ) {
return !!elem.parentNode;
} :
function( elem, _context, xml ) {
var cache, uniqueCache, outerCache, node, nodeIndex, start,
dir = simple !== forward ? "nextSibling" : "previousSibling",
parent = elem.parentNode,
name = ofType && elem.nodeName.toLowerCase(),
useCache = !xml && !ofType,
diff = false;
if ( parent ) {
// :(first|last|only)-(child|of-type)
if ( simple ) {
while ( dir ) {
node = elem;
while ( ( node = node[ dir ] ) ) {
if ( ofType ?
node.nodeName.toLowerCase() === name :
node.nodeType === 1 ) {
return false;
}
}
// Reverse direction for :only-* (if we haven't yet done so)
start = dir = type === "only" && !start && "nextSibling";
}
return true;
}
start = [ forward ? parent.firstChild : parent.lastChild ];
// non-xml :nth-child(...) stores cache data on `parent`
if ( forward && useCache ) {
// Seek `elem` from a previously-cached index
// ...in a gzip-friendly way
node = parent;
outerCache = node[ expando ] || ( node[ expando ] = {} );
// Support: IE <9 only
// Defend against cloned attroperties (jQuery gh-1709)
uniqueCache = outerCache[ node.uniqueID ] ||
( outerCache[ node.uniqueID ] = {} );
cache = uniqueCache[ type ] || [];
nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
diff = nodeIndex && cache[ 2 ];
node = nodeIndex && parent.childNodes[ nodeIndex ];
while ( ( node = ++nodeIndex && node && node[ dir ] ||
// Fallback to seeking `elem` from the start
( diff = nodeIndex = 0 ) || start.pop() ) ) {
// When found, cache indexes on `parent` and break
if ( node.nodeType === 1 && ++diff && node === elem ) {
uniqueCache[ type ] = [ dirruns, nodeIndex, diff ];
break;
}
}
} else {
// Use previously-cached element index if available
if ( useCache ) {
// ...in a gzip-friendly way
node = elem;
outerCache = node[ expando ] || ( node[ expando ] = {} );
// Support: IE <9 only
// Defend against cloned attroperties (jQuery gh-1709)
uniqueCache = outerCache[ node.uniqueID ] ||
( outerCache[ node.uniqueID ] = {} );
cache = uniqueCache[ type ] || [];
nodeIndex = cache[ 0 ] === dirruns && cache[ 1 ];
diff = nodeIndex;
}
// xml :nth-child(...)
// or :nth-last-child(...) or :nth(-last)?-of-type(...)
if ( diff === false ) {
// Use the same loop as above to seek `elem` from the start
while ( ( node = ++nodeIndex && node && node[ dir ] ||
( diff = nodeIndex = 0 ) || start.pop() ) ) {
if ( ( ofType ?
node.nodeName.toLowerCase() === name :
node.nodeType === 1 ) &&
++diff ) {
// Cache the index of each encountered element
if ( useCache ) {
outerCache = node[ expando ] ||
( node[ expando ] = {} );
// Support: IE <9 only
// Defend against cloned attroperties (jQuery gh-1709)
uniqueCache = outerCache[ node.uniqueID ] ||
( outerCache[ node.uniqueID ] = {} );
uniqueCache[ type ] = [ dirruns, diff ];
}
if ( node === elem ) {
break;
}
}
}
}
}
// Incorporate the offset, then check against cycle size
diff -= last;
return diff === first || ( diff % first === 0 && diff / first >= 0 );
}
};
},
"PSEUDO": function( pseudo, argument ) {
// pseudo-class names are case-insensitive
// http://www.w3.org/TR/selectors/#pseudo-classes
// Prioritize by case sensitivity in case custom pseudos are added with uppercase letters
// Remember that setFilters inherits from pseudos
var args,
fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] ||
Sizzle.error( "unsupported pseudo: " + pseudo );
// The user may use createPseudo to indicate that
// arguments are needed to create the filter function
// just as Sizzle does
if ( fn[ expando ] ) {
return fn( argument );
}
// But maintain support for old signatures
if ( fn.length > 1 ) {
args = [ pseudo, pseudo, "", argument ];
return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ?
markFunction( function( seed, matches ) {
var idx,
matched = fn( seed, argument ),
i = matched.length;
while ( i-- ) {
idx = indexOf( seed, matched[ i ] );
seed[ idx ] = !( matches[ idx ] = matched[ i ] );
}
} ) :
function( elem ) {
return fn( elem, 0, args );
};
}
return fn;
}
},
pseudos: {
// Potentially complex pseudos
"not": markFunction( function( selector ) {
// Trim the selector passed to compile
// to avoid treating leading and trailing
// spaces as combinators
var input = [],
results = [],
matcher = compile( selector.replace( rtrim, "$1" ) );
return matcher[ expando ] ?
markFunction( function( seed, matches, _context, xml ) {
var elem,
unmatched = matcher( seed, null, xml, [] ),
i = seed.length;
// Match elements unmatched by `matcher`
while ( i-- ) {
if ( ( elem = unmatched[ i ] ) ) {
seed[ i ] = !( matches[ i ] = elem );
}
}
} ) :
function( elem, _context, xml ) {
input[ 0 ] = elem;
matcher( input, null, xml, results );
// Don't keep the element (issue #299)
input[ 0 ] = null;
return !results.pop();
};
} ),
"has": markFunction( function( selector ) {
return function( elem ) {
return Sizzle( selector, elem ).length > 0;
};
} ),
"contains": markFunction( function( text ) {
text = text.replace( runescape, funescape );
return function( elem ) {
return ( elem.textContent || getText( elem ) ).indexOf( text ) > -1;
};
} ),
// "Whether an element is represented by a :lang() selector
// is based solely on the element's language value
// being equal to the identifier C,
// or beginning with the identifier C immediately followed by "-".
// The matching of C against the element's language value is performed case-insensitively.
// The identifier C does not have to be a valid language name."
// http://www.w3.org/TR/selectors/#lang-pseudo
"lang": markFunction( function( lang ) {
// lang value must be a valid identifier
if ( !ridentifier.test( lang || "" ) ) {
Sizzle.error( "unsupported lang: " + lang );
}
lang = lang.replace( runescape, funescape ).toLowerCase();
return function( elem ) {
var elemLang;
do {
if ( ( elemLang = documentIsHTML ?
elem.lang :
elem.getAttribute( "xml:lang" ) || elem.getAttribute( "lang" ) ) ) {
elemLang = elemLang.toLowerCase();
return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0;
}
} while ( ( elem = elem.parentNode ) && elem.nodeType === 1 );
return false;
};
} ),
// Miscellaneous
"target": function( elem ) {
var hash = window.location && window.location.hash;
return hash && hash.slice( 1 ) === elem.id;
},
"root": function( elem ) {
return elem === docElem;
},
"focus": function( elem ) {
return elem === document.activeElement &&
( !document.hasFocus || document.hasFocus() ) &&
!!( elem.type || elem.href || ~elem.tabIndex );
},
// Boolean properties
"enabled": createDisabledPseudo( false ),
"disabled": createDisabledPseudo( true ),
"checked": function( elem ) {
// In CSS3, :checked should return both checked and selected elements
// http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked
var nodeName = elem.nodeName.toLowerCase();
return ( nodeName === "input" && !!elem.checked ) ||
( nodeName === "option" && !!elem.selected );
},
"selected": function( elem ) {
// Accessing this property makes selected-by-default
// options in Safari work properly
if ( elem.parentNode ) {
// eslint-disable-next-line no-unused-expressions
elem.parentNode.selectedIndex;
}
return elem.selected === true;
},
// Contents
"empty": function( elem ) {
// http://www.w3.org/TR/selectors/#empty-pseudo
// :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5),
// but not by others (comment: 8; processing instruction: 7; etc.)
// nodeType < 6 works because attributes (2) do not appear as children
for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) {
if ( elem.nodeType < 6 ) {
return false;
}
}
return true;
},
"parent": function( elem ) {
return !Expr.pseudos[ "empty" ]( elem );
},
// Element/input types
"header": function( elem ) {
return rheader.test( elem.nodeName );
},
"input": function( elem ) {
return rinputs.test( elem.nodeName );
},
"button": function( elem ) {
var name = elem.nodeName.toLowerCase();
return name === "input" && elem.type === "button" || name === "button";
},
"text": function( elem ) {
var attr;
return elem.nodeName.toLowerCase() === "input" &&
elem.type === "text" &&
// Support: IE<8
// New HTML5 attribute values (e.g., "search") appear with elem.type === "text"
( ( attr = elem.getAttribute( "type" ) ) == null ||
attr.toLowerCase() === "text" );
},
// Position-in-collection
"first": createPositionalPseudo( function() {
return [ 0 ];
} ),
"last": createPositionalPseudo( function( _matchIndexes, length ) {
return [ length - 1 ];
} ),
"eq": createPositionalPseudo( function( _matchIndexes, length, argument ) {
return [ argument < 0 ? argument + length : argument ];
} ),
"even": createPositionalPseudo( function( matchIndexes, length ) {
var i = 0;
for ( ; i < length; i += 2 ) {
matchIndexes.push( i );
}
return matchIndexes;
} ),
"odd": createPositionalPseudo( function( matchIndexes, length ) {
var i = 1;
for ( ; i < length; i += 2 ) {
matchIndexes.push( i );
}
return matchIndexes;
} ),
"lt": createPositionalPseudo( function( matchIndexes, length, argument ) {
var i = argument < 0 ?
argument + length :
argument > length ?
length :
argument;
for ( ; --i >= 0; ) {
matchIndexes.push( i );
}
return matchIndexes;
} ),
"gt": createPositionalPseudo( function( matchIndexes, length, argument ) {
var i = argument < 0 ? argument + length : argument;
for ( ; ++i < length; ) {
matchIndexes.push( i );
}
return matchIndexes;
} )
}
};
Expr.pseudos[ "nth" ] = Expr.pseudos[ "eq" ];
// Add button/input type pseudos
for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) {
Expr.pseudos[ i ] = createInputPseudo( i );
}
for ( i in { submit: true, reset: true } ) {
Expr.pseudos[ i ] = createButtonPseudo( i );
}
// Easy API for creating new setFilters
function setFilters() {}
setFilters.prototype = Expr.filters = Expr.pseudos;
Expr.setFilters = new setFilters();
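// Illustrative extension point (not exercised by the library itself):
// new pseudos can be registered on this table at runtime, e.g.:
//   Sizzle.selectors.pseudos.external = function( elem ) {
//       return !!elem.hostname && elem.hostname !== window.location.hostname;
//   };
// after which Sizzle( "a:external" ) matches cross-host links.
// jQuery re-exposes the same table below as jQuery.expr.pseudos.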
tokenize = Sizzle.tokenize = function( selector, parseOnly ) {
var matched, match, tokens, type,
soFar, groups, preFilters,
cached = tokenCache[ selector + " " ];
if ( cached ) {
return parseOnly ? 0 : cached.slice( 0 );
}
soFar = selector;
groups = [];
preFilters = Expr.preFilter;
while ( soFar ) {
// Comma and first run
if ( !matched || ( match = rcomma.exec( soFar ) ) ) {
if ( match ) {
// Don't consume trailing commas as valid
soFar = soFar.slice( match[ 0 ].length ) || soFar;
}
groups.push( ( tokens = [] ) );
}
matched = false;
// Combinators
if ( ( match = rcombinators.exec( soFar ) ) ) {
matched = match.shift();
tokens.push( {
value: matched,
// Cast descendant combinators to space
type: match[ 0 ].replace( rtrim, " " )
} );
soFar = soFar.slice( matched.length );
}
// Filters
for ( type in Expr.filter ) {
if ( ( match = matchExpr[ type ].exec( soFar ) ) && ( !preFilters[ type ] ||
( match = preFilters[ type ]( match ) ) ) ) {
matched = match.shift();
tokens.push( {
value: matched,
type: type,
matches: match
} );
soFar = soFar.slice( matched.length );
}
}
if ( !matched ) {
break;
}
}
// Return the length of the invalid excess
// if we're just parsing
// Otherwise, throw an error or return tokens
return parseOnly ?
soFar.length :
soFar ?
Sizzle.error( selector ) :
// Cache the tokens
tokenCache( selector, groups ).slice( 0 );
};
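// Illustrative output shape (not normative): tokenize( "div > .item" )
// yields one group of token objects roughly like:
//   [ { value: "div",   type: "TAG",   matches: [ "div" ] },
//     { value: " > ",   type: ">" },
//     { value: ".item", type: "CLASS", matches: [ "item" ] } ]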
function toSelector( tokens ) {
var i = 0,
len = tokens.length,
selector = "";
for ( ; i < len; i++ ) {
selector += tokens[ i ].value;
}
return selector;
}
function addCombinator( matcher, combinator, base ) {
var dir = combinator.dir,
skip = combinator.next,
key = skip || dir,
checkNonElements = base && key === "parentNode",
doneName = done++;
return combinator.first ?
// Check against closest ancestor/preceding element
function( elem, context, xml ) {
while ( ( elem = elem[ dir ] ) ) {
if ( elem.nodeType === 1 || checkNonElements ) {
return matcher( elem, context, xml );
}
}
return false;
} :
// Check against all ancestor/preceding elements
function( elem, context, xml ) {
var oldCache, uniqueCache, outerCache,
newCache = [ dirruns, doneName ];
// We can't set arbitrary data on XML nodes, so they don't benefit from combinator caching
if ( xml ) {
while ( ( elem = elem[ dir ] ) ) {
if ( elem.nodeType === 1 || checkNonElements ) {
if ( matcher( elem, context, xml ) ) {
return true;
}
}
}
} else {
while ( ( elem = elem[ dir ] ) ) {
if ( elem.nodeType === 1 || checkNonElements ) {
outerCache = elem[ expando ] || ( elem[ expando ] = {} );
// Support: IE <9 only
// Defend against cloned attroperties (jQuery gh-1709)
uniqueCache = outerCache[ elem.uniqueID ] ||
( outerCache[ elem.uniqueID ] = {} );
if ( skip && skip === elem.nodeName.toLowerCase() ) {
elem = elem[ dir ] || elem;
} else if ( ( oldCache = uniqueCache[ key ] ) &&
oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) {
// Assign to newCache so results back-propagate to previous elements
return ( newCache[ 2 ] = oldCache[ 2 ] );
} else {
// Reuse newCache so results back-propagate to previous elements
uniqueCache[ key ] = newCache;
// A match means we're done; a fail means we have to keep checking
if ( ( newCache[ 2 ] = matcher( elem, context, xml ) ) ) {
return true;
}
}
}
}
}
return false;
};
}
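// For reference, the combinator descriptors consumed here are defined
// earlier in Expr.relative and look like:
//   ">": { dir: "parentNode", first: true }       (child)
//   " ": { dir: "parentNode" }                    (descendant)
//   "+": { dir: "previousSibling", first: true }  (adjacent sibling)
//   "~": { dir: "previousSibling" }               (general sibling)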
function elementMatcher( matchers ) {
return matchers.length > 1 ?
function( elem, context, xml ) {
var i = matchers.length;
while ( i-- ) {
if ( !matchers[ i ]( elem, context, xml ) ) {
return false;
}
}
return true;
} :
matchers[ 0 ];
}
function multipleContexts( selector, contexts, results ) {
var i = 0,
len = contexts.length;
for ( ; i < len; i++ ) {
Sizzle( selector, contexts[ i ], results );
}
return results;
}
function condense( unmatched, map, filter, context, xml ) {
var elem,
newUnmatched = [],
i = 0,
len = unmatched.length,
mapped = map != null;
for ( ; i < len; i++ ) {
if ( ( elem = unmatched[ i ] ) ) {
if ( !filter || filter( elem, context, xml ) ) {
newUnmatched.push( elem );
if ( mapped ) {
map.push( i );
}
}
}
}
return newUnmatched;
}
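// e.g. (illustrative) given map = [], condense( [ a, undefined, b ], map )
// returns [ a, b ] and leaves map holding the surviving indices [ 0, 2 ].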
function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) {
if ( postFilter && !postFilter[ expando ] ) {
postFilter = setMatcher( postFilter );
}
if ( postFinder && !postFinder[ expando ] ) {
postFinder = setMatcher( postFinder, postSelector );
}
return markFunction( function( seed, results, context, xml ) {
var temp, i, elem,
preMap = [],
postMap = [],
preexisting = results.length,
// Get initial elements from seed or context
elems = seed || multipleContexts(
selector || "*",
context.nodeType ? [ context ] : context,
[]
),
// Prefilter to get matcher input, preserving a map for seed-results synchronization
matcherIn = preFilter && ( seed || !selector ) ?
condense( elems, preMap, preFilter, context, xml ) :
elems,
matcherOut = matcher ?
// If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results,
postFinder || ( seed ? preFilter : preexisting || postFilter ) ?
// ...intermediate processing is necessary
[] :
// ...otherwise use results directly
results :
matcherIn;
// Find primary matches
if ( matcher ) {
matcher( matcherIn, matcherOut, context, xml );
}
// Apply postFilter
if ( postFilter ) {
temp = condense( matcherOut, postMap );
postFilter( temp, [], context, xml );
// Un-match failing elements by moving them back to matcherIn
i = temp.length;
while ( i-- ) {
if ( ( elem = temp[ i ] ) ) {
matcherOut[ postMap[ i ] ] = !( matcherIn[ postMap[ i ] ] = elem );
}
}
}
if ( seed ) {
if ( postFinder || preFilter ) {
if ( postFinder ) {
// Get the final matcherOut by condensing this intermediate into postFinder contexts
temp = [];
i = matcherOut.length;
while ( i-- ) {
if ( ( elem = matcherOut[ i ] ) ) {
// Restore matcherIn since elem is not yet a final match
temp.push( ( matcherIn[ i ] = elem ) );
}
}
postFinder( null, ( matcherOut = [] ), temp, xml );
}
// Move matched elements from seed to results to keep them synchronized
i = matcherOut.length;
while ( i-- ) {
if ( ( elem = matcherOut[ i ] ) &&
( temp = postFinder ? indexOf( seed, elem ) : preMap[ i ] ) > -1 ) {
seed[ temp ] = !( results[ temp ] = elem );
}
}
}
// Add elements to results, through postFinder if defined
} else {
matcherOut = condense(
matcherOut === results ?
matcherOut.splice( preexisting, matcherOut.length ) :
matcherOut
);
if ( postFinder ) {
postFinder( null, results, matcherOut, xml );
} else {
push.apply( results, matcherOut );
}
}
} );
}
function matcherFromTokens( tokens ) {
var checkContext, matcher, j,
len = tokens.length,
leadingRelative = Expr.relative[ tokens[ 0 ].type ],
implicitRelative = leadingRelative || Expr.relative[ " " ],
i = leadingRelative ? 1 : 0,
// The foundational matcher ensures that elements are reachable from top-level context(s)
matchContext = addCombinator( function( elem ) {
return elem === checkContext;
}, implicitRelative, true ),
matchAnyContext = addCombinator( function( elem ) {
return indexOf( checkContext, elem ) > -1;
}, implicitRelative, true ),
matchers = [ function( elem, context, xml ) {
var ret = ( !leadingRelative && ( xml || context !== outermostContext ) ) || (
( checkContext = context ).nodeType ?
matchContext( elem, context, xml ) :
matchAnyContext( elem, context, xml ) );
// Avoid hanging onto element (issue #299)
checkContext = null;
return ret;
} ];
for ( ; i < len; i++ ) {
if ( ( matcher = Expr.relative[ tokens[ i ].type ] ) ) {
matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ];
} else {
matcher = Expr.filter[ tokens[ i ].type ].apply( null, tokens[ i ].matches );
// Return special upon seeing a positional matcher
if ( matcher[ expando ] ) {
// Find the next relative operator (if any) for proper handling
j = ++i;
for ( ; j < len; j++ ) {
if ( Expr.relative[ tokens[ j ].type ] ) {
break;
}
}
return setMatcher(
i > 1 && elementMatcher( matchers ),
i > 1 && toSelector(
// If the preceding token was a descendant combinator, insert an implicit any-element `*`
tokens
.slice( 0, i - 1 )
.concat( { value: tokens[ i - 2 ].type === " " ? "*" : "" } )
).replace( rtrim, "$1" ),
matcher,
i < j && matcherFromTokens( tokens.slice( i, j ) ),
j < len && matcherFromTokens( ( tokens = tokens.slice( j ) ) ),
j < len && toSelector( tokens )
);
}
matchers.push( matcher );
}
}
return elementMatcher( matchers );
}
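// Rough example: for "div:first span", the positional ":first" triggers the
// setMatcher branch above: the element matchers built so far handle "div",
// ":first" becomes `matcher`, and the trailing " span" tokens are compiled
// recursively into the postFinder.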
function matcherFromGroupMatchers( elementMatchers, setMatchers ) {
var bySet = setMatchers.length > 0,
byElement = elementMatchers.length > 0,
superMatcher = function( seed, context, xml, results, outermost ) {
var elem, j, matcher,
matchedCount = 0,
i = "0",
unmatched = seed && [],
setMatched = [],
contextBackup = outermostContext,
// We must always have either seed elements or outermost context
elems = seed || byElement && Expr.find[ "TAG" ]( "*", outermost ),
// Use integer dirruns iff this is the outermost matcher
dirrunsUnique = ( dirruns += contextBackup == null ? 1 : Math.random() || 0.1 ),
len = elems.length;
if ( outermost ) {
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
outermostContext = context == document || context || outermost;
}
// Add elements passing elementMatchers directly to results
// Support: IE<9, Safari
// Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id
for ( ; i !== len && ( elem = elems[ i ] ) != null; i++ ) {
if ( byElement && elem ) {
j = 0;
// Support: IE 11+, Edge 17 - 18+
// IE/Edge sometimes throw a "Permission denied" error when strict-comparing
// two documents; shallow comparisons work.
// eslint-disable-next-line eqeqeq
if ( !context && elem.ownerDocument != document ) {
setDocument( elem );
xml = !documentIsHTML;
}
while ( ( matcher = elementMatchers[ j++ ] ) ) {
if ( matcher( elem, context || document, xml ) ) {
results.push( elem );
break;
}
}
if ( outermost ) {
dirruns = dirrunsUnique;
}
}
// Track unmatched elements for set filters
if ( bySet ) {
// They will have gone through all possible matchers
if ( ( elem = !matcher && elem ) ) {
matchedCount--;
}
// Lengthen the array for every element, matched or not
if ( seed ) {
unmatched.push( elem );
}
}
}
// `i` is now the count of elements visited above, and adding it to `matchedCount`
// makes the latter nonnegative.
matchedCount += i;
// Apply set filters to unmatched elements
// NOTE: This can be skipped if there are no unmatched elements (i.e., `matchedCount`
// equals `i`), unless we didn't visit _any_ elements in the above loop because we have
// no element matchers and no seed.
// Incrementing an initially-string "0" `i` allows `i` to remain a string only in that
// case, which will result in a "00" `matchedCount` that differs from `i` but is also
// numerically zero.
if ( bySet && i !== matchedCount ) {
j = 0;
while ( ( matcher = setMatchers[ j++ ] ) ) {
matcher( unmatched, setMatched, context, xml );
}
if ( seed ) {
// Reintegrate element matches to eliminate the need for sorting
if ( matchedCount > 0 ) {
while ( i-- ) {
if ( !( unmatched[ i ] || setMatched[ i ] ) ) {
setMatched[ i ] = pop.call( results );
}
}
}
// Discard index placeholder values to get only actual matches
setMatched = condense( setMatched );
}
// Add matches to results
push.apply( results, setMatched );
// Seedless set matches that follow multiple successful matchers require sorting
if ( outermost && !seed && setMatched.length > 0 &&
( matchedCount + setMatchers.length ) > 1 ) {
Sizzle.uniqueSort( results );
}
}
// Override manipulation of globals by nested matchers
if ( outermost ) {
dirruns = dirrunsUnique;
outermostContext = contextBackup;
}
return unmatched;
};
return bySet ?
markFunction( superMatcher ) :
superMatcher;
}
compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) {
var i,
setMatchers = [],
elementMatchers = [],
cached = compilerCache[ selector + " " ];
if ( !cached ) {
// Generate a function of recursive functions that can be used to check each element
if ( !match ) {
match = tokenize( selector );
}
i = match.length;
while ( i-- ) {
cached = matcherFromTokens( match[ i ] );
if ( cached[ expando ] ) {
setMatchers.push( cached );
} else {
elementMatchers.push( cached );
}
}
// Cache the compiled function
cached = compilerCache(
selector,
matcherFromGroupMatchers( elementMatchers, setMatchers )
);
// Save selector and tokenization
cached.selector = selector;
}
return cached;
};
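// Minimal usage sketch (callers normally go through Sizzle.select below):
//   var results = [],
//       matcher = Sizzle.compile( "div > .item" );
//   matcher( null, document, false, results, document );
//   // results now holds the matched elements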
/**
* A low-level selection function that works with Sizzle's compiled
* selector functions
* @param {String|Function} selector A selector or a pre-compiled
* selector function built with Sizzle.compile
* @param {Element} context
* @param {Array} [results]
* @param {Array} [seed] A set of elements to match against
*/
select = Sizzle.select = function( selector, context, results, seed ) {
var i, tokens, token, type, find,
compiled = typeof selector === "function" && selector,
match = !seed && tokenize( ( selector = compiled.selector || selector ) );
results = results || [];
// Try to minimize operations if there is only one selector in the list and no seed
// (the latter of which guarantees us context)
if ( match.length === 1 ) {
// Reduce context if the leading compound selector is an ID
tokens = match[ 0 ] = match[ 0 ].slice( 0 );
if ( tokens.length > 2 && ( token = tokens[ 0 ] ).type === "ID" &&
context.nodeType === 9 && documentIsHTML && Expr.relative[ tokens[ 1 ].type ] ) {
context = ( Expr.find[ "ID" ]( token.matches[ 0 ]
.replace( runescape, funescape ), context ) || [] )[ 0 ];
if ( !context ) {
return results;
// Precompiled matchers will still verify ancestry, so step up a level
} else if ( compiled ) {
context = context.parentNode;
}
selector = selector.slice( tokens.shift().value.length );
}
// Fetch a seed set for right-to-left matching
i = matchExpr[ "needsContext" ].test( selector ) ? 0 : tokens.length;
while ( i-- ) {
token = tokens[ i ];
// Abort if we hit a combinator
if ( Expr.relative[ ( type = token.type ) ] ) {
break;
}
if ( ( find = Expr.find[ type ] ) ) {
// Search, expanding context for leading sibling combinators
if ( ( seed = find(
token.matches[ 0 ].replace( runescape, funescape ),
rsibling.test( tokens[ 0 ].type ) && testContext( context.parentNode ) ||
context
) ) ) {
// If seed is empty or no tokens remain, we can return early
tokens.splice( i, 1 );
selector = seed.length && toSelector( tokens );
if ( !selector ) {
push.apply( results, seed );
return results;
}
break;
}
}
}
}
// Compile and execute a filtering function if one is not provided
// Provide `match` to avoid retokenization if we modified the selector above
( compiled || compile( selector, match ) )(
seed,
context,
!documentIsHTML,
results,
!context || rsibling.test( selector ) && testContext( context.parentNode ) || context
);
return results;
};
// One-time assignments
// Sort stability
support.sortStable = expando.split( "" ).sort( sortOrder ).join( "" ) === expando;
// Support: Chrome 14-35+
// Always assume duplicates if they aren't passed to the comparison function
support.detectDuplicates = !!hasDuplicate;
// Initialize against the default document
setDocument();
// Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
// Detached nodes confoundingly follow *each other*
support.sortDetached = assert( function( el ) {
// Should return 1, but returns 4 (following)
return el.compareDocumentPosition( document.createElement( "fieldset" ) ) & 1;
} );
// Support: IE<8
// Prevent attribute/property "interpolation"
// https://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
if ( !assert( function( el ) {
el.innerHTML = "<a href='#'></a>";
return el.firstChild.getAttribute( "href" ) === "#";
} ) ) {
addHandle( "type|href|height|width", function( elem, name, isXML ) {
if ( !isXML ) {
return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
}
} );
}
// Support: IE<9
// Use defaultValue in place of getAttribute("value")
if ( !support.attributes || !assert( function( el ) {
el.innerHTML = "<input/>";
el.firstChild.setAttribute( "value", "" );
return el.firstChild.getAttribute( "value" ) === "";
} ) ) {
addHandle( "value", function( elem, _name, isXML ) {
if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
return elem.defaultValue;
}
} );
}
// Support: IE<9
// Use getAttributeNode to fetch booleans when getAttribute lies
if ( !assert( function( el ) {
return el.getAttribute( "disabled" ) == null;
} ) ) {
addHandle( booleans, function( elem, name, isXML ) {
var val;
if ( !isXML ) {
return elem[ name ] === true ? name.toLowerCase() :
( val = elem.getAttributeNode( name ) ) && val.specified ?
val.value :
null;
}
} );
}
return Sizzle;
} )( window );
jQuery.find = Sizzle;
jQuery.expr = Sizzle.selectors;
// Deprecated
jQuery.expr[ ":" ] = jQuery.expr.pseudos;
jQuery.uniqueSort = jQuery.unique = Sizzle.uniqueSort;
jQuery.text = Sizzle.getText;
jQuery.isXMLDoc = Sizzle.isXML;
jQuery.contains = Sizzle.contains;
jQuery.escapeSelector = Sizzle.escape;
var dir = function( elem, dir, until ) {
var matched = [],
truncate = until !== undefined;
while ( ( elem = elem[ dir ] ) && elem.nodeType !== 9 ) {
if ( elem.nodeType === 1 ) {
if ( truncate && jQuery( elem ).is( until ) ) {
break;
}
matched.push( elem );
}
}
return matched;
};
var siblings = function( n, elem ) {
var matched = [];
for ( ; n; n = n.nextSibling ) {
if ( n.nodeType === 1 && n !== elem ) {
matched.push( n );
}
}
return matched;
};
var rneedsContext = jQuery.expr.match.needsContext;
function nodeName( elem, name ) {
return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase();
}
var rsingleTag = ( /^<([a-z][^\/\0>:\x20\t\r\n\f]*)[\x20\t\r\n\f]*\/?>(?:<\/\1>|)$/i );
// Implement the identical functionality for filter and not
function winnow( elements, qualifier, not ) {
if ( isFunction( qualifier ) ) {
return jQuery.grep( elements, function( elem, i ) {
return !!qualifier.call( elem, i, elem ) !== not;
} );
}
// Single element
if ( qualifier.nodeType ) {
return jQuery.grep( elements, function( elem ) {
return ( elem === qualifier ) !== not;
} );
}
// Arraylike of elements (jQuery, arguments, Array)
if ( typeof qualifier !== "string" ) {
return jQuery.grep( elements, function( elem ) {
return ( indexOf.call( qualifier, elem ) > -1 ) !== not;
} );
}
// Filtered directly for both simple and complex selectors
return jQuery.filter( qualifier, elements, not );
}
jQuery.filter = function( expr, elems, not ) {
var elem = elems[ 0 ];
if ( not ) {
expr = ":not(" + expr + ")";
}
if ( elems.length === 1 && elem.nodeType === 1 ) {
return jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [];
}
return jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) {
return elem.nodeType === 1;
} ) );
};
jQuery.fn.extend( {
find: function( selector ) {
var i, ret,
len = this.length,
self = this;
if ( typeof selector !== "string" ) {
return this.pushStack( jQuery( selector ).filter( function() {
for ( i = 0; i < len; i++ ) {
if ( jQuery.contains( self[ i ], this ) ) {
return true;
}
}
} ) );
}
ret = this.pushStack( [] );
for ( i = 0; i < len; i++ ) {
jQuery.find( selector, self[ i ], ret );
}
return len > 1 ? jQuery.uniqueSort( ret ) : ret;
},
filter: function( selector ) {
return this.pushStack( winnow( this, selector || [], false ) );
},
not: function( selector ) {
return this.pushStack( winnow( this, selector || [], true ) );
},
is: function( selector ) {
return !!winnow(
this,
// If this is a positional/relative selector, check membership in the returned set
// so $("p:first").is("p:last") won't return true for a doc with two "p".
typeof selector === "string" && rneedsContext.test( selector ) ?
jQuery( selector ) :
selector || [],
false
).length;
}
} );
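// Illustrative semantics (given a document containing <p class="a"> and <p>):
//   jQuery( "p" ).filter( ".a" ).length   // 1
//   jQuery( "p" ).not( ".a" ).length      // 1
//   jQuery( "p" ).is( ".a" )              // true: at least one element matches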
// Initialize a jQuery object
// A central reference to the root jQuery(document)
var rootjQuery,
// A simple way to check for HTML strings
// Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
// Strict HTML recognition (#11290: must start with <)
// Shortcut simple #id case for speed
rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]+))$/,
init = jQuery.fn.init = function( selector, context, root ) {
var match, elem;
// HANDLE: $(""), $(null), $(undefined), $(false)
if ( !selector ) {
return this;
}
// Method init() accepts an alternate rootjQuery
// so migrate can support jQuery.sub (gh-2101)
root = root || rootjQuery;
// Handle HTML strings
if ( typeof selector === "string" ) {
if ( selector[ 0 ] === "<" &&
selector[ selector.length - 1 ] === ">" &&
selector.length >= 3 ) {
// Assume that strings that start and end with <> are HTML and skip the regex check
match = [ null, selector, null ];
} else {
match = rquickExpr.exec( selector );
}
// Match html or make sure no context is specified for #id
if ( match && ( match[ 1 ] || !context ) ) {
// HANDLE: $(html) -> $(array)
if ( match[ 1 ] ) {
context = context instanceof jQuery ? context[ 0 ] : context;
// Option to run scripts is true for back-compat
// Intentionally let the error be thrown if parseHTML is not present
jQuery.merge( this, jQuery.parseHTML(
match[ 1 ],
context && context.nodeType ? context.ownerDocument || context : document,
true
) );
// HANDLE: $(html, props)
if ( rsingleTag.test( match[ 1 ] ) && jQuery.isPlainObject( context ) ) {
for ( match in context ) {
// Properties of context are called as methods if possible
if ( isFunction( this[ match ] ) ) {
this[ match ]( context[ match ] );
// ...and otherwise set as attributes
} else {
this.attr( match, context[ match ] );
}
}
}
return this;
// HANDLE: $(#id)
} else {
elem = document.getElementById( match[ 2 ] );
if ( elem ) {
// Inject the element directly into the jQuery object
this[ 0 ] = elem;
this.length = 1;
}
return this;
}
// HANDLE: $(expr, $(...))
} else if ( !context || context.jquery ) {
return ( context || root ).find( selector );
// HANDLE: $(expr, context)
// (which is just equivalent to: $(context).find(expr))
} else {
return this.constructor( context ).find( selector );
}
// HANDLE: $(DOMElement)
} else if ( selector.nodeType ) {
this[ 0 ] = selector;
this.length = 1;
return this;
// HANDLE: $(function)
// Shortcut for document ready
} else if ( isFunction( selector ) ) {
return root.ready !== undefined ?
root.ready( selector ) :
// Execute immediately if ready is not present
selector( jQuery );
}
return jQuery.makeArray( selector, this );
};
// Give the init function the jQuery prototype for later instantiation
init.prototype = jQuery.fn;
// Initialize central reference
rootjQuery = jQuery( document );
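// Examples of the init() branches above (illustrative):
//   jQuery( "<p>hi</p>" )                     // HTML string -> parseHTML
//   jQuery( "<img/>", { alt: "x" } )          // $(html, props) -> .attr()
//   jQuery( "#main" )                         // fast getElementById path
//   jQuery( ".item", container )              // $(expr, context) -> find
//   jQuery( document.body )                   // DOMElement wrapper
//   jQuery( function() { /* DOM ready */ } )  // ready shortcut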
var rparentsprev = /^(?:parents|prev(?:Until|All))/,
// Methods guaranteed to produce a unique set when starting from a unique set
guaranteedUnique = {
children: true,
contents: true,
next: true,
prev: true
};
jQuery.fn.extend( {
has: function( target ) {
var targets = jQuery( target, this ),
l = targets.length;
return this.filter( function() {
var i = 0;
for ( ; i < l; i++ ) {
if ( jQuery.contains( this, targets[ i ] ) ) {
return true;
}
}
} );
},
closest: function( selectors, context ) {
var cur,
i = 0,
l = this.length,
matched = [],
targets = typeof selectors !== "string" && jQuery( selectors );
// Positional selectors never match, since there's no _selection_ context
if ( !rneedsContext.test( selectors ) ) {
for ( ; i < l; i++ ) {
for ( cur = this[ i ]; cur && cur !== context; cur = cur.parentNode ) {
// Always skip document fragments
if ( cur.nodeType < 11 && ( targets ?
targets.index( cur ) > -1 :
// Don't pass non-elements to Sizzle
cur.nodeType === 1 &&
jQuery.find.matchesSelector( cur, selectors ) ) ) {
matched.push( cur );
break;
}
}
}
}
return this.pushStack( matched.length > 1 ? jQuery.uniqueSort( matched ) : matched );
},
// Determine the position of an element within the set
index: function( elem ) {
// No argument, return index in parent
if ( !elem ) {
return ( this[ 0 ] && this[ 0 ].parentNode ) ? this.first().prevAll().length : -1;
}
// Index in selector
if ( typeof elem === "string" ) {
return indexOf.call( jQuery( elem ), this[ 0 ] );
}
// Locate the position of the desired element
return indexOf.call( this,
// If it receives a jQuery object, the first element is used
elem.jquery ? elem[ 0 ] : elem
);
},
add: function( selector, context ) {
return this.pushStack(
jQuery.uniqueSort(
jQuery.merge( this.get(), jQuery( selector, context ) )
)
);
},
addBack: function( selector ) {
return this.add( selector == null ?
this.prevObject : this.prevObject.filter( selector )
);
}
} );
function sibling( cur, dir ) {
while ( ( cur = cur[ dir ] ) && cur.nodeType !== 1 ) {}
return cur;
}
jQuery.each( {
parent: function( elem ) {
var parent = elem.parentNode;
return parent && parent.nodeType !== 11 ? parent : null;
},
parents: function( elem ) {
return dir( elem, "parentNode" );
},
parentsUntil: function( elem, _i, until ) {
return dir( elem, "parentNode", until );
},
next: function( elem ) {
return sibling( elem, "nextSibling" );
},
prev: function( elem ) {
return sibling( elem, "previousSibling" );
},
nextAll: function( elem ) {
return dir( elem, "nextSibling" );
},
prevAll: function( elem ) {
return dir( elem, "previousSibling" );
},
nextUntil: function( elem, _i, until ) {
return dir( elem, "nextSibling", until );
},
prevUntil: function( elem, _i, until ) {
return dir( elem, "previousSibling", until );
},
siblings: function( elem ) {
return siblings( ( elem.parentNode || {} ).firstChild, elem );
},
children: function( elem ) {
return siblings( elem.firstChild );
},
contents: function( elem ) {
if ( elem.contentDocument != null &&
// Support: IE 11+
// <object> elements with no `data` attribute have an object
// `contentDocument` with a `null` prototype.
getProto( elem.contentDocument ) ) {
return elem.contentDocument;
}
// Support: IE 9 - 11 only, iOS 7 only, Android Browser <=4.3 only
// Treat the template element as a regular one in browsers that
// don't support it.
if ( nodeName( elem, "template" ) ) {
elem = elem.content || elem;
}
return jQuery.merge( [], elem.childNodes );
}
}, function( name, fn ) {
jQuery.fn[ name ] = function( until, selector ) {
var matched = jQuery.map( this, fn, until );
if ( name.slice( -5 ) !== "Until" ) {
selector = until;
}
if ( selector && typeof selector === "string" ) {
matched = jQuery.filter( selector, matched );
}
if ( this.length > 1 ) {
// Remove duplicates
if ( !guaranteedUnique[ name ] ) {
jQuery.uniqueSort( matched );
}
// Reverse order for parents* and prev-derivatives
if ( rparentsprev.test( name ) ) {
matched.reverse();
}
}
return this.pushStack( matched );
};
} );
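// Illustrative (for <ul><li>a</li><li id="b">b</li><li>c</li></ul>):
//   jQuery( "#b" ).next().text()       // "c"
//   jQuery( "#b" ).prevAll().length    // 1
//   jQuery( "#b" ).siblings().length   // 2
//   jQuery( "ul" ).children().length   // 3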
var rnothtmlwhite = ( /[^\x20\t\r\n\f]+/g );
// Convert String-formatted options into Object-formatted ones
function createOptions( options ) {
var object = {};
jQuery.each( options.match( rnothtmlwhite ) || [], function( _, flag ) {
object[ flag ] = true;
} );
return object;
}
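// e.g. createOptions( "once memory" ) -> { once: true, memory: true }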
/*
* Create a callback list using the following parameters:
*
* options: an optional list of space-separated options that will change how
* the callback list behaves or a more traditional option object
*
* By default a callback list will act like an event callback list and can be
* "fired" multiple times.
*
* Possible options:
*
* once: will ensure the callback list can only be fired once (like a Deferred)
*
* memory: will keep track of previous values and will call any callback added
* after the list has been fired right away with the latest "memorized"
* values (like a Deferred)
*
* unique: will ensure a callback can only be added once (no duplicate in the list)
*
* stopOnFalse: interrupt firing when a callback returns false
*
*/
jQuery.Callbacks = function( options ) {
// Convert options from String-formatted to Object-formatted if needed
// (we check in cache first)
options = typeof options === "string" ?
createOptions( options ) :
jQuery.extend( {}, options );
var // Flag to know if list is currently firing
firing,
// Last fire value for non-forgettable lists
memory,
// Flag to know if list was already fired
fired,
// Flag to prevent firing
locked,
// Actual callback list
list = [],
// Queue of execution data for repeatable lists
queue = [],
// Index of currently firing callback (modified by add/remove as needed)
firingIndex = -1,
// Fire callbacks
fire = function() {
// Enforce single-firing
locked = locked || options.once;
// Execute callbacks for all pending executions,
// respecting firingIndex overrides and runtime changes
fired = firing = true;
for ( ; queue.length; firingIndex = -1 ) {
memory = queue.shift();
while ( ++firingIndex < list.length ) {
// Run callback and check for early termination
if ( list[ firingIndex ].apply( memory[ 0 ], memory[ 1 ] ) === false &&
options.stopOnFalse ) {
// Jump to end and forget the data so .add doesn't re-fire
firingIndex = list.length;
memory = false;
}
}
}
// Forget the data if we're done with it
if ( !options.memory ) {
memory = false;
}
firing = false;
// Clean up if we're done firing for good
if ( locked ) {
// Keep an empty list if we have data for future add calls
if ( memory ) {
list = [];
// Otherwise, this object is spent
} else {
list = "";
}
}
},
// Actual Callbacks object
self = {
// Add a callback or a collection of callbacks to the list
add: function() {
if ( list ) {
// If we have memory from a past run, we should fire after adding
if ( memory && !firing ) {
firingIndex = list.length - 1;
queue.push( memory );
}
( function add( args ) {
jQuery.each( args, function( _, arg ) {
if ( isFunction( arg ) ) {
if ( !options.unique || !self.has( arg ) ) {
list.push( arg );
}
} else if ( arg && arg.length && toType( arg ) !== "string" ) {
// Inspect recursively
add( arg );
}
} );
} )( arguments );
if ( memory && !firing ) {
fire();
}
}
return this;
},
// Remove a callback from the list
remove: function() {
jQuery.each( arguments, function( _, arg ) {
var index;
while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) {
list.splice( index, 1 );
// Handle firing indexes
if ( index <= firingIndex ) {
firingIndex--;
}
}
} );
return this;
},
// Check if a given callback is in the list.
// If no argument is given, return whether or not list has callbacks attached.
has: function( fn ) {
return fn ?
jQuery.inArray( fn, list ) > -1 :
list.length > 0;
},
// Remove all callbacks from the list
empty: function() {
if ( list ) {
list = [];
}
return this;
},
// Disable .fire and .add
// Abort any current/pending executions
// Clear all callbacks and values
disable: function() {
locked = queue = [];
list = memory = "";
return this;
},
disabled: function() {
return !list;
},
// Disable .fire
// Also disable .add unless we have memory (since it would have no effect)
// Abort any pending executions
lock: function() {
locked = queue = [];
if ( !memory && !firing ) {
list = memory = "";
}
return this;
},
locked: function() {
return !!locked;
},
// Call all callbacks with the given context and arguments
fireWith: function( context, args ) {
if ( !locked ) {
args = args || [];
args = [ context, args.slice ? args.slice() : args ];
queue.push( args );
if ( !firing ) {
fire();
}
}
return this;
},
// Call all the callbacks with the given arguments
fire: function() {
self.fireWith( this, arguments );
return this;
},
// To know if the callbacks have already been called at least once
fired: function() {
return !!fired;
}
};
return self;
};
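// Illustrative usage of the flags documented above:
//   var cb = jQuery.Callbacks( "once memory" );
//   cb.add( function( v ) { console.log( v ); } );
//   cb.fire( 1 );                                      // logs 1
//   cb.add( function( v ) { console.log( v * 2 ); } ); // logs 2 right away (memory)
//   cb.fire( 3 );                                      // ignored (once)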
function Identity( v ) {
return v;
}
function Thrower( ex ) {
throw ex;
}
function adoptValue( value, resolve, reject, noValue ) {
var method;
try {
// Check for promise aspect first to privilege synchronous behavior
if ( value && isFunction( ( method = value.promise ) ) ) {
method.call( value ).done( resolve ).fail( reject );
// Other thenables
} else if ( value && isFunction( ( method = value.then ) ) ) {
method.call( value, resolve, reject );
// Other non-thenables
} else {
// Control `resolve` arguments by letting Array#slice cast boolean `noValue` to integer:
// * false: [ value ].slice( 0 ) => resolve( value )
// * true: [ value ].slice( 1 ) => resolve()
resolve.apply( undefined, [ value ].slice( noValue ) );
}
// For Promises/A+, convert exceptions into rejections
// Since jQuery.when doesn't unwrap thenables, we can skip the extra checks appearing in
// Deferred#then to conditionally suppress rejection.
} catch ( value ) {
// Support: Android 4.0 only
// Strict mode functions invoked without .call/.apply get global-object context
reject.apply( undefined, [ value ] );
}
}
jQuery.extend( {
Deferred: function( func ) {
var tuples = [
// action, add listener, callbacks,
// ... .then handlers, argument index, [final state]
[ "notify", "progress", jQuery.Callbacks( "memory" ),
jQuery.Callbacks( "memory" ), 2 ],
[ "resolve", "done", jQuery.Callbacks( "once memory" ),
jQuery.Callbacks( "once memory" ), 0, "resolved" ],
[ "reject", "fail", jQuery.Callbacks( "once memory" ),
jQuery.Callbacks( "once memory" ), 1, "rejected" ]
],
state = "pending",
promise = {
state: function() {
return state;
},
always: function() {
deferred.done( arguments ).fail( arguments );
return this;
},
"catch": function( fn ) {
return promise.then( null, fn );
},
// Keep pipe for back-compat
pipe: function( /* fnDone, fnFail, fnProgress */ ) {
var fns = arguments;
return jQuery.Deferred( function( newDefer ) {
jQuery.each( tuples, function( _i, tuple ) {
// Map tuples (progress, done, fail) to arguments (done, fail, progress)
var fn = isFunction( fns[ tuple[ 4 ] ] ) && fns[ tuple[ 4 ] ];
// deferred.progress(function() { bind to newDefer or newDefer.notify })
// deferred.done(function() { bind to newDefer or newDefer.resolve })
// deferred.fail(function() { bind to newDefer or newDefer.reject })
deferred[ tuple[ 1 ] ]( function() {
var returned = fn && fn.apply( this, arguments );
if ( returned && isFunction( returned.promise ) ) {
returned.promise()
.progress( newDefer.notify )
.done( newDefer.resolve )
.fail( newDefer.reject );
} else {
newDefer[ tuple[ 0 ] + "With" ](
this,
fn ? [ returned ] : arguments
);
}
} );
} );
fns = null;
} ).promise();
},
then: function( onFulfilled, onRejected, onProgress ) {
var maxDepth = 0;
function resolve( depth, deferred, handler, special ) {
return function() {
var that = this,
args = arguments,
mightThrow = function() {
var returned, then;
// Support: Promises/A+ section 2.3.3.3.3
// https://promisesaplus.com/#point-59
// Ignore double-resolution attempts
if ( depth < maxDepth ) {
return;
}
returned = handler.apply( that, args );
// Support: Promises/A+ section 2.3.1
// https://promisesaplus.com/#point-48
if ( returned === deferred.promise() ) {
throw new TypeError( "Thenable self-resolution" );
}
// Support: Promises/A+ sections 2.3.3.1, 3.5
// https://promisesaplus.com/#point-54
// https://promisesaplus.com/#point-75
// Retrieve `then` only once
then = returned &&
// Support: Promises/A+ section 2.3.4
// https://promisesaplus.com/#point-64
// Only check objects and functions for thenability
( typeof returned === "object" ||
typeof returned === "function" ) &&
returned.then;
// Handle a returned thenable
if ( isFunction( then ) ) {
// Special processors (notify) just wait for resolution
if ( special ) {
then.call(
returned,
resolve( maxDepth, deferred, Identity, special ),
resolve( maxDepth, deferred, Thrower, special )
);
// Normal processors (resolve) also hook into progress
} else {
// ...and disregard older resolution values
maxDepth++;
then.call(
returned,
resolve( maxDepth, deferred, Identity, special ),
resolve( maxDepth, deferred, Thrower, special ),
resolve( maxDepth, deferred, Identity,
deferred.notifyWith )
);
}
// Handle all other returned values
} else {
// Only substitute handlers pass on context
// and multiple values (non-spec behavior)
if ( handler !== Identity ) {
that = undefined;
args = [ returned ];
}
// Process the value(s)
// Default process is resolve
( special || deferred.resolveWith )( that, args );
}
},
// Only normal processors (resolve) catch and reject exceptions
process = special ?
mightThrow :
function() {
try {
mightThrow();
} catch ( e ) {
if ( jQuery.Deferred.exceptionHook ) {
jQuery.Deferred.exceptionHook( e,
process.stackTrace );
}
// Support: Promises/A+ section 2.3.3.3.4.1
// https://promisesaplus.com/#point-61
// Ignore post-resolution exceptions
if ( depth + 1 >= maxDepth ) {
// Only substitute handlers pass on context
// and multiple values (non-spec behavior)
if ( handler !== Thrower ) {
that = undefined;
args = [ e ];
}
deferred.rejectWith( that, args );
}
}
};
// Support: Promises/A+ section 2.3.3.3.1
// https://promisesaplus.com/#point-57
// Re-resolve promises immediately to dodge false rejection from
// subsequent errors
if ( depth ) {
process();
} else {
// Call an optional hook to record the stack, in case of exception
// since it's otherwise lost when execution goes async
if ( jQuery.Deferred.getStackHook ) {
process.stackTrace = jQuery.Deferred.getStackHook();
}
window.setTimeout( process );
}
};
}
return jQuery.Deferred( function( newDefer ) {
// progress_handlers.add( ... )
tuples[ 0 ][ 3 ].add(
resolve(
0,
newDefer,
isFunction( onProgress ) ?
onProgress :
Identity,
newDefer.notifyWith
)
);
// fulfilled_handlers.add( ... )
tuples[ 1 ][ 3 ].add(
resolve(
0,
newDefer,
isFunction( onFulfilled ) ?
onFulfilled :
Identity
)
);
// rejected_handlers.add( ... )
tuples[ 2 ][ 3 ].add(
resolve(
0,
newDefer,
isFunction( onRejected ) ?
onRejected :
Thrower
)
);
} ).promise();
},
// Get a promise for this deferred
// If obj is provided, the promise aspect is added to the object
promise: function( obj ) {
return obj != null ? jQuery.extend( obj, promise ) : promise;
}
},
deferred = {};
// Add list-specific methods
jQuery.each( tuples, function( i, tuple ) {
var list = tuple[ 2 ],
stateString = tuple[ 5 ];
// promise.progress = list.add
// promise.done = list.add
// promise.fail = list.add
promise[ tuple[ 1 ] ] = list.add;
// Handle state
if ( stateString ) {
list.add(
function() {
// state = "resolved" (i.e., fulfilled)
// state = "rejected"
state = stateString;
},
// rejected_callbacks.disable
// fulfilled_callbacks.disable
tuples[ 3 - i ][ 2 ].disable,
// rejected_handlers.disable
// fulfilled_handlers.disable
tuples[ 3 - i ][ 3 ].disable,
// progress_callbacks.lock
tuples[ 0 ][ 2 ].lock,
// progress_handlers.lock
tuples[ 0 ][ 3 ].lock
);
}
// progress_handlers.fire
// fulfilled_handlers.fire
// rejected_handlers.fire
list.add( tuple[ 3 ].fire );
// deferred.notify = function() { deferred.notifyWith(...) }
// deferred.resolve = function() { deferred.resolveWith(...) }
// deferred.reject = function() { deferred.rejectWith(...) }
deferred[ tuple[ 0 ] ] = function() {
deferred[ tuple[ 0 ] + "With" ]( this === deferred ? undefined : this, arguments );
return this;
};
// deferred.notifyWith = list.fireWith
// deferred.resolveWith = list.fireWith
// deferred.rejectWith = list.fireWith
deferred[ tuple[ 0 ] + "With" ] = list.fireWith;
} );
// Make the deferred a promise
promise.promise( deferred );
// Call given func if any
if ( func ) {
func.call( deferred, deferred );
}
// All done!
return deferred;
},
// Deferred helper
when: function( singleValue ) {
var
// count of uncompleted subordinates
remaining = arguments.length,
// count of unprocessed arguments
i = remaining,
// subordinate fulfillment data
resolveContexts = Array( i ),
resolveValues = slice.call( arguments ),
// the master Deferred
master = jQuery.Deferred(),
// subordinate callback factory
updateFunc = function( i ) {
return function( value ) {
resolveContexts[ i ] = this;
resolveValues[ i ] = arguments.length > 1 ? slice.call( arguments ) : value;
if ( !( --remaining ) ) {
master.resolveWith( resolveContexts, resolveValues );
}
};
};
// Single- and empty arguments are adopted like Promise.resolve
if ( remaining <= 1 ) {
adoptValue( singleValue, master.done( updateFunc( i ) ).resolve, master.reject,
!remaining );
// Use .then() to unwrap secondary thenables (cf. gh-3000)
if ( master.state() === "pending" ||
isFunction( resolveValues[ i ] && resolveValues[ i ].then ) ) {
return master.then();
}
}
// Multiple arguments are aggregated like Promise.all array elements
while ( i-- ) {
adoptValue( resolveValues[ i ], updateFunc( i ), master.reject );
}
return master.promise();
}
} );
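// Illustrative semantics of jQuery.when:
//   jQuery.when().done( fn )          // resolved immediately, fn()
//   jQuery.when( 42 ).done( fn )      // fn( 42 ); non-thenables pass through
//   jQuery.when( d1, d2 ).done( fn )  // fn( v1, v2 ) once both resolve;
//                                     // rejects as soon as either rejects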
// These usually indicate a programmer mistake during development,
// warn about them ASAP rather than swallowing them by default.
var rerrorNames = /^(Eval|Internal|Range|Reference|Syntax|Type|URI)Error$/;
jQuery.Deferred.exceptionHook = function( error, stack ) {
// Support: IE 8 - 9 only
// Console exists when dev tools are open, which can happen at any time
if ( window.console && window.console.warn && error && rerrorNames.test( error.name ) ) {
window.console.warn( "jQuery.Deferred exception: " + error.message, error.stack, stack );
}
};
jQuery.readyException = function( error ) {
window.setTimeout( function() {
throw error;
} );
};
// The deferred used on DOM ready
var readyList = jQuery.Deferred();
jQuery.fn.ready = function( fn ) {
readyList
.then( fn )
// Wrap jQuery.readyException in a function so that the lookup
// happens at the time of error handling instead of callback
// registration.
.catch( function( error ) {
jQuery.readyException( error );
} );
return this;
};
jQuery.extend( {
// Is the DOM ready to be used? Set to true once it occurs.
isReady: false,
// A counter to track how many items to wait for before
// the ready event fires. See #6781
readyWait: 1,
// Handle when the DOM is ready
ready: function( wait ) {
// Abort if there are pending holds or we're already ready
if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) {
return;
}
// Remember that the DOM is ready
jQuery.isReady = true;
// If a normal DOM Ready event fired, decrement, and wait if need be
if ( wait !== true && --jQuery.readyWait > 0 ) {
return;
}
// If there are functions bound, execute them
readyList.resolveWith( document, [ jQuery ] );
}
} );
jQuery.ready.then = readyList.then;
// The ready event handler and self cleanup method
function completed() {
document.removeEventListener( "DOMContentLoaded", completed );
window.removeEventListener( "load", completed );
jQuery.ready();
}
// Catch cases where $(document).ready() is called
// after the browser event has already occurred.
// Support: IE <=9 - 10 only
// Older IE sometimes signals "interactive" too soon
if ( document.readyState === "complete" ||
( document.readyState !== "loading" && !document.documentElement.doScroll ) ) {
// Handle it asynchronously to allow scripts the opportunity to delay ready
window.setTimeout( jQuery.ready );
} else {
// Use the handy event callback
document.addEventListener( "DOMContentLoaded", completed );
// A fallback to window.onload, that will always work
window.addEventListener( "load", completed );
}
// Multifunctional method to get and set values of a collection
// The value/s can optionally be executed if it's a function
var access = function( elems, fn, key, value, chainable, emptyGet, raw ) {
var i = 0,
len = elems.length,
bulk = key == null;
// Sets many values
if ( toType( key ) === "object" ) {
chainable = true;
for ( i in key ) {
access( elems, fn, i, key[ i ], true, emptyGet, raw );
}
// Sets one value
} else if ( value !== undefined ) {
chainable = true;
if ( !isFunction( value ) ) {
raw = true;
}
if ( bulk ) {
// Bulk operations run against the entire set
if ( raw ) {
fn.call( elems, value );
fn = null;
// ...except when executing function values
} else {
bulk = fn;
fn = function( elem, _key, value ) {
return bulk.call( jQuery( elem ), value );
};
}
}
if ( fn ) {
for ( ; i < len; i++ ) {
fn(
elems[ i ], key, raw ?
value :
value.call( elems[ i ], i, fn( elems[ i ], key ) )
);
}
}
}
if ( chainable ) {
return elems;
}
// Gets
if ( bulk ) {
return fn.call( elems );
}
return len ? fn( elems[ 0 ], key ) : emptyGet;
};
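// Rough sketch of how collection methods funnel through access(); .css() and
// .attr() follow this shape:
//   jQuery.fn.css = function( name, value ) {
//       return access( this, function( elem, name, value ) {
//           /* get or set one computed style */
//       }, name, value, arguments.length > 1 );
//   };
// Getter calls return fn( elems[ 0 ], key ); setter calls run fn per element
// and return the collection for chaining.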
// Matches dashed string for camelizing
var rmsPrefix = /^-ms-/,
rdashAlpha = /-([a-z])/g;
// Used by camelCase as callback to replace()
function fcamelCase( _all, letter ) {
return letter.toUpperCase();
}
// Convert dashed to camelCase; used by the css and data modules
// Support: IE <=9 - 11, Edge 12 - 15
// Microsoft forgot to hump their vendor prefix (#9572)
function camelCase( string ) {
return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase );
}
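// e.g. camelCase( "font-size" ) -> "fontSize",
//      camelCase( "-ms-transform" ) -> "msTransform" (note the lowercase "ms")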
var acceptData = function( owner ) {
// Accepts only:
//  - Node
//    - Node.ELEMENT_NODE
//    - Node.DOCUMENT_NODE
//  - Object
//    - Any
return owner.nodeType === 1 || owner.nodeType === 9 || !( +owner.nodeType );
};
function Data() {
this.expando = jQuery.expando + Data.uid++;
}
Data.uid = 1;
Data.prototype = {
cache: function( owner ) {
// Check if the owner object already has a cache
var value = owner[ this.expando ];
// If not, create one
if ( !value ) {
value = {};
// We can accept data for non-element nodes in modern browsers,
// but we should not, see #8335.
// Always return an empty object.
if ( acceptData( owner ) ) {
// If it is a node unlikely to be stringify-ed or looped over
// use plain assignment
if ( owner.nodeType ) {
owner[ this.expando ] = value;
// Otherwise secure it in a non-enumerable property
// configurable must be true to allow the property to be
// deleted when data is removed
} else {
Object.defineProperty( owner, this.expando, {
value: value,
configurable: true
} );
}
}
}
return value;
},
set: function( owner, data, value ) {
var prop,
cache = this.cache( owner );
// Handle: [ owner, key, value ] args
// Always use camelCase key (gh-2257)
if ( typeof data === "string" ) {
cache[ camelCase( data ) ] = value;
// Handle: [ owner, { properties } ] args
} else {
// Copy the properties one-by-one to the cache object
for ( prop in data ) {
cache[ camelCase( prop ) ] = data[ prop ];
}
}
return cache;
},
get: function( owner, key ) {
return key === undefined ?
this.cache( owner ) :
// Always use camelCase key (gh-2257)
owner[ this.expando ] && owner[ this.expando ][ camelCase( key ) ];
},
access: function( owner, key, value ) {
// In cases where either:
//
// 1. No key was specified
// 2. A string key was specified, but no value provided
//
// Take the "read" path and allow the get method to determine
// which value to return, respectively either:
//
// 1. The entire cache object
// 2. The data stored at the key
//
if ( key === undefined ||
( ( key && typeof key === "string" ) && value === undefined ) ) {
return this.get( owner, key );
}
// When the key is not a string, or both a key and value
// are specified, set or extend (existing objects) with either:
//
// 1. An object of properties
// 2. A key and value
//
this.set( owner, key, value );
// Since the "set" path can have two possible entry points
// return the expected data based on which path was taken
return value !== undefined ? value : key;
},
remove: function( owner, key ) {
var i,
cache = owner[ this.expando ];
if ( cache === undefined ) {
return;
}
if ( key !== undefined ) {
// Support array or space separated string of keys
if ( Array.isArray( key ) ) {
// If key is an array of keys...
// We always set camelCase keys, so remove that.
key = key.map( camelCase );
} else {
key = camelCase( key );
// If a key with the spaces exists, use it.
// Otherwise, create an array by matching non-whitespace
key = key in cache ?
[ key ] :
( key.match( rnothtmlwhite ) || [] );
}
i = key.length;
while ( i-- ) {
delete cache[ key[ i ] ];
}
}
// Remove the expando if there's no more data
if ( key === undefined || jQuery.isEmptyObject( cache ) ) {
// Support: Chrome <=35 - 45
// Webkit & Blink performance suffers when deleting properties
// from DOM nodes, so set to undefined instead
// https://bugs.chromium.org/p/chromium/issues/detail?id=378607 (bug restricted)
if ( owner.nodeType ) {
owner[ this.expando ] = undefined;
} else {
delete owner[ this.expando ];
}
}
},
hasData: function( owner ) {
var cache = owner[ this.expando ];
return cache !== undefined && !jQuery.isEmptyObject( cache );
}
};
var dataPriv = new Data();
var dataUser = new Data();
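// Illustrative: dataUser backs the public .data() API, while dataPriv holds
// internal state (queues, event handlers) that is never exposed to user code:
//   dataUser.set( elem, "role", "tab" );
//   dataUser.get( elem, "role" );   // "tab"
//   dataUser.hasData( elem );       // true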
// Implementation Summary
//
// 1. Enforce API surface and semantic compatibility with 1.9.x branch
// 2. Improve the module's maintainability by reducing the storage
// paths to a single mechanism.
// 3. Use the same single mechanism to support "private" and "user" data.
// 4. _Never_ expose "private" data to user code (TODO: Drop _data, _removeData)
// 5. Avoid exposing implementation details on user objects (eg. expando properties)
// 6. Provide a clear path for implementation upgrade to WeakMap in 2014
var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,
rmultiDash = /[A-Z]/g;
function getData( data ) {
if ( data === "true" ) {
return true;
}
if ( data === "false" ) {
return false;
}
if ( data === "null" ) {
return null;
}
// Only convert to a number if it doesn't change the string
if ( data === +data + "" ) {
return +data;
}
if ( rbrace.test( data ) ) {
return JSON.parse( data );
}
return data;
}
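// e.g. getData( "true" ) -> true, getData( "42" ) -> 42,
// getData( '{"a":1}' ) -> { a: 1 }, but getData( "007" ) -> "007"
// (the numeric cast would alter the string, so it is left as-is)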
function dataAttr( elem, key, data ) {
var name;
// If nothing was found internally, try to fetch any
// data from the HTML5 data-* attribute
if ( data === undefined && elem.nodeType === 1 ) {
name = "data-" + key.replace( rmultiDash, "-$&" ).toLowerCase();
data = elem.getAttribute( name );
if ( typeof data === "string" ) {
try {
data = getData( data );
} catch ( e ) {}
// Make sure we set the data so it isn't changed later
dataUser.set( elem, key, data );
} else {
data = undefined;
}
}
return data;
}
jQuery.extend( {
hasData: function( elem ) {
return dataUser.hasData( elem ) || dataPriv.hasData( elem );
},
data: function( elem, name, data ) {
return dataUser.access( elem, name, data );
},
removeData: function( elem, name ) {
dataUser.remove( elem, name );
},
// TODO: Now that all calls to _data and _removeData have been replaced
// with direct calls to dataPriv methods, these can be deprecated.
_data: function( elem, name, data ) {
return dataPriv.access( elem, name, data );
},
_removeData: function( elem, name ) {
dataPriv.remove( elem, name );
}
} );
jQuery.fn.extend( {
data: function( key, value ) {
var i, name, data,
elem = this[ 0 ],
attrs = elem && elem.attributes;
// Gets all values
if ( key === undefined ) {
if ( this.length ) {
data = dataUser.get( elem );
if ( elem.nodeType === 1 && !dataPriv.get( elem, "hasDataAttrs" ) ) {
i = attrs.length;
while ( i-- ) {
// Support: IE 11 only
// The attrs elements can be null (#14894)
if ( attrs[ i ] ) {
name = attrs[ i ].name;
if ( name.indexOf( "data-" ) === 0 ) {
name = camelCase( name.slice( 5 ) );
dataAttr( elem, name, data[ name ] );
}
}
}
dataPriv.set( elem, "hasDataAttrs", true );
}
}
return data;
}
// Sets multiple values
if ( typeof key === "object" ) {
return this.each( function() {
dataUser.set( this, key );
} );
}
return access( this, function( value ) {
var data;
// The calling jQuery object (element matches) is not empty
// (and therefore has an element at this[ 0 ]) and the
// `value` parameter was not undefined. An empty jQuery object
// will result in `undefined` for elem = this[ 0 ], which will
// throw an exception if an attempt to read a data cache is made.
if ( elem && value === undefined ) {
// Attempt to get data from the cache
// The key will always be camelCased in Data
data = dataUser.get( elem, key );
if ( data !== undefined ) {
return data;
}
// Attempt to "discover" the data in
// HTML5 custom data-* attrs
data = dataAttr( elem, key );
if ( data !== undefined ) {
return data;
}
// We tried really hard, but the data doesn't exist.
return;
}
// Set the data...
this.each( function() {
// We always store the camelCased key
dataUser.set( this, key, value );
} );
}, null, value, arguments.length > 1, null, true );
},
removeData: function( key ) {
return this.each( function() {
dataUser.remove( this, key );
} );
}
} );
jQuery.extend( {
queue: function( elem, type, data ) {
var queue;
if ( elem ) {
type = ( type || "fx" ) + "queue";
queue = dataPriv.get( elem, type );
// Speed up dequeue by getting out quickly if this is just a lookup
if ( data ) {
if ( !queue || Array.isArray( data ) ) {
queue = dataPriv.access( elem, type, jQuery.makeArray( data ) );
} else {
queue.push( data );
}
}
return queue || [];
}
},
dequeue: function( elem, type ) {
type = type || "fx";
var queue = jQuery.queue( elem, type ),
startLength = queue.length,
fn = queue.shift(),
hooks = jQuery._queueHooks( elem, type ),
next = function() {
jQuery.dequeue( elem, type );
};
// If the fx queue is dequeued, always remove the progress sentinel
if ( fn === "inprogress" ) {
fn = queue.shift();
startLength--;
}
if ( fn ) {
// Add a progress sentinel to prevent the fx queue from being
// automatically dequeued
if ( type === "fx" ) {
queue.unshift( "inprogress" );
}
// Clear up the last queue stop function
delete hooks.stop;
fn.call( elem, next, hooks );
}
if ( !startLength && hooks ) {
hooks.empty.fire();
}
},
// Not public - generate a queueHooks object, or return the current one
_queueHooks: function( elem, type ) {
var key = type + "queueHooks";
return dataPriv.get( elem, key ) || dataPriv.access( elem, key, {
empty: jQuery.Callbacks( "once memory" ).add( function() {
dataPriv.remove( elem, [ type + "queue", key ] );
} )
} );
}
} );
jQuery.fn.extend( {
queue: function( type, data ) {
var setter = 2;
if ( typeof type !== "string" ) {
data = type;
type = "fx";
setter--;
}
if ( arguments.length < setter ) {
return jQuery.queue( this[ 0 ], type );
}
return data === undefined ?
this :
this.each( function() {
var queue = jQuery.queue( this, type, data );
// Ensure a hooks object exists for this queue
jQuery._queueHooks( this, type );
if ( type === "fx" && queue[ 0 ] !== "inprogress" ) {
jQuery.dequeue( this, type );
}
} );
},
dequeue: function( type ) {
return this.each( function() {
jQuery.dequeue( this, type );
} );
},
clearQueue: function( type ) {
return this.queue( type || "fx", [] );
},
// Get a promise resolved when queues of a certain type
// are emptied (fx is the type by default)
promise: function( type, obj ) {
var tmp,
count = 1,
defer = jQuery.Deferred(),
elements = this,
i = this.length,
resolve = function() {
if ( !( --count ) ) {
defer.resolveWith( elements, [ elements ] );
}
};
if ( typeof type !== "string" ) {
obj = type;
type = undefined;
}
type = type || "fx";
while ( i-- ) {
tmp = dataPriv.get( elements[ i ], type + "queueHooks" );
if ( tmp && tmp.empty ) {
count++;
tmp.empty.add( resolve );
}
}
resolve();
return defer.promise( obj );
}
} );
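// Illustrative: functions queued on the default "fx" queue auto-dequeue via
// the "inprogress" sentinel, and .promise() resolves once the queue drains:
//   jQuery( elem ).queue( function( next ) { /* do a step */ next(); } );
//   jQuery( elem ).promise().done( function() { /* all fx steps finished */ } );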
var pnum = ( /[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/ ).source;
var rcssNum = new RegExp( "^(?:([+-])=|)(" + pnum + ")([a-z%]*)$", "i" );
var cssExpand = [ "Top", "Right", "Bottom", "Left" ];
var documentElement = document.documentElement;
var isAttached = function( elem ) {
return jQuery.contains( elem.ownerDocument, elem );
},
composed = { composed: true };
// Support: IE 9 - 11+, Edge 12 - 18+, iOS 10.0 - 10.2 only
// Check attachment across shadow DOM boundaries when possible (gh-3504)
// Support: iOS 10.0-10.2 only
// Early iOS 10 versions support `attachShadow` but not `getRootNode`,
// leading to errors. We need to check for `getRootNode`.
if ( documentElement.getRootNode ) {
isAttached = function( elem ) {
return jQuery.contains( elem.ownerDocument, elem ) ||
elem.getRootNode( composed ) === elem.ownerDocument;
};
}
var isHiddenWithinTree = function( elem, el ) {
// isHiddenWithinTree might be called from the jQuery#filter function;
// in that case, the element will be the second argument
elem = el || elem;
// Inline style trumps all
return elem.style.display === "none" ||
elem.style.display === "" &&
// Otherwise, check computed style
// Support: Firefox <=43 - 45
// Disconnected elements can have computed display: none, so first confirm that elem is
// in the document.
isAttached( elem ) &&
jQuery.css( elem, "display" ) === "none";
};
function adjustCSS( elem, prop, valueParts, tween ) {
var adjusted, scale,
maxIterations = 20,
currentValue = tween ?
function() {
return tween.cur();
} :
function() {
return jQuery.css( elem, prop, "" );
},
initial = currentValue(),
unit = valueParts && valueParts[ 3 ] || ( jQuery.cssNumber[ prop ] ? "" : "px" ),
// Starting value computation is required for potential unit mismatches
initialInUnit = elem.nodeType &&
( jQuery.cssNumber[ prop ] || unit !== "px" && +initial ) &&
rcssNum.exec( jQuery.css( elem, prop ) );
if ( initialInUnit && initialInUnit[ 3 ] !== unit ) {
// Support: Firefox <=54
// Halve the iteration target value to prevent interference from CSS upper bounds (gh-2144)
initial = initial / 2;
// Trust units reported by jQuery.css
unit = unit || initialInUnit[ 3 ];
// Iteratively approximate from a nonzero starting point
initialInUnit = +initial || 1;
while ( maxIterations-- ) {
// Evaluate and update our best guess (doubling guesses that zero out).
// Finish if the scale equals or crosses 1 (making the old*new product non-positive).
jQuery.style( elem, prop, initialInUnit + unit );
if ( ( 1 - scale ) * ( 1 - ( scale = currentValue() / initial || 0.5 ) ) <= 0 ) {
maxIterations = 0;
}
initialInUnit = initialInUnit / scale;
}
initialInUnit = initialInUnit * 2;
jQuery.style( elem, prop, initialInUnit + unit );
// Make sure we update the tween properties later on
valueParts = valueParts || [];
}
if ( valueParts ) {
initialInUnit = +initialInUnit || +initial || 0;
// Apply relative offset (+=/-=) if specified
adjusted = valueParts[ 1 ] ?
initialInUnit + ( valueParts[ 1 ] + 1 ) * valueParts[ 2 ] :
+valueParts[ 2 ];
if ( tween ) {
tween.unit = unit;
tween.start = initialInUnit;
tween.end = adjusted;
}
}
return adjusted;
}
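// Illustrative sketch of what adjustCSS() enables: relative ("+=" / "-=")
// and cross-unit values ("#box" is hypothetical):
//
//   jQuery( "#box" ).css( "width", "+=10" );      // 10px wider than now
//   jQuery( "#box" ).animate( { width: "5em" } ); // the px start value is
//   // converted to em by the doubling/halving approximation loop above,
//   // so the tween can interpolate start and end in the same unit.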
var defaultDisplayMap = {};
function getDefaultDisplay( elem ) {
var temp,
doc = elem.ownerDocument,
nodeName = elem.nodeName,
display = defaultDisplayMap[ nodeName ];
if ( display ) {
return display;
}
temp = doc.body.appendChild( doc.createElement( nodeName ) );
display = jQuery.css( temp, "display" );
temp.parentNode.removeChild( temp );
if ( display === "none" ) {
display = "block";
}
defaultDisplayMap[ nodeName ] = display;
return display;
}
function showHide( elements, show ) {
var display, elem,
values = [],
index = 0,
length = elements.length;
// Determine new display value for elements that need to change
for ( ; index < length; index++ ) {
elem = elements[ index ];
if ( !elem.style ) {
continue;
}
display = elem.style.display;
if ( show ) {
// Since we force visibility upon cascade-hidden elements, an immediate (and slow)
// check is required in this first loop unless we have a nonempty display value (either
// inline or about-to-be-restored)
if ( display === "none" ) {
values[ index ] = dataPriv.get( elem, "display" ) || null;
if ( !values[ index ] ) {
elem.style.display = "";
}
}
if ( elem.style.display === "" && isHiddenWithinTree( elem ) ) {
values[ index ] = getDefaultDisplay( elem );
}
} else {
if ( display !== "none" ) {
values[ index ] = "none";
// Remember what we're overwriting
dataPriv.set( elem, "display", display );
}
}
}
// Set the display of the elements in a second loop to avoid constant reflow
for ( index = 0; index < length; index++ ) {
if ( values[ index ] != null ) {
elements[ index ].style.display = values[ index ];
}
}
return elements;
}
jQuery.fn.extend( {
show: function() {
return showHide( this, true );
},
hide: function() {
return showHide( this );
},
toggle: function( state ) {
if ( typeof state === "boolean" ) {
return state ? this.show() : this.hide();
}
return this.each( function() {
if ( isHiddenWithinTree( this ) ) {
jQuery( this ).show();
} else {
jQuery( this ).hide();
}
} );
}
} );
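// Illustrative sketch of the show/hide semantics above ("#box" is
// hypothetical):
//
//   jQuery( "#box" ).hide();   // sets display:none, remembering any old
//                              // inline display value in dataPriv
//   jQuery( "#box" ).show();   // restores the remembered value, or looks
//                              // up the tag's default display (cached in
//                              // defaultDisplayMap) when none was stored
//   jQuery( "#box" ).toggle(); // chooses via isHiddenWithinTree( this )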
var rcheckableType = ( /^(?:checkbox|radio)$/i );
var rtagName = ( /<([a-z][^\/\0>\x20\t\r\n\f]*)/i );
var rscriptType = ( /^$|^module$|\/(?:java|ecma)script/i );
( function() {
var fragment = document.createDocumentFragment(),
div = fragment.appendChild( document.createElement( "div" ) ),
input = document.createElement( "input" );
// Support: Android 4.0 - 4.3 only
// Checked state is lost if the name is set (#11217)
// Support: Windows Web Apps (WWA)
// `name` and `type` must use .setAttribute for WWA (#14901)
input.setAttribute( "type", "radio" );
input.setAttribute( "checked", "checked" );
input.setAttribute( "name", "t" );
div.appendChild( input );
// Support: Android <=4.1 only
// Older WebKit doesn't clone checked state correctly in fragments
support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked;
// Support: IE <=11 only
// Make sure textarea (and checkbox) defaultValue is properly cloned
div.innerHTML = "<textarea>x</textarea>";
support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue;
// Support: IE <=9 only
// IE <=9 replaces <option> tags with their contents when inserted outside of
// the select element.
div.innerHTML = "<option></option>";
support.option = !!div.lastChild;
} )();
// We have to close these tags to support XHTML (#13200)
var wrapMap = {
// XHTML parsers do not magically insert elements in the
// same way that tag soup parsers do. So we cannot shorten
// this by omitting <tbody> or other required elements.
thead: [ 1, "<table>", "</table>" ],
col: [ 2, "<table><colgroup>", "</colgroup></table>" ],
tr: [ 2, "<table><tbody>", "</tbody></table>" ],
td: [ 3, "<table><tbody><tr>", "</tr></tbody></table>" ],
_default: [ 0, "", "" ]
};
wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead;
wrapMap.th = wrapMap.td;
// Support: IE <=9 only
if ( !support.option ) {
wrapMap.optgroup = wrapMap.option = [ 1, "<select multiple='multiple'>", "</select>" ];
}
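// Illustrative sketch of why wrapMap is needed: a fragment such as "<tr>"
// cannot be parsed through a <div>'s innerHTML, so buildFragment (below)
// first wraps it in the required table context:
//
//   jQuery( "<tr><td>cell</td></tr>" ); // internally parsed as
//   // "<table><tbody><tr><td>cell</td></tr></tbody></table>" and then
//   // unwrapped by descending wrap[ 0 ] (here 2) levels of lastChild.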
function getAll( context, tag ) {
// Support: IE <=9 - 11 only
// Use typeof to avoid zero-argument method invocation on host objects (#15151)
var ret;
if ( typeof context.getElementsByTagName !== "undefined" ) {
ret = context.getElementsByTagName( tag || "*" );
} else if ( typeof context.querySelectorAll !== "undefined" ) {
ret = context.querySelectorAll( tag || "*" );
} else {
ret = [];
}
if ( tag === undefined || tag && nodeName( context, tag ) ) {
return jQuery.merge( [ context ], ret );
}
return ret;
}
// Mark scripts as having already been evaluated
function setGlobalEval( elems, refElements ) {
var i = 0,
l = elems.length;
for ( ; i < l; i++ ) {
dataPriv.set(
elems[ i ],
"globalEval",
!refElements || dataPriv.get( refElements[ i ], "globalEval" )
);
}
}
var rhtml = /<|&#?\w+;/;
function buildFragment( elems, context, scripts, selection, ignored ) {
var elem, tmp, tag, wrap, attached, j,
fragment = context.createDocumentFragment(),
nodes = [],
i = 0,
l = elems.length;
for ( ; i < l; i++ ) {
elem = elems[ i ];
if ( elem || elem === 0 ) {
// Add nodes directly
if ( toType( elem ) === "object" ) {
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem );
// Convert non-html into a text node
} else if ( !rhtml.test( elem ) ) {
nodes.push( context.createTextNode( elem ) );
// Convert html into DOM nodes
} else {
tmp = tmp || fragment.appendChild( context.createElement( "div" ) );
// Deserialize a standard representation
tag = ( rtagName.exec( elem ) || [ "", "" ] )[ 1 ].toLowerCase();
wrap = wrapMap[ tag ] || wrapMap._default;
tmp.innerHTML = wrap[ 1 ] + jQuery.htmlPrefilter( elem ) + wrap[ 2 ];
// Descend through wrappers to the right content
j = wrap[ 0 ];
while ( j-- ) {
tmp = tmp.lastChild;
}
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
jQuery.merge( nodes, tmp.childNodes );
// Remember the top-level container
tmp = fragment.firstChild;
// Ensure the created nodes are orphaned (#12392)
tmp.textContent = "";
}
}
}
// Remove wrapper from fragment
fragment.textContent = "";
i = 0;
while ( ( elem = nodes[ i++ ] ) ) {
// Skip elements already in the context collection (trac-4087)
if ( selection && jQuery.inArray( elem, selection ) > -1 ) {
if ( ignored ) {
ignored.push( elem );
}
continue;
}
attached = isAttached( elem );
// Append to fragment
tmp = getAll( fragment.appendChild( elem ), "script" );
// Preserve script evaluation history
if ( attached ) {
setGlobalEval( tmp );
}
// Capture executables
if ( scripts ) {
j = 0;
while ( ( elem = tmp[ j++ ] ) ) {
if ( rscriptType.test( elem.type || "" ) ) {
scripts.push( elem );
}
}
}
}
return fragment;
}
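// Illustrative sketch of buildFragment() inputs and outputs; the markup
// is hypothetical:
//
//   var frag = buildFragment(
//       [ "<p>hi</p>", document.createElement( "span" ), "plain text" ],
//       document
//   );
//   // frag is a DocumentFragment holding a <p>, a <span>, and a text
//   // node ("plain text" fails the rhtml test); any <script> elements
//   // found would be collected into a `scripts` array if one were passed.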
var
rkeyEvent = /^key/,
rmouseEvent = /^(?:mouse|pointer|contextmenu|drag|drop)|click/,
rtypenamespace = /^([^.]*)(?:\.(.+)|)/;
function returnTrue() {
return true;
}
function returnFalse() {
return false;
}
// Support: IE <=9 - 11+
// focus() and blur() are asynchronous, except when they are no-op.
// So expect focus to be synchronous when the element is already active,
// and blur to be synchronous when the element is not already active.
// (focus and blur are always synchronous in other supported browsers,
// this just defines when we can count on it).
function expectSync( elem, type ) {
return ( elem === safeActiveElement() ) === ( type === "focus" );
}
// Support: IE <=9 only
// Accessing document.activeElement can throw unexpectedly
// https://bugs.jquery.com/ticket/13393
function safeActiveElement() {
try {
return document.activeElement;
} catch ( err ) { }
}
function on( elem, types, selector, data, fn, one ) {
var origFn, type;
// Types can be a map of types/handlers
if ( typeof types === "object" ) {
// ( types-Object, selector, data )
if ( typeof selector !== "string" ) {
// ( types-Object, data )
data = data || selector;
selector = undefined;
}
for ( type in types ) {
on( elem, type, selector, data, types[ type ], one );
}
return elem;
}
if ( data == null && fn == null ) {
// ( types, fn )
fn = selector;
data = selector = undefined;
} else if ( fn == null ) {
if ( typeof selector === "string" ) {
// ( types, selector, fn )
fn = data;
data = undefined;
} else {
// ( types, data, fn )
fn = data;
data = selector;
selector = undefined;
}
}
if ( fn === false ) {
fn = returnFalse;
} else if ( !fn ) {
return elem;
}
if ( one === 1 ) {
origFn = fn;
fn = function( event ) {
// Can use an empty set, since event contains the info
jQuery().off( event );
return origFn.apply( this, arguments );
};
// Use same guid so caller can remove using origFn
fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
}
return elem.each( function() {
jQuery.event.add( this, types, fn, data, selector );
} );
}
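// Illustrative sketch of the argument normalization performed by on()
// above; "#list", ".item", fn and fn2 are hypothetical:
//
//   jQuery( "#list" ).on( "click", fn );                     // ( types, fn )
//   jQuery( "#list" ).on( "click", ".item", fn );            // delegated
//   jQuery( "#list" ).on( "click", { n: 1 }, fn );           // event.data
//   jQuery( "#list" ).on( "click", ".item", { n: 1 }, fn );  // full form
//   jQuery( "#list" ).on( { click: fn, focusin: fn2 } );     // types-object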
/*
* Helper functions for managing events -- not part of the public interface.
* Props to Dean Edwards' addEvent library for many of the ideas.
*/
jQuery.event = {
global: {},
add: function( elem, types, handler, data, selector ) {
var handleObjIn, eventHandle, tmp,
events, t, handleObj,
special, handlers, type, namespaces, origType,
elemData = dataPriv.get( elem );
// Only attach events to objects that accept data
if ( !acceptData( elem ) ) {
return;
}
// Caller can pass in an object of custom data in lieu of the handler
if ( handler.handler ) {
handleObjIn = handler;
handler = handleObjIn.handler;
selector = handleObjIn.selector;
}
// Ensure that invalid selectors throw exceptions at attach time
// Evaluate against documentElement in case elem is a non-element node (e.g., document)
if ( selector ) {
jQuery.find.matchesSelector( documentElement, selector );
}
// Make sure that the handler has a unique ID, used to find/remove it later
if ( !handler.guid ) {
handler.guid = jQuery.guid++;
}
// Init the element's event structure and main handler, if this is the first
if ( !( events = elemData.events ) ) {
events = elemData.events = Object.create( null );
}
if ( !( eventHandle = elemData.handle ) ) {
eventHandle = elemData.handle = function( e ) {
// Discard the second event of a jQuery.event.trigger() and
// when an event is called after a page has unloaded
return typeof jQuery !== "undefined" && jQuery.event.triggered !== e.type ?
jQuery.event.dispatch.apply( elem, arguments ) : undefined;
};
}
// Handle multiple events separated by a space
types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
t = types.length;
while ( t-- ) {
tmp = rtypenamespace.exec( types[ t ] ) || [];
type = origType = tmp[ 1 ];
namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
// There *must* be a type, no attaching namespace-only handlers
if ( !type ) {
continue;
}
// If event changes its type, use the special event handlers for the changed type
special = jQuery.event.special[ type ] || {};
// If selector defined, determine special event api type, otherwise given type
type = ( selector ? special.delegateType : special.bindType ) || type;
// Update special based on newly reset type
special = jQuery.event.special[ type ] || {};
// handleObj is passed to all event handlers
handleObj = jQuery.extend( {
type: type,
origType: origType,
data: data,
handler: handler,
guid: handler.guid,
selector: selector,
needsContext: selector && jQuery.expr.match.needsContext.test( selector ),
namespace: namespaces.join( "." )
}, handleObjIn );
// Init the event handler queue if we're the first
if ( !( handlers = events[ type ] ) ) {
handlers = events[ type ] = [];
handlers.delegateCount = 0;
// Only use addEventListener if the special events handler returns false
if ( !special.setup ||
special.setup.call( elem, data, namespaces, eventHandle ) === false ) {
if ( elem.addEventListener ) {
elem.addEventListener( type, eventHandle );
}
}
}
if ( special.add ) {
special.add.call( elem, handleObj );
if ( !handleObj.handler.guid ) {
handleObj.handler.guid = handler.guid;
}
}
// Add to the element's handler list, delegates in front
if ( selector ) {
handlers.splice( handlers.delegateCount++, 0, handleObj );
} else {
handlers.push( handleObj );
}
// Keep track of which events have ever been used, for event optimization
jQuery.event.global[ type ] = true;
}
},
// Detach an event or set of events from an element
remove: function( elem, types, handler, selector, mappedTypes ) {
var j, origCount, tmp,
events, t, handleObj,
special, handlers, type, namespaces, origType,
elemData = dataPriv.hasData( elem ) && dataPriv.get( elem );
if ( !elemData || !( events = elemData.events ) ) {
return;
}
// Once for each type.namespace in types; type may be omitted
types = ( types || "" ).match( rnothtmlwhite ) || [ "" ];
t = types.length;
while ( t-- ) {
tmp = rtypenamespace.exec( types[ t ] ) || [];
type = origType = tmp[ 1 ];
namespaces = ( tmp[ 2 ] || "" ).split( "." ).sort();
// Unbind all events (on this namespace, if provided) for the element
if ( !type ) {
for ( type in events ) {
jQuery.event.remove( elem, type + types[ t ], handler, selector, true );
}
continue;
}
special = jQuery.event.special[ type ] || {};
type = ( selector ? special.delegateType : special.bindType ) || type;
handlers = events[ type ] || [];
tmp = tmp[ 2 ] &&
new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" );
// Remove matching events
origCount = j = handlers.length;
while ( j-- ) {
handleObj = handlers[ j ];
if ( ( mappedTypes || origType === handleObj.origType ) &&
( !handler || handler.guid === handleObj.guid ) &&
( !tmp || tmp.test( handleObj.namespace ) ) &&
( !selector || selector === handleObj.selector ||
selector === "**" && handleObj.selector ) ) {
handlers.splice( j, 1 );
if ( handleObj.selector ) {
handlers.delegateCount--;
}
if ( special.remove ) {
special.remove.call( elem, handleObj );
}
}
}
// Remove generic event handler if we removed something and no more handlers exist
// (avoids potential for endless recursion during removal of special event handlers)
if ( origCount && !handlers.length ) {
if ( !special.teardown ||
special.teardown.call( elem, namespaces, elemData.handle ) === false ) {
jQuery.removeEvent( elem, type, elemData.handle );
}
delete events[ type ];
}
}
// Remove data and the expando if it's no longer used
if ( jQuery.isEmptyObject( events ) ) {
dataPriv.remove( elem, "handle events" );
}
},
dispatch: function( nativeEvent ) {
var i, j, ret, matched, handleObj, handlerQueue,
args = new Array( arguments.length ),
// Make a writable jQuery.Event from the native event object
event = jQuery.event.fix( nativeEvent ),
handlers = (
dataPriv.get( this, "events" ) || Object.create( null )
)[ event.type ] || [],
special = jQuery.event.special[ event.type ] || {};
// Use the fix-ed jQuery.Event rather than the (read-only) native event
args[ 0 ] = event;
for ( i = 1; i < arguments.length; i++ ) {
args[ i ] = arguments[ i ];
}
event.delegateTarget = this;
// Call the preDispatch hook for the mapped type, and let it bail if desired
if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) {
return;
}
// Determine handlers
handlerQueue = jQuery.event.handlers.call( this, event, handlers );
// Run delegates first; they may want to stop propagation beneath us
i = 0;
while ( ( matched = handlerQueue[ i++ ] ) && !event.isPropagationStopped() ) {
event.currentTarget = matched.elem;
j = 0;
while ( ( handleObj = matched.handlers[ j++ ] ) &&
!event.isImmediatePropagationStopped() ) {
// If the event is namespaced, then each handler is only invoked if it is
// specially universal or its namespaces are a superset of the event's.
if ( !event.rnamespace || handleObj.namespace === false ||
event.rnamespace.test( handleObj.namespace ) ) {
event.handleObj = handleObj;
event.data = handleObj.data;
ret = ( ( jQuery.event.special[ handleObj.origType ] || {} ).handle ||
handleObj.handler ).apply( matched.elem, args );
if ( ret !== undefined ) {
if ( ( event.result = ret ) === false ) {
event.preventDefault();
event.stopPropagation();
}
}
}
}
}
// Call the postDispatch hook for the mapped type
if ( special.postDispatch ) {
special.postDispatch.call( this, event );
}
return event.result;
},
handlers: function( event, handlers ) {
var i, handleObj, sel, matchedHandlers, matchedSelectors,
handlerQueue = [],
delegateCount = handlers.delegateCount,
cur = event.target;
// Find delegate handlers
if ( delegateCount &&
// Support: IE <=9
// Black-hole SVG <use> instance trees (trac-13180)
cur.nodeType &&
// Support: Firefox <=42
// Suppress spec-violating clicks indicating a non-primary pointer button (trac-3861)
// https://www.w3.org/TR/DOM-Level-3-Events/#event-type-click
// Support: IE 11 only
// ...but not arrow key "clicks" of radio inputs, which can have `button` -1 (gh-2343)
!( event.type === "click" && event.button >= 1 ) ) {
for ( ; cur !== this; cur = cur.parentNode || this ) {
// Don't check non-elements (#13208)
// Don't process clicks on disabled elements (#6911, #8165, #11382, #11764)
if ( cur.nodeType === 1 && !( event.type === "click" && cur.disabled === true ) ) {
matchedHandlers = [];
matchedSelectors = {};
for ( i = 0; i < delegateCount; i++ ) {
handleObj = handlers[ i ];
// Don't conflict with Object.prototype properties (#13203)
sel = handleObj.selector + " ";
if ( matchedSelectors[ sel ] === undefined ) {
matchedSelectors[ sel ] = handleObj.needsContext ?
jQuery( sel, this ).index( cur ) > -1 :
jQuery.find( sel, this, null, [ cur ] ).length;
}
if ( matchedSelectors[ sel ] ) {
matchedHandlers.push( handleObj );
}
}
if ( matchedHandlers.length ) {
handlerQueue.push( { elem: cur, handlers: matchedHandlers } );
}
}
}
}
// Add the remaining (directly-bound) handlers
cur = this;
if ( delegateCount < handlers.length ) {
handlerQueue.push( { elem: cur, handlers: handlers.slice( delegateCount ) } );
}
return handlerQueue;
},
addProp: function( name, hook ) {
Object.defineProperty( jQuery.Event.prototype, name, {
enumerable: true,
configurable: true,
get: isFunction( hook ) ?
function() {
if ( this.originalEvent ) {
return hook( this.originalEvent );
}
} :
function() {
if ( this.originalEvent ) {
return this.originalEvent[ name ];
}
},
set: function( value ) {
Object.defineProperty( this, name, {
enumerable: true,
configurable: true,
writable: true,
value: value
} );
}
} );
},
fix: function( originalEvent ) {
return originalEvent[ jQuery.expando ] ?
originalEvent :
new jQuery.Event( originalEvent );
},
special: {
load: {
// Prevent triggered image.load events from bubbling to window.load
noBubble: true
},
click: {
// Utilize native event to ensure correct state for checkable inputs
setup: function( data ) {
// For mutual compressibility with _default, replace `this` access with a local var.
// `|| data` is dead code meant only to preserve the variable through minification.
var el = this || data;
// Claim the first handler
if ( rcheckableType.test( el.type ) &&
el.click && nodeName( el, "input" ) ) {
// dataPriv.set( el, "click", ... )
leverageNative( el, "click", returnTrue );
}
// Return false to allow normal processing in the caller
return false;
},
trigger: function( data ) {
// For mutual compressibility with _default, replace `this` access with a local var.
// `|| data` is dead code meant only to preserve the variable through minification.
var el = this || data;
// Force setup before triggering a click
if ( rcheckableType.test( el.type ) &&
el.click && nodeName( el, "input" ) ) {
leverageNative( el, "click" );
}
// Return non-false to allow normal event-path propagation
return true;
},
// For cross-browser consistency, suppress native .click() on links
// Also prevent it if we're currently inside a leveraged native-event stack
_default: function( event ) {
var target = event.target;
return rcheckableType.test( target.type ) &&
target.click && nodeName( target, "input" ) &&
dataPriv.get( target, "click" ) ||
nodeName( target, "a" );
}
},
beforeunload: {
postDispatch: function( event ) {
// Support: Firefox 20+
// Firefox doesn't alert if the returnValue field is not set.
if ( event.result !== undefined && event.originalEvent ) {
event.originalEvent.returnValue = event.result;
}
}
}
}
};
// Ensure the presence of an event listener that handles manually-triggered
// synthetic events by interrupting progress until reinvoked in response to
// *native* events that it fires directly, ensuring that state changes have
// already occurred before other listeners are invoked.
function leverageNative( el, type, expectSync ) {
// Missing expectSync indicates a trigger call, which must force setup through jQuery.event.add
if ( !expectSync ) {
if ( dataPriv.get( el, type ) === undefined ) {
jQuery.event.add( el, type, returnTrue );
}
return;
}
// Register the controller as a special universal handler for all event namespaces
dataPriv.set( el, type, false );
jQuery.event.add( el, type, {
namespace: false,
handler: function( event ) {
var notAsync, result,
saved = dataPriv.get( this, type );
if ( ( event.isTrigger & 1 ) && this[ type ] ) {
// Interrupt processing of the outer synthetic .trigger()ed event
// Saved data should be false in such cases, but might be a leftover capture object
// from an async native handler (gh-4350)
if ( !saved.length ) {
// Store arguments for use when handling the inner native event
// There will always be at least one argument (an event object), so this array
// will not be confused with a leftover capture object.
saved = slice.call( arguments );
dataPriv.set( this, type, saved );
// Trigger the native event and capture its result
// Support: IE <=9 - 11+
// focus() and blur() are asynchronous
notAsync = expectSync( this, type );
this[ type ]();
result = dataPriv.get( this, type );
if ( saved !== result || notAsync ) {
dataPriv.set( this, type, false );
} else {
result = {};
}
if ( saved !== result ) {
// Cancel the outer synthetic event
event.stopImmediatePropagation();
event.preventDefault();
return result.value;
}
// If this is an inner synthetic event for an event with a bubbling surrogate
// (focus or blur), assume that the surrogate already propagated from triggering the
// native event and prevent that from happening again here.
// This technically gets the ordering wrong w.r.t. `.trigger()` (in which the
// bubbling surrogate propagates *after* the non-bubbling base), but that seems
// less bad than duplication.
} else if ( ( jQuery.event.special[ type ] || {} ).delegateType ) {
event.stopPropagation();
}
// If this is a native event triggered above, everything is now in order
// Fire an inner synthetic event with the original arguments
} else if ( saved.length ) {
// ...and capture the result
dataPriv.set( this, type, {
value: jQuery.event.trigger(
// Support: IE <=9 - 11+
// Extend with the prototype to reset the above stopImmediatePropagation()
jQuery.extend( saved[ 0 ], jQuery.Event.prototype ),
saved.slice( 1 ),
this
)
} );
// Abort handling of the native event
event.stopImmediatePropagation();
}
}
} );
}
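// Illustrative consequence of leverageNative() for checkable inputs: a
// .trigger( "click" ) on a checkbox fires the *native* click first, so
// the checked state has already changed by the time jQuery handlers run
// ("#cb" is hypothetical):
//
//   jQuery( "#cb" ).on( "click", function() {
//       // this.checked already reflects the post-click state here, for
//       // both real user clicks and jQuery( "#cb" ).trigger( "click" )
//   } );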
jQuery.removeEvent = function( elem, type, handle ) {
// This "if" is needed for plain objects
if ( elem.removeEventListener ) {
elem.removeEventListener( type, handle );
}
};
jQuery.Event = function( src, props ) {
// Allow instantiation without the 'new' keyword
if ( !( this instanceof jQuery.Event ) ) {
return new jQuery.Event( src, props );
}
// Event object
if ( src && src.type ) {
this.originalEvent = src;
this.type = src.type;
// Events bubbling up the document may have been marked as prevented
// by a handler lower down the tree; reflect the correct value.
this.isDefaultPrevented = src.defaultPrevented ||
src.defaultPrevented === undefined &&
// Support: Android <=2.3 only
src.returnValue === false ?
returnTrue :
returnFalse;
// Create target properties
// Support: Safari <=6 - 7 only
// Target should not be a text node (#504, #13143)
this.target = ( src.target && src.target.nodeType === 3 ) ?
src.target.parentNode :
src.target;
this.currentTarget = src.currentTarget;
this.relatedTarget = src.relatedTarget;
// Event type
} else {
this.type = src;
}
// Put explicitly provided properties onto the event object
if ( props ) {
jQuery.extend( this, props );
}
// Create a timestamp if incoming event doesn't have one
this.timeStamp = src && src.timeStamp || Date.now();
// Mark it as fixed
this[ jQuery.expando ] = true;
};
// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding
// https://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html
jQuery.Event.prototype = {
constructor: jQuery.Event,
isDefaultPrevented: returnFalse,
isPropagationStopped: returnFalse,
isImmediatePropagationStopped: returnFalse,
isSimulated: false,
preventDefault: function() {
var e = this.originalEvent;
this.isDefaultPrevented = returnTrue;
if ( e && !this.isSimulated ) {
e.preventDefault();
}
},
stopPropagation: function() {
var e = this.originalEvent;
this.isPropagationStopped = returnTrue;
if ( e && !this.isSimulated ) {
e.stopPropagation();
}
},
stopImmediatePropagation: function() {
var e = this.originalEvent;
this.isImmediatePropagationStopped = returnTrue;
if ( e && !this.isSimulated ) {
e.stopImmediatePropagation();
}
this.stopPropagation();
}
};
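// Illustrative sketch of constructing and dispatching a jQuery.Event; the
// keyCode value is arbitrary:
//
//   var e = jQuery.Event( "keydown", { keyCode: 13 } ); // `new` optional
//   jQuery( document ).trigger( e );
//   if ( e.isDefaultPrevented() ) {
//       // some handler called e.preventDefault()
//   }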
// Includes all common event props including KeyEvent and MouseEvent specific props
jQuery.each( {
altKey: true,
bubbles: true,
cancelable: true,
changedTouches: true,
ctrlKey: true,
detail: true,
eventPhase: true,
metaKey: true,
pageX: true,
pageY: true,
shiftKey: true,
view: true,
"char": true,
code: true,
charCode: true,
key: true,
keyCode: true,
button: true,
buttons: true,
clientX: true,
clientY: true,
offsetX: true,
offsetY: true,
pointerId: true,
pointerType: true,
screenX: true,
screenY: true,
targetTouches: true,
toElement: true,
touches: true,
which: function( event ) {
var button = event.button;
// Add which for key events
if ( event.which == null && rkeyEvent.test( event.type ) ) {
return event.charCode != null ? event.charCode : event.keyCode;
}
// Add which for click: 1 === left; 2 === middle; 3 === right
if ( !event.which && button !== undefined && rmouseEvent.test( event.type ) ) {
if ( button & 1 ) {
return 1;
}
if ( button & 2 ) {
return 3;
}
if ( button & 4 ) {
return 2;
}
return 0;
}
return event.which;
}
}, jQuery.event.addProp );
jQuery.each( { focus: "focusin", blur: "focusout" }, function( type, delegateType ) {
jQuery.event.special[ type ] = {
// Utilize native event if possible so blur/focus sequence is correct
setup: function() {
// Claim the first handler
// dataPriv.set( this, "focus", ... )
// dataPriv.set( this, "blur", ... )
leverageNative( this, type, expectSync );
// Return false to allow normal processing in the caller
return false;
},
trigger: function() {
// Force setup before trigger
leverageNative( this, type );
// Return non-false to allow normal event-path propagation
return true;
},
delegateType: delegateType
};
} );
// Create mouseenter/leave events using mouseover/out and event-time checks
// so that event delegation works in jQuery.
// Do the same for pointerenter/pointerleave and pointerover/pointerout
//
// Support: Safari 7 only
// Safari sends mouseenter too often; see:
// https://bugs.chromium.org/p/chromium/issues/detail?id=470258
// for the description of the bug (it existed in older Chrome versions as well).
jQuery.each( {
mouseenter: "mouseover",
mouseleave: "mouseout",
pointerenter: "pointerover",
pointerleave: "pointerout"
}, function( orig, fix ) {
jQuery.event.special[ orig ] = {
delegateType: fix,
bindType: fix,
handle: function( event ) {
var ret,
target = this,
related = event.relatedTarget,
handleObj = event.handleObj;
// For mouseenter/leave call the handler if related is outside the target.
// NB: No relatedTarget if the mouse left/entered the browser window
if ( !related || ( related !== target && !jQuery.contains( target, related ) ) ) {
event.type = handleObj.origType;
ret = handleObj.handler.apply( this, arguments );
event.type = fix;
}
return ret;
}
};
} );
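// Illustrative sketch of the mapping above: delegated mouseenter works
// because the handler is actually bound to "mouseover" and filtered via
// relatedTarget ("#menu" and "li" are hypothetical):
//
//   jQuery( "#menu" ).on( "mouseenter", "li", function( event ) {
//       // event.type reads "mouseenter" here (handleObj.origType), even
//       // though the underlying native event was a mouseover
//   } );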
jQuery.fn.extend( {
on: function( types, selector, data, fn ) {
return on( this, types, selector, data, fn );
},
one: function( types, selector, data, fn ) {
return on( this, types, selector, data, fn, 1 );
},
off: function( types, selector, fn ) {
var handleObj, type;
if ( types && types.preventDefault && types.handleObj ) {
// ( event ) dispatched jQuery.Event
handleObj = types.handleObj;
jQuery( types.delegateTarget ).off(
handleObj.namespace ?
handleObj.origType + "." + handleObj.namespace :
handleObj.origType,
handleObj.selector,
handleObj.handler
);
return this;
}
if ( typeof types === "object" ) {
// ( types-object [, selector] )
for ( type in types ) {
this.off( type, selector, types[ type ] );
}
return this;
}
if ( selector === false || typeof selector === "function" ) {
// ( types [, fn] )
fn = selector;
selector = undefined;
}
if ( fn === false ) {
fn = returnFalse;
}
return this.each( function() {
jQuery.event.remove( this, types, fn, selector );
} );
}
} );
var
// Support: IE <=10 - 11, Edge 12 - 13 only
// In IE/Edge using regex groups here causes severe slowdowns.
// See https://connect.microsoft.com/IE/feedback/details/1736512/
rnoInnerhtml = /<script|<style|<link/i,
// checked="checked" or checked
rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g;
// Prefer a tbody over its parent table for containing new rows
function manipulationTarget( elem, content ) {
if ( nodeName( elem, "table" ) &&
nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ) {
return jQuery( elem ).children( "tbody" )[ 0 ] || elem;
}
return elem;
}
// Replace/restore the type attribute of script elements for safe DOM manipulation
function disableScript( elem ) {
elem.type = ( elem.getAttribute( "type" ) !== null ) + "/" + elem.type;
return elem;
}
function restoreScript( elem ) {
if ( ( elem.type || "" ).slice( 0, 5 ) === "true/" ) {
elem.type = elem.type.slice( 5 );
} else {
elem.removeAttribute( "type" );
}
return elem;
}
function cloneCopyEvent( src, dest ) {
var i, l, type, pdataOld, udataOld, udataCur, events;
if ( dest.nodeType !== 1 ) {
return;
}
// 1. Copy private data: events, handlers, etc.
if ( dataPriv.hasData( src ) ) {
pdataOld = dataPriv.get( src );
events = pdataOld.events;
if ( events ) {
dataPriv.remove( dest, "handle events" );
for ( type in events ) {
for ( i = 0, l = events[ type ].length; i < l; i++ ) {
jQuery.event.add( dest, type, events[ type ][ i ] );
}
}
}
}
// 2. Copy user data
if ( dataUser.hasData( src ) ) {
udataOld = dataUser.access( src );
udataCur = jQuery.extend( {}, udataOld );
dataUser.set( dest, udataCur );
}
}
// Fix IE bugs, see support tests
function fixInput( src, dest ) {
var nodeName = dest.nodeName.toLowerCase();
// Fails to persist the checked state of a cloned checkbox or radio button.
if ( nodeName === "input" && rcheckableType.test( src.type ) ) {
dest.checked = src.checked;
// Fails to return the selected option to the default selected state when cloning options
} else if ( nodeName === "input" || nodeName === "textarea" ) {
dest.defaultValue = src.defaultValue;
}
}
function domManip( collection, args, callback, ignored ) {
// Flatten any nested arrays
args = flat( args );
var fragment, first, scripts, hasScripts, node, doc,
i = 0,
l = collection.length,
iNoClone = l - 1,
value = args[ 0 ],
valueIsFunction = isFunction( value );
// We can't cloneNode fragments that contain checked, in WebKit
if ( valueIsFunction ||
( l > 1 && typeof value === "string" &&
!support.checkClone && rchecked.test( value ) ) ) {
return collection.each( function( index ) {
var self = collection.eq( index );
if ( valueIsFunction ) {
args[ 0 ] = value.call( this, index, self.html() );
}
domManip( self, args, callback, ignored );
} );
}
if ( l ) {
fragment = buildFragment( args, collection[ 0 ].ownerDocument, false, collection, ignored );
first = fragment.firstChild;
if ( fragment.childNodes.length === 1 ) {
fragment = first;
}
// Require either new content or an interest in ignored elements to invoke the callback
if ( first || ignored ) {
scripts = jQuery.map( getAll( fragment, "script" ), disableScript );
hasScripts = scripts.length;
// Use the original fragment for the last item
// instead of the first because it can end up
// being emptied incorrectly in certain situations (#8070).
for ( ; i < l; i++ ) {
node = fragment;
if ( i !== iNoClone ) {
node = jQuery.clone( node, true, true );
// Keep references to cloned scripts for later restoration
if ( hasScripts ) {
// Support: Android <=4.0 only, PhantomJS 1 only
// push.apply(_, arraylike) throws on ancient WebKit
jQuery.merge( scripts, getAll( node, "script" ) );
}
}
callback.call( collection[ i ], node, i );
}
if ( hasScripts ) {
doc = scripts[ scripts.length - 1 ].ownerDocument;
// Reenable scripts
jQuery.map( scripts, restoreScript );
// Evaluate executable scripts on first document insertion
for ( i = 0; i < hasScripts; i++ ) {
node = scripts[ i ];
if ( rscriptType.test( node.type || "" ) &&
!dataPriv.access( node, "globalEval" ) &&
jQuery.contains( doc, node ) ) {
if ( node.src && ( node.type || "" ).toLowerCase() !== "module" ) {
// Optional AJAX dependency, but won't run scripts if not present
if ( jQuery._evalUrl && !node.noModule ) {
jQuery._evalUrl( node.src, {
nonce: node.nonce || node.getAttribute( "nonce" )
}, doc );
}
} else {
DOMEval( node.textContent.replace( rcleanScript, "" ), node, doc );
}
}
}
}
}
}
return collection;
}
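// Illustrative sketch of domManip() driving .append() with a function
// value ("#log" is hypothetical):
//
//   jQuery( "#log" ).append( function( index, oldHtml ) {
//       return "<p>entry " + index + "</p>"; // inserted per element
//   } );
//
// For multi-element sets the built fragment is cloned for every target
// except the last, which receives the original node (see iNoClone above).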
function remove( elem, selector, keepData ) {
var node,
nodes = selector ? jQuery.filter( selector, elem ) : elem,
i = 0;
for ( ; ( node = nodes[ i ] ) != null; i++ ) {
if ( !keepData && node.nodeType === 1 ) {
jQuery.cleanData( getAll( node ) );
}
if ( node.parentNode ) {
if ( keepData && isAttached( node ) ) {
setGlobalEval( getAll( node, "script" ) );
}
node.parentNode.removeChild( node );
}
}
return elem;
}
jQuery.extend( {
htmlPrefilter: function( html ) {
return html;
},
clone: function( elem, dataAndEvents, deepDataAndEvents ) {
var i, l, srcElements, destElements,
clone = elem.cloneNode( true ),
inPage = isAttached( elem );
// Fix IE cloning issues
if ( !support.noCloneChecked && ( elem.nodeType === 1 || elem.nodeType === 11 ) &&
!jQuery.isXMLDoc( elem ) ) {
// We eschew Sizzle here for performance reasons: https://jsperf.com/getall-vs-sizzle/2
destElements = getAll( clone );
srcElements = getAll( elem );
for ( i = 0, l = srcElements.length; i < l; i++ ) {
fixInput( srcElements[ i ], destElements[ i ] );
}
}
// Copy the events from the original to the clone
if ( dataAndEvents ) {
if ( deepDataAndEvents ) {
srcElements = srcElements || getAll( elem );
destElements = destElements || getAll( clone );
for ( i = 0, l = srcElements.length; i < l; i++ ) {
cloneCopyEvent( srcElements[ i ], destElements[ i ] );
}
} else {
cloneCopyEvent( elem, clone );
}
}
// Preserve script evaluation history
destElements = getAll( clone, "script" );
if ( destElements.length > 0 ) {
setGlobalEval( destElements, !inPage && getAll( elem, "script" ) );
}
// Return the cloned set
return clone;
},
cleanData: function( elems ) {
var data, elem, type,
special = jQuery.event.special,
i = 0;
for ( ; ( elem = elems[ i ] ) !== undefined; i++ ) {
if ( acceptData( elem ) ) {
if ( ( data = elem[ dataPriv.expando ] ) ) {
if ( data.events ) {
for ( type in data.events ) {
if ( special[ type ] ) {
jQuery.event.remove( elem, type );
// This is a shortcut to avoid jQuery.event.remove's overhead
} else {
jQuery.removeEvent( elem, type, data.handle );
}
}
}
// Support: Chrome <=35 - 45+
// Assign undefined instead of using delete, see Data#remove
elem[ dataPriv.expando ] = undefined;
}
if ( elem[ dataUser.expando ] ) {
// Support: Chrome <=35 - 45+
// Assign undefined instead of using delete, see Data#remove
elem[ dataUser.expando ] = undefined;
}
}
}
}
} );
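// Illustrative sketch of jQuery.clone() with data and events ("#src" is
// hypothetical):
//
//   var copy = jQuery.clone( jQuery( "#src" )[ 0 ], true, true );
//   // `true, true` copies dataUser entries and re-adds every handler via
//   // cloneCopyEvent() for the element and all descendants; fixInput()
//   // repairs IE's lost checked/defaultValue state on cloned form fields.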
jQuery.fn.extend( {
detach: function( selector ) {
return remove( this, selector, true );
},
remove: function( selector ) {
return remove( this, selector );
},
text: function( value ) {
return access( this, function( value ) {
return value === undefined ?
jQuery.text( this ) :
this.empty().each( function() {
if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
this.textContent = value;
}
} );
}, null, value, arguments.length );
},
append: function() {
return domManip( this, arguments, function( elem ) {
if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
var target = manipulationTarget( this, elem );
target.appendChild( elem );
}
} );
},
prepend: function() {
return domManip( this, arguments, function( elem ) {
if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) {
var target = manipulationTarget( this, elem );
target.insertBefore( elem, target.firstChild );
}
} );
},
before: function() {
return domManip( this, arguments, function( elem ) {
if ( this.parentNode ) {
this.parentNode.insertBefore( elem, this );
}
} );
},
after: function() {
return domManip( this, arguments, function( elem ) {
if ( this.parentNode ) {
this.parentNode.insertBefore( elem, this.nextSibling );
}
} );
},
empty: function() {
var elem,
i = 0;
for ( ; ( elem = this[ i ] ) != null; i++ ) {
if ( elem.nodeType === 1 ) {
// Prevent memory leaks
jQuery.cleanData( getAll( elem, false ) );
// Remove any remaining nodes
elem.textContent = "";
}
}
return this;
},
clone: function( dataAndEvents, deepDataAndEvents ) {
dataAndEvents = dataAndEvents == null ? false : dataAndEvents;
deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents;
return this.map( function() {
return jQuery.clone( this, dataAndEvents, deepDataAndEvents );
} );
},
html: function( value ) {
return access( this, function( value ) {
var elem = this[ 0 ] || {},
i = 0,
l = this.length;
if ( value === undefined && elem.nodeType === 1 ) {
return elem.innerHTML;
}
// See if we can take a shortcut and just use innerHTML
if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
!wrapMap[ ( rtagName.exec( value ) || [ "", "" ] )[ 1 ].toLowerCase() ] ) {
value = jQuery.htmlPrefilter( value );
try {
for ( ; i < l; i++ ) {
elem = this[ i ] || {};
// Remove element nodes and prevent memory leaks
if ( elem.nodeType === 1 ) {
jQuery.cleanData( getAll( elem, false ) );
elem.innerHTML = value;
}
}
elem = 0;
// If using innerHTML throws an exception, use the fallback method
} catch ( e ) {}
}
if ( elem ) {
this.empty().append( value );
}
}, null, value, arguments.length );
},
replaceWith: function() {
var ignored = [];
// Make the changes, replacing each non-ignored context element with the new content
return domManip( this, arguments, function( elem ) {
var parent = this.parentNode;
if ( jQuery.inArray( this, ignored ) < 0 ) {
jQuery.cleanData( getAll( this ) );
if ( parent ) {
parent.replaceChild( elem, this );
}
}
// Force callback invocation
}, ignored );
}
} );
jQuery.each( {
appendTo: "append",
prependTo: "prepend",
insertBefore: "before",
insertAfter: "after",
replaceAll: "replaceWith"
}, function( name, original ) {
jQuery.fn[ name ] = function( selector ) {
var elems,
ret = [],
insert = jQuery( selector ),
last = insert.length - 1,
i = 0;
for ( ; i <= last; i++ ) {
elems = i === last ? this : this.clone( true );
jQuery( insert[ i ] )[ original ]( elems );
// Support: Android <=4.0 only, PhantomJS 1 only
// .get() because push.apply(_, arraylike) throws on ancient WebKit
push.apply( ret, elems.get() );
}
return this.pushStack( ret );
};
} );
var rnumnonpx = new RegExp( "^(" + pnum + ")(?!px)[a-z%]+$", "i" );
var getStyles = function( elem ) {
// Support: IE <=11 only, Firefox <=30 (#15098, #14150)
// IE throws on elements created in popups
// FF meanwhile throws on frame elements through "defaultView.getComputedStyle"
var view = elem.ownerDocument.defaultView;
if ( !view || !view.opener ) {
view = window;
}
return view.getComputedStyle( elem );
};
var swap = function( elem, options, callback ) {
var ret, name,
old = {};
// Remember the old values, and insert the new ones
for ( name in options ) {
old[ name ] = elem.style[ name ];
elem.style[ name ] = options[ name ];
}
ret = callback.call( elem );
// Revert the old values
for ( name in options ) {
elem.style[ name ] = old[ name ];
}
return ret;
};
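// Illustrative sketch of swap(): temporarily apply styles, run a
// measurement callback, then restore. The cssShow map defined below uses
// exactly this to measure display:none elements:
//
//   var width = swap( elem, {
//       position: "absolute", visibility: "hidden", display: "block"
//   }, function() {
//       return elem.offsetWidth; // measured while briefly displayed
//   } );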
var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" );
( function() {
// Executing both pixelPosition & boxSizingReliable tests require only one layout
// so they're executed at the same time to save the second computation.
function computeStyleTests() {
// This is a singleton; we need to execute it only once
if ( !div ) {
return;
}
container.style.cssText = "position:absolute;left:-11111px;width:60px;" +
"margin-top:1px;padding:0;border:0";
div.style.cssText =
"position:relative;display:block;box-sizing:border-box;overflow:scroll;" +
"margin:auto;border:1px;padding:1px;" +
"width:60%;top:1%";
documentElement.appendChild( container ).appendChild( div );
var divStyle = window.getComputedStyle( div );
pixelPositionVal = divStyle.top !== "1%";
// Support: Android 4.0 - 4.3 only, Firefox <=3 - 44
reliableMarginLeftVal = roundPixelMeasures( divStyle.marginLeft ) === 12;
// Support: Android 4.0 - 4.3 only, Safari <=9.1 - 10.1, iOS <=7.0 - 9.3
// Some styles come back with percentage values, even though they shouldn't
div.style.right = "60%";
pixelBoxStylesVal = roundPixelMeasures( divStyle.right ) === 36;
// Support: IE 9 - 11 only
// Detect misreporting of content dimensions for box-sizing:border-box elements
boxSizingReliableVal = roundPixelMeasures( divStyle.width ) === 36;
// Support: IE 9 only
// Detect overflow:scroll screwiness (gh-3699)
// Support: Chrome <=64
// Don't get tricked when zoom affects offsetWidth (gh-4029)
div.style.position = "absolute";
scrollboxSizeVal = roundPixelMeasures( div.offsetWidth / 3 ) === 12;
documentElement.removeChild( container );
// Nullify the div so it won't be retained in memory; a null div is
// also a sign that the checks have already been performed
div = null;
}
function roundPixelMeasures( measure ) {
return Math.round( parseFloat( measure ) );
}
var pixelPositionVal, boxSizingReliableVal, scrollboxSizeVal, pixelBoxStylesVal,
reliableTrDimensionsVal, reliableMarginLeftVal,
container = document.createElement( "div" ),
div = document.createElement( "div" );
// Finish early in limited (non-browser) environments
if ( !div.style ) {
return;
}
// Support: IE <=9 - 11 only
// Style of cloned element affects source element cloned (#8908)
div.style.backgroundClip = "content-box";
div.cloneNode( true ).style.backgroundClip = "";
support.clearCloneStyle = div.style.backgroundClip === "content-box";
jQuery.extend( support, {
boxSizingReliable: function() {
computeStyleTests();
return boxSizingReliableVal;
},
pixelBoxStyles: function() {
computeStyleTests();
return pixelBoxStylesVal;
},
pixelPosition: function() {
computeStyleTests();
return pixelPositionVal;
},
reliableMarginLeft: function() {
computeStyleTests();
return reliableMarginLeftVal;
},
scrollboxSize: function() {
computeStyleTests();
return scrollboxSizeVal;
},
// Support: IE 9 - 11+, Edge 15 - 18+
// IE/Edge misreport `getComputedStyle` of table rows with width/height
// set in CSS while `offset*` properties report correct values.
// Behavior in IE 9 is more subtle than in newer versions & it passes
// some versions of this test; make sure not to make it pass there!
reliableTrDimensions: function() {
var table, tr, trChild, trStyle;
if ( reliableTrDimensionsVal == null ) {
table = document.createElement( "table" );
tr = document.createElement( "tr" );
trChild = document.createElement( "div" );
table.style.cssText = "position:absolute;left:-11111px";
tr.style.height = "1px";
trChild.style.height = "9px";
documentElement
.appendChild( table )
.appendChild( tr )
.appendChild( trChild );
trStyle = window.getComputedStyle( tr );
reliableTrDimensionsVal = parseInt( trStyle.height ) > 3;
documentElement.removeChild( table );
}
return reliableTrDimensionsVal;
}
} );
} )();
function curCSS( elem, name, computed ) {
var width, minWidth, maxWidth, ret,
// Support: Firefox 51+
// Retrieving style before computed somehow
// fixes an issue with getting wrong values
// on detached elements
style = elem.style;
computed = computed || getStyles( elem );
// getPropertyValue is needed for:
// .css('filter') (IE 9 only, #12537)
// .css('--customProperty') (#3144)
if ( computed ) {
ret = computed.getPropertyValue( name ) || computed[ name ];
if ( ret === "" && !isAttached( elem ) ) {
ret = jQuery.style( elem, name );
}
// A tribute to the "awesome hack by Dean Edwards"
// Android Browser returns percentage for some values,
// but width seems to be reliably pixels.
// This is against the CSSOM draft spec:
// https://drafts.csswg.org/cssom/#resolved-values
if ( !support.pixelBoxStyles() && rnumnonpx.test( ret ) && rboxStyle.test( name ) ) {
// Remember the original values
width = style.width;
minWidth = style.minWidth;
maxWidth = style.maxWidth;
// Put in the new values to get a computed value out
style.minWidth = style.maxWidth = style.width = ret;
ret = computed.width;
// Revert the changed values
style.width = width;
style.minWidth = minWidth;
style.maxWidth = maxWidth;
}
}
return ret !== undefined ?
// Support: IE <=9 - 11 only
// IE returns zIndex value as an integer.
ret + "" :
ret;
}
function addGetHookIf( conditionFn, hookFn ) {
// Define the hook, we'll check on the first run if it's really needed.
return {
get: function() {
if ( conditionFn() ) {
// Hook not needed (or it's not possible to use it due
// to missing dependency), remove it.
delete this.get;
return;
}
// Hook needed; redefine it so that the support test is not executed again.
return ( this.get = hookFn ).apply( this, arguments );
}
};
}
var cssPrefixes = [ "Webkit", "Moz", "ms" ],
emptyStyle = document.createElement( "div" ).style,
vendorProps = {};
// Return a vendor-prefixed property or undefined
function vendorPropName( name ) {
// Check for vendor prefixed names
var capName = name[ 0 ].toUpperCase() + name.slice( 1 ),
i = cssPrefixes.length;
while ( i-- ) {
name = cssPrefixes[ i ] + capName;
if ( name in emptyStyle ) {
return name;
}
}
}
// Return a potentially-mapped jQuery.cssProps or vendor prefixed property
function finalPropName( name ) {
var final = jQuery.cssProps[ name ] || vendorProps[ name ];
if ( final ) {
return final;
}
if ( name in emptyStyle ) {
return name;
}
return vendorProps[ name ] = vendorPropName( name ) || name;
}
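// Illustrative resolution order in finalPropName(): an explicit
// jQuery.cssProps entry wins, then a memoized vendor match, then the
// unprefixed name, then a prefixed probe. In an engine exposing only
// WebkitUserSelect (hypothetical):
//
//   finalPropName( "userSelect" ); // "userSelect" misses emptyStyle, so
//   // vendorPropName tries "msUserSelect", "MozUserSelect" and then
//   // "WebkitUserSelect", returning (and memoizing) the first hit.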
var
// Swappable if display is none or starts with table
// except "table", "table-cell", or "table-caption"
// See here for display values: https://developer.mozilla.org/en-US/docs/CSS/display
rdisplayswap = /^(none|table(?!-c[ea]).+)/,
rcustomProp = /^--/,
cssShow = { position: "absolute", visibility: "hidden", display: "block" },
cssNormalTransform = {
letterSpacing: "0",
fontWeight: "400"
};
function setPositiveNumber( _elem, value, subtract ) {
// Any relative (+/-) values have already been
// normalized at this point
var matches = rcssNum.exec( value );
return matches ?
// Guard against undefined "subtract", e.g., when used as a cssHooks setter
Math.max( 0, matches[ 2 ] - ( subtract || 0 ) ) + ( matches[ 3 ] || "px" ) :
value;
}
function boxModelAdjustment( elem, dimension, box, isBorderBox, styles, computedVal ) {
var i = dimension === "width" ? 1 : 0,
extra = 0,
delta = 0;
// Adjustment may not be necessary
if ( box === ( isBorderBox ? "border" : "content" ) ) {
return 0;
}
for ( ; i < 4; i += 2 ) {
// Both box models exclude margin
if ( box === "margin" ) {
delta += jQuery.css( elem, box + cssExpand[ i ], true, styles );
}
// If we get here with a content-box, we're seeking "padding" or "border" or "margin"
if ( !isBorderBox ) {
// Add padding
delta += jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
// For "border" or "margin", add border
if ( box !== "padding" ) {
delta += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
// But still keep track of it otherwise
} else {
extra += jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
}
// If we get here with a border-box (content + padding + border), we're seeking "content" or
// "padding" or "margin"
} else {
// For "content", subtract padding
if ( box === "content" ) {
delta -= jQuery.css( elem, "padding" + cssExpand[ i ], true, styles );
}
// For "content" or "padding", subtract border
if ( box !== "margin" ) {
delta -= jQuery.css( elem, "border" + cssExpand[ i ] + "Width", true, styles );
}
}
}
// Account for positive content-box scroll gutter when requested by providing computedVal
if ( !isBorderBox && computedVal >= 0 ) {
// offsetWidth/offsetHeight is a rounded sum of content, padding, scroll gutter, and border
// Assuming integer scroll gutter, subtract the rest and round down
delta += Math.max( 0, Math.ceil(
elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] -
computedVal -
delta -
extra -
0.5
// If offsetWidth/offsetHeight is unknown, then we can't determine content-box scroll gutter
// Use an explicit zero to avoid NaN (gh-3964)
) ) || 0;
}
return delta;
}
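// Worked example of the boxModelAdjustment() arithmetic, assuming a
// content-box element with 10px padding and 2px borders on each side:
//
//   boxModelAdjustment( elem, "width", "border", false, styles );
//   // => ( 10 + 2 ) + ( 10 + 2 ) = 24: padding plus border must be
//   // added to the content width to reach the border box.
//   boxModelAdjustment( elem, "width", "padding", false, styles );
//   // => 10 + 10 = 20 (borders are only accumulated in `extra` here).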
function getWidthOrHeight( elem, dimension, extra ) {
// Start with computed style
var styles = getStyles( elem ),
// To avoid forcing a reflow, only fetch boxSizing if we need it (gh-4322).
// Fake content-box until we know it's needed to know the true value.
boxSizingNeeded = !support.boxSizingReliable() || extra,
isBorderBox = boxSizingNeeded &&
jQuery.css( elem, "boxSizing", false, styles ) === "border-box",
valueIsBorderBox = isBorderBox,
val = curCSS( elem, dimension, styles ),
offsetProp = "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 );
// Support: Firefox <=54
// Return a confounding non-pixel value or feign ignorance, as appropriate.
if ( rnumnonpx.test( val ) ) {
if ( !extra ) {
return val;
}
val = "auto";
}
// Support: IE 9 - 11 only
// Use offsetWidth/offsetHeight for when box sizing is unreliable.
// In those cases, the computed value can be trusted to be border-box.
if ( ( !support.boxSizingReliable() && isBorderBox ||
// Support: IE 10 - 11+, Edge 15 - 18+
// IE/Edge misreport `getComputedStyle` of table rows with width/height
// set in CSS while `offset*` properties report correct values.
// Interestingly, in some cases IE 9 doesn't suffer from this issue.
!support.reliableTrDimensions() && nodeName( elem, "tr" ) ||
// Fall back to offsetWidth/offsetHeight when value is "auto"
// This happens for inline elements with no explicit setting (gh-3571)
val === "auto" ||
// Support: Android <=4.1 - 4.3 only
// Also use offsetWidth/offsetHeight for misreported inline dimensions (gh-3602)
!parseFloat( val ) && jQuery.css( elem, "display", false, styles ) === "inline" ) &&
// Make sure the element is visible & connected
elem.getClientRects().length ) {
isBorderBox = jQuery.css( elem, "boxSizing", false, styles ) === "border-box";
// Where available, offsetWidth/offsetHeight approximate border box dimensions.
// Where not available (e.g., SVG), assume unreliable box-sizing and interpret the
// retrieved value as a content box dimension.
valueIsBorderBox = offsetProp in elem;
if ( valueIsBorderBox ) {
val = elem[ offsetProp ];
}
}
// Normalize "" and auto
val = parseFloat( val ) || 0;
// Adjust for the element's box model
return ( val +
boxModelAdjustment(
elem,
dimension,
extra || ( isBorderBox ? "border" : "content" ),
valueIsBorderBox,
styles,
// Provide the current computed size to request scroll gutter calculation (gh-3589)
val
)
) + "px";
}
jQuery.extend( {
// Add in style property hooks for overriding the default
// behavior of getting and setting a style property
cssHooks: {
opacity: {
get: function( elem, computed ) {
if ( computed ) {
// We should always get a number back from opacity
var ret = curCSS( elem, "opacity" );
return ret === "" ? "1" : ret;
}
}
}
},
// Don't automatically add "px" to these possibly-unitless properties
cssNumber: {
"animationIterationCount": true,
"columnCount": true,
"fillOpacity": true,
"flexGrow": true,
"flexShrink": true,
"fontWeight": true,
"gridArea": true,
"gridColumn": true,
"gridColumnEnd": true,
"gridColumnStart": true,
"gridRow": true,
"gridRowEnd": true,
"gridRowStart": true,
"lineHeight": true,
"opacity": true,
"order": true,
"orphans": true,
"widows": true,
"zIndex": true,
"zoom": true
},
// Add in properties whose names you wish to fix before
// setting or getting the value
cssProps: {},
// Get and set the style property on a DOM Node
style: function( elem, name, value, extra ) {
// Don't set styles on text and comment nodes
if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) {
return;
}
// Make sure that we're working with the right name
var ret, type, hooks,
origName = camelCase( name ),
isCustomProp = rcustomProp.test( name ),
style = elem.style;
// Make sure that we're working with the right name. We don't
// want to query the value if it is a CSS custom property
// since they are user-defined.
if ( !isCustomProp ) {
name = finalPropName( origName );
}
// Gets hook for the prefixed version, then unprefixed version
hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
// Check if we're setting a value
if ( value !== undefined ) {
type = typeof value;
// Convert "+=" or "-=" to relative numbers (#7345)
if ( type === "string" && ( ret = rcssNum.exec( value ) ) && ret[ 1 ] ) {
value = adjustCSS( elem, name, ret );
// Fixes bug #9237
type = "number";
}
// Make sure that null and NaN values aren't set (#7116)
if ( value == null || value !== value ) {
return;
}
// If a number was passed in, add the unit (except for certain CSS properties)
// The isCustomProp check can be removed in jQuery 4.0 when we only auto-append
// "px" to a few hardcoded values.
if ( type === "number" && !isCustomProp ) {
value += ret && ret[ 3 ] || ( jQuery.cssNumber[ origName ] ? "" : "px" );
}
// background-* props affect original clone's values
if ( !support.clearCloneStyle && value === "" && name.indexOf( "background" ) === 0 ) {
style[ name ] = "inherit";
}
// If a hook was provided, use that value, otherwise just set the specified value
if ( !hooks || !( "set" in hooks ) ||
( value = hooks.set( elem, value, extra ) ) !== undefined ) {
if ( isCustomProp ) {
style.setProperty( name, value );
} else {
style[ name ] = value;
}
}
} else {
// If a hook was provided get the non-computed value from there
if ( hooks && "get" in hooks &&
( ret = hooks.get( elem, false, extra ) ) !== undefined ) {
return ret;
}
// Otherwise just get the value from the style object
return style[ name ];
}
},
css: function( elem, name, extra, styles ) {
var val, num, hooks,
origName = camelCase( name ),
isCustomProp = rcustomProp.test( name );
// Make sure that we're working with the right name. We don't
// want to modify the value if it is a CSS custom property
// since they are user-defined.
if ( !isCustomProp ) {
name = finalPropName( origName );
}
// Try prefixed name followed by the unprefixed name
hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ];
// If a hook was provided get the computed value from there
if ( hooks && "get" in hooks ) {
val = hooks.get( elem, true, extra );
}
// Otherwise, if a way to get the computed value exists, use that
if ( val === undefined ) {
val = curCSS( elem, name, styles );
}
// Convert "normal" to computed value
if ( val === "normal" && name in cssNormalTransform ) {
val = cssNormalTransform[ name ];
}
// Make numeric if forced or a qualifier was provided and val looks numeric
if ( extra === "" || extra ) {
num = parseFloat( val );
return extra === true || isFinite( num ) ? num || 0 : val;
}
return val;
}
} );
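// Usage sketch (illustrative): the `extra` argument of jQuery.css controls
// numeric coercion. With a computed width of "100px":
//   jQuery.css( elem, "width" )       // -> "100px" (string)
//   jQuery.css( elem, "width", "" )   // -> 100 (number, since it parses)
//   jQuery.css( elem, "width", true ) // -> 100 (forced numeric; 0 if NaN)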
jQuery.each( [ "height", "width" ], function( _i, dimension ) {
jQuery.cssHooks[ dimension ] = {
get: function( elem, computed, extra ) {
if ( computed ) {
// Certain elements can have dimension info if we invisibly show them,
// but only if their current display style would benefit from the swap
return rdisplayswap.test( jQuery.css( elem, "display" ) ) &&
// Support: Safari 8+
// Table columns in Safari have non-zero offsetWidth & zero
// getBoundingClientRect().width unless display is changed.
// Support: IE <=11 only
// Running getBoundingClientRect on a disconnected node
// in IE throws an error.
( !elem.getClientRects().length || !elem.getBoundingClientRect().width ) ?
swap( elem, cssShow, function() {
return getWidthOrHeight( elem, dimension, extra );
} ) :
getWidthOrHeight( elem, dimension, extra );
}
},
set: function( elem, value, extra ) {
var matches,
styles = getStyles( elem ),
// Only read styles.position if the test has a chance to fail
// to avoid forcing a reflow.
scrollboxSizeBuggy = !support.scrollboxSize() &&
styles.position === "absolute",
// To avoid forcing a reflow, only fetch boxSizing if we need it (gh-3991)
boxSizingNeeded = scrollboxSizeBuggy || extra,
isBorderBox = boxSizingNeeded &&
jQuery.css( elem, "boxSizing", false, styles ) === "border-box",
subtract = extra ?
boxModelAdjustment(
elem,
dimension,
extra,
isBorderBox,
styles
) :
0;
// Account for unreliable border-box dimensions by comparing offset* to computed and
// faking a content-box to get border and padding (gh-3699)
if ( isBorderBox && scrollboxSizeBuggy ) {
subtract -= Math.ceil(
elem[ "offset" + dimension[ 0 ].toUpperCase() + dimension.slice( 1 ) ] -
parseFloat( styles[ dimension ] ) -
boxModelAdjustment( elem, dimension, "border", false, styles ) -
0.5
);
}
// Convert to pixels if value adjustment is needed
if ( subtract && ( matches = rcssNum.exec( value ) ) &&
( matches[ 3 ] || "px" ) !== "px" ) {
elem.style[ dimension ] = value;
value = jQuery.css( elem, dimension );
}
return setPositiveNumber( elem, value, subtract );
}
};
} );
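// Mapping sketch (assumed from the dimension helpers defined later in the
// library): the get/set hooks above receive `extra` as follows:
//   .width()            -> "content"
//   .innerWidth()       -> "padding"
//   .outerWidth()       -> "border"
//   .outerWidth( true ) -> "margin"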
jQuery.cssHooks.marginLeft = addGetHookIf( support.reliableMarginLeft,
function( elem, computed ) {
if ( computed ) {
return ( parseFloat( curCSS( elem, "marginLeft" ) ) ||
elem.getBoundingClientRect().left -
swap( elem, { marginLeft: 0 }, function() {
return elem.getBoundingClientRect().left;
} )
) + "px";
}
}
);
// These hooks are used by animate to expand properties
jQuery.each( {
margin: "",
padding: "",
border: "Width"
}, function( prefix, suffix ) {
jQuery.cssHooks[ prefix + suffix ] = {
expand: function( value ) {
var i = 0,
expanded = {},
// Assumes a single number if not a string
parts = typeof value === "string" ? value.split( " " ) : [ value ];
for ( ; i < 4; i++ ) {
expanded[ prefix + cssExpand[ i ] + suffix ] =
parts[ i ] || parts[ i - 2 ] || parts[ 0 ];
}
return expanded;
}
};
if ( prefix !== "margin" ) {
jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber;
}
} );
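// Usage sketch (illustrative): the expand hook mirrors CSS shorthand order
// (Top, Right, Bottom, Left), reusing earlier parts for omitted ones:
//   jQuery.cssHooks.margin.expand( "1px 2px" );
//   // -> { marginTop: "1px", marginRight: "2px",
//   //      marginBottom: "1px", marginLeft: "2px" }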
jQuery.fn.extend( {
css: function( name, value ) {
return access( this, function( elem, name, value ) {
var styles, len,
map = {},
i = 0;
if ( Array.isArray( name ) ) {
styles = getStyles( elem );
len = name.length;
for ( ; i < len; i++ ) {
map[ name[ i ] ] = jQuery.css( elem, name[ i ], false, styles );
}
return map;
}
return value !== undefined ?
jQuery.style( elem, name, value ) :
jQuery.css( elem, name );
}, name, value, arguments.length > 1 );
}
} );
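// Usage sketch (illustrative; `#box` is a hypothetical element): passing an
// array of property names returns a map computed from a single styles object:
//   jQuery( "#box" ).css( [ "width", "height" ] );
//   // -> e.g. { width: "100px", height: "50px" }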
function Tween( elem, options, prop, end, easing ) {
return new Tween.prototype.init( elem, options, prop, end, easing );
}
jQuery.Tween = Tween;
Tween.prototype = {
constructor: Tween,
init: function( elem, options, prop, end, easing, unit ) {
this.elem = elem;
this.prop = prop;
this.easing = easing || jQuery.easing._default;
this.options = options;
this.start = this.now = this.cur();
this.end = end;
this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" );
},
cur: function() {
var hooks = Tween.propHooks[ this.prop ];
return hooks && hooks.get ?
hooks.get( this ) :
Tween.propHooks._default.get( this );
},
run: function( percent ) {
var eased,
hooks = Tween.propHooks[ this.prop ];
if ( this.options.duration ) {
this.pos = eased = jQuery.easing[ this.easing ](
percent, this.options.duration * percent, 0, 1, this.options.duration
);
} else {
this.pos = eased = percent;
}
this.now = ( this.end - this.start ) * eased + this.start;
if ( this.options.step ) {
this.options.step.call( this.elem, this.now, this );
}
if ( hooks && hooks.set ) {
hooks.set( this );
} else {
Tween.propHooks._default.set( this );
}
return this;
}
};
Tween.prototype.init.prototype = Tween.prototype;
Tween.propHooks = {
_default: {
get: function( tween ) {
var result;
// Use a property on the element directly when it is not a DOM element,
// or when there is no matching style property that exists.
if ( tween.elem.nodeType !== 1 ||
tween.elem[ tween.prop ] != null && tween.elem.style[ tween.prop ] == null ) {
return tween.elem[ tween.prop ];
}
// Passing an empty string as a 3rd parameter to .css will automatically
// attempt a parseFloat and fallback to a string if the parse fails.
// Simple values such as "10px" are parsed to Float;
// complex values such as "rotate(1rad)" are returned as-is.
result = jQuery.css( tween.elem, tween.prop, "" );
// Empty strings, null, undefined and "auto" are converted to 0.
return !result || result === "auto" ? 0 : result;
},
set: function( tween ) {
// Use the step hook for back compat.
// Use the cssHook if it's there.
// Use .style when a matching style property exists; otherwise fall back
// to setting the plain property.
if ( jQuery.fx.step[ tween.prop ] ) {
jQuery.fx.step[ tween.prop ]( tween );
} else if ( tween.elem.nodeType === 1 && (
jQuery.cssHooks[ tween.prop ] ||
tween.elem.style[ finalPropName( tween.prop ) ] != null ) ) {
jQuery.style( tween.elem, tween.prop, tween.now + tween.unit );
} else {
tween.elem[ tween.prop ] = tween.now;
}
}
}
};
// Support: IE <=9 only
// Panic-based approach to setting things on disconnected nodes
Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = {
set: function( tween ) {
if ( tween.elem.nodeType && tween.elem.parentNode ) {
tween.elem[ tween.prop ] = tween.now;
}
}
};
jQuery.easing = {
linear: function( p ) {
return p;
},
swing: function( p ) {
return 0.5 - Math.cos( p * Math.PI ) / 2;
},
_default: "swing"
};
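// Worked example (illustrative): "swing" is a cosine ease-in-out. At the
// midpoint, 0.5 - Math.cos( 0.5 * Math.PI ) / 2 === 0.5 - 0 === 0.5; the
// curve is steepest there and flattens toward p = 0 and p = 1.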
jQuery.fx = Tween.prototype.init;
// Back compat <1.8 extension point
jQuery.fx.step = {};
var
fxNow, inProgress,
rfxtypes = /^(?:toggle|show|hide)$/,
rrun = /queueHooks$/;
function schedule() {
if ( inProgress ) {
if ( document.hidden === false && window.requestAnimationFrame ) {
window.requestAnimationFrame( schedule );
} else {
window.setTimeout( schedule, jQuery.fx.interval );
}
jQuery.fx.tick();
}
}
// Animations created synchronously will run synchronously
function createFxNow() {
window.setTimeout( function() {
fxNow = undefined;
} );
return ( fxNow = Date.now() );
}
// Generate parameters to create a standard animation
function genFx( type, includeWidth ) {
var which,
i = 0,
attrs = { height: type };
// If we include width, step value is 1 to do all cssExpand values,
// otherwise step value is 2 to skip over Left and Right
includeWidth = includeWidth ? 1 : 0;
for ( ; i < 4; i += 2 - includeWidth ) {
which = cssExpand[ i ];
attrs[ "margin" + which ] = attrs[ "padding" + which ] = type;
}
if ( includeWidth ) {
attrs.opacity = attrs.width = type;
}
return attrs;
}
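// Illustrative output (cssExpand is [ "Top", "Right", "Bottom", "Left" ],
// defined earlier in the library):
//   genFx( "hide", true );
//   // -> height, width, opacity and all four margin*/padding* sides
//   //    mapped to "hide"
//   genFx( "show" );
//   // -> only height plus the Top/Bottom margin and padding properties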
function createTween( value, prop, animation ) {
var tween,
collection = ( Animation.tweeners[ prop ] || [] ).concat( Animation.tweeners[ "*" ] ),
index = 0,
length = collection.length;
for ( ; index < length; index++ ) {
if ( ( tween = collection[ index ].call( animation, prop, value ) ) ) {
// We're done with this property
return tween;
}
}
}
function defaultPrefilter( elem, props, opts ) {
var prop, value, toggle, hooks, oldfire, propTween, restoreDisplay, display,
isBox = "width" in props || "height" in props,
anim = this,
orig = {},
style = elem.style,
hidden = elem.nodeType && isHiddenWithinTree( elem ),
dataShow = dataPriv.get( elem, "fxshow" );
// Queue-skipping animations hijack the fx hooks
if ( !opts.queue ) {
hooks = jQuery._queueHooks( elem, "fx" );
if ( hooks.unqueued == null ) {
hooks.unqueued = 0;
oldfire = hooks.empty.fire;
hooks.empty.fire = function() {
if ( !hooks.unqueued ) {
oldfire();
}
};
}
hooks.unqueued++;
anim.always( function() {
// Ensure the complete handler is called before this completes
anim.always( function() {
hooks.unqueued--;
if ( !jQuery.queue( elem, "fx" ).length ) {
hooks.empty.fire();
}
} );
} );
}
// Detect show/hide animations
for ( prop in props ) {
value = props[ prop ];
if ( rfxtypes.test( value ) ) {
delete props[ prop ];
toggle = toggle || value === "toggle";
if ( value === ( hidden ? "hide" : "show" ) ) {
// Pretend to be hidden if this is a "show" and
// there is still data from a stopped show/hide
if ( value === "show" && dataShow && dataShow[ prop ] !== undefined ) {
hidden = true;
// Ignore all other no-op show/hide data
} else {
continue;
}
}
orig[ prop ] = dataShow && dataShow[ prop ] || jQuery.style( elem, prop );
}
}
// Bail out if this is a no-op like .hide().hide()
propTween = !jQuery.isEmptyObject( props );
if ( !propTween && jQuery.isEmptyObject( orig ) ) {
return;
}
// Restrict "overflow" and "display" styles during box animations
if ( isBox && elem.nodeType === 1 ) {
// Support: IE <=9 - 11, Edge 12 - 15
// Record all 3 overflow attributes because IE does not infer the shorthand
// from identically-valued overflowX and overflowY and Edge just mirrors
// the overflowX value there.
opts.overflow = [ style.overflow, style.overflowX, style.overflowY ];
// Identify a display type, preferring old show/hide data over the CSS cascade
restoreDisplay = dataShow && dataShow.display;
if ( restoreDisplay == null ) {
restoreDisplay = dataPriv.get( elem, "display" );
}
display = jQuery.css( elem, "display" );
if ( display === "none" ) {
if ( restoreDisplay ) {
display = restoreDisplay;
} else {
// Get nonempty value(s) by temporarily forcing visibility
showHide( [ elem ], true );
restoreDisplay = elem.style.display || restoreDisplay;
display = jQuery.css( elem, "display" );
showHide( [ elem ] );
}
}
// Animate inline elements as inline-block
if ( display === "inline" || display === "inline-block" && restoreDisplay != null ) {
if ( jQuery.css( elem, "float" ) === "none" ) {
// Restore the original display value at the end of pure show/hide animations
if ( !propTween ) {
anim.done( function() {
style.display = restoreDisplay;
} );
if ( restoreDisplay == null ) {
display = style.display;
restoreDisplay = display === "none" ? "" : display;
}
}
style.display = "inline-block";
}
}
}
if ( opts.overflow ) {
style.overflow = "hidden";
anim.always( function() {
style.overflow = opts.overflow[ 0 ];
style.overflowX = opts.overflow[ 1 ];
style.overflowY = opts.overflow[ 2 ];
} );
}
// Implement show/hide animations
propTween = false;
for ( prop in orig ) {
// General show/hide setup for this element animation
if ( !propTween ) {
if ( dataShow ) {
if ( "hidden" in dataShow ) {
hidden = dataShow.hidden;
}
} else {
dataShow = dataPriv.access( elem, "fxshow", { display: restoreDisplay } );
}
// Store hidden/visible for toggle so `.stop().toggle()` "reverses"
if ( toggle ) {
dataShow.hidden = !hidden;
}
// Show elements before animating them
if ( hidden ) {
showHide( [ elem ], true );
}
/* eslint-disable no-loop-func */
anim.done( function() {
/* eslint-enable no-loop-func */
// The final step of a "hide" animation is actually hiding the element
if ( !hidden ) {
showHide( [ elem ] );
}
dataPriv.remove( elem, "fxshow" );
for ( prop in orig ) {
jQuery.style( elem, prop, orig[ prop ] );
}
} );
}
// Per-property setup
propTween = createTween( hidden ? dataShow[ prop ] : 0, prop, anim );
if ( !( prop in dataShow ) ) {
dataShow[ prop ] = propTween.start;
if ( hidden ) {
propTween.end = propTween.start;
propTween.start = 0;
}
}
}
}
function propFilter( props, specialEasing ) {
var index, name, easing, value, hooks;
// camelCase, specialEasing and expand cssHook pass
for ( index in props ) {
name = camelCase( index );
easing = specialEasing[ name ];
value = props[ index ];
if ( Array.isArray( value ) ) {
easing = value[ 1 ];
value = props[ index ] = value[ 0 ];
}
if ( index !== name ) {
props[ name ] = value;
delete props[ index ];
}
hooks = jQuery.cssHooks[ name ];
if ( hooks && "expand" in hooks ) {
value = hooks.expand( value );
delete props[ name ];
// Not quite $.extend, this won't overwrite existing keys.
// Reusing 'index' because we have the correct "name"
for ( index in value ) {
if ( !( index in props ) ) {
props[ index ] = value[ index ];
specialEasing[ index ] = easing;
}
}
} else {
specialEasing[ name ] = easing;
}
}
}
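// Before/after sketch (illustrative input): propFilter camelCases keys,
// splits [ value, easing ] pairs and applies expand hooks:
//   props = { "margin-top": [ 10, "linear" ] }
//   // becomes props = { marginTop: 10 },
//   // specialEasing = { marginTop: "linear" }
// A shorthand key like `margin` would instead expand into the four
// margin* properties, each inheriting the easing.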
function Animation( elem, properties, options ) {
var result,
stopped,
index = 0,
length = Animation.prefilters.length,
deferred = jQuery.Deferred().always( function() {
// Don't match elem in the :animated selector
delete tick.elem;
} ),
tick = function() {
if ( stopped ) {
return false;
}
var currentTime = fxNow || createFxNow(),
remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ),
// Support: Android 2.3 only
// Archaic crash bug won't allow us to use `1 - ( 0.5 || 0 )` (#12497)
temp = remaining / animation.duration || 0,
percent = 1 - temp,
index = 0,
length = animation.tweens.length;
for ( ; index < length; index++ ) {
animation.tweens[ index ].run( percent );
}
deferred.notifyWith( elem, [ animation, percent, remaining ] );
// If there's more to do, yield
if ( percent < 1 && length ) {
return remaining;
}
// If this was an empty animation, synthesize a final progress notification
if ( !length ) {
deferred.notifyWith( elem, [ animation, 1, 0 ] );
}
// Resolve the animation and report its conclusion
deferred.resolveWith( elem, [ animation ] );
return false;
},
animation = deferred.promise( {
elem: elem,
props: jQuery.extend( {}, properties ),
opts: jQuery.extend( true, {
specialEasing: {},
easing: jQuery.easing._default
}, options ),
originalProperties: properties,
originalOptions: options,
startTime: fxNow || createFxNow(),
duration: options.duration,
tweens: [],
createTween: function( prop, end ) {
var tween = jQuery.Tween( elem, animation.opts, prop, end,
animation.opts.specialEasing[ prop ] || animation.opts.easing );
animation.tweens.push( tween );
return tween;
},
stop: function( gotoEnd ) {
var index = 0,
// If we are going to the end, we want to run all the tweens
// otherwise we skip this part
length = gotoEnd ? animation.tweens.length : 0;
if ( stopped ) {
return this;
}
stopped = true;
for ( ; index < length; index++ ) {
animation.tweens[ index ].run( 1 );
}
// Resolve when we played the last frame; otherwise, reject
if ( gotoEnd ) {
deferred.notifyWith( elem, [ animation, 1, 0 ] );
deferred.resolveWith( elem, [ animation, gotoEnd ] );
} else {
deferred.rejectWith( elem, [ animation, gotoEnd ] );
}
return this;
}
} ),
props = animation.props;
propFilter( props, animation.opts.specialEasing );
for ( ; index < length; index++ ) {
result = Animation.prefilters[ index ].call( animation, elem, props, animation.opts );
if ( result ) {
if ( isFunction( result.stop ) ) {
jQuery._queueHooks( animation.elem, animation.opts.queue ).stop =
result.stop.bind( result );
}
return result;
}
}
jQuery.map( props, createTween, animation );
if ( isFunction( animation.opts.start ) ) {
animation.opts.start.call( elem, animation );
}
// Attach callbacks from options
animation
.progress( animation.opts.progress )
.done( animation.opts.done, animation.opts.complete )
.fail( animation.opts.fail )
.always( animation.opts.always );
jQuery.fx.timer(
jQuery.extend( tick, {
elem: elem,
anim: animation,
queue: animation.opts.queue
} )
);
return animation;
}
jQuery.Animation = jQuery.extend( Animation, {
tweeners: {
"*": [ function( prop, value ) {
var tween = this.createTween( prop, value );
adjustCSS( tween.elem, prop, rcssNum.exec( value ), tween );
return tween;
} ]
},
tweener: function( props, callback ) {
if ( isFunction( props ) ) {
callback = props;
props = [ "*" ];
} else {
props = props.match( rnothtmlwhite );
}
var prop,
index = 0,
length = props.length;
for ( ; index < length; index++ ) {
prop = props[ index ];
Animation.tweeners[ prop ] = Animation.tweeners[ prop ] || [];
Animation.tweeners[ prop ].unshift( callback );
}
},
prefilters: [ defaultPrefilter ],
prefilter: function( callback, prepend ) {
if ( prepend ) {
Animation.prefilters.unshift( callback );
} else {
Animation.prefilters.push( callback );
}
}
} );
jQuery.speed = function( speed, easing, fn ) {
var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : {
complete: fn || !fn && easing ||
isFunction( speed ) && speed,
duration: speed,
easing: fn && easing || easing && !isFunction( easing ) && easing
};
// Go to the end state if fx are off
if ( jQuery.fx.off ) {
opt.duration = 0;
} else {
if ( typeof opt.duration !== "number" ) {
if ( opt.duration in jQuery.fx.speeds ) {
opt.duration = jQuery.fx.speeds[ opt.duration ];
} else {
opt.duration = jQuery.fx.speeds._default;
}
}
}
// Normalize opt.queue - true/undefined/null -> "fx"
if ( opt.queue == null || opt.queue === true ) {
opt.queue = "fx";
}
// Queueing
opt.old = opt.complete;
opt.complete = function() {
if ( isFunction( opt.old ) ) {
opt.old.call( this );
}
if ( opt.queue ) {
jQuery.dequeue( this, opt.queue );
}
};
return opt;
};
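// Usage sketch (illustrative, with jQuery.fx.off falsy; `doneFn` is a
// hypothetical callback): jQuery.speed normalizes the loose argument styles
// accepted by .animate() and the fx shortcuts:
//   jQuery.speed( "slow" ).duration      // -> 600 (from jQuery.fx.speeds)
//   jQuery.speed( undefined ).duration   // -> 400 (_default)
//   jQuery.speed( 200, doneFn ).complete // -> wraps doneFn and dequeues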
jQuery.fn.extend( {
fadeTo: function( speed, to, easing, callback ) {
// Show any hidden elements after setting opacity to 0
return this.filter( isHiddenWithinTree ).css( "opacity", 0 ).show()
// Animate to the value specified
.end().animate( { opacity: to }, speed, easing, callback );
},
animate: function( prop, speed, easing, callback ) {
var empty = jQuery.isEmptyObject( prop ),
optall = jQuery.speed( speed, easing, callback ),
doAnimation = function() {
// Operate on a copy of prop so per-property easing won't be lost
var anim = Animation( this, jQuery.extend( {}, prop ), optall );
// Empty animations resolve immediately, as do ones being finished
if ( empty || dataPriv.get( this, "finish" ) ) {
anim.stop( true );
}
};
doAnimation.finish = doAnimation;
return empty || optall.queue === false ?
this.each( doAnimation ) :
this.queue( optall.queue, doAnimation );
},
stop: function( type, clearQueue, gotoEnd ) {
var stopQueue = function( hooks ) {
var stop = hooks.stop;
delete hooks.stop;
stop( gotoEnd );
};
if ( typeof type !== "string" ) {
gotoEnd = clearQueue;
clearQueue = type;
type = undefined;
}
if ( clearQueue ) {
this.queue( type || "fx", [] );
}
return this.each( function() {
var dequeue = true,
index = type != null && type + "queueHooks",
timers = jQuery.timers,
data = dataPriv.get( this );
if ( index ) {
if ( data[ index ] && data[ index ].stop ) {
stopQueue( data[ index ] );
}
} else {
for ( index in data ) {
if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) {
stopQueue( data[ index ] );
}
}
}
for ( index = timers.length; index--; ) {
if ( timers[ index ].elem === this &&
( type == null || timers[ index ].queue === type ) ) {
timers[ index ].anim.stop( gotoEnd );
dequeue = false;
timers.splice( index, 1 );
}
}
// Start the next entry in the queue if the last step wasn't forced.
// Timers currently call their complete callbacks, which dequeue,
// but only when they were stopped with gotoEnd.
if ( dequeue || !gotoEnd ) {
jQuery.dequeue( this, type );
}
} );
},
finish: function( type ) {
if ( type !== false ) {
type = type || "fx";
}
return this.each( function() {
var index,
data = dataPriv.get( this ),
queue = data[ type + "queue" ],
hooks = data[ type + "queueHooks" ],
timers = jQuery.timers,
length = queue ? queue.length : 0;
// Enable finishing flag on private data
data.finish = true;
// Empty the queue first
jQuery.queue( this, type, [] );
if ( hooks && hooks.stop ) {
hooks.stop.call( this, true );
}
// Look for any active animations, and finish them
for ( index = timers.length; index--; ) {
if ( timers[ index ].elem === this && timers[ index ].queue === type ) {
timers[ index ].anim.stop( true );
timers.splice( index, 1 );
}
}
// Look for any animations in the old queue and finish them
for ( index = 0; index < length; index++ ) {
if ( queue[ index ] && queue[ index ].finish ) {
queue[ index ].finish.call( this );
}
}
// Turn off finishing flag
delete data.finish;
} );
}
} );
jQuery.each( [ "toggle", "show", "hide" ], function( _i, name ) {
var cssFn = jQuery.fn[ name ];
jQuery.fn[ name ] = function( speed, easing, callback ) {
return speed == null || typeof speed === "boolean" ?
cssFn.apply( this, arguments ) :
this.animate( genFx( name, true ), speed, easing, callback );
};
} );
// Generate shortcuts for custom animations
jQuery.each( {
slideDown: genFx( "show" ),
slideUp: genFx( "hide" ),
slideToggle: genFx( "toggle" ),
fadeIn: { opacity: "show" },
fadeOut: { opacity: "hide" },
fadeToggle: { opacity: "toggle" }
}, function( name, props ) {
jQuery.fn[ name ] = function( speed, easing, callback ) {
return this.animate( props, speed, easing, callback );
};
} );
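// Equivalence sketch (illustrative): the generated shortcuts are thin
// wrappers over .animate(), e.g.
//   jQuery( elem ).fadeIn( 400 );
//   // is equivalent to
//   jQuery( elem ).animate( { opacity: "show" }, 400 );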
jQuery.timers = [];
jQuery.fx.tick = function() {
var timer,
i = 0,
timers = jQuery.timers;
fxNow = Date.now();
for ( ; i < timers.length; i++ ) {
timer = timers[ i ];
// Run the timer and safely remove it when done (allowing for external removal)
if ( !timer() && timers[ i ] === timer ) {
timers.splice( i--, 1 );
}
}
if ( !timers.length ) {
jQuery.fx.stop();
}
fxNow = undefined;
};
jQuery.fx.timer = function( timer ) {
jQuery.timers.push( timer );
jQuery.fx.start();
};
jQuery.fx.interval = 13;
jQuery.fx.start = function() {
if ( inProgress ) {
return;
}
inProgress = true;
schedule();
};
jQuery.fx.stop = function() {
inProgress = null;
};
jQuery.fx.speeds = {
slow: 600,
fast: 200,
// Default speed
_default: 400
};
// Based off of the plugin by Clint Helfers, with permission.
// https://web.archive.org/web/20100324014747/http://blindsignals.com/index.php/2009/07/jquery-delay/
jQuery.fn.delay = function( time, type ) {
time = jQuery.fx ? jQuery.fx.speeds[ time ] || time : time;
type = type || "fx";
return this.queue( type, function( next, hooks ) {
var timeout = window.setTimeout( next, time );
hooks.stop = function() {
window.clearTimeout( timeout );
};
} );
};
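// Usage sketch (illustrative): .delay() inserts a timed gap into the "fx"
// queue; the stored hooks.stop lets a later .stop() cancel the timeout:
//   jQuery( elem ).slideUp( 300 ).delay( "fast" ).fadeIn( 400 );
//   // "fast" maps through jQuery.fx.speeds to 200ms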
( function() {
var input = document.createElement( "input" ),
select = document.createElement( "select" ),
opt = select.appendChild( document.createElement( "option" ) );
input.type = "checkbox";
// Support: Android <=4.3 only
// Default value for a checkbox should be "on"
support.checkOn = input.value !== "";
// Support: IE <=11 only
// Must access selectedIndex to make default options select
support.optSelected = opt.selected;
// Support: IE <=11 only
// An input loses its value after becoming a radio
input = document.createElement( "input" );
input.value = "t";
input.type = "radio";
support.radioValue = input.value === "t";
} )();
var boolHook,
attrHandle = jQuery.expr.attrHandle;
jQuery.fn.extend( {
attr: function( name, value ) {
return access( this, jQuery.attr, name, value, arguments.length > 1 );
},
removeAttr: function( name ) {
return this.each( function() {
jQuery.removeAttr( this, name );
} );
}
} );
jQuery.extend( {
attr: function( elem, name, value ) {
var ret, hooks,
nType = elem.nodeType;
// Don't get/set attributes on text, comment and attribute nodes
if ( nType === 3 || nType === 8 || nType === 2 ) {
return;
}
// Fallback to prop when attributes are not supported
if ( typeof elem.getAttribute === "undefined" ) {
return jQuery.prop( elem, name, value );
}
// Attribute hooks are determined by the lowercase version
// Grab necessary hook if one is defined
if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) {
hooks = jQuery.attrHooks[ name.toLowerCase() ] ||
( jQuery.expr.match.bool.test( name ) ? boolHook : undefined );
}
if ( value !== undefined ) {
if ( value === null ) {
jQuery.removeAttr( elem, name );
return;
}
if ( hooks && "set" in hooks &&
( ret = hooks.set( elem, value, name ) ) !== undefined ) {
return ret;
}
elem.setAttribute( name, value + "" );
return value;
}
if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) {
return ret;
}
ret = jQuery.find.attr( elem, name );
// Non-existent attributes return null; we normalize to undefined
return ret == null ? undefined : ret;
},
attrHooks: {
type: {
set: function( elem, value ) {
if ( !support.radioValue && value === "radio" &&
nodeName( elem, "input" ) ) {
var val = elem.value;
elem.setAttribute( "type", value );
if ( val ) {
elem.value = val;
}
return value;
}
}
}
},
removeAttr: function( elem, value ) {
var name,
i = 0,
// Attribute names can contain non-HTML whitespace characters
// https://html.spec.whatwg.org/multipage/syntax.html#attributes-2
attrNames = value && value.match( rnothtmlwhite );
if ( attrNames && elem.nodeType === 1 ) {
while ( ( name = attrNames[ i++ ] ) ) {
elem.removeAttribute( name );
}
}
}
} );
// Hooks for boolean attributes
boolHook = {
set: function( elem, value, name ) {
if ( value === false ) {
// Remove boolean attributes when set to false
jQuery.removeAttr( elem, name );
} else {
elem.setAttribute( name, name );
}
return name;
}
};
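// Behavior sketch (illustrative): for boolean attributes the hook follows
// HTML conventions, writing the attribute's own name as its value:
//   jQuery( input ).attr( "checked", true );  // sets checked="checked"
//   jQuery( input ).attr( "checked", false ); // removes the attribute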
jQuery.each( jQuery.expr.match.bool.source.match( /\w+/g ), function( _i, name ) {
var getter = attrHandle[ name ] || jQuery.find.attr;
attrHandle[ name ] = function( elem, name, isXML ) {
var ret, handle,
lowercaseName = name.toLowerCase();
if ( !isXML ) {
// Avoid an infinite loop by temporarily removing this function from the getter
handle = attrHandle[ lowercaseName ];
attrHandle[ lowercaseName ] = ret;
ret = getter( elem, name, isXML ) != null ?
lowercaseName :
null;
attrHandle[ lowercaseName ] = handle;
}
return ret;
};
} );
var rfocusable = /^(?:input|select|textarea|button)$/i,
rclickable = /^(?:a|area)$/i;
jQuery.fn.extend( {
prop: function( name, value ) {
return access( this, jQuery.prop, name, value, arguments.length > 1 );
},
removeProp: function( name ) {
return this.each( function() {
delete this[ jQuery.propFix[ name ] || name ];
} );
}
} );
jQuery.extend( {
prop: function( elem, name, value ) {
var ret, hooks,
nType = elem.nodeType;
// Don't get/set properties on text, comment and attribute nodes
if ( nType === 3 || nType === 8 || nType === 2 ) {
return;
}
if ( nType !== 1 || !jQuery.isXMLDoc( elem ) ) {
// Fix name and attach hooks
name = jQuery.propFix[ name ] || name;
hooks = jQuery.propHooks[ name ];
}
if ( value !== undefined ) {
if ( hooks && "set" in hooks &&
( ret = hooks.set( elem, value, name ) ) !== undefined ) {
return ret;
}
return ( elem[ name ] = value );
}
if ( hooks && "get" in hooks && ( ret = hooks.get( elem, name ) ) !== null ) {
return ret;
}
return elem[ name ];
},
propHooks: {
tabIndex: {
get: function( elem ) {
// Support: IE <=9 - 11 only
// elem.tabIndex doesn't always return the
// correct value when it hasn't been explicitly set
// https://web.archive.org/web/20141116233347/http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/
// Use proper attribute retrieval(#12072)
var tabindex = jQuery.find.attr( elem, "tabindex" );
if ( tabindex ) {
return parseInt( tabindex, 10 );
}
if (
rfocusable.test( elem.nodeName ) ||
rclickable.test( elem.nodeName ) &&
elem.href
) {
return 0;
}
return -1;
}
}
},
propFix: {
"for": "htmlFor",
"class": "className"
}
} );
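// Usage sketch (illustrative): propFix maps reserved-word attribute names to
// their DOM property equivalents, so both spellings reach the same property:
//   jQuery.prop( label, "for" );         // reads label.htmlFor
//   jQuery( elem ).prop( "class", "x" ); // writes elem.className = "x"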
// Support: IE <=11 only
// Accessing the selectedIndex property
// forces the browser to respect setting selected
// on the option
// The getter ensures a default option is selected
// when in an optgroup
// eslint rule "no-unused-expressions" is disabled for this code
// since it considers such property accesses a no-op
if ( !support.optSelected ) {
jQuery.propHooks.selected = {
get: function( elem ) {
/* eslint no-unused-expressions: "off" */
var parent = elem.parentNode;
if ( parent && parent.parentNode ) {
parent.parentNode.selectedIndex;
}
return null;
},
set: function( elem ) {
/* eslint no-unused-expressions: "off" */
var parent = elem.parentNode;
if ( parent ) {
parent.selectedIndex;
if ( parent.parentNode ) {
parent.parentNode.selectedIndex;
}
}
}
};
}
jQuery.each( [
"tabIndex",
"readOnly",
"maxLength",
"cellSpacing",
"cellPadding",
"rowSpan",
"colSpan",
"useMap",
"frameBorder",
"contentEditable"
], function() {
jQuery.propFix[ this.toLowerCase() ] = this;
} );
// Strip and collapse whitespace according to HTML spec
// https://infra.spec.whatwg.org/#strip-and-collapse-ascii-whitespace
function stripAndCollapse( value ) {
var tokens = value.match( rnothtmlwhite ) || [];
return tokens.join( " " );
}
function getClass( elem ) {
return elem.getAttribute && elem.getAttribute( "class" ) || "";
}
function classesToArray( value ) {
if ( Array.isArray( value ) ) {
return value;
}
if ( typeof value === "string" ) {
return value.match( rnothtmlwhite ) || [];
}
return [];
}
jQuery.fn.extend( {
addClass: function( value ) {
var classes, elem, cur, curValue, clazz, j, finalValue,
i = 0;
if ( isFunction( value ) ) {
return this.each( function( j ) {
jQuery( this ).addClass( value.call( this, j, getClass( this ) ) );
} );
}
classes = classesToArray( value );
if ( classes.length ) {
while ( ( elem = this[ i++ ] ) ) {
curValue = getClass( elem );
cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
if ( cur ) {
j = 0;
while ( ( clazz = classes[ j++ ] ) ) {
if ( cur.indexOf( " " + clazz + " " ) < 0 ) {
cur += clazz + " ";
}
}
// Only assign if different to avoid unneeded rendering.
finalValue = stripAndCollapse( cur );
if ( curValue !== finalValue ) {
elem.setAttribute( "class", finalValue );
}
}
}
}
return this;
},
removeClass: function( value ) {
var classes, elem, cur, curValue, clazz, j, finalValue,
i = 0;
if ( isFunction( value ) ) {
return this.each( function( j ) {
jQuery( this ).removeClass( value.call( this, j, getClass( this ) ) );
} );
}
if ( !arguments.length ) {
return this.attr( "class", "" );
}
classes = classesToArray( value );
if ( classes.length ) {
while ( ( elem = this[ i++ ] ) ) {
curValue = getClass( elem );
// This expression is here for better compressibility (see addClass)
cur = elem.nodeType === 1 && ( " " + stripAndCollapse( curValue ) + " " );
if ( cur ) {
j = 0;
while ( ( clazz = classes[ j++ ] ) ) {
// Remove *all* instances
while ( cur.indexOf( " " + clazz + " " ) > -1 ) {
cur = cur.replace( " " + clazz + " ", " " );
}
}
// Only assign if different to avoid unneeded rendering.
finalValue = stripAndCollapse( cur );
if ( curValue !== finalValue ) {
elem.setAttribute( "class", finalValue );
}
}
}
}
return this;
},
toggleClass: function( value, stateVal ) {
var type = typeof value,
isValidValue = type === "string" || Array.isArray( value );
if ( typeof stateVal === "boolean" && isValidValue ) {
return stateVal ? this.addClass( value ) : this.removeClass( value );
}
if ( isFunction( value ) ) {
return this.each( function( i ) {
jQuery( this ).toggleClass(
value.call( this, i, getClass( this ), stateVal ),
stateVal
);
} );
}
return this.each( function() {
var className, i, self, classNames;
if ( isValidValue ) {
// Toggle individual class names
i = 0;
self = jQuery( this );
classNames = classesToArray( value );
while ( ( className = classNames[ i++ ] ) ) {
// Check each className given (space-separated list)
if ( self.hasClass( className ) ) {
self.removeClass( className );
} else {
self.addClass( className );
}
}
// Toggle whole class name
} else if ( value === undefined || type === "boolean" ) {
className = getClass( this );
if ( className ) {
// Store className if set
dataPriv.set( this, "__className__", className );
}
// If the element has a class name or if we're passed `false`,
// then remove the whole classname (if there was one, the above saved it).
// Otherwise bring back whatever was previously saved (if anything),
// falling back to the empty string if nothing was stored.
if ( this.setAttribute ) {
this.setAttribute( "class",
className || value === false ?
"" :
dataPriv.get( this, "__className__" ) || ""
);
}
}
} );
},
hasClass: function( selector ) {
var className, elem,
i = 0;
className = " " + selector + " ";
while ( ( elem = this[ i++ ] ) ) {
if ( elem.nodeType === 1 &&
( " " + stripAndCollapse( getClass( elem ) ) + " " ).indexOf( className ) > -1 ) {
return true;
}
}
return false;
}
} );
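// Behavior sketch (illustrative): the helpers operate on whitespace-separated
// tokens; removeClass strips every occurrence of a name:
//   elem.className = "a b a";
//   jQuery( elem ).removeClass( "a" ); // class attribute becomes "b"
//   jQuery( elem ).hasClass( "b" );    // -> true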
var rreturn = /\r/g;
jQuery.fn.extend( {
val: function( value ) {
var hooks, ret, valueIsFunction,
elem = this[ 0 ];
if ( !arguments.length ) {
if ( elem ) {
hooks = jQuery.valHooks[ elem.type ] ||
jQuery.valHooks[ elem.nodeName.toLowerCase() ];
if ( hooks &&
"get" in hooks &&
( ret = hooks.get( elem, "value" ) ) !== undefined
) {
return ret;
}
ret = elem.value;
// Handle most common string cases
if ( typeof ret === "string" ) {
return ret.replace( rreturn, "" );
}
// Handle cases where value is null/undef or number
return ret == null ? "" : ret;
}
return;
}
valueIsFunction = isFunction( value );
return this.each( function( i ) {
var val;
if ( this.nodeType !== 1 ) {
return;
}
if ( valueIsFunction ) {
val = value.call( this, i, jQuery( this ).val() );
} else {
val = value;
}
// Treat null/undefined as ""; convert numbers to string
if ( val == null ) {
val = "";
} else if ( typeof val === "number" ) {
val += "";
} else if ( Array.isArray( val ) ) {
val = jQuery.map( val, function( value ) {
return value == null ? "" : value + "";
} );
}
hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ];
// If set returns undefined, fall back to normal setting
if ( !hooks || !( "set" in hooks ) || hooks.set( this, val, "value" ) === undefined ) {
this.value = val;
}
} );
}
} );
jQuery.extend( {
valHooks: {
option: {
get: function( elem ) {
var val = jQuery.find.attr( elem, "value" );
return val != null ?
val :
// Support: IE <=10 - 11 only
// option.text throws exceptions (#14686, #14858)
// Strip and collapse whitespace
// https://html.spec.whatwg.org/#strip-and-collapse-whitespace
stripAndCollapse( jQuery.text( elem ) );
}
},
select: {
get: function( elem ) {
var value, option, i,
options = elem.options,
index = elem.selectedIndex,
one = elem.type === "select-one",
values = one ? null : [],
max = one ? index + 1 : options.length;
if ( index < 0 ) {
i = max;
} else {
i = one ? index : 0;
}
// Loop through all the selected options
for ( ; i < max; i++ ) {
option = options[ i ];
// Support: IE <=9 only
// IE8-9 doesn't update selected after form reset (#2551)
if ( ( option.selected || i === index ) &&
// Don't return options that are disabled or in a disabled optgroup
!option.disabled &&
( !option.parentNode.disabled ||
!nodeName( option.parentNode, "optgroup" ) ) ) {
// Get the specific value for the option
value = jQuery( option ).val();
// We don't need an array for single (select-one) selects
if ( one ) {
return value;
}
// Multi-Selects return an array
values.push( value );
}
}
return values;
},
set: function( elem, value ) {
var optionSet, option,
options = elem.options,
values = jQuery.makeArray( value ),
i = options.length;
while ( i-- ) {
option = options[ i ];
/* eslint-disable no-cond-assign */
if ( option.selected =
jQuery.inArray( jQuery.valHooks.option.get( option ), values ) > -1
) {
optionSet = true;
}
/* eslint-enable no-cond-assign */
}
// Force browsers to behave consistently when a non-matching value is set
if ( !optionSet ) {
elem.selectedIndex = -1;
}
return values;
}
}
}
} );
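// Behavior sketch (illustrative): a select-one .val() returns the selected
// option's value string, select-multiple returns an array, and setting a
// value no option matches clears the selection for consistency:
//   jQuery( select ).val( "no-such-option" );
//   select.selectedIndex; // -> -1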
// Radios and checkboxes getter/setter
jQuery.each( [ "radio", "checkbox" ], function() {
jQuery.valHooks[ this ] = {
set: function( elem, value ) {
if ( Array.isArray( value ) ) {
return ( elem.checked = jQuery.inArray( jQuery( elem ).val(), value ) > -1 );
}
}
};
if ( !support.checkOn ) {
jQuery.valHooks[ this ].get = function( elem ) {
return elem.getAttribute( "value" ) === null ? "on" : elem.value;
};
}
} );
// Return jQuery for attributes-only inclusion
support.focusin = "onfocusin" in window;
var rfocusMorph = /^(?:focusinfocus|focusoutblur)$/,
stopPropagationCallback = function( e ) {
e.stopPropagation();
};
jQuery.extend( jQuery.event, {
trigger: function( event, data, elem, onlyHandlers ) {
var i, cur, tmp, bubbleType, ontype, handle, special, lastElement,
eventPath = [ elem || document ],
type = hasOwn.call( event, "type" ) ? event.type : event,
namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split( "." ) : [];
cur = lastElement = tmp = elem = elem || document;
// Don't do events on text and comment nodes
if ( elem.nodeType === 3 || elem.nodeType === 8 ) {
return;
}
// focus/blur morphs to focusin/out; ensure we're not firing them right now
if ( rfocusMorph.test( type + jQuery.event.triggered ) ) {
return;
}
if ( type.indexOf( "." ) > -1 ) {
// Namespaced trigger; create a regexp to match event type in handle()
namespaces = type.split( "." );
type = namespaces.shift();
namespaces.sort();
}
ontype = type.indexOf( ":" ) < 0 && "on" + type;
// Caller can pass in a jQuery.Event object, Object, or just an event type string
event = event[ jQuery.expando ] ?
event :
new jQuery.Event( type, typeof event === "object" && event );
// Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true)
event.isTrigger = onlyHandlers ? 2 : 3;
event.namespace = namespaces.join( "." );
event.rnamespace = event.namespace ?
new RegExp( "(^|\\.)" + namespaces.join( "\\.(?:.*\\.|)" ) + "(\\.|$)" ) :
null;
// Clean up the event in case it is being reused
event.result = undefined;
if ( !event.target ) {
event.target = elem;
}
// Clone any incoming data and prepend the event, creating the handler arg list
data = data == null ?
[ event ] :
jQuery.makeArray( data, [ event ] );
// Allow special events to draw outside the lines
special = jQuery.event.special[ type ] || {};
if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) {
return;
}
// Determine event propagation path in advance, per W3C events spec (#9951)
// Bubble up to document, then to window; watch for a global ownerDocument var (#9724)
if ( !onlyHandlers && !special.noBubble && !isWindow( elem ) ) {
bubbleType = special.delegateType || type;
if ( !rfocusMorph.test( bubbleType + type ) ) {
cur = cur.parentNode;
}
for ( ; cur; cur = cur.parentNode ) {
eventPath.push( cur );
tmp = cur;
}
// Only add window if we got to document (e.g., not plain obj or detached DOM)
if ( tmp === ( elem.ownerDocument || document ) ) {
eventPath.push( tmp.defaultView || tmp.parentWindow || window );
}
}
// Fire handlers on the event path
i = 0;
while ( ( cur = eventPath[ i++ ] ) && !event.isPropagationStopped() ) {
lastElement = cur;
event.type = i > 1 ?
bubbleType :
special.bindType || type;
// jQuery handler
handle = (
dataPriv.get( cur, "events" ) || Object.create( null )
)[ event.type ] &&
dataPriv.get( cur, "handle" );
if ( handle ) {
handle.apply( cur, data );
}
// Native handler
handle = ontype && cur[ ontype ];
if ( handle && handle.apply && acceptData( cur ) ) {
event.result = handle.apply( cur, data );
if ( event.result === false ) {
event.preventDefault();
}
}
}
event.type = type;
// If nobody prevented the default action, do it now
if ( !onlyHandlers && !event.isDefaultPrevented() ) {
if ( ( !special._default ||
special._default.apply( eventPath.pop(), data ) === false ) &&
acceptData( elem ) ) {
// Call a native DOM method on the target with the same name as the event.
// Don't do default actions on window, that's where global variables be (#6170)
if ( ontype && isFunction( elem[ type ] ) && !isWindow( elem ) ) {
// Don't re-trigger an onFOO event when we call its FOO() method
tmp = elem[ ontype ];
if ( tmp ) {
elem[ ontype ] = null;
}
// Prevent re-triggering of the same event, since we already bubbled it above
jQuery.event.triggered = type;
if ( event.isPropagationStopped() ) {
lastElement.addEventListener( type, stopPropagationCallback );
}
elem[ type ]();
if ( event.isPropagationStopped() ) {
lastElement.removeEventListener( type, stopPropagationCallback );
}
jQuery.event.triggered = undefined;
if ( tmp ) {
elem[ ontype ] = tmp;
}
}
}
}
return event.result;
},
// Piggyback on a donor event to simulate a different one
// Used only for `focus(in | out)` events
simulate: function( type, elem, event ) {
var e = jQuery.extend(
new jQuery.Event(),
event,
{
type: type,
isSimulated: true
}
);
jQuery.event.trigger( e, null, elem );
}
} );
jQuery.fn.extend( {
trigger: function( type, data ) {
return this.each( function() {
jQuery.event.trigger( type, data, this );
} );
},
triggerHandler: function( type, data ) {
var elem = this[ 0 ];
if ( elem ) {
return jQuery.event.trigger( type, data, elem, true );
}
}
} );
// Support: Firefox <=44
// Firefox doesn't have focus(in | out) events
// Related ticket - https://bugzilla.mozilla.org/show_bug.cgi?id=687787
//
// Support: Chrome <=48 - 49, Safari <=9.0 - 9.1
// focus(in | out) events fire after focus & blur events,
// which is a spec violation - http://www.w3.org/TR/DOM-Level-3-Events/#events-focusevent-event-order
// Related ticket - https://bugs.chromium.org/p/chromium/issues/detail?id=449857
if ( !support.focusin ) {
jQuery.each( { focus: "focusin", blur: "focusout" }, function( orig, fix ) {
// Attach a single capturing handler on the document while someone wants focusin/focusout
var handler = function( event ) {
jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ) );
};
jQuery.event.special[ fix ] = {
setup: function() {
// Handle: regular nodes (via `this.ownerDocument`), window
// (via `this.document`) & document (via `this`).
var doc = this.ownerDocument || this.document || this,
attaches = dataPriv.access( doc, fix );
if ( !attaches ) {
doc.addEventListener( orig, handler, true );
}
dataPriv.access( doc, fix, ( attaches || 0 ) + 1 );
},
teardown: function() {
var doc = this.ownerDocument || this.document || this,
attaches = dataPriv.access( doc, fix ) - 1;
if ( !attaches ) {
doc.removeEventListener( orig, handler, true );
dataPriv.remove( doc, fix );
} else {
dataPriv.access( doc, fix, attaches );
}
}
};
} );
}
var location = window.location;
var nonce = { guid: Date.now() };
var rquery = ( /\?/ );
// Cross-browser xml parsing
jQuery.parseXML = function( data ) {
var xml;
if ( !data || typeof data !== "string" ) {
return null;
}
// Support: IE 9 - 11 only
// IE throws on parseFromString with invalid input.
try {
xml = ( new window.DOMParser() ).parseFromString( data, "text/xml" );
} catch ( e ) {
xml = undefined;
}
if ( !xml || xml.getElementsByTagName( "parsererror" ).length ) {
jQuery.error( "Invalid XML: " + data );
}
return xml;
};
var
rbracket = /\[\]$/,
rCRLF = /\r?\n/g,
rsubmitterTypes = /^(?:submit|button|image|reset|file)$/i,
rsubmittable = /^(?:input|select|textarea|keygen)/i;
function buildParams( prefix, obj, traditional, add ) {
var name;
if ( Array.isArray( obj ) ) {
// Serialize array item.
jQuery.each( obj, function( i, v ) {
if ( traditional || rbracket.test( prefix ) ) {
// Treat each array item as a scalar.
add( prefix, v );
} else {
// Item is non-scalar (array or object), encode its numeric index.
buildParams(
prefix + "[" + ( typeof v === "object" && v != null ? i : "" ) + "]",
v,
traditional,
add
);
}
} );
} else if ( !traditional && toType( obj ) === "object" ) {
// Serialize object item.
for ( name in obj ) {
buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add );
}
} else {
// Serialize scalar item.
add( prefix, obj );
}
}
// Serialize an array of form elements or a set of
// key/values into a query string
jQuery.param = function( a, traditional ) {
var prefix,
s = [],
add = function( key, valueOrFunction ) {
// If value is a function, invoke it and use its return value
var value = isFunction( valueOrFunction ) ?
valueOrFunction() :
valueOrFunction;
s[ s.length ] = encodeURIComponent( key ) + "=" +
encodeURIComponent( value == null ? "" : value );
};
if ( a == null ) {
return "";
}
// If an array was passed in, assume that it is an array of form elements.
if ( Array.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) {
// Serialize the form elements
jQuery.each( a, function() {
add( this.name, this.value );
} );
} else {
// If traditional, encode the "old" way (the way 1.3.2 or older
// did it), otherwise encode params recursively.
for ( prefix in a ) {
buildParams( prefix, a[ prefix ], traditional, add );
}
}
// Return the resulting serialization
return s.join( "&" );
};
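// Usage sketch (illustrative): nested structures get bracketed keys unless
// `traditional` is set:
//   jQuery.param( { a: [ 1, 2 ], b: { c: 3 } } );
//   // -> "a%5B%5D=1&a%5B%5D=2&b%5Bc%5D=3" (i.e. a[]=1&a[]=2&b[c]=3)
//   jQuery.param( { a: [ 1, 2 ] }, true );
//   // -> "a=1&a=2"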
jQuery.fn.extend( {
serialize: function() {
return jQuery.param( this.serializeArray() );
},
serializeArray: function() {
return this.map( function() {
// Can add propHook for "elements" to filter or add form elements
var elements = jQuery.prop( this, "elements" );
return elements ? jQuery.makeArray( elements ) : this;
} )
.filter( function() {
var type = this.type;
// Use .is( ":disabled" ) so that fieldset[disabled] works
return this.name && !jQuery( this ).is( ":disabled" ) &&
rsubmittable.test( this.nodeName ) && !rsubmitterTypes.test( type ) &&
( this.checked || !rcheckableType.test( type ) );
} )
.map( function( _i, elem ) {
var val = jQuery( this ).val();
if ( val == null ) {
return null;
}
if ( Array.isArray( val ) ) {
return jQuery.map( val, function( val ) {
return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
} );
}
return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) };
} ).get();
}
} );
var
r20 = /%20/g,
rhash = /#.*$/,
rantiCache = /([?&])_=[^&]*/,
rheaders = /^(.*?):[ \t]*([^\r\n]*)$/mg,
// #7653, #8125, #8152: local protocol detection
rlocalProtocol = /^(?:about|app|app-storage|.+-extension|file|res|widget):$/,
rnoContent = /^(?:GET|HEAD)$/,
rprotocol = /^\/\//,
/* Prefilters
* 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example)
* 2) These are called:
* - BEFORE asking for a transport
* - AFTER param serialization (s.data is a string if s.processData is true)
* 3) key is the dataType
* 4) the catchall symbol "*" can be used
* 5) execution will start with transport dataType and THEN continue down to "*" if needed
*/
prefilters = {},
/* Transports bindings
* 1) key is the dataType
* 2) the catchall symbol "*" can be used
* 3) selection will start with transport dataType and THEN go to "*" if needed
*/
transports = {},
// Avoid comment-prolog char sequence (#10098); must appease lint and evade compression
allTypes = "*/".concat( "*" ),
// Anchor tag for parsing the document origin
originAnchor = document.createElement( "a" );
originAnchor.href = location.href;
// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport
function addToPrefiltersOrTransports( structure ) {
// dataTypeExpression is optional and defaults to "*"
return function( dataTypeExpression, func ) {
if ( typeof dataTypeExpression !== "string" ) {
func = dataTypeExpression;
dataTypeExpression = "*";
}
var dataType,
i = 0,
dataTypes = dataTypeExpression.toLowerCase().match( rnothtmlwhite ) || [];
if ( isFunction( func ) ) {
// For each dataType in the dataTypeExpression
while ( ( dataType = dataTypes[ i++ ] ) ) {
// Prepend if requested
if ( dataType[ 0 ] === "+" ) {
dataType = dataType.slice( 1 ) || "*";
( structure[ dataType ] = structure[ dataType ] || [] ).unshift( func );
// Otherwise append
} else {
( structure[ dataType ] = structure[ dataType ] || [] ).push( func );
}
}
}
};
}
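// Usage sketch (illustrative): jQuery.ajaxPrefilter and jQuery.ajaxTransport
// are both built from this factory; a "+" prefix prepends the handler:
//   jQuery.ajaxPrefilter( "+json", function( options ) {
//     // runs before previously registered json prefilters
//   } );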
// Base inspection function for prefilters and transports
function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR ) {
var inspected = {},
seekingTransport = ( structure === transports );
function inspect( dataType ) {
var selected;
inspected[ dataType ] = true;
jQuery.each( structure[ dataType ] || [], function( _, prefilterOrFactory ) {
var dataTypeOrTransport = prefilterOrFactory( options, originalOptions, jqXHR );
if ( typeof dataTypeOrTransport === "string" &&
!seekingTransport && !inspected[ dataTypeOrTransport ] ) {
options.dataTypes.unshift( dataTypeOrTransport );
inspect( dataTypeOrTransport );
return false;
} else if ( seekingTransport ) {
return !( selected = dataTypeOrTransport );
}
} );
return selected;
}
return inspect( options.dataTypes[ 0 ] ) || !inspected[ "*" ] && inspect( "*" );
}
// A special extend for ajax options
// that takes "flat" options (not to be deep extended)
// Fixes #9887
function ajaxExtend( target, src ) {
var key, deep,
flatOptions = jQuery.ajaxSettings.flatOptions || {};
for ( key in src ) {
if ( src[ key ] !== undefined ) {
( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ];
}
}
if ( deep ) {
jQuery.extend( true, target, deep );
}
return target;
}
/* Handles responses to an ajax request:
* - finds the right dataType (mediates between content-type and expected dataType)
* - returns the corresponding response
*/
function ajaxHandleResponses( s, jqXHR, responses ) {
var ct, type, finalDataType, firstDataType,
contents = s.contents,
dataTypes = s.dataTypes;
// Remove auto dataType and get content-type in the process
while ( dataTypes[ 0 ] === "*" ) {
dataTypes.shift();
if ( ct === undefined ) {
ct = s.mimeType || jqXHR.getResponseHeader( "Content-Type" );
}
}
// Check if we're dealing with a known content-type
if ( ct ) {
for ( type in contents ) {
if ( contents[ type ] && contents[ type ].test( ct ) ) {
dataTypes.unshift( type );
break;
}
}
}
// Check to see if we have a response for the expected dataType
if ( dataTypes[ 0 ] in responses ) {
finalDataType = dataTypes[ 0 ];
} else {
// Try convertible dataTypes
for ( type in responses ) {
if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[ 0 ] ] ) {
finalDataType = type;
break;
}
if ( !firstDataType ) {
firstDataType = type;
}
}
// Or just use first one
finalDataType = finalDataType || firstDataType;
}
// If we found a dataType
// We add the dataType to the list if needed
// and return the corresponding response
if ( finalDataType ) {
if ( finalDataType !== dataTypes[ 0 ] ) {
dataTypes.unshift( finalDataType );
}
return responses[ finalDataType ];
}
}
/* Chain conversions given the request and the original response
* Also sets the responseXXX fields on the jqXHR instance
*/
function ajaxConvert( s, response, jqXHR, isSuccess ) {
var conv2, current, conv, tmp, prev,
converters = {},
// Work with a copy of dataTypes in case we need to modify it for conversion
dataTypes = s.dataTypes.slice();
// Create converters map with lowercased keys
if ( dataTypes[ 1 ] ) {
for ( conv in s.converters ) {
converters[ conv.toLowerCase() ] = s.converters[ conv ];
}
}
current = dataTypes.shift();
// Convert to each sequential dataType
while ( current ) {
if ( s.responseFields[ current ] ) {
jqXHR[ s.responseFields[ current ] ] = response;
}
// Apply the dataFilter if provided
if ( !prev && isSuccess && s.dataFilter ) {
response = s.dataFilter( response, s.dataType );
}
prev = current;
current = dataTypes.shift();
if ( current ) {
// There's only work to do if current dataType is non-auto
if ( current === "*" ) {
current = prev;
// Convert response if prev dataType is non-auto and differs from current
} else if ( prev !== "*" && prev !== current ) {
// Seek a direct converter
conv = converters[ prev + " " + current ] || converters[ "* " + current ];
// If none found, seek a pair
if ( !conv ) {
for ( conv2 in converters ) {
// If conv2 outputs current
tmp = conv2.split( " " );
if ( tmp[ 1 ] === current ) {
// If prev can be converted to accepted input
conv = converters[ prev + " " + tmp[ 0 ] ] ||
converters[ "* " + tmp[ 0 ] ];
if ( conv ) {
// Condense equivalence converters
if ( conv === true ) {
conv = converters[ conv2 ];
// Otherwise, insert the intermediate dataType
} else if ( converters[ conv2 ] !== true ) {
current = tmp[ 0 ];
dataTypes.unshift( tmp[ 1 ] );
}
break;
}
}
}
}
// Apply converter (if not an equivalence)
if ( conv !== true ) {
// Unless errors are allowed to bubble, catch and return them
if ( conv && s.throws ) {
response = conv( response );
} else {
try {
response = conv( response );
} catch ( e ) {
return {
state: "parsererror",
error: conv ? e : "No conversion from " + prev + " to " + current
};
}
}
}
}
}
}
return { state: "success", data: response };
}
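// Conversion sketch (illustrative): with dataTypes [ "text", "json" ], the
// default "text json" converter (JSON.parse) turns a raw responseText of
// '{"ok":true}' into { ok: true }; unless s.throws is set, a parse failure
// is caught and reported as { state: "parsererror", error: ... }.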
jQuery.extend( {
// Counter for holding the number of active queries
active: 0,
// Last-Modified header cache for next request
lastModified: {},
etag: {},
ajaxSettings: {
url: location.href,
type: "GET",
isLocal: rlocalProtocol.test( location.protocol ),
global: true,
processData: true,
async: true,
contentType: "application/x-www-form-urlencoded; charset=UTF-8",
/*
timeout: 0,
data: null,
dataType: null,
username: null,
password: null,
cache: null,
throws: false,
traditional: false,
headers: {},
*/
accepts: {
"*": allTypes,
text: "text/plain",
html: "text/html",
xml: "application/xml, text/xml",
json: "application/json, text/javascript"
},
contents: {
xml: /\bxml\b/,
html: /\bhtml/,
json: /\bjson\b/
},
responseFields: {
xml: "responseXML",
text: "responseText",
json: "responseJSON"
},
// Data converters
// Keys separate source (or catchall "*") and destination types with a single space
converters: {
// Convert anything to text
"* text": String,
// Text to html (true = no transformation)
"text html": true,
// Evaluate text as a json expression
"text json": JSON.parse,
// Parse text as xml
"text xml": jQuery.parseXML
},
// For options that shouldn't be deep extended:
// you can add your own custom options here if
// and when you create one that shouldn't be
// deep extended (see ajaxExtend)
flatOptions: {
url: true,
context: true
}
},
// Creates a full fledged settings object into target
// with both ajaxSettings and settings fields.
// If target is omitted, writes into ajaxSettings.
ajaxSetup: function( target, settings ) {
return settings ?
// Building a settings object
ajaxExtend( ajaxExtend( target, jQuery.ajaxSettings ), settings ) :
// Extending ajaxSettings
ajaxExtend( jQuery.ajaxSettings, target );
},
ajaxPrefilter: addToPrefiltersOrTransports( prefilters ),
ajaxTransport: addToPrefiltersOrTransports( transports ),
// Main method
ajax: function( url, options ) {
// If url is an object, simulate pre-1.5 signature
if ( typeof url === "object" ) {
options = url;
url = undefined;
}
// Force options to be an object
options = options || {};
var transport,
// URL without anti-cache param
cacheURL,
// Response headers
responseHeadersString,
responseHeaders,
// timeout handle
timeoutTimer,
// URL cleanup var
urlAnchor,
// Request state (becomes false upon send and true upon completion)
completed,
// To know if global events are to be dispatched
fireGlobals,
// Loop variable
i,
// uncached part of the url
uncached,
// Create the final options object
s = jQuery.ajaxSetup( {}, options ),
// Callbacks context
callbackContext = s.context || s,
// Context for global events is callbackContext if it is a DOM node or jQuery collection
globalEventContext = s.context &&
( callbackContext.nodeType || callbackContext.jquery ) ?
jQuery( callbackContext ) :
jQuery.event,
// Deferreds
deferred = jQuery.Deferred(),
completeDeferred = jQuery.Callbacks( "once memory" ),
// Status-dependent callbacks
statusCode = s.statusCode || {},
// Headers (they are sent all at once)
requestHeaders = {},
requestHeadersNames = {},
// Default abort message
strAbort = "canceled",
// Fake xhr
jqXHR = {
readyState: 0,
// Builds headers hashtable if needed
getResponseHeader: function( key ) {
var match;
if ( completed ) {
if ( !responseHeaders ) {
responseHeaders = {};
while ( ( match = rheaders.exec( responseHeadersString ) ) ) {
responseHeaders[ match[ 1 ].toLowerCase() + " " ] =
( responseHeaders[ match[ 1 ].toLowerCase() + " " ] || [] )
.concat( match[ 2 ] );
}
}
match = responseHeaders[ key.toLowerCase() + " " ];
}
return match == null ? null : match.join( ", " );
},
// Raw string
getAllResponseHeaders: function() {
return completed ? responseHeadersString : null;
},
// Caches the header
setRequestHeader: function( name, value ) {
if ( completed == null ) {
name = requestHeadersNames[ name.toLowerCase() ] =
requestHeadersNames[ name.toLowerCase() ] || name;
requestHeaders[ name ] = value;
}
return this;
},
// Overrides response content-type header
overrideMimeType: function( type ) {
if ( completed == null ) {
s.mimeType = type;
}
return this;
},
// Status-dependent callbacks
statusCode: function( map ) {
var code;
if ( map ) {
if ( completed ) {
// Execute the appropriate callbacks
jqXHR.always( map[ jqXHR.status ] );
} else {
// Lazy-add the new callbacks in a way that preserves old ones
for ( code in map ) {
statusCode[ code ] = [ statusCode[ code ], map[ code ] ];
}
}
}
return this;
},
// Cancel the request
abort: function( statusText ) {
var finalText = statusText || strAbort;
if ( transport ) {
transport.abort( finalText );
}
done( 0, finalText );
return this;
}
};
// Attach deferreds
deferred.promise( jqXHR );
// Add protocol if not provided (prefilters might expect it)
// Handle falsy url in the settings object (#10093: consistency with old signature)
// We also use the url parameter if available
s.url = ( ( url || s.url || location.href ) + "" )
.replace( rprotocol, location.protocol + "//" );
// Alias method option to type as per ticket #12004
s.type = options.method || options.type || s.method || s.type;
// Extract dataTypes list
s.dataTypes = ( s.dataType || "*" ).toLowerCase().match( rnothtmlwhite ) || [ "" ];
// A cross-domain request is in order when the origin doesn't match the current origin.
if ( s.crossDomain == null ) {
urlAnchor = document.createElement( "a" );
// Support: IE <=8 - 11, Edge 12 - 15
// IE throws exception on accessing the href property if url is malformed,
// e.g. http://example.com:80x/
try {
urlAnchor.href = s.url;
// Support: IE <=8 - 11 only
// Anchor's host property isn't correctly set when s.url is relative
urlAnchor.href = urlAnchor.href;
s.crossDomain = originAnchor.protocol + "//" + originAnchor.host !==
urlAnchor.protocol + "//" + urlAnchor.host;
} catch ( e ) {
// If there is an error parsing the URL, assume it is crossDomain,
// it can be rejected by the transport if it is invalid
s.crossDomain = true;
}
}
// Convert data if not already a string
if ( s.data && s.processData && typeof s.data !== "string" ) {
s.data = jQuery.param( s.data, s.traditional );
}
// Apply prefilters
inspectPrefiltersOrTransports( prefilters, s, options, jqXHR );
// If request was aborted inside a prefilter, stop there
if ( completed ) {
return jqXHR;
}
// We can fire global events as of now if asked to
// Don't fire events if jQuery.event is undefined in an AMD-usage scenario (#15118)
fireGlobals = jQuery.event && s.global;
// Watch for a new set of requests
if ( fireGlobals && jQuery.active++ === 0 ) {
jQuery.event.trigger( "ajaxStart" );
}
// Uppercase the type
s.type = s.type.toUpperCase();
// Determine if request has content
s.hasContent = !rnoContent.test( s.type );
// Save the URL in case we're toying with the If-Modified-Since
// and/or If-None-Match header later on
// Remove hash to simplify url manipulation
cacheURL = s.url.replace( rhash, "" );
// More options handling for requests with no content
if ( !s.hasContent ) {
// Remember the hash so we can put it back
uncached = s.url.slice( cacheURL.length );
// If data is available and should be processed, append data to url
if ( s.data && ( s.processData || typeof s.data === "string" ) ) {
cacheURL += ( rquery.test( cacheURL ) ? "&" : "?" ) + s.data;
// #9682: remove data so that it's not used in an eventual retry
delete s.data;
}
// Add or update anti-cache param if needed
if ( s.cache === false ) {
cacheURL = cacheURL.replace( rantiCache, "$1" );
uncached = ( rquery.test( cacheURL ) ? "&" : "?" ) + "_=" + ( nonce.guid++ ) +
uncached;
}
// Put hash and anti-cache on the URL that will be requested (gh-1732)
s.url = cacheURL + uncached;
// Change '%20' to '+' if this is encoded form body content (gh-2658)
} else if ( s.data && s.processData &&
( s.contentType || "" ).indexOf( "application/x-www-form-urlencoded" ) === 0 ) {
s.data = s.data.replace( r20, "+" );
}
// Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
if ( s.ifModified ) {
if ( jQuery.lastModified[ cacheURL ] ) {
jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ cacheURL ] );
}
if ( jQuery.etag[ cacheURL ] ) {
jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ cacheURL ] );
}
}
// Set the correct header, if data is being sent
if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) {
jqXHR.setRequestHeader( "Content-Type", s.contentType );
}
// Set the Accepts header for the server, depending on the dataType
jqXHR.setRequestHeader(
"Accept",
s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[ 0 ] ] ?
s.accepts[ s.dataTypes[ 0 ] ] +
( s.dataTypes[ 0 ] !== "*" ? ", " + allTypes + "; q=0.01" : "" ) :
s.accepts[ "*" ]
);
// Check for headers option
for ( i in s.headers ) {
jqXHR.setRequestHeader( i, s.headers[ i ] );
}
// Allow custom headers/mimetypes and early abort
if ( s.beforeSend &&
( s.beforeSend.call( callbackContext, jqXHR, s ) === false || completed ) ) {
// Abort if not done already and return
return jqXHR.abort();
}
// Aborting is no longer a cancellation
strAbort = "abort";
// Install callbacks on deferreds
completeDeferred.add( s.complete );
jqXHR.done( s.success );
jqXHR.fail( s.error );
// Get transport
transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR );
// If no transport, we auto-abort
if ( !transport ) {
done( -1, "No Transport" );
} else {
jqXHR.readyState = 1;
// Send global event
if ( fireGlobals ) {
globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] );
}
// If request was aborted inside ajaxSend, stop there
if ( completed ) {
return jqXHR;
}
// Timeout
if ( s.async && s.timeout > 0 ) {
timeoutTimer = window.setTimeout( function() {
jqXHR.abort( "timeout" );
}, s.timeout );
}
try {
completed = false;
transport.send( requestHeaders, done );
} catch ( e ) {
// Rethrow post-completion exceptions
if ( completed ) {
throw e;
}
// Propagate others as results
done( -1, e );
}
}
// Callback for when everything is done
function done( status, nativeStatusText, responses, headers ) {
var isSuccess, success, error, response, modified,
statusText = nativeStatusText;
// Ignore repeat invocations
if ( completed ) {
return;
}
completed = true;
// Clear timeout if it exists
if ( timeoutTimer ) {
window.clearTimeout( timeoutTimer );
}
// Dereference transport for early garbage collection
// (no matter how long the jqXHR object will be used)
transport = undefined;
// Cache response headers
responseHeadersString = headers || "";
// Set readyState
jqXHR.readyState = status > 0 ? 4 : 0;
// Determine if successful
isSuccess = status >= 200 && status < 300 || status === 304;
// Get response data
if ( responses ) {
response = ajaxHandleResponses( s, jqXHR, responses );
}
// Use a noop converter for missing script
if ( !isSuccess && jQuery.inArray( "script", s.dataTypes ) > -1 ) {
s.converters[ "text script" ] = function() {};
}
// Convert no matter what (that way responseXXX fields are always set)
response = ajaxConvert( s, response, jqXHR, isSuccess );
// If successful, handle type chaining
if ( isSuccess ) {
// Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode.
if ( s.ifModified ) {
modified = jqXHR.getResponseHeader( "Last-Modified" );
if ( modified ) {
jQuery.lastModified[ cacheURL ] = modified;
}
modified = jqXHR.getResponseHeader( "etag" );
if ( modified ) {
jQuery.etag[ cacheURL ] = modified;
}
}
// if no content
if ( status === 204 || s.type === "HEAD" ) {
statusText = "nocontent";
// if not modified
} else if ( status === 304 ) {
statusText = "notmodified";
// If we have data, let's convert it
} else {
statusText = response.state;
success = response.data;
error = response.error;
isSuccess = !error;
}
} else {
// Extract error from statusText and normalize for non-aborts
error = statusText;
if ( status || !statusText ) {
statusText = "error";
if ( status < 0 ) {
status = 0;
}
}
}
// Set data for the fake xhr object
jqXHR.status = status;
jqXHR.statusText = ( nativeStatusText || statusText ) + "";
// Success/Error
if ( isSuccess ) {
deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] );
} else {
deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] );
}
// Status-dependent callbacks
jqXHR.statusCode( statusCode );
statusCode = undefined;
if ( fireGlobals ) {
globalEventContext.trigger( isSuccess ? "ajaxSuccess" : "ajaxError",
[ jqXHR, s, isSuccess ? success : error ] );
}
// Complete
completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] );
if ( fireGlobals ) {
globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] );
// Handle the global AJAX counter
if ( !( --jQuery.active ) ) {
jQuery.event.trigger( "ajaxStop" );
}
}
}
return jqXHR;
},
getJSON: function( url, data, callback ) {
return jQuery.get( url, data, callback, "json" );
},
getScript: function( url, callback ) {
return jQuery.get( url, undefined, callback, "script" );
}
} );
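// Usage sketch (illustrative, not part of the library source): a typical
// jQuery.ajax call exercising the settings machinery above. The endpoint
// and handlers are hypothetical.
//
//   jQuery.ajax( {
//       url: "/api/items",
//       type: "GET",
//       dataType: "json",
//       timeout: 5000
//   } ).done( function( data, textStatus, jqXHR ) {
//       console.log( "got", data, "status:", jqXHR.status );
//   } ).fail( function( jqXHR, textStatus, errorThrown ) {
//       console.error( "request failed:", textStatus, errorThrown );
//   } );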
jQuery.each( [ "get", "post" ], function( _i, method ) {
jQuery[ method ] = function( url, data, callback, type ) {
// Shift arguments if data argument was omitted
if ( isFunction( data ) ) {
type = type || callback;
callback = data;
data = undefined;
}
// The url can be an options object (which then must have .url)
return jQuery.ajax( jQuery.extend( {
url: url,
type: method,
dataType: type,
data: data,
success: callback
}, jQuery.isPlainObject( url ) && url ) );
};
} );
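// Illustrative sketch: the generated shorthands accept ( url, data, callback,
// type ), shifting arguments when data is omitted, so these two hypothetical
// calls are equivalent:
//
//   jQuery.get( "/api/items", function( data ) {}, "json" );
//   jQuery.get( { url: "/api/items", dataType: "json", success: function( data ) {} } );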
jQuery.ajaxPrefilter( function( s ) {
var i;
for ( i in s.headers ) {
if ( i.toLowerCase() === "content-type" ) {
s.contentType = s.headers[ i ] || "";
}
}
} );
jQuery._evalUrl = function( url, options, doc ) {
return jQuery.ajax( {
url: url,
// Make this explicit, since user can override this through ajaxSetup (#11264)
type: "GET",
dataType: "script",
cache: true,
async: false,
global: false,
// Only evaluate the response if it is successful (gh-4126)
// dataFilter is not invoked for failure responses, so using it instead
// of the default converter is kludgy but it works.
converters: {
"text script": function() {}
},
dataFilter: function( response ) {
jQuery.globalEval( response, options, doc );
}
} );
};
jQuery.fn.extend( {
wrapAll: function( html ) {
var wrap;
if ( this[ 0 ] ) {
if ( isFunction( html ) ) {
html = html.call( this[ 0 ] );
}
// The elements to wrap the target around
wrap = jQuery( html, this[ 0 ].ownerDocument ).eq( 0 ).clone( true );
if ( this[ 0 ].parentNode ) {
wrap.insertBefore( this[ 0 ] );
}
wrap.map( function() {
var elem = this;
while ( elem.firstElementChild ) {
elem = elem.firstElementChild;
}
return elem;
} ).append( this );
}
return this;
},
wrapInner: function( html ) {
if ( isFunction( html ) ) {
return this.each( function( i ) {
jQuery( this ).wrapInner( html.call( this, i ) );
} );
}
return this.each( function() {
var self = jQuery( this ),
contents = self.contents();
if ( contents.length ) {
contents.wrapAll( html );
} else {
self.append( html );
}
} );
},
wrap: function( html ) {
var htmlIsFunction = isFunction( html );
return this.each( function( i ) {
jQuery( this ).wrapAll( htmlIsFunction ? html.call( this, i ) : html );
} );
},
unwrap: function( selector ) {
this.parent( selector ).not( "body" ).each( function() {
jQuery( this ).replaceWith( this.childNodes );
} );
return this;
}
} );
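// Illustrative sketch of the wrapping helpers above, with hypothetical markup.
// Given <span id="a"></span><span id="b"></span>,
//
//   jQuery( "span" ).wrapAll( "<div class='outer'></div>" );
//
// produces one <div class="outer"> containing both spans, whereas .wrap()
// would give each span its own copy of the wrapper.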
jQuery.expr.pseudos.hidden = function( elem ) {
return !jQuery.expr.pseudos.visible( elem );
};
jQuery.expr.pseudos.visible = function( elem ) {
return !!( elem.offsetWidth || elem.offsetHeight || elem.getClientRects().length );
};
jQuery.ajaxSettings.xhr = function() {
try {
return new window.XMLHttpRequest();
} catch ( e ) {}
};
var xhrSuccessStatus = {
// File protocol always yields status code 0, assume 200
0: 200,
// Support: IE <=9 only
// #1450: sometimes IE returns 1223 when it should be 204
1223: 204
},
xhrSupported = jQuery.ajaxSettings.xhr();
support.cors = !!xhrSupported && ( "withCredentials" in xhrSupported );
support.ajax = xhrSupported = !!xhrSupported;
jQuery.ajaxTransport( function( options ) {
var callback, errorCallback;
// Cross domain only allowed if supported through XMLHttpRequest
if ( support.cors || xhrSupported && !options.crossDomain ) {
return {
send: function( headers, complete ) {
var i,
xhr = options.xhr();
xhr.open(
options.type,
options.url,
options.async,
options.username,
options.password
);
// Apply custom fields if provided
if ( options.xhrFields ) {
for ( i in options.xhrFields ) {
xhr[ i ] = options.xhrFields[ i ];
}
}
// Override mime type if needed
if ( options.mimeType && xhr.overrideMimeType ) {
xhr.overrideMimeType( options.mimeType );
}
// X-Requested-With header
// For cross-domain requests, seeing as conditions for a preflight are
// akin to a jigsaw puzzle, we simply never set it to be sure.
// (it can always be set on a per-request basis or even using ajaxSetup)
// For same-domain requests, won't change header if already provided.
if ( !options.crossDomain && !headers[ "X-Requested-With" ] ) {
headers[ "X-Requested-With" ] = "XMLHttpRequest";
}
// Set headers
for ( i in headers ) {
xhr.setRequestHeader( i, headers[ i ] );
}
// Callback
callback = function( type ) {
return function() {
if ( callback ) {
callback = errorCallback = xhr.onload =
xhr.onerror = xhr.onabort = xhr.ontimeout =
xhr.onreadystatechange = null;
if ( type === "abort" ) {
xhr.abort();
} else if ( type === "error" ) {
// Support: IE <=9 only
// On a manual native abort, IE9 throws
// errors on any property access that is not readyState
if ( typeof xhr.status !== "number" ) {
complete( 0, "error" );
} else {
complete(
// File: protocol always yields status 0; see #8605, #14207
xhr.status,
xhr.statusText
);
}
} else {
complete(
xhrSuccessStatus[ xhr.status ] || xhr.status,
xhr.statusText,
// Support: IE <=9 only
// IE9 has no XHR2 but throws on binary (trac-11426)
// For XHR2 non-text, let the caller handle it (gh-2498)
( xhr.responseType || "text" ) !== "text" ||
typeof xhr.responseText !== "string" ?
{ binary: xhr.response } :
{ text: xhr.responseText },
xhr.getAllResponseHeaders()
);
}
}
};
};
// Listen to events
xhr.onload = callback();
errorCallback = xhr.onerror = xhr.ontimeout = callback( "error" );
// Support: IE 9 only
// Use onreadystatechange to replace onabort
// to handle uncaught aborts
if ( xhr.onabort !== undefined ) {
xhr.onabort = errorCallback;
} else {
xhr.onreadystatechange = function() {
// Check readyState before timeout as it changes
if ( xhr.readyState === 4 ) {
// Allow onerror to be called first,
// but that will not handle a native abort
// Also, save errorCallback to a variable
// as xhr.onerror cannot be accessed
window.setTimeout( function() {
if ( callback ) {
errorCallback();
}
} );
}
};
}
// Create the abort callback
callback = callback( "abort" );
try {
// Do send the request (this may raise an exception)
xhr.send( options.hasContent && options.data || null );
} catch ( e ) {
// #14683: Only rethrow if this hasn't been notified as an error yet
if ( callback ) {
throw e;
}
}
},
abort: function() {
if ( callback ) {
callback();
}
}
};
}
} );
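// Illustrative sketch: xhrFields entries are copied verbatim onto the native
// XHR by the transport above, e.g. to send cookies on a CORS request
// (endpoint hypothetical):
//
//   jQuery.ajax( {
//       url: "https://api.example.com/me",
//       xhrFields: { withCredentials: true }
//   } );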
// Prevent auto-execution of scripts when no explicit dataType was provided (See gh-2432)
jQuery.ajaxPrefilter( function( s ) {
if ( s.crossDomain ) {
s.contents.script = false;
}
} );
// Install script dataType
jQuery.ajaxSetup( {
accepts: {
script: "text/javascript, application/javascript, " +
"application/ecmascript, application/x-ecmascript"
},
contents: {
script: /\b(?:java|ecma)script\b/
},
converters: {
"text script": function( text ) {
jQuery.globalEval( text );
return text;
}
}
} );
// Handle cache's special case and crossDomain
jQuery.ajaxPrefilter( "script", function( s ) {
if ( s.cache === undefined ) {
s.cache = false;
}
if ( s.crossDomain ) {
s.type = "GET";
}
} );
// Bind script tag hack transport
jQuery.ajaxTransport( "script", function( s ) {
// This transport only deals with cross domain or forced-by-attrs requests
if ( s.crossDomain || s.scriptAttrs ) {
var script, callback;
return {
send: function( _, complete ) {
script = jQuery( "<script>" )
.attr( s.scriptAttrs || {} )
.prop( { charset: s.scriptCharset, src: s.url } )
.on( "load error", callback = function( evt ) {
script.remove();
callback = null;
if ( evt ) {
complete( evt.type === "error" ? 404 : 200, evt.type );
}
} );
// Use native DOM manipulation to avoid our domManip AJAX trickery
document.head.appendChild( script[ 0 ] );
},
abort: function() {
if ( callback ) {
callback();
}
}
};
}
} );
var oldCallbacks = [],
rjsonp = /(=)\?(?=&|$)|\?\?/;
// Default jsonp settings
jQuery.ajaxSetup( {
jsonp: "callback",
jsonpCallback: function() {
var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce.guid++ ) );
this[ callback ] = true;
return callback;
}
} );
// Detect, normalize options and install callbacks for jsonp requests
jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) {
var callbackName, overwritten, responseContainer,
jsonProp = s.jsonp !== false && ( rjsonp.test( s.url ) ?
"url" :
typeof s.data === "string" &&
( s.contentType || "" )
.indexOf( "application/x-www-form-urlencoded" ) === 0 &&
rjsonp.test( s.data ) && "data"
);
// Handle iff the expected data type is "jsonp" or we have a parameter to set
if ( jsonProp || s.dataTypes[ 0 ] === "jsonp" ) {
// Get callback name, remembering preexisting value associated with it
callbackName = s.jsonpCallback = isFunction( s.jsonpCallback ) ?
s.jsonpCallback() :
s.jsonpCallback;
// Insert callback into url or form data
if ( jsonProp ) {
s[ jsonProp ] = s[ jsonProp ].replace( rjsonp, "$1" + callbackName );
} else if ( s.jsonp !== false ) {
s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.jsonp + "=" + callbackName;
}
// Use data converter to retrieve json after script execution
s.converters[ "script json" ] = function() {
if ( !responseContainer ) {
jQuery.error( callbackName + " was not called" );
}
return responseContainer[ 0 ];
};
// Force json dataType
s.dataTypes[ 0 ] = "json";
// Install callback
overwritten = window[ callbackName ];
window[ callbackName ] = function() {
responseContainer = arguments;
};
// Clean-up function (fires after converters)
jqXHR.always( function() {
// If previous value didn't exist - remove it
if ( overwritten === undefined ) {
jQuery( window ).removeProp( callbackName );
// Otherwise restore preexisting value
} else {
window[ callbackName ] = overwritten;
}
// Save back as free
if ( s[ callbackName ] ) {
// Make sure that re-using the options doesn't screw things around
s.jsonpCallback = originalSettings.jsonpCallback;
// Save the callback name for future use
oldCallbacks.push( callbackName );
}
// Call if it was a function and we have a response
if ( responseContainer && isFunction( overwritten ) ) {
overwritten( responseContainer[ 0 ] );
}
responseContainer = overwritten = undefined;
} );
// Delegate to script
return "script";
}
} );
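// Illustrative sketch: a JSONP request handled by the prefilter above. The
// "callback=?" placeholder is rewritten to a generated callback name and the
// response script invokes it (endpoint hypothetical):
//
//   jQuery.ajax( {
//       url: "https://api.example.com/widgets?callback=?",
//       dataType: "jsonp"
//   } ).done( function( json ) {
//       console.log( json );
//   } );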
// Support: Safari 8 only
// In Safari 8 documents created via document.implementation.createHTMLDocument
// collapse sibling forms: the second one becomes a child of the first one.
// Because of that, this security measure has to be disabled in Safari 8.
// https://bugs.webkit.org/show_bug.cgi?id=137337
support.createHTMLDocument = ( function() {
var body = document.implementation.createHTMLDocument( "" ).body;
body.innerHTML = "<form></form><form></form>";
return body.childNodes.length === 2;
} )();
// Argument "data" should be string of html
// context (optional): If specified, the fragment will be created in this context,
// defaults to document
// keepScripts (optional): If true, will include scripts passed in the html string
jQuery.parseHTML = function( data, context, keepScripts ) {
if ( typeof data !== "string" ) {
return [];
}
if ( typeof context === "boolean" ) {
keepScripts = context;
context = false;
}
var base, parsed, scripts;
if ( !context ) {
// Stop scripts or inline event handlers from being executed immediately
// by using document.implementation
if ( support.createHTMLDocument ) {
context = document.implementation.createHTMLDocument( "" );
// Set the base href for the created document
// so any parsed elements with URLs
// are based on the document's URL (gh-2965)
base = context.createElement( "base" );
base.href = document.location.href;
context.head.appendChild( base );
} else {
context = document;
}
}
parsed = rsingleTag.exec( data );
scripts = !keepScripts && [];
// Single tag
if ( parsed ) {
return [ context.createElement( parsed[ 1 ] ) ];
}
parsed = buildFragment( [ data ], context, scripts );
if ( scripts && scripts.length ) {
jQuery( scripts ).remove();
}
return jQuery.merge( [], parsed.childNodes );
};
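// Illustrative sketch: parseHTML returns an array of DOM nodes and, unless
// keepScripts is true, strips script elements:
//
//   var nodes = jQuery.parseHTML( "<p>hi</p><script>alert( 1 )<\/script>" );
//   // nodes.length === 1; nodes[ 0 ].nodeName === "P"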
/**
* Load a url into a page
*/
jQuery.fn.load = function( url, params, callback ) {
var selector, type, response,
self = this,
off = url.indexOf( " " );
if ( off > -1 ) {
selector = stripAndCollapse( url.slice( off ) );
url = url.slice( 0, off );
}
// If it's a function
if ( isFunction( params ) ) {
// We assume that it's the callback
callback = params;
params = undefined;
// Otherwise, build a param string
} else if ( params && typeof params === "object" ) {
type = "POST";
}
// If we have elements to modify, make the request
if ( self.length > 0 ) {
jQuery.ajax( {
url: url,
// If "type" variable is undefined, then "GET" method will be used.
// Make value of this field explicit since
// user can override it through ajaxSetup method
type: type || "GET",
dataType: "html",
data: params
} ).done( function( responseText ) {
// Save response for use in complete callback
response = arguments;
self.html( selector ?
// If a selector was specified, locate the right elements in a dummy div
// Exclude scripts to avoid IE 'Permission Denied' errors
jQuery( "<div>" ).append( jQuery.parseHTML( responseText ) ).find( selector ) :
// Otherwise use the full result
responseText );
// If the request succeeds, this function gets "data", "status", "jqXHR"
// but they are ignored because response was set above.
// If it fails, this function gets "jqXHR", "status", "error"
} ).always( callback && function( jqXHR, status ) {
self.each( function() {
callback.apply( this, response || [ jqXHR.responseText, status, jqXHR ] );
} );
} );
}
return this;
};
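// Illustrative sketch: loading a fragment into a container. A selector after
// a space in the url filters the fetched document (url and selector are
// hypothetical):
//
//   jQuery( "#result" ).load( "/page.html #content", function() {
//       console.log( "fragment inserted" );
//   } );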
jQuery.expr.pseudos.animated = function( elem ) {
return jQuery.grep( jQuery.timers, function( fn ) {
return elem === fn.elem;
} ).length;
};
jQuery.offset = {
setOffset: function( elem, options, i ) {
var curPosition, curLeft, curCSSTop, curTop, curOffset, curCSSLeft, calculatePosition,
position = jQuery.css( elem, "position" ),
curElem = jQuery( elem ),
props = {};
// Set position first, in case top/left are set even on a static elem
if ( position === "static" ) {
elem.style.position = "relative";
}
curOffset = curElem.offset();
curCSSTop = jQuery.css( elem, "top" );
curCSSLeft = jQuery.css( elem, "left" );
calculatePosition = ( position === "absolute" || position === "fixed" ) &&
( curCSSTop + curCSSLeft ).indexOf( "auto" ) > -1;
// Need to be able to calculate position if either
// top or left is auto and position is either absolute or fixed
if ( calculatePosition ) {
curPosition = curElem.position();
curTop = curPosition.top;
curLeft = curPosition.left;
} else {
curTop = parseFloat( curCSSTop ) || 0;
curLeft = parseFloat( curCSSLeft ) || 0;
}
if ( isFunction( options ) ) {
// Use jQuery.extend here to allow modification of coordinates argument (gh-1848)
options = options.call( elem, i, jQuery.extend( {}, curOffset ) );
}
if ( options.top != null ) {
props.top = ( options.top - curOffset.top ) + curTop;
}
if ( options.left != null ) {
props.left = ( options.left - curOffset.left ) + curLeft;
}
if ( "using" in options ) {
options.using.call( elem, props );
} else {
if ( typeof props.top === "number" ) {
props.top += "px";
}
if ( typeof props.left === "number" ) {
props.left += "px";
}
curElem.css( props );
}
}
};
jQuery.fn.extend( {
// offset() relates an element's border box to the document origin
offset: function( options ) {
// Preserve chaining for setter
if ( arguments.length ) {
return options === undefined ?
this :
this.each( function( i ) {
jQuery.offset.setOffset( this, options, i );
} );
}
var rect, win,
elem = this[ 0 ];
if ( !elem ) {
return;
}
// Return zeros for disconnected and hidden (display: none) elements (gh-2310)
// Support: IE <=11 only
// Running getBoundingClientRect on a
// disconnected node in IE throws an error
if ( !elem.getClientRects().length ) {
return { top: 0, left: 0 };
}
// Get document-relative position by adding viewport scroll to viewport-relative gBCR
rect = elem.getBoundingClientRect();
win = elem.ownerDocument.defaultView;
return {
top: rect.top + win.pageYOffset,
left: rect.left + win.pageXOffset
};
},
// position() relates an element's margin box to its offset parent's padding box
// This corresponds to the behavior of CSS absolute positioning
position: function() {
if ( !this[ 0 ] ) {
return;
}
var offsetParent, offset, doc,
elem = this[ 0 ],
parentOffset = { top: 0, left: 0 };
// position:fixed elements are offset from the viewport, which itself always has zero offset
if ( jQuery.css( elem, "position" ) === "fixed" ) {
// Assume position:fixed implies availability of getBoundingClientRect
offset = elem.getBoundingClientRect();
} else {
offset = this.offset();
// Account for the *real* offset parent, which can be the document or its root element
// when a statically positioned element is identified
doc = elem.ownerDocument;
offsetParent = elem.offsetParent || doc.documentElement;
while ( offsetParent &&
( offsetParent === doc.body || offsetParent === doc.documentElement ) &&
jQuery.css( offsetParent, "position" ) === "static" ) {
offsetParent = offsetParent.parentNode;
}
if ( offsetParent && offsetParent !== elem && offsetParent.nodeType === 1 ) {
// Incorporate borders into its offset, since they are outside its content origin
parentOffset = jQuery( offsetParent ).offset();
parentOffset.top += jQuery.css( offsetParent, "borderTopWidth", true );
parentOffset.left += jQuery.css( offsetParent, "borderLeftWidth", true );
}
}
// Subtract parent offsets and element margins
return {
top: offset.top - parentOffset.top - jQuery.css( elem, "marginTop", true ),
left: offset.left - parentOffset.left - jQuery.css( elem, "marginLeft", true )
};
},
// This method will return documentElement in the following cases:
// 1) For the element inside the iframe without offsetParent, this method will return
// documentElement of the parent window
// 2) For the hidden or detached element
// 3) For body or html element, i.e. in case of the html node - it will return itself
//
// but those exceptions were never presented as real-life use cases
// and might be considered more preferable results.
//
// This logic, however, is not guaranteed and can change at any point in the future
offsetParent: function() {
return this.map( function() {
var offsetParent = this.offsetParent;
while ( offsetParent && jQuery.css( offsetParent, "position" ) === "static" ) {
offsetParent = offsetParent.offsetParent;
}
return offsetParent || documentElement;
} );
}
} );
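// Illustrative sketch: offset() is document-relative while position() is
// relative to the offset parent, so for a hypothetical absolutely
// positioned child the two generally differ:
//
//   jQuery( "#child" ).offset();   // e.g. { top: 120, left: 40 }
//   jQuery( "#child" ).position(); // e.g. { top: 20, left: 10 }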
// Create scrollLeft and scrollTop methods
jQuery.each( { scrollLeft: "pageXOffset", scrollTop: "pageYOffset" }, function( method, prop ) {
var top = "pageYOffset" === prop;
jQuery.fn[ method ] = function( val ) {
return access( this, function( elem, method, val ) {
// Coalesce documents and windows
var win;
if ( isWindow( elem ) ) {
win = elem;
} else if ( elem.nodeType === 9 ) {
win = elem.defaultView;
}
if ( val === undefined ) {
return win ? win[ prop ] : elem[ method ];
}
if ( win ) {
win.scrollTo(
!top ? val : win.pageXOffset,
top ? val : win.pageYOffset
);
} else {
elem[ method ] = val;
}
}, method, val, arguments.length );
};
} );
// Support: Safari <=7 - 9.1, Chrome <=37 - 49
// Add the top/left cssHooks using jQuery.fn.position
// Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084
// Blink bug: https://bugs.chromium.org/p/chromium/issues/detail?id=589347
// getComputedStyle returns percent when specified for top/left/bottom/right;
// rather than make the css module depend on the offset module, just check for it here
jQuery.each( [ "top", "left" ], function( _i, prop ) {
jQuery.cssHooks[ prop ] = addGetHookIf( support.pixelPosition,
function( elem, computed ) {
if ( computed ) {
computed = curCSS( elem, prop );
// If curCSS returns percentage, fallback to offset
return rnumnonpx.test( computed ) ?
jQuery( elem ).position()[ prop ] + "px" :
computed;
}
}
);
} );
// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods
jQuery.each( { Height: "height", Width: "width" }, function( name, type ) {
jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name },
function( defaultExtra, funcName ) {
// Margin is only for outerHeight, outerWidth
jQuery.fn[ funcName ] = function( margin, value ) {
var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ),
extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" );
return access( this, function( elem, type, value ) {
var doc;
if ( isWindow( elem ) ) {
// $( window ).outerWidth/Height return w/h including scrollbars (gh-1729)
return funcName.indexOf( "outer" ) === 0 ?
elem[ "inner" + name ] :
elem.document.documentElement[ "client" + name ];
}
// Get document width or height
if ( elem.nodeType === 9 ) {
doc = elem.documentElement;
// Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height],
// whichever is greatest
return Math.max(
elem.body[ "scroll" + name ], doc[ "scroll" + name ],
elem.body[ "offset" + name ], doc[ "offset" + name ],
doc[ "client" + name ]
);
}
return value === undefined ?
// Get width or height on the element, requesting but not forcing parseFloat
jQuery.css( elem, type, extra ) :
// Set width or height on the element
jQuery.style( elem, type, value, extra );
}, type, chainable ? margin : undefined, chainable );
};
} );
} );
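// Illustrative sketch: the generated size accessors differ only in which
// box edges they include (element id hypothetical):
//
//   jQuery( "#box" ).width();            // content box
//   jQuery( "#box" ).innerWidth();       // content + padding
//   jQuery( "#box" ).outerWidth();       // content + padding + border
//   jQuery( "#box" ).outerWidth( true ); // content + padding + border + margin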
jQuery.each( [
"ajaxStart",
"ajaxStop",
"ajaxComplete",
"ajaxError",
"ajaxSuccess",
"ajaxSend"
], function( _i, type ) {
jQuery.fn[ type ] = function( fn ) {
return this.on( type, fn );
};
} );
jQuery.fn.extend( {
bind: function( types, data, fn ) {
return this.on( types, null, data, fn );
},
unbind: function( types, fn ) {
return this.off( types, null, fn );
},
delegate: function( selector, types, data, fn ) {
return this.on( types, selector, data, fn );
},
undelegate: function( selector, types, fn ) {
// ( namespace ) or ( selector, types [, fn] )
return arguments.length === 1 ?
this.off( selector, "**" ) :
this.off( types, selector || "**", fn );
},
hover: function( fnOver, fnOut ) {
return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver );
}
} );
jQuery.each( ( "blur focus focusin focusout resize scroll click dblclick " +
"mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " +
"change select submit keydown keypress keyup contextmenu" ).split( " " ),
function( _i, name ) {
// Handle event binding
jQuery.fn[ name ] = function( data, fn ) {
return arguments.length > 0 ?
this.on( name, null, data, fn ) :
this.trigger( name );
};
} );
// Support: Android <=4.0 only
// Make sure we trim BOM and NBSP
var rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g;
// Bind a function to a context, optionally partially applying any
// arguments.
// jQuery.proxy is deprecated to promote standards (specifically Function#bind)
// However, it is not slated for removal any time soon
jQuery.proxy = function( fn, context ) {
var tmp, args, proxy;
if ( typeof context === "string" ) {
tmp = fn[ context ];
context = fn;
fn = tmp;
}
// Quick check to determine if the target is callable; in the spec
// this throws a TypeError, but we will just return undefined.
if ( !isFunction( fn ) ) {
return undefined;
}
// Simulated bind
args = slice.call( arguments, 2 );
proxy = function() {
return fn.apply( context || this, args.concat( slice.call( arguments ) ) );
};
// Set the guid of unique handler to the same of original handler, so it can be removed
proxy.guid = fn.guid = fn.guid || jQuery.guid++;
return proxy;
};
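// Illustrative sketch: proxy() fixes the this-binding and can partially
// apply arguments, much like Function#bind (object and message are
// hypothetical):
//
//   var logger = {
//       prefix: "app:",
//       log: function( msg ) {
//           console.log( this.prefix, msg );
//       }
//   };
//   window.setTimeout( jQuery.proxy( logger.log, logger, "started" ), 0 );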
jQuery.holdReady = function( hold ) {
if ( hold ) {
jQuery.readyWait++;
} else {
jQuery.ready( true );
}
};
jQuery.isArray = Array.isArray;
jQuery.parseJSON = JSON.parse;
jQuery.nodeName = nodeName;
jQuery.isFunction = isFunction;
jQuery.isWindow = isWindow;
jQuery.camelCase = camelCase;
jQuery.type = toType;
jQuery.now = Date.now;
jQuery.isNumeric = function( obj ) {
// As of jQuery 3.0, isNumeric is limited to
// strings and numbers (primitives or objects)
// that can be coerced to finite numbers (gh-2662)
var type = jQuery.type( obj );
return ( type === "number" || type === "string" ) &&
// parseFloat NaNs numeric-cast false positives ("")
// ...but misinterprets leading-number strings, particularly hex literals ("0x...")
// subtraction forces infinities to NaN
!isNaN( obj - parseFloat( obj ) );
};
jQuery.trim = function( text ) {
return text == null ?
"" :
( text + "" ).replace( rtrim, "" );
};
// Register as a named AMD module, since jQuery can be concatenated with other
// files that may use define, but not via a proper concatenation script that
// understands anonymous AMD modules. A named AMD is the safest and most robust
// way to register. Lowercase jquery is used because AMD module names are
// derived from file names, and jQuery is normally delivered in a lowercase
// file name. Do this after creating the global so that if an AMD module wants
// to call noConflict to hide this version of jQuery, it will work.
// Note that for maximum portability, libraries that are not jQuery should
// declare themselves as anonymous modules, and avoid setting a global if an
// AMD loader is present. jQuery is a special case. For more information, see
// https://github.com/jrburke/requirejs/wiki/Updating-existing-libraries#wiki-anon
if ( typeof define === "function" && define.amd ) {
define( "jquery", [], function() {
return jQuery;
} );
}
var
// Map over jQuery in case of overwrite
_jQuery = window.jQuery,
// Map over the $ in case of overwrite
_$ = window.$;
jQuery.noConflict = function( deep ) {
if ( window.$ === jQuery ) {
window.$ = _$;
}
if ( deep && window.jQuery === jQuery ) {
window.jQuery = _jQuery;
}
return jQuery;
};
// Expose jQuery and $ identifiers, even in AMD
// (#7102#comment:10, https://github.com/jquery/jquery/pull/557)
// and CommonJS for browser emulators (#13566)
if ( typeof noGlobal === "undefined" ) {
window.jQuery = window.$ = jQuery;
}
return jQuery;
} );
----- File: Django-locallibrary/LocalLibrary/staticfiles/admin/js/vendor/jquery/jquery.23c7c5d2d131.js -----
"""Contains the Command base classes that depend on PipSession.
The classes in this module are kept in a separate module so that commands
not needing download / PackageFinder capability don't unnecessarily import
the PackageFinder machinery and all its vendored dependencies.
"""
import logging
import os
from functools import partial
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.cli.command_context import CommandContextMixIn
from pip._internal.exceptions import CommandError, PreviousBuildDirError
from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.network.download import Downloader
from pip._internal.network.session import PipSession
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.constructors import (
install_req_from_editable,
install_req_from_line,
install_req_from_parsed_requirement,
install_req_from_req_string,
)
from pip._internal.req.req_file import parse_requirements
from pip._internal.self_outdated_check import pip_self_version_check
from pip._internal.utils.temp_dir import tempdir_kinds
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Any, List, Optional, Tuple
from pip._internal.cache import WheelCache
from pip._internal.models.target_python import TargetPython
from pip._internal.req.req_install import InstallRequirement
from pip._internal.req.req_tracker import RequirementTracker
from pip._internal.resolution.base import BaseResolver
from pip._internal.utils.temp_dir import (
TempDirectory,
TempDirectoryTypeRegistry,
)
logger = logging.getLogger(__name__)
class SessionCommandMixin(CommandContextMixIn):
"""
A class mixin for command classes needing _build_session().
"""
def __init__(self):
# type: () -> None
super(SessionCommandMixin, self).__init__()
self._session = None # Optional[PipSession]
@classmethod
def _get_index_urls(cls, options):
# type: (Values) -> Optional[List[str]]
"""Return a list of index urls from user-provided options."""
index_urls = []
if not getattr(options, "no_index", False):
url = getattr(options, "index_url", None)
if url:
index_urls.append(url)
urls = getattr(options, "extra_index_urls", None)
if urls:
index_urls.extend(urls)
# Return None rather than an empty list
return index_urls or None
def get_default_session(self, options):
# type: (Values) -> PipSession
"""Get a default-managed session."""
if self._session is None:
self._session = self.enter_context(self._build_session(options))
# there's no type annotation on requests.Session, so it's
# automatically ContextManager[Any] and self._session becomes Any,
# then https://github.com/python/mypy/issues/7696 kicks in
assert self._session is not None
return self._session
def _build_session(self, options, retries=None, timeout=None):
# type: (Values, Optional[int], Optional[int]) -> PipSession
assert not options.cache_dir or os.path.isabs(options.cache_dir)
session = PipSession(
cache=(
os.path.join(options.cache_dir, "http")
if options.cache_dir else None
),
retries=retries if retries is not None else options.retries,
trusted_hosts=options.trusted_hosts,
index_urls=self._get_index_urls(options),
)
# Handle custom ca-bundles from the user
if options.cert:
session.verify = options.cert
# Handle SSL client certificate
if options.client_cert:
session.cert = options.client_cert
# Handle timeouts
if options.timeout or timeout:
session.timeout = (
timeout if timeout is not None else options.timeout
)
# Handle configured proxies
if options.proxy:
session.proxies = {
"http": options.proxy,
"https": options.proxy,
}
# Determine if we can prompt the user for authentication or not
session.auth.prompting = not options.no_input
return session
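    # Illustrative sketch (not part of pip): the option-to-session mapping
    # performed above, assuming hypothetical parsed option values.
    #
    #   options.proxy = "http://proxy:3128"  ->  session.proxies["http"/"https"]
    #   options.cert = "/path/to/ca.pem"     ->  session.verify
    #   options.client_cert = "/path/c.pem"  ->  session.cert
    #   options.timeout = 15                 ->  session.timeout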
class IndexGroupCommand(Command, SessionCommandMixin):
"""
Abstract base class for commands with the index_group options.
This also corresponds to the commands that permit the pip version check.
"""
def handle_pip_version_check(self, options):
# type: (Values) -> None
"""
Do the pip version check if not disabled.
This overrides the default behavior of not doing the check.
"""
# Make sure the index_group options are present.
assert hasattr(options, 'no_index')
if options.disable_pip_version_check or options.no_index:
return
# Otherwise, check if we're using the latest version of pip available.
session = self._build_session(
options,
retries=0,
timeout=min(5, options.timeout)
)
with session:
pip_self_version_check(session, options)
KEEPABLE_TEMPDIR_TYPES = [
tempdir_kinds.BUILD_ENV,
tempdir_kinds.EPHEM_WHEEL_CACHE,
tempdir_kinds.REQ_BUILD,
]
def with_cleanup(func):
# type: (Any) -> Any
"""Decorator for common logic related to managing temporary
directories.
"""
def configure_tempdir_registry(registry):
# type: (TempDirectoryTypeRegistry) -> None
for t in KEEPABLE_TEMPDIR_TYPES:
registry.set_delete(t, False)
def wrapper(self, options, args):
# type: (RequirementCommand, Values, List[Any]) -> Optional[int]
assert self.tempdir_registry is not None
if options.no_clean:
configure_tempdir_registry(self.tempdir_registry)
try:
return func(self, options, args)
except PreviousBuildDirError:
# This kind of conflict can occur when the user passes an explicit
# build directory with a pre-existing folder. In that case we do
# not want to accidentally remove it.
configure_tempdir_registry(self.tempdir_registry)
raise
return wrapper
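# Illustrative sketch: how a command opts into the cleanup handling above
# (the subclass and its run body are hypothetical):
#
#   class DownloadCommand(RequirementCommand):
#       @with_cleanup
#       def run(self, options, args):
#           ...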
class RequirementCommand(IndexGroupCommand):
def __init__(self, *args, **kw):
# type: (Any, Any) -> None
super(RequirementCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(cmdoptions.no_clean())
@staticmethod
def make_requirement_preparer(
temp_build_dir, # type: TempDirectory
options, # type: Values
req_tracker, # type: RequirementTracker
session, # type: PipSession
finder, # type: PackageFinder
use_user_site, # type: bool
download_dir=None, # type: str
wheel_download_dir=None, # type: str
):
# type: (...) -> RequirementPreparer
"""
Create a RequirementPreparer instance for the given parameters.
"""
downloader = Downloader(session, progress_bar=options.progress_bar)
temp_build_dir_path = temp_build_dir.path
assert temp_build_dir_path is not None
return RequirementPreparer(
build_dir=temp_build_dir_path,
src_dir=options.src_dir,
download_dir=download_dir,
wheel_download_dir=wheel_download_dir,
build_isolation=options.build_isolation,
req_tracker=req_tracker,
downloader=downloader,
finder=finder,
require_hashes=options.require_hashes,
use_user_site=use_user_site,
)
@staticmethod
def make_resolver(
preparer, # type: RequirementPreparer
finder, # type: PackageFinder
options, # type: Values
wheel_cache=None, # type: Optional[WheelCache]
use_user_site=False, # type: bool
ignore_installed=True, # type: bool
ignore_requires_python=False, # type: bool
force_reinstall=False, # type: bool
upgrade_strategy="to-satisfy-only", # type: str
use_pep517=None, # type: Optional[bool]
py_version_info=None # type: Optional[Tuple[int, ...]]
):
# type: (...) -> BaseResolver
"""
Create a Resolver instance for the given parameters.
"""
make_install_req = partial(
install_req_from_req_string,
isolated=options.isolated_mode,
use_pep517=use_pep517,
)
# The long import name and duplicated invocation are needed to convince
# Mypy into correctly typechecking. Otherwise it would complain about the
# "Resolver" class being redefined.
if '2020-resolver' in options.features_enabled:
import pip._internal.resolution.resolvelib.resolver
return pip._internal.resolution.resolvelib.resolver.Resolver(
preparer=preparer,
finder=finder,
wheel_cache=wheel_cache,
make_install_req=make_install_req,
use_user_site=use_user_site,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
force_reinstall=force_reinstall,
upgrade_strategy=upgrade_strategy,
py_version_info=py_version_info,
lazy_wheel='fast-deps' in options.features_enabled,
)
import pip._internal.resolution.legacy.resolver
return pip._internal.resolution.legacy.resolver.Resolver(
preparer=preparer,
finder=finder,
wheel_cache=wheel_cache,
make_install_req=make_install_req,
use_user_site=use_user_site,
ignore_dependencies=options.ignore_dependencies,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
force_reinstall=force_reinstall,
upgrade_strategy=upgrade_strategy,
py_version_info=py_version_info,
)
def get_requirements(
self,
args, # type: List[str]
options, # type: Values
finder, # type: PackageFinder
session, # type: PipSession
):
# type: (...) -> List[InstallRequirement]
"""
Parse command-line arguments into the corresponding requirements.
"""
requirements = [] # type: List[InstallRequirement]
for filename in options.constraints:
for parsed_req in parse_requirements(
filename,
constraint=True, finder=finder, options=options,
session=session):
req_to_add = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode,
user_supplied=False,
)
requirements.append(req_to_add)
for req in args:
req_to_add = install_req_from_line(
req, None, isolated=options.isolated_mode,
use_pep517=options.use_pep517,
user_supplied=True,
)
requirements.append(req_to_add)
for req in options.editables:
req_to_add = install_req_from_editable(
req,
user_supplied=True,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
)
requirements.append(req_to_add)
# NOTE: options.require_hashes may be set if --require-hashes is True
for filename in options.requirements:
for parsed_req in parse_requirements(
filename,
finder=finder, options=options, session=session):
req_to_add = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode,
use_pep517=options.use_pep517,
user_supplied=True,
)
requirements.append(req_to_add)
# If any requirement has hash options, enable hash checking.
if any(req.has_hash_options for req in requirements):
options.require_hashes = True
if not (args or options.editables or options.requirements):
opts = {'name': self.name}
if options.find_links:
raise CommandError(
'You must give at least one requirement to {name} '
'(maybe you meant "pip {name} {links}"?)'.format(
**dict(opts, links=' '.join(options.find_links))))
else:
raise CommandError(
'You must give at least one requirement to {name} '
'(see "pip help {name}")'.format(**opts))
return requirements
@staticmethod
def trace_basic_info(finder):
# type: (PackageFinder) -> None
"""
Trace basic information about the provided objects.
"""
# Display where finder is looking for packages
search_scope = finder.search_scope
locations = search_scope.get_formatted_locations()
if locations:
logger.info(locations)
def _build_package_finder(
self,
options, # type: Values
session, # type: PipSession
target_python=None, # type: Optional[TargetPython]
ignore_requires_python=None, # type: Optional[bool]
):
# type: (...) -> PackageFinder
"""
Create a package finder appropriate to this requirement command.
:param ignore_requires_python: Whether to ignore incompatible
"Requires-Python" values in links. Defaults to False.
"""
link_collector = LinkCollector.create(session, options=options)
selection_prefs = SelectionPreferences(
allow_yanked=True,
format_control=options.format_control,
allow_all_prereleases=options.pre,
prefer_binary=options.prefer_binary,
ignore_requires_python=ignore_requires_python,
)
return PackageFinder.create(
link_collector=link_collector,
selection_prefs=selection_prefs,
target_python=target_python,
)
----- File: Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/req_command.py -----
from __future__ import absolute_import
import logging
import os
from email.parser import FeedParser
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.utils.misc import write_output
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import List, Dict, Iterator
logger = logging.getLogger(__name__)
class ShowCommand(Command):
"""
Show information about one or more installed packages.
The output is in RFC-compliant mail header format.
"""
usage = """
%prog [options] <package> ..."""
ignore_require_venv = True
def add_options(self):
# type: () -> None
self.cmd_opts.add_option(
'-f', '--files',
dest='files',
action='store_true',
default=False,
help='Show the full list of installed files for each package.')
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
# type: (Values, List[str]) -> int
if not args:
logger.warning('ERROR: Please provide a package name or names.')
return ERROR
query = args
results = search_packages_info(query)
if not print_results(
results, list_files=options.files, verbose=options.verbose):
return ERROR
return SUCCESS
def search_packages_info(query):
# type: (List[str]) -> Iterator[Dict[str, str]]
"""
Gather details from installed distributions: name, version, location,
and installed files. Listing installed files requires a pip-generated
'installed-files.txt' in the distribution's '.egg-info' directory.
"""
installed = {}
for p in pkg_resources.working_set:
installed[canonicalize_name(p.project_name)] = p
query_names = [canonicalize_name(name) for name in query]
missing = sorted(
[name for name, pkg in zip(query, query_names) if pkg not in installed]
)
if missing:
logger.warning('Package(s) not found: %s', ', '.join(missing))
def get_requiring_packages(package_name):
# type: (str) -> List[str]
canonical_name = canonicalize_name(package_name)
return [
pkg.project_name for pkg in pkg_resources.working_set
if canonical_name in
[canonicalize_name(required.name) for required in
pkg.requires()]
]
for dist in [installed[pkg] for pkg in query_names if pkg in installed]:
package = {
'name': dist.project_name,
'version': dist.version,
'location': dist.location,
'requires': [dep.project_name for dep in dist.requires()],
'required_by': get_requiring_packages(dist.project_name)
}
file_list = None
metadata = ''
if isinstance(dist, pkg_resources.DistInfoDistribution):
# RECORD should be part of the .dist-info metadata
if dist.has_metadata('RECORD'):
lines = dist.get_metadata_lines('RECORD')
paths = [line.split(',')[0] for line in lines]
paths = [os.path.join(dist.location, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('METADATA'):
metadata = dist.get_metadata('METADATA')
else:
# Otherwise use pip's log for .egg-info's
if dist.has_metadata('installed-files.txt'):
paths = dist.get_metadata_lines('installed-files.txt')
paths = [os.path.join(dist.egg_info, p) for p in paths]
file_list = [os.path.relpath(p, dist.location) for p in paths]
if dist.has_metadata('PKG-INFO'):
metadata = dist.get_metadata('PKG-INFO')
if dist.has_metadata('entry_points.txt'):
entry_points = dist.get_metadata_lines('entry_points.txt')
package['entry_points'] = entry_points
if dist.has_metadata('INSTALLER'):
for line in dist.get_metadata_lines('INSTALLER'):
if line.strip():
package['installer'] = line.strip()
break
# @todo: Should pkg_resources.Distribution have a
# `get_pkg_info` method?
feed_parser = FeedParser()
feed_parser.feed(metadata)
pkg_info_dict = feed_parser.close()
for key in ('metadata-version', 'summary',
'home-page', 'author', 'author-email', 'license'):
package[key] = pkg_info_dict.get(key)
# It looks like FeedParser cannot deal with repeated headers
classifiers = []
for line in metadata.splitlines():
if line.startswith('Classifier: '):
classifiers.append(line[len('Classifier: '):])
package['classifiers'] = classifiers
if file_list:
package['files'] = sorted(file_list)
yield package
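# Illustrative sketch: consuming the generator above (package name
# hypothetical):
#
#   for pkg in search_packages_info(["requests"]):
#       print(pkg["name"], pkg["version"])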
def print_results(distributions, list_files=False, verbose=False):
# type: (Iterator[Dict[str, str]], bool, bool) -> bool
"""
Print the information from installed distributions found.
"""
results_printed = False
for i, dist in enumerate(distributions):
results_printed = True
if i > 0:
write_output("---")
write_output("Name: %s", dist.get('name', ''))
write_output("Version: %s", dist.get('version', ''))
write_output("Summary: %s", dist.get('summary', ''))
write_output("Home-page: %s", dist.get('home-page', ''))
write_output("Author: %s", dist.get('author', ''))
write_output("Author-email: %s", dist.get('author-email', ''))
write_output("License: %s", dist.get('license', ''))
write_output("Location: %s", dist.get('location', ''))
write_output("Requires: %s", ', '.join(dist.get('requires', [])))
write_output("Required-by: %s", ', '.join(dist.get('required_by', [])))
if verbose:
write_output("Metadata-Version: %s",
dist.get('metadata-version', ''))
write_output("Installer: %s", dist.get('installer', ''))
write_output("Classifiers:")
for classifier in dist.get('classifiers', []):
write_output(" %s", classifier)
write_output("Entry-points:")
for entry in dist.get('entry_points', []):
write_output(" %s", entry.strip())
if list_files:
write_output("Files:")
for line in dist.get('files', []):
write_output(" %s", line.strip())
if "files" not in dist:
write_output("Cannot locate installed-files.txt")
return results_printed
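# Illustrative sketch: print_results backs "pip show"; a typical invocation
# and the headers it emits (package name hypothetical, values elided):
#
#   $ pip show --files requests
#   Name: requests
#   Version: ...
#   Location: ...
#   Files:
#     requests/__init__.py
#     ...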
----- File: Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/show.py -----
"""Backing implementation for InstallRequirement's various constructors
The idea here is that these formed a major chunk of InstallRequirement's size
so, moving them and their support code out of that class makes the rest of
the code easier to understand.
These are meant to be used elsewhere within pip to create instances of
InstallRequirement.
"""
import logging
import os
import re
from pip._vendor.packaging.markers import Marker
from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
from pip._vendor.packaging.specifiers import Specifier
from pip._vendor.pkg_resources import RequirementParseError, parse_requirements
from pip._internal.exceptions import InstallationError
from pip._internal.models.index import PyPI, TestPyPI
from pip._internal.models.link import Link
from pip._internal.models.wheel import Wheel
from pip._internal.pyproject import make_pyproject_path
from pip._internal.req.req_install import InstallRequirement
from pip._internal.utils.deprecation import deprecated
from pip._internal.utils.filetypes import ARCHIVE_EXTENSIONS
from pip._internal.utils.misc import is_installable_dir, splitext
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs import is_url, vcs
if MYPY_CHECK_RUNNING:
from typing import (
Any, Dict, Optional, Set, Tuple, Union,
)
from pip._internal.req.req_file import ParsedRequirement
__all__ = [
"install_req_from_editable", "install_req_from_line",
"parse_editable"
]
logger = logging.getLogger(__name__)
operators = Specifier._operators.keys()
def is_archive_file(name):
# type: (str) -> bool
"""Return True if `name` is a considered as an archive file."""
ext = splitext(name)[1].lower()
if ext in ARCHIVE_EXTENSIONS:
return True
return False
def _strip_extras(path):
# type: (str) -> Tuple[str, Optional[str]]
m = re.match(r'^(.+)(\[[^\]]+\])$', path)
extras = None
if m:
path_no_extras = m.group(1)
extras = m.group(2)
else:
path_no_extras = path
return path_no_extras, extras
def convert_extras(extras):
# type: (Optional[str]) -> Set[str]
if not extras:
return set()
return Requirement("placeholder" + extras.lower()).extras
def parse_editable(editable_req):
# type: (str) -> Tuple[Optional[str], str, Set[str]]
"""Parses an editable requirement into:
- a requirement name
- a URL
- extras
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
url = editable_req
# If a file path is specified with extras, strip off the extras.
url_no_extras, extras = _strip_extras(url)
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
msg = (
'File "setup.py" not found. Directory cannot be installed '
'in editable mode: {}'.format(os.path.abspath(url_no_extras))
)
pyproject_path = make_pyproject_path(url_no_extras)
if os.path.isfile(pyproject_path):
msg += (
'\n(A "pyproject.toml" file was found, but editable '
'mode currently requires a setup.py based build.)'
)
raise InstallationError(msg)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
Requirement("placeholder" + extras.lower()).extras,
)
else:
return package_name, url_no_extras, set()
for version_control in vcs:
if url.lower().startswith('{}:'.format(version_control)):
url = '{}+{}'.format(version_control, url)
break
if '+' not in url:
raise InstallationError(
'{} is not a valid editable requirement. '
'It should either be a path to a local project or a VCS URL '
'(beginning with svn+, git+, hg+, or bzr+).'.format(editable_req)
)
vc_type = url.split('+', 1)[0].lower()
if not vcs.get_backend(vc_type):
backends = ", ".join([bends.name + '+URL' for bends in vcs.backends])
error_message = "For --editable={}, " \
"only {} are currently supported".format(
editable_req, backends)
raise InstallationError(error_message)
package_name = Link(url).egg_fragment
if not package_name:
raise InstallationError(
"Could not detect requirement name for '{}', please specify one "
"with #egg=your_package_name".format(editable_req)
)
return package_name, url, set()
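# Illustrative sketch: what parse_editable returns for the accepted forms
# (URLs hypothetical; the local-directory form assumes a setup.py exists):
#
#   parse_editable("git+https://example.com/repo.git#egg=Foobar")
#       -> ("Foobar", "git+https://example.com/repo.git#egg=Foobar", set())
#   parse_editable(".[some_extra]")
#       -> (None, "file:///abs/path/to/cwd", {"some_extra"})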
def deduce_helpful_msg(req):
# type: (str) -> str
"""Returns helpful msg in case requirements file does not exist,
or cannot be parsed.
:params req: Requirements file path
"""
msg = ""
if os.path.exists(req):
msg = " It does exist."
# Try to parse and check if it is a requirements file.
try:
with open(req, 'r') as fp:
# parse first line only
next(parse_requirements(fp.read()))
msg += (
"The argument you provided "
"({}) appears to be a"
" requirements file. If that is the"
" case, use the '-r' flag to install"
" the packages specified within it."
).format(req)
except RequirementParseError:
logger.debug(
"Cannot parse '%s' as requirements file", req, exc_info=True
)
else:
msg += " File '{}' does not exist.".format(req)
return msg
class RequirementParts(object):
def __init__(
self,
requirement, # type: Optional[Requirement]
link, # type: Optional[Link]
markers, # type: Optional[Marker]
extras, # type: Set[str]
):
self.requirement = requirement
self.link = link
self.markers = markers
self.extras = extras
def parse_req_from_editable(editable_req):
# type: (str) -> RequirementParts
name, url, extras_override = parse_editable(editable_req)
if name is not None:
try:
req = Requirement(name)
except InvalidRequirement:
raise InstallationError("Invalid requirement: '{}'".format(name))
else:
req = None
link = Link(url)
return RequirementParts(req, link, None, extras_override)
# ---- The actual constructors follow ----
def install_req_from_editable(
editable_req, # type: str
comes_from=None, # type: Optional[Union[InstallRequirement, str]]
use_pep517=None, # type: Optional[bool]
isolated=False, # type: bool
options=None, # type: Optional[Dict[str, Any]]
constraint=False, # type: bool
user_supplied=False, # type: bool
):
# type: (...) -> InstallRequirement
parts = parse_req_from_editable(editable_req)
return InstallRequirement(
parts.requirement,
comes_from=comes_from,
user_supplied=user_supplied,
editable=True,
link=parts.link,
constraint=constraint,
use_pep517=use_pep517,
isolated=isolated,
install_options=options.get("install_options", []) if options else [],
global_options=options.get("global_options", []) if options else [],
hash_options=options.get("hashes", {}) if options else {},
extras=parts.extras,
)
def _looks_like_path(name):
# type: (str) -> bool
"""Checks whether the string "looks like" a path on the filesystem.
    This does not check whether the target actually exists; it only judges
    from the appearance.
Returns true if any of the following conditions is true:
* a path separator is found (either os.path.sep or os.path.altsep);
* a dot is found (which represents the current directory).
"""
if os.path.sep in name:
return True
if os.path.altsep is not None and os.path.altsep in name:
return True
if name.startswith("."):
return True
return False
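# Quick sketch (not part of the original module) of the heuristic above;
# relies on the module-level "os" import.
assert _looks_like_path("./pkg")                      # starts with "."
assert _looks_like_path("src" + os.path.sep + "pkg")  # contains a separator
assert not _looks_like_path("requests")               # bare project name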
def _get_url_from_path(path, name):
# type: (str, str) -> Optional[str]
"""
    First, it checks whether a provided path is an installable directory
    (e.g. it has a setup.py). If it is, returns the path converted to a URL.
    Otherwise, it checks whether the path is an archive file (such as a
    .whl) that exists on disk; if so, it returns its URL. If the name
    contains an "@" and its prefix does not look like a path, the input is
    treated as a PEP 440 URL requirement instead and None is returned.
"""
if _looks_like_path(name) and os.path.isdir(path):
if is_installable_dir(path):
return path_to_url(path)
raise InstallationError(
"Directory {name!r} is not installable. Neither 'setup.py' "
"nor 'pyproject.toml' found.".format(**locals())
)
if not is_archive_file(path):
return None
if os.path.isfile(path):
return path_to_url(path)
urlreq_parts = name.split('@', 1)
if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
# If the path contains '@' and the part before it does not look
# like a path, try to treat it as a PEP 440 URL req instead.
return None
logger.warning(
'Requirement %r looks like a filename, but the '
'file does not exist',
name
)
return path_to_url(path)
def parse_req_from_line(name, line_source):
# type: (str, Optional[str]) -> RequirementParts
if is_url(name):
marker_sep = '; '
else:
marker_sep = ';'
if marker_sep in name:
name, markers_as_string = name.split(marker_sep, 1)
markers_as_string = markers_as_string.strip()
if not markers_as_string:
markers = None
else:
markers = Marker(markers_as_string)
else:
markers = None
name = name.strip()
req_as_string = None
path = os.path.normpath(os.path.abspath(name))
link = None
extras_as_string = None
if is_url(name):
link = Link(name)
else:
p, extras_as_string = _strip_extras(path)
url = _get_url_from_path(p, name)
if url is not None:
link = Link(url)
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == 'file' and re.search(r'\.\./', link.url):
link = Link(
path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
wheel = Wheel(link.filename) # can raise InvalidWheelFilename
req_as_string = "{wheel.name}=={wheel.version}".format(**locals())
else:
# set the req to the egg fragment. when it's not there, this
# will become an 'unnamed' requirement
req_as_string = link.egg_fragment
# a requirement specifier
else:
req_as_string = name
extras = convert_extras(extras_as_string)
def with_source(text):
# type: (str) -> str
if not line_source:
return text
return '{} (from {})'.format(text, line_source)
if req_as_string is not None:
try:
req = Requirement(req_as_string)
except InvalidRequirement:
if os.path.sep in req_as_string:
add_msg = "It looks like a path."
add_msg += deduce_helpful_msg(req_as_string)
elif ('=' in req_as_string and
not any(op in req_as_string for op in operators)):
add_msg = "= is not a valid operator. Did you mean == ?"
else:
add_msg = ''
msg = with_source(
'Invalid requirement: {!r}'.format(req_as_string)
)
if add_msg:
msg += '\nHint: {}'.format(add_msg)
raise InstallationError(msg)
else:
# Deprecate extras after specifiers: "name>=1.0[extras]"
# This currently works by accident because _strip_extras() parses
# any extras in the end of the string and those are saved in
# RequirementParts
for spec in req.specifier:
spec_str = str(spec)
if spec_str.endswith(']'):
msg = "Extras after version '{}'.".format(spec_str)
replace = "moving the extras before version specifiers"
deprecated(msg, replacement=replace, gone_in="21.0")
else:
req = None
return RequirementParts(req, link, markers, extras)
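# Illustrative sketch (not part of the original module): a plain specifier
# with an environment marker becomes RequirementParts with no link.
_parts = parse_req_from_line('requests>=2.0; python_version >= "3"', None)
assert _parts.requirement is not None
assert _parts.link is None
assert _parts.markers is not None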
def install_req_from_line(
name, # type: str
comes_from=None, # type: Optional[Union[str, InstallRequirement]]
use_pep517=None, # type: Optional[bool]
isolated=False, # type: bool
options=None, # type: Optional[Dict[str, Any]]
constraint=False, # type: bool
line_source=None, # type: Optional[str]
user_supplied=False, # type: bool
):
# type: (...) -> InstallRequirement
"""Creates an InstallRequirement from a name, which might be a
requirement, directory containing 'setup.py', filename, or URL.
:param line_source: An optional string describing where the line is from,
for logging purposes in case of an error.
"""
parts = parse_req_from_line(name, line_source)
return InstallRequirement(
parts.requirement, comes_from, link=parts.link, markers=parts.markers,
use_pep517=use_pep517, isolated=isolated,
install_options=options.get("install_options", []) if options else [],
global_options=options.get("global_options", []) if options else [],
hash_options=options.get("hashes", {}) if options else {},
constraint=constraint,
extras=parts.extras,
user_supplied=user_supplied,
)
def install_req_from_req_string(
req_string, # type: str
comes_from=None, # type: Optional[InstallRequirement]
isolated=False, # type: bool
use_pep517=None, # type: Optional[bool]
user_supplied=False, # type: bool
):
# type: (...) -> InstallRequirement
try:
req = Requirement(req_string)
except InvalidRequirement:
raise InstallationError("Invalid requirement: '{}'".format(req_string))
domains_not_allowed = [
PyPI.file_storage_domain,
TestPyPI.file_storage_domain,
]
if (req.url and comes_from and comes_from.link and
comes_from.link.netloc in domains_not_allowed):
# Explicitly disallow pypi packages that depend on external urls
raise InstallationError(
"Packages installed from PyPI cannot depend on packages "
"which are not also hosted on PyPI.\n"
"{} depends on {} ".format(comes_from.name, req)
)
return InstallRequirement(
req,
comes_from,
isolated=isolated,
use_pep517=use_pep517,
user_supplied=user_supplied,
)
def install_req_from_parsed_requirement(
parsed_req, # type: ParsedRequirement
isolated=False, # type: bool
use_pep517=None, # type: Optional[bool]
user_supplied=False, # type: bool
):
# type: (...) -> InstallRequirement
if parsed_req.is_editable:
req = install_req_from_editable(
parsed_req.requirement,
comes_from=parsed_req.comes_from,
use_pep517=use_pep517,
constraint=parsed_req.constraint,
isolated=isolated,
user_supplied=user_supplied,
)
else:
req = install_req_from_line(
parsed_req.requirement,
comes_from=parsed_req.comes_from,
use_pep517=use_pep517,
isolated=isolated,
options=parsed_req.options,
constraint=parsed_req.constraint,
line_source=parsed_req.line_source,
user_supplied=user_supplied,
)
return req
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_internal/req/constructors.py ====
import logging
from pip._internal.models.direct_url import (
DIRECT_URL_METADATA_NAME,
ArchiveInfo,
DirectUrl,
DirectUrlValidationError,
DirInfo,
VcsInfo,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs import vcs
try:
from json import JSONDecodeError
except ImportError:
# PY2
JSONDecodeError = ValueError # type: ignore
if MYPY_CHECK_RUNNING:
from typing import Optional
from pip._internal.models.link import Link
from pip._vendor.pkg_resources import Distribution
logger = logging.getLogger(__name__)
def direct_url_as_pep440_direct_reference(direct_url, name):
# type: (DirectUrl, str) -> str
"""Convert a DirectUrl to a pip requirement string."""
direct_url.validate() # if invalid, this is a pip bug
requirement = name + " @ "
fragments = []
if isinstance(direct_url.info, VcsInfo):
requirement += "{}+{}@{}".format(
direct_url.info.vcs, direct_url.url, direct_url.info.commit_id
)
elif isinstance(direct_url.info, ArchiveInfo):
requirement += direct_url.url
if direct_url.info.hash:
fragments.append(direct_url.info.hash)
else:
assert isinstance(direct_url.info, DirInfo)
# pip should never reach this point for editables, since
# pip freeze inspects the editable project location to produce
# the requirement string
assert not direct_url.info.editable
requirement += direct_url.url
if direct_url.subdirectory:
fragments.append("subdirectory=" + direct_url.subdirectory)
if fragments:
requirement += "#" + "&".join(fragments)
return requirement
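# Sketch (not part of the original module): converting an ArchiveInfo-backed
# DirectUrl into a PEP 440 direct reference. All values are hypothetical.
_du = DirectUrl(
    url="https://example.com/pkg-1.0.tar.gz",
    info=ArchiveInfo(hash="sha256=deadbeef"),
)
assert direct_url_as_pep440_direct_reference(_du, "pkg") == (
    "pkg @ https://example.com/pkg-1.0.tar.gz#sha256=deadbeef"
)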
def direct_url_from_link(link, source_dir=None, link_is_in_wheel_cache=False):
# type: (Link, Optional[str], bool) -> DirectUrl
if link.is_vcs:
vcs_backend = vcs.get_backend_for_scheme(link.scheme)
assert vcs_backend
url, requested_revision, _ = (
vcs_backend.get_url_rev_and_auth(link.url_without_fragment)
)
# For VCS links, we need to find out and add commit_id.
if link_is_in_wheel_cache:
# If the requested VCS link corresponds to a cached
# wheel, it means the requested revision was an
# immutable commit hash, otherwise it would not have
# been cached. In that case we don't have a source_dir
# with the VCS checkout.
assert requested_revision
commit_id = requested_revision
else:
# If the wheel was not in cache, it means we have
# had to checkout from VCS to build and we have a source_dir
# which we can inspect to find out the commit id.
assert source_dir
commit_id = vcs_backend.get_revision(source_dir)
return DirectUrl(
url=url,
info=VcsInfo(
vcs=vcs_backend.name,
commit_id=commit_id,
requested_revision=requested_revision,
),
subdirectory=link.subdirectory_fragment,
)
elif link.is_existing_dir():
return DirectUrl(
url=link.url_without_fragment,
info=DirInfo(),
subdirectory=link.subdirectory_fragment,
)
else:
hash = None
hash_name = link.hash_name
if hash_name:
hash = "{}={}".format(hash_name, link.hash)
return DirectUrl(
url=link.url_without_fragment,
info=ArchiveInfo(hash=hash),
subdirectory=link.subdirectory_fragment,
)
def dist_get_direct_url(dist):
# type: (Distribution) -> Optional[DirectUrl]
"""Obtain a DirectUrl from a pkg_resource.Distribution.
Returns None if the distribution has no `direct_url.json` metadata,
or if `direct_url.json` is invalid.
"""
if not dist.has_metadata(DIRECT_URL_METADATA_NAME):
return None
try:
return DirectUrl.from_json(dist.get_metadata(DIRECT_URL_METADATA_NAME))
except (
DirectUrlValidationError,
JSONDecodeError,
UnicodeDecodeError
) as e:
logger.warning(
"Error parsing %s for %s: %s",
DIRECT_URL_METADATA_NAME,
dist.project_name,
e,
)
return None
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/direct_url_helpers.py ====
from __future__ import absolute_import
import logging
import os
import subprocess
from pip._vendor.six.moves import shlex_quote
from pip._internal.cli.spinners import SpinnerInterface, open_spinner
from pip._internal.exceptions import InstallationError
from pip._internal.utils.compat import console_to_str, str_to_display
from pip._internal.utils.logging import subprocess_logger
from pip._internal.utils.misc import HiddenText, path_to_display
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import (
Any, Callable, Iterable, List, Mapping, Optional, Text, Union,
)
CommandArgs = List[Union[str, HiddenText]]
LOG_DIVIDER = '----------------------------------------'
def make_command(*args):
# type: (Union[str, HiddenText, CommandArgs]) -> CommandArgs
"""
Create a CommandArgs object.
"""
command_args = [] # type: CommandArgs
for arg in args:
# Check for list instead of CommandArgs since CommandArgs is
# only known during type-checking.
if isinstance(arg, list):
command_args.extend(arg)
else:
# Otherwise, arg is str or HiddenText.
command_args.append(arg)
return command_args
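# Sketch (not part of the original module): list arguments are flattened,
# plain strings pass through unchanged. The command is hypothetical.
assert make_command('git', ['clone', '-q'], 'https://example.com/repo.git') == [
    'git', 'clone', '-q', 'https://example.com/repo.git',
]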
def format_command_args(args):
# type: (Union[List[str], CommandArgs]) -> str
"""
Format command arguments for display.
"""
# For HiddenText arguments, display the redacted form by calling str().
# Also, we don't apply str() to arguments that aren't HiddenText since
# this can trigger a UnicodeDecodeError in Python 2 if the argument
# has type unicode and includes a non-ascii character. (The type
# checker doesn't ensure the annotations are correct in all cases.)
return ' '.join(
shlex_quote(str(arg)) if isinstance(arg, HiddenText)
else shlex_quote(arg) for arg in args
)
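# Sketch (not part of the original module): HiddenText renders in redacted
# form (then shell-quoted), so secrets never reach the logs. The token
# value is hypothetical.
_hidden = HiddenText('s3cret-token', redacted='****')
assert format_command_args(['login', _hidden]) == "login '****'"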
def reveal_command_args(args):
# type: (Union[List[str], CommandArgs]) -> List[str]
"""
Return the arguments in their raw, unredacted form.
"""
return [
arg.secret if isinstance(arg, HiddenText) else arg for arg in args
]
def make_subprocess_output_error(
cmd_args, # type: Union[List[str], CommandArgs]
cwd, # type: Optional[str]
lines, # type: List[Text]
exit_status, # type: int
):
# type: (...) -> Text
"""
Create and return the error message to use to log a subprocess error
with command output.
:param lines: A list of lines, each ending with a newline.
"""
command = format_command_args(cmd_args)
# Convert `command` and `cwd` to text (unicode in Python 2) so we can use
# them as arguments in the unicode format string below. This avoids
# "UnicodeDecodeError: 'ascii' codec can't decode byte ..." in Python 2
# if either contains a non-ascii character.
command_display = str_to_display(command, desc='command bytes')
cwd_display = path_to_display(cwd)
# We know the joined output value ends in a newline.
output = ''.join(lines)
msg = (
# Use a unicode string to avoid "UnicodeEncodeError: 'ascii'
# codec can't encode character ..." in Python 2 when a format
# argument (e.g. `output`) has a non-ascii character.
u'Command errored out with exit status {exit_status}:\n'
' command: {command_display}\n'
' cwd: {cwd_display}\n'
'Complete output ({line_count} lines):\n{output}{divider}'
).format(
exit_status=exit_status,
command_display=command_display,
cwd_display=cwd_display,
line_count=len(lines),
output=output,
divider=LOG_DIVIDER,
)
return msg
def call_subprocess(
cmd, # type: Union[List[str], CommandArgs]
show_stdout=False, # type: bool
cwd=None, # type: Optional[str]
on_returncode='raise', # type: str
extra_ok_returncodes=None, # type: Optional[Iterable[int]]
command_desc=None, # type: Optional[str]
extra_environ=None, # type: Optional[Mapping[str, Any]]
unset_environ=None, # type: Optional[Iterable[str]]
spinner=None, # type: Optional[SpinnerInterface]
log_failed_cmd=True # type: Optional[bool]
):
# type: (...) -> Text
"""
Args:
show_stdout: if true, use INFO to log the subprocess's stderr and
stdout streams. Otherwise, use DEBUG. Defaults to False.
extra_ok_returncodes: an iterable of integer return codes that are
acceptable, in addition to 0. Defaults to None, which means [].
unset_environ: an iterable of environment variable names to unset
prior to calling subprocess.Popen().
log_failed_cmd: if false, failed commands are not logged, only raised.
"""
if extra_ok_returncodes is None:
extra_ok_returncodes = []
if unset_environ is None:
unset_environ = []
# Most places in pip use show_stdout=False. What this means is--
#
# - We connect the child's output (combined stderr and stdout) to a
# single pipe, which we read.
# - We log this output to stderr at DEBUG level as it is received.
# - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't
# requested), then we show a spinner so the user can still see the
# subprocess is in progress.
# - If the subprocess exits with an error, we log the output to stderr
# at ERROR level if it hasn't already been displayed to the console
# (e.g. if --verbose logging wasn't enabled). This way we don't log
# the output to the console twice.
#
# If show_stdout=True, then the above is still done, but with DEBUG
# replaced by INFO.
if show_stdout:
# Then log the subprocess output at INFO level.
log_subprocess = subprocess_logger.info
used_level = logging.INFO
else:
# Then log the subprocess output using DEBUG. This also ensures
# it will be logged to the log file (aka user_log), if enabled.
log_subprocess = subprocess_logger.debug
used_level = logging.DEBUG
# Whether the subprocess will be visible in the console.
showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level
# Only use the spinner if we're not showing the subprocess output
# and we have a spinner.
use_spinner = not showing_subprocess and spinner is not None
if command_desc is None:
command_desc = format_command_args(cmd)
log_subprocess("Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
for name in unset_environ:
env.pop(name, None)
try:
proc = subprocess.Popen(
# Convert HiddenText objects to the underlying str.
reveal_command_args(cmd),
stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, cwd=cwd, env=env,
)
assert proc.stdin
assert proc.stdout
proc.stdin.close()
except Exception as exc:
if log_failed_cmd:
subprocess_logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
while True:
# The "line" value is a unicode string in Python 2.
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
# Show the line immediately.
log_subprocess(line)
# Update the spinner.
if use_spinner:
assert spinner
spinner.spin()
try:
proc.wait()
finally:
if proc.stdout:
proc.stdout.close()
proc_had_error = (
proc.returncode and proc.returncode not in extra_ok_returncodes
)
if use_spinner:
assert spinner
if proc_had_error:
spinner.finish("error")
else:
spinner.finish("done")
if proc_had_error:
if on_returncode == 'raise':
if not showing_subprocess and log_failed_cmd:
# Then the subprocess streams haven't been logged to the
# console yet.
msg = make_subprocess_output_error(
cmd_args=cmd,
cwd=cwd,
lines=all_output,
exit_status=proc.returncode,
)
subprocess_logger.error(msg)
exc_msg = (
'Command errored out with exit status {}: {} '
'Check the logs for full command output.'
).format(proc.returncode, command_desc)
raise InstallationError(exc_msg)
elif on_returncode == 'warn':
subprocess_logger.warning(
'Command "%s" had error code %s in %s',
command_desc,
proc.returncode,
cwd,
)
elif on_returncode == 'ignore':
pass
else:
raise ValueError('Invalid value: on_returncode={!r}'.format(
on_returncode))
return ''.join(all_output)
def runner_with_spinner_message(message):
# type: (str) -> Callable[..., None]
"""Provide a subprocess_runner that shows a spinner message.
    Intended for use with pep517's Pep517HookCaller. Thus, the runner has
an API that matches what's expected by Pep517HookCaller.subprocess_runner.
"""
def runner(
cmd, # type: List[str]
cwd=None, # type: Optional[str]
extra_environ=None # type: Optional[Mapping[str, Any]]
):
# type: (...) -> None
with open_spinner(message) as spinner:
call_subprocess(
cmd,
cwd=cwd,
extra_environ=extra_environ,
spinner=spinner,
)
return runner
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/subprocess.py ====
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
from pip._vendor.six.moves import configparser
from pip._internal.exceptions import BadCommand, SubProcessError
from pip._internal.utils.misc import display_path
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.temp_dir import TempDirectory
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url
from pip._internal.vcs.versioncontrol import (
VersionControl,
find_path_to_setup_from_repo_root,
vcs,
)
if MYPY_CHECK_RUNNING:
from pip._internal.utils.misc import HiddenText
from pip._internal.vcs.versioncontrol import RevOptions
logger = logging.getLogger(__name__)
class Mercurial(VersionControl):
name = 'hg'
dirname = '.hg'
repo_name = 'clone'
schemes = (
'hg', 'hg+file', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http',
)
@staticmethod
def get_base_rev_args(rev):
return [rev]
def export(self, location, url):
# type: (str, HiddenText) -> None
"""Export the Hg repository at the url to the destination location"""
with TempDirectory(kind="export") as temp_dir:
self.unpack(temp_dir.path, url=url)
self.run_command(
['archive', location], cwd=temp_dir.path
)
def fetch_new(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
rev_display = rev_options.to_display()
logger.info(
'Cloning hg %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(make_command('clone', '--noupdate', '-q', url, dest))
self.run_command(
make_command('update', '-q', rev_options.to_args()),
cwd=dest,
)
def switch(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
repo_config = os.path.join(dest, self.dirname, 'hgrc')
config = configparser.RawConfigParser()
try:
config.read(repo_config)
config.set('paths', 'default', url.secret)
with open(repo_config, 'w') as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning(
'Could not switch Mercurial repository to %s: %s', url, exc,
)
else:
cmd_args = make_command('update', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
def update(self, dest, url, rev_options):
# type: (str, HiddenText, RevOptions) -> None
self.run_command(['pull', '-q'], cwd=dest)
cmd_args = make_command('update', '-q', rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
def get_remote_url(cls, location):
url = cls.run_command(
['showconfig', 'paths.default'],
cwd=location).strip()
if cls._is_local_repository(url):
url = path_to_url(url)
return url.strip()
@classmethod
def get_revision(cls, location):
"""
        Return the repository-local changeset revision number, as a string.
"""
current_revision = cls.run_command(
['parents', '--template={rev}'], cwd=location).strip()
return current_revision
@classmethod
def get_requirement_revision(cls, location):
"""
Return the changeset identification hash, as a 40-character
hexadecimal string
"""
current_rev_hash = cls.run_command(
['parents', '--template={node}'],
cwd=location).strip()
return current_rev_hash
@classmethod
def is_commit_id_equal(cls, dest, name):
"""Always assume the versions don't match"""
return False
@classmethod
def get_subdirectory(cls, location):
"""
Return the path to setup.py, relative to the repo root.
Return None if setup.py is in the repo root.
"""
# find the repo root
repo_root = cls.run_command(
['root'], cwd=location).strip()
if not os.path.isabs(repo_root):
repo_root = os.path.abspath(os.path.join(location, repo_root))
return find_path_to_setup_from_repo_root(location, repo_root)
@classmethod
def get_repository_root(cls, location):
loc = super(Mercurial, cls).get_repository_root(location)
if loc:
return loc
try:
r = cls.run_command(
['root'],
cwd=location,
log_failed_cmd=False,
)
except BadCommand:
logger.debug("could not determine if %s is under hg control "
"because hg is not available", location)
return None
except SubProcessError:
return None
return os.path.normpath(r.rstrip('\r\n'))
vcs.register(Mercurial)
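# Sketch (not part of the original module): after registration the backend
# is discoverable through the vcs registry; no hg binary is needed for this.
assert vcs.get_backend('hg') is not None
assert Mercurial.get_base_rev_args('tip') == ['tip']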
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_internal/vcs/mercurial.py ====
import hashlib
import os
from textwrap import dedent
from ..cache import BaseCache
from ..controller import CacheController
try:
FileNotFoundError
except NameError:
# py2.X
FileNotFoundError = (IOError, OSError)
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we
    # will only open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
class FileCache(BaseCache):
def __init__(
self,
directory,
forever=False,
filemode=0o0600,
dirmode=0o0700,
use_dir_lock=None,
lock_class=None,
):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
try:
from lockfile import LockFile
from lockfile.mkdirlockfile import MkdirLockFile
except ImportError:
notice = dedent(
"""
NOTE: In order to use the FileCache you must have
lockfile installed. You can install it via pip:
pip install lockfile
"""
)
raise ImportError(notice)
else:
if use_dir_lock:
lock_class = MkdirLockFile
elif lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
def get(self, key):
name = self._fn(key)
try:
with open(name, "rb") as fh:
return fh.read()
except FileNotFoundError:
return None
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
try:
os.remove(name)
except FileNotFoundError:
pass
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
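# Sketch (not part of the original module): keys are hashed with sha224 and
# sharded into five one-character directories under the cache root.
_hashed = FileCache.encode("http://example.com/")
assert len(_hashed) == 56  # sha224 hex digest
# final path: <directory>/<h0>/<h1>/<h2>/<h3>/<h4>/<full hash>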
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py ====
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import logging
from .enums import MachineState
class CodingStateMachine(object):
"""
A state machine to verify a byte sequence for a particular encoding. For
each byte the detector receives, it will feed that byte to every active
state machine available, one byte at a time. The state machine changes its
state based on its previous state and the byte it receives. There are 3
states in a state machine that are of interest to an auto-detector:
    START state: This is the state to start with, or a legal byte sequence
                 (i.e. a valid code point) for a character has been identified.
ME state: This indicates that the state machine identified a byte sequence
that is specific to the charset it is designed for and that
there is no other possible encoding which can contain this byte
                 sequence. This will lead to an immediate positive answer for
the detector.
ERROR state: This indicates the state machine identified an illegal byte
sequence for that encoding. This will lead to an immediate
negative answer for this encoding. Detector will exclude this
encoding from consideration from here on.
"""
def __init__(self, sm):
self._model = sm
self._curr_byte_pos = 0
self._curr_char_len = 0
self._curr_state = None
self.logger = logging.getLogger(__name__)
self.reset()
def reset(self):
self._curr_state = MachineState.START
def next_state(self, c):
# for each byte we get its class
# if it is first byte, we also get byte length
byte_class = self._model['class_table'][c]
if self._curr_state == MachineState.START:
self._curr_byte_pos = 0
self._curr_char_len = self._model['char_len_table'][byte_class]
# from byte's class and state_table, we get its next state
curr_state = (self._curr_state * self._model['class_factor']
+ byte_class)
self._curr_state = self._model['state_table'][curr_state]
self._curr_byte_pos += 1
return self._curr_state
def get_current_charlen(self):
return self._curr_char_len
def get_coding_state_machine(self):
return self._model['name']
@property
def language(self):
return self._model['language']
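# Sketch (not part of the original module): driving the machine with a toy
# single-state model. The model dict below is hypothetical, not a real
# charset table; real models live alongside the detectors.
_toy_model = {
    'class_table': [0] * 256,              # every byte maps to class 0
    'class_factor': 1,
    'state_table': [MachineState.ITS_ME],  # START * 1 + 0 -> ITS_ME
    'char_len_table': [1],
    'name': 'toy',
    'language': '',
}
_sm = CodingStateMachine(_toy_model)
assert _sm.next_state(0x41) == MachineState.ITS_ME
assert _sm.get_current_charlen() == 1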
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/codingstatemachine.py ====
"""
This module exists only to simplify retrieving the version number of chardet
from within setup.py and from chardet subpackages.
:author: Dan Blanchard (dan.blanchard@gmail.com)
"""
__version__ = "3.0.4"
VERSION = __version__.split('.')
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/chardet/version.py ====
# -*- coding: utf-8 -*-
"""
requests._internal_utils
~~~~~~~~~~~~~~~~~~~~~~~~
Provides utility functions that are consumed internally by Requests
which depend on extremely few external helpers (such as compat)
"""
from .compat import is_py2, builtin_str, str
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False
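# Sketch (not part of the original module): ASCII detection on unicode text.
# The sample strings are hypothetical.
assert unicode_is_ascii(u"pip")
assert not unicode_is_ascii(u"p\u00efp")  # contains a non-ASCII character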
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/requests/_internal_utils.py ====
__all__ = [
"__version__",
"AbstractProvider",
"AbstractResolver",
"BaseReporter",
"InconsistentCandidate",
"Resolver",
"RequirementsConflicted",
"ResolutionError",
"ResolutionImpossible",
"ResolutionTooDeep",
]
__version__ = "0.4.0"
from .providers import AbstractProvider, AbstractResolver
from .reporters import BaseReporter
from .resolvers import (
InconsistentCandidate,
RequirementsConflicted,
Resolver,
ResolutionError,
ResolutionImpossible,
ResolutionTooDeep,
)
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/resolvelib/__init__.py ====
"""Python module which parses and emits TOML.
Released under the MIT license.
"""
from pip._vendor.toml import encoder
from pip._vendor.toml import decoder
__version__ = "0.10.1"
_spec_ = "0.5.0"
load = decoder.load
loads = decoder.loads
TomlDecoder = decoder.TomlDecoder
TomlDecodeError = decoder.TomlDecodeError
TomlPreserveCommentDecoder = decoder.TomlPreserveCommentDecoder
dump = encoder.dump
dumps = encoder.dumps
TomlEncoder = encoder.TomlEncoder
TomlArraySeparatorEncoder = encoder.TomlArraySeparatorEncoder
TomlPreserveInlineDictEncoder = encoder.TomlPreserveInlineDictEncoder
TomlNumpyEncoder = encoder.TomlNumpyEncoder
TomlPreserveCommentEncoder = encoder.TomlPreserveCommentEncoder
TomlPathlibEncoder = encoder.TomlPathlibEncoder
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/toml/__init__.py ====
from __future__ import absolute_import
import binascii
import codecs
import os
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup("utf-8")[3]
def choose_boundary():
"""
Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
boundary = binascii.hexlify(os.urandom(16))
if not six.PY2:
boundary = boundary.decode("ascii")
return boundary
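# Sketch (not part of the original module): boundaries are 32 lowercase hex
# characters drawn from os.urandom.
_boundary = choose_boundary()
assert len(_boundary) == 32
assert all(c in "0123456789abcdef" for c in _boundary)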
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`urllib3.filepost.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b("--%s\r\n" % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b"\r\n")
body.write(b("--%s--\r\n" % (boundary)))
content_type = str("multipart/form-data; boundary=%s" % boundary)
return body.getvalue(), content_type
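# Sketch (not part of the original module): encoding a single text field
# with a fixed boundary. Field name and value are hypothetical.
_body, _ctype = encode_multipart_formdata({"name": "value"}, boundary="XBOUNDARY")
assert _ctype == "multipart/form-data; boundary=XBOUNDARY"
assert _body.startswith(b"--XBOUNDARY\r\n")
assert _body.endswith(b"--XBOUNDARY--\r\n")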
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/filepost.py ====
from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
PROTOCOL_TLS,
)
from .timeout import current_time, Timeout
from .retry import Retry
from .url import get_host, parse_url, split_first, Url
from .wait import wait_for_read, wait_for_write
__all__ = (
"HAS_SNI",
"IS_PYOPENSSL",
"IS_SECURETRANSPORT",
"SSLContext",
"PROTOCOL_TLS",
"Retry",
"Timeout",
"Url",
"assert_fingerprint",
"current_time",
"is_connection_dropped",
"is_fp_closed",
"get_host",
"parse_url",
"make_headers",
"resolve_cert_reqs",
"resolve_ssl_version",
"split_first",
"ssl_wrap_socket",
"wait_for_read",
"wait_for_write",
)
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/__init__.py ====
from __future__ import absolute_import
import errno
import warnings
import hmac
import sys
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from .url import IPV4_RE, BRACELESS_IPV6_ADDRZ_RE
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
from ..packages import six
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
IS_SECURETRANSPORT = False
# Maps the length of a digest to a possible hash function producing this digest
HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
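# Sketch (not part of the original module): the backport XORs every byte
# pair instead of short-circuiting on the first mismatch.
assert _const_compare_digest_backport(b"abc", b"abc")
assert not _const_compare_digest_backport(b"abc", b"abd")
assert not _const_compare_digest_backport(b"abc", b"ab")  # length mismatch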
_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport)
try: # Test for SSL features
import ssl
from ssl import wrap_socket, CERT_REQUIRED
from ssl import HAS_SNI # Has SNI?
except ImportError:
pass
try: # Platform-specific: Python 3.6
from ssl import PROTOCOL_TLS
PROTOCOL_SSLv23 = PROTOCOL_TLS
except ImportError:
try:
from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
PROTOCOL_SSLv23 = PROTOCOL_TLS
except ImportError:
PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
try:
from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
# security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs, DSS, and other
# insecure ciphers for security reasons.
# - NOTE: TLS 1.3 cipher suites are managed through a different interface
# not exposed by CPython (yet!) and are enabled by default if they're available.
DEFAULT_CIPHERS = ":".join(
[
"ECDHE+AESGCM",
"ECDHE+CHACHA20",
"DHE+AESGCM",
"DHE+CHACHA20",
"ECDH+AESGCM",
"DH+AESGCM",
"ECDH+AES",
"DH+AES",
"RSA+AESGCM",
"RSA+AES",
"!aNULL",
"!eNULL",
"!MD5",
"!DSS",
]
)
try:
from ssl import SSLContext # Modern SSL?
except ImportError:
class SSLContext(object): # Platform-specific: Python 2
def __init__(self, protocol_version):
self.protocol = protocol_version
# Use default values from a real SSLContext
self.check_hostname = False
self.verify_mode = ssl.CERT_NONE
self.ca_certs = None
self.options = 0
self.certfile = None
self.keyfile = None
self.ciphers = None
def load_cert_chain(self, certfile, keyfile):
self.certfile = certfile
self.keyfile = keyfile
def load_verify_locations(self, cafile=None, capath=None, cadata=None):
self.ca_certs = cafile
if capath is not None:
raise SSLError("CA directories not supported in older Pythons")
if cadata is not None:
raise SSLError("CA data not supported in older Pythons")
def set_ciphers(self, cipher_suite):
self.ciphers = cipher_suite
def wrap_socket(self, socket, server_hostname=None, server_side=False):
warnings.warn(
"A true SSLContext object is not available. This prevents "
"urllib3 from configuring SSL appropriately and may cause "
"certain SSL connections to fail. You can upgrade to a newer "
"version of Python to solve this. For more information, see "
"https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
"#ssl-warnings",
InsecurePlatformWarning,
)
kwargs = {
"keyfile": self.keyfile,
"certfile": self.certfile,
"ca_certs": self.ca_certs,
"cert_reqs": self.verify_mode,
"ssl_version": self.protocol,
"server_side": server_side,
}
return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
def assert_fingerprint(cert, fingerprint):
"""
Checks if given fingerprint matches the supplied certificate.
:param cert:
Certificate as bytes object.
:param fingerprint:
Fingerprint as string of hexdigits, can be interspersed by colons.
"""
fingerprint = fingerprint.replace(":", "").lower()
digest_length = len(fingerprint)
hashfunc = HASHFUNC_MAP.get(digest_length)
if not hashfunc:
raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint))
    # We need encode() here for py32; works on py2 and py33.
fingerprint_bytes = unhexlify(fingerprint.encode())
cert_digest = hashfunc(cert).digest()
if not _const_compare_digest(cert_digest, fingerprint_bytes):
raise SSLError(
'Fingerprints did not match. Expected "{0}", got "{1}".'.format(
fingerprint, hexlify(cert_digest)
)
)
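# Sketch (not part of the original module): the digest length selects the
# hash function, and case/colons in the fingerprint are normalized away.
# The certificate bytes are hypothetical, not a real DER blob.
_cert = b"fake-der-bytes"
_fp = sha256(_cert).hexdigest()
assert_fingerprint(_cert, _fp.upper())  # 64 hex chars -> sha256; no raise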
def resolve_cert_reqs(candidate):
"""
Resolves the argument to a numeric constant, which can be passed to
the wrap_socket function/method from the ssl module.
Defaults to :data:`ssl.CERT_REQUIRED`.
If given a string it is assumed to be the name of the constant in the
:mod:`ssl` module or its abbreviation.
    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
If it's neither `None` nor a string we assume it is already the numeric
constant which can directly be passed to wrap_socket.
"""
if candidate is None:
return CERT_REQUIRED
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, "CERT_" + candidate)
return res
return candidate
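# Sketch (not part of the original module): None, abbreviations, and full
# constant names all resolve; assumes the stdlib ssl module imported above
# is available.
assert resolve_cert_reqs(None) == CERT_REQUIRED
assert resolve_cert_reqs("REQUIRED") == ssl.CERT_REQUIRED
assert resolve_cert_reqs("CERT_NONE") == ssl.CERT_NONE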
def resolve_ssl_version(candidate):
"""
    Like resolve_cert_reqs, but resolves an SSL protocol version instead.
"""
if candidate is None:
return PROTOCOL_TLS
if isinstance(candidate, str):
res = getattr(ssl, candidate, None)
if res is None:
res = getattr(ssl, "PROTOCOL_" + candidate)
return res
return candidate
def create_urllib3_context(
ssl_version=None, cert_reqs=None, options=None, ciphers=None
):
"""All arguments have the same meaning as ``ssl_wrap_socket``.
By default, this function does a lot of the same work that
``ssl.create_default_context`` does on Python 3.4+. It:
- Disables SSLv2, SSLv3, and compression
- Sets a restricted set of server ciphers
If you wish to enable SSLv3, you can do::
from pip._vendor.urllib3.util import ssl_
context = ssl_.create_urllib3_context()
context.options &= ~ssl_.OP_NO_SSLv3
You can do the same to enable compression (substituting ``COMPRESSION``
for ``SSLv3`` in the last line above).
:param ssl_version:
The desired protocol version to use. This will default to
PROTOCOL_SSLv23 which will negotiate the highest protocol that both
the server and your installation of OpenSSL support.
:param cert_reqs:
Whether to require the certificate verification. This defaults to
``ssl.CERT_REQUIRED``.
:param options:
Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
:param ciphers:
Which cipher suites to allow the server to select.
:returns:
Constructed SSLContext object with specified options
:rtype: SSLContext
"""
context = SSLContext(ssl_version or PROTOCOL_TLS)
context.set_ciphers(ciphers or DEFAULT_CIPHERS)
# Setting the default here, as we may have no ssl module on import
cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
if options is None:
options = 0
# SSLv2 is easily broken and is considered harmful and dangerous
options |= OP_NO_SSLv2
# SSLv3 has several problems and is now dangerous
options |= OP_NO_SSLv3
# Disable compression to prevent CRIME attacks for OpenSSL 1.0+
# (issue #309)
options |= OP_NO_COMPRESSION
context.options |= options
# Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
# necessary for conditional client cert authentication with TLS 1.3.
# The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
# versions of Python. We only enable on Python 3.7.4+ or if certificate
# verification is enabled to work around Python issue #37428
# See: https://bugs.python.org/issue37428
if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
context, "post_handshake_auth", None
) is not None:
context.post_handshake_auth = True
context.verify_mode = cert_reqs
if (
getattr(context, "check_hostname", None) is not None
): # Platform-specific: Python 3.2
# We do our own verification, including fingerprints and alternative
# hostnames. So disable it here
context.check_hostname = False
return context
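# Sketch (not part of the original module): building a default context and
# then re-enabling SSLv3, mirroring the docstring above. Assumes a working
# stdlib ssl module.
_ctx = create_urllib3_context()
assert _ctx.options & OP_NO_SSLv3   # SSLv3 disabled by default
_ctx.options &= ~OP_NO_SSLv3        # opt back in, as documented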
def ssl_wrap_socket(
sock,
keyfile=None,
certfile=None,
cert_reqs=None,
ca_certs=None,
server_hostname=None,
ssl_version=None,
ciphers=None,
ssl_context=None,
ca_cert_dir=None,
key_password=None,
ca_cert_data=None,
):
"""
All arguments except for server_hostname, ssl_context, and ca_cert_dir have
the same meaning as they do when using :func:`ssl.wrap_socket`.
:param server_hostname:
When SNI is supported, the expected hostname of the certificate
:param ssl_context:
A pre-made :class:`SSLContext` object. If none is provided, one will
be created using :func:`create_urllib3_context`.
:param ciphers:
A string of ciphers we wish the client to support.
:param ca_cert_dir:
A directory containing CA certificates in multiple separate files, as
supported by OpenSSL's -CApath flag or the capath argument to
SSLContext.load_verify_locations().
:param key_password:
Optional password if the keyfile is encrypted.
:param ca_cert_data:
Optional string containing CA certificates in PEM format suitable for
passing as the cadata parameter to SSLContext.load_verify_locations()
"""
context = ssl_context
if context is None:
# Note: This branch of code and all the variables in it are no longer
# used by urllib3 itself. We should consider deprecating and removing
# this code.
context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
if ca_certs or ca_cert_dir or ca_cert_data:
try:
context.load_verify_locations(ca_certs, ca_cert_dir, ca_cert_data)
except IOError as e: # Platform-specific: Python 2.7
raise SSLError(e)
# Py33 raises FileNotFoundError which subclasses OSError
# These are not equivalent unless we check the errno attribute
except OSError as e: # Platform-specific: Python 3.3 and beyond
if e.errno == errno.ENOENT:
raise SSLError(e)
raise
elif ssl_context is None and hasattr(context, "load_default_certs"):
        # try to load OS default certs; works well on Windows (requires Python 3.4+)
context.load_default_certs()
# Attempt to detect if we get the goofy behavior of the
# keyfile being encrypted and OpenSSL asking for the
# passphrase via the terminal and instead error out.
if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
raise SSLError("Client private key is encrypted, password is required")
if certfile:
if key_password is None:
context.load_cert_chain(certfile, keyfile)
else:
context.load_cert_chain(certfile, keyfile, key_password)
# If we detect server_hostname is an IP address then the SNI
# extension should not be used according to RFC3546 Section 3.1
# We shouldn't warn the user if SNI isn't available but we would
# not be using SNI anyways due to IP address for server_hostname.
if (
server_hostname is not None and not is_ipaddress(server_hostname)
) or IS_SECURETRANSPORT:
if HAS_SNI and server_hostname is not None:
return context.wrap_socket(sock, server_hostname=server_hostname)
warnings.warn(
"An HTTPS request has been made, but the SNI (Server Name "
"Indication) extension to TLS is not available on this platform. "
"This may cause the server to present an incorrect TLS "
"certificate, which can cause validation failures. You can upgrade to "
"a newer version of Python to solve this. For more information, see "
"https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
"#ssl-warnings",
SNIMissingWarning,
)
return context.wrap_socket(sock)
def is_ipaddress(hostname):
"""Detects whether the hostname given is an IPv4 or IPv6 address.
Also detects IPv6 addresses with Zone IDs.
:param str hostname: Hostname to examine.
:return: True if the hostname is an IP address, False otherwise.
"""
if not six.PY2 and isinstance(hostname, bytes):
# IDN A-label bytes are ASCII compatible.
hostname = hostname.decode("ascii")
return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname))
def _is_key_file_encrypted(key_file):
"""Detects if a key file is encrypted or not."""
with open(key_file, "r") as f:
for line in f:
# Look for Proc-Type: 4,ENCRYPTED
if "ENCRYPTED" in line:
return True
return False
# ==== End of file: Django-locallibrary/env/Lib/site-packages/pip/_vendor/urllib3/util/ssl_.py ====
"""distutils.command.install
Implements the Distutils 'install' command."""
import sys
import os
from distutils import log
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.util import get_platform
from distutils.errors import DistutilsOptionError
from site import USER_BASE
from site import USER_SITE
HAS_USER_SITE = True
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/$platlibdir/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short$abiflags/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/$platlibdir/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'nt': WINDOWS_SCHEME,
'pypy': {
'purelib': '$base/site-packages',
'platlib': '$base/site-packages',
'headers': '$base/include/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'pypy_nt': {
'purelib': '$base/site-packages',
'platlib': '$base/site-packages',
'headers': '$base/include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
}
# user site schemes
if HAS_USER_SITE:
INSTALL_SCHEMES['nt_user'] = {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
'scripts': '$userbase/Python$py_version_nodot/Scripts',
'data' : '$userbase',
}
INSTALL_SCHEMES['unix_user'] = {
'purelib': '$usersite',
'platlib': '$usersite',
'headers':
'$userbase/include/python$py_version_short$abiflags/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
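# Sketch (not part of the original distutils source): expanding one scheme
# entry with subst_vars; the base and version values are hypothetical.
_example = subst_vars(
    INSTALL_SCHEMES['unix_prefix']['purelib'],
    {'base': '/usr/local', 'py_version_short': '3.8'},
)
assert _example == '/usr/local/lib/python3.8/site-packages'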
class install(Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
]
boolean_options = ['compile', 'force', 'skip-build']
if HAS_USER_SITE:
user_options.append(('user', None,
"install in user site-package '%s'" % USER_SITE))
boolean_options.append('user')
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
"""Initializes options."""
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
self.user = 0
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_userbase = USER_BASE
self.install_usersite = USER_SITE
self.compile = None
self.optimize = None
# Deprecated
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options(self):
"""Finalizes options."""
# This method (and its helpers, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
        # distribution are determined.  Thus, this code makes a pretty important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError(
"must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError(
"must supply either home or prefix/exec-prefix -- not both")
if self.user and (self.prefix or self.exec_prefix or self.home or
self.install_base or self.install_platbase):
raise DistutilsOptionError("can't combine user with prefix, "
"exec_prefix/home, or install_(plat)base")
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = sys.version.split()[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
try:
abiflags = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
abiflags = ''
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': '%d.%d' % sys.version_info[:2],
'py_version_nodot': '%d%d' % sys.version_info[:2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
'abiflags': abiflags,
'platlibdir': getattr(sys, 'platlibdir', 'lib'),
}
if HAS_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print("config vars:")
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Create directories in the home dir:
if self.user:
self.create_home_path()
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.ext_modules: # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers',
'userbase', 'usersite')
# Deprecated
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
def dump_dirs(self, msg):
"""Dumps the list of user options."""
if not DEBUG:
return
from distutils.fancy_getopt import longopt_xlate
log.debug(msg + ":")
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if opt_name in self.negative_opt:
opt_name = self.negative_opt[opt_name]
opt_name = opt_name.translate(longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = opt_name.translate(longopt_xlate)
val = getattr(self, opt_name)
log.debug(" %s: %s", opt_name, val)
def finalize_unix(self):
"""Finalizes options for posix platforms."""
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError(
"install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme("unix_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError(
"must not supply exec-prefix without prefix")
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
self.select_scheme("unix_prefix")
def finalize_other(self):
"""Finalizes options for non-posix platforms"""
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme(os.name + "_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError(
"I don't know how to install stuff on '%s'" % os.name)
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
if (hasattr(sys, 'pypy_version_info') and
not name.endswith(('_user', '_home'))):
if os.name == 'nt':
name = 'pypy_nt'
else:
name = 'pypy'
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data',])
def convert_paths(self, *names):
"""Call `convert_path` over `names`."""
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path(self):
"""Set `path_file` and `extra_dirs` using `extra_path`."""
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
log.warn(
"Distribution option extra_path is deprecated. "
"See issue27919 for details."
)
if isinstance(self.extra_path, str):
self.extra_path = self.extra_path.split(',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
path_file, extra_dirs = self.extra_path
else:
raise DistutilsOptionError(
"'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
def change_roots(self, *names):
"""Change the install directories pointed by name using root."""
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in self.config_vars.items():
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
# -- Command execution methods -------------------------------------
def run(self):
"""Runs the command."""
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# If we built for any other platform, we can't install.
build_plat = self.distribution.get_command_obj('build').plat_name
# check warn_dir - it is a clue that the 'install' is happening
# internally, and not to sys.path, so we don't check the platform
# matches what we are running.
if self.warn_dir and build_plat != get_platform():
raise DistutilsPlatformError("Can't install when "
"cross-compiling")
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
def create_path_file(self):
"""Creates the .pth file"""
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs(self):
"""Assembles the outputs of all the sub-commands."""
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs(self):
"""Returns the inputs of all the sub-commands"""
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib(self):
"""Returns true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers(self):
"""Returns true if the current distribution has any headers to
install."""
return self.distribution.has_headers()
def has_scripts(self):
"""Returns true if the current distribution has any scripts to.
install."""
return self.distribution.has_scripts()
def has_data(self):
"""Returns true if the current distribution has any data to.
install."""
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self:True),
]
|
Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/command/install.py",
"repo_id": "Django-locallibrary",
"token_count": 12224
}
| 25 |
"""distutils.errors
Provides exceptions used by the Distutils modules. Note that Distutils
modules may raise standard exceptions; in particular, SystemExit is
usually raised for errors that are obviously the end-user's fault
(eg. bad command-line arguments).
This module is safe to use in "from ... import *" mode; it only exports
symbols whose names start with "Distutils" and end with "Error"."""
class DistutilsError (Exception):
"""The root of all Distutils evil."""
pass
class DistutilsModuleError (DistutilsError):
"""Unable to load an expected module, or to find an expected class
within some module (in particular, command modules and classes)."""
pass
class DistutilsClassError (DistutilsError):
"""Some command class (or possibly distribution class, if anyone
feels a need to subclass Distribution) is found not to be holding
up its end of the bargain, ie. implementing some part of the
"command "interface."""
pass
class DistutilsGetoptError (DistutilsError):
"""The option table provided to 'fancy_getopt()' is bogus."""
pass
class DistutilsArgError (DistutilsError):
"""Raised by fancy_getopt in response to getopt.error -- ie. an
error in the command line usage."""
pass
class DistutilsFileError (DistutilsError):
"""Any problems in the filesystem: expected file not found, etc.
Typically this is for problems that we detect before OSError
could be raised."""
pass
class DistutilsOptionError (DistutilsError):
"""Syntactic/semantic errors in command options, such as use of
mutually conflicting options, or inconsistent options,
badly-spelled values, etc. No distinction is made between option
values originating in the setup script, the command line, config
files, or what-have-you -- but if we *know* something originated in
the setup script, we'll raise DistutilsSetupError instead."""
pass
class DistutilsSetupError (DistutilsError):
"""For errors that can be definitely blamed on the setup script,
such as invalid keyword arguments to 'setup()'."""
pass
class DistutilsPlatformError (DistutilsError):
"""We don't know how to do something on the current platform (but
we do know how to do it on some platform) -- eg. trying to compile
C files on a platform not supported by a CCompiler subclass."""
pass
class DistutilsExecError (DistutilsError):
"""Any problems executing an external program (such as the C
compiler, when compiling C files)."""
pass
class DistutilsInternalError (DistutilsError):
"""Internal inconsistencies or impossibilities (obviously, this
should never be seen if the code is working!)."""
pass
class DistutilsTemplateError (DistutilsError):
"""Syntax error in a file list template."""
class DistutilsByteCompileError(DistutilsError):
"""Byte compile error."""
# Exception classes used by the CCompiler implementation classes
class CCompilerError (Exception):
"""Some compile/link operation failed."""
class PreprocessError (CCompilerError):
"""Failure to preprocess one or more C/C++ files."""
class CompileError (CCompilerError):
"""Failure to compile one or more C/C++ source files."""
class LibError (CCompilerError):
"""Failure to create a static library from one or more C/C++ object
files."""
class LinkError (CCompilerError):
"""Failure to link one or more C/C++ object files into an executable
or shared library file."""
class UnknownFileError (CCompilerError):
"""Attempt to process an unknown file type."""
|
Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/errors.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/_distutils/errors.py",
"repo_id": "Django-locallibrary",
"token_count": 1008
}
| 26 |
import os
import sys
import itertools
from distutils.command.build_ext import build_ext as _du_build_ext
from distutils.file_util import copy_file
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
from distutils.errors import DistutilsError
from distutils import log
from setuptools.extension import Library
from setuptools.extern import six
if six.PY2:
import imp
EXTENSION_SUFFIXES = [
s for s, _, tp in imp.get_suffixes() if tp == imp.C_EXTENSION]
else:
from importlib.machinery import EXTENSION_SUFFIXES
try:
# Attempt to use Cython for building extensions, if available
from Cython.Distutils.build_ext import build_ext as _build_ext
# Additionally, assert that the compiler module will load
# also. Ref #1229.
__import__('Cython.Compiler.Main')
except ImportError:
_build_ext = _du_build_ext
# make sure _config_vars is initialized
get_config_var("LDSHARED")
from distutils.sysconfig import _config_vars as _CONFIG_VARS # noqa
def _customize_compiler_for_shlib(compiler):
if sys.platform == "darwin":
# building .dylib requires additional compiler flags on OSX; here we
# temporarily substitute the pyconfig.h variables so that distutils'
# 'customize_compiler' uses them before we build the shared libraries.
tmp = _CONFIG_VARS.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_CONFIG_VARS['LDSHARED'] = (
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
_CONFIG_VARS['CCSHARED'] = " -dynamiclib"
_CONFIG_VARS['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_CONFIG_VARS.clear()
_CONFIG_VARS.update(tmp)
else:
customize_compiler(compiler)
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
import dl
use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
except ImportError:
pass
def if_dl(s):
return s if have_rtld else ''
def get_abi3_suffix():
"""Return the file extension for an abi3-compliant Extension()"""
for suffix in EXTENSION_SUFFIXES:
if '.abi3' in suffix: # Unix
return suffix
elif suffix == '.pyd': # Windows
return suffix
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,
os.path.basename(filename))
src_filename = os.path.join(self.build_lib, filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self, fullname)
if fullname in self.ext_map:
ext = self.ext_map[fullname]
use_abi3 = (
not six.PY2
and getattr(ext, 'py_limited_api')
and get_abi3_suffix()
)
if use_abi3:
so_ext = get_config_var('EXT_SUFFIX')
filename = filename[:-len(so_ext)]
filename = filename + get_abi3_suffix()
if isinstance(ext, Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn, libtype)
elif use_stubs and ext._links_to_dynamic:
d, fn = os.path.split(filename)
return os.path.join(d, 'dl-' + fn)
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext, Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
# distutils 3.1 will also ask for module names
# XXX what to do with conflicts?
self.ext_map[fullname.split('.')[-1]] = ext
ltd = self.shlibs and self.links_to_dynamic(ext) or False
ns = ltd and use_stubs and not isinstance(ext, Library)
ext._links_to_dynamic = ltd
ext._needs_stub = ns
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib, filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
_customize_compiler_for_shlib(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext, Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self, ext)
def build_extension(self, ext):
ext._convert_pyx_sources_to_lang()
_compiler = self.compiler
try:
if isinstance(ext, Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self, ext)
if ext._needs_stub:
cmd = self.get_finalized_command('build_py').build_lib
self.write_stub(cmd, ext)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
return any(pkg + libname in libnames for libname in ext.libraries)
def get_outputs(self):
return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
def __get_stubs_outputs(self):
# assemble the base name for each extension that needs a stub
ns_ext_bases = (
os.path.join(self.build_lib, *ext._full_name.split('.'))
for ext in self.extensions
if ext._needs_stub
)
# pair each base with the extension
pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
return list(base + fnext for base, fnext in pairs)
def __get_output_extensions(self):
yield '.py'
yield '.pyc'
if self.get_finalized_command('build_py').optimize:
yield '.pyo'
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s", ext._full_name,
output_dir)
stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
'.py')
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file + " already exists! Please delete.")
if not self.dry_run:
f = open(stub_file, 'w')
f.write(
'\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources" + if_dl(", dl"),
" from importlib.machinery import ExtensionFileLoader",
" __file__ = pkg_resources.resource_filename"
"(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" ExtensionFileLoader(__name__,",
" __file__).load_module()",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
])
)
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name == 'nt':
# Build shared libraries
#
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(
self, objects, output_libname, output_dir=None, libraries=None,
library_dirs=None, runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
target_lang=None):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
# libraries=None, library_dirs=None, runtime_library_dirs=None,
# export_symbols=None, extra_preargs=None, extra_postargs=None,
# build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang
)
|
Django-locallibrary/env/Lib/site-packages/setuptools/command/build_ext.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/build_ext.py",
"repo_id": "Django-locallibrary",
"token_count": 6317
}
| 27 |
from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import distutils
import os
from setuptools.extern.six.moves import configparser
from setuptools import Command
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind == 'local':
return 'setup.cfg'
if kind == 'global':
return os.path.join(
os.path.dirname(distutils.__file__), 'distutils.cfg'
)
if kind == 'user':
dot = os.name == 'posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
log.debug("Reading configuration from %s", filename)
opts = configparser.RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option, value in options.items():
if value is None:
log.debug(
"Deleting %s.%s from %s",
section, option, filename
)
opts.remove_option(section, option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s",
section, option, value, filename
)
opts.set(section, option, value)
log.info("Writing %s", filename)
if not dry_run:
with open(filename, 'w') as f:
opts.write(f)
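# Illustrative usage sketch (the section and option names below are
# hypothetical, not part of this module): set one option and delete
# another section entirely.  A ``None`` section value removes the whole
# section; a ``None`` option value removes just that option.
#
#   edit_config('setup.cfg', {
#       'easy_install': {'index_url': 'https://example.org/simple'},
#       'aliases': None,
#   })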
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames) > 1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-', '_'): self.set_value}
},
self.dry_run
)
|
Django-locallibrary/env/Lib/site-packages/setuptools/command/setopt.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/command/setopt.py",
"repo_id": "Django-locallibrary",
"token_count": 2257
}
| 28 |
import unicodedata
import sys
from setuptools.extern import six
# HFS Plus uses decomposed UTF-8
def decompose(path):
if isinstance(path, six.text_type):
return unicodedata.normalize('NFD', path)
try:
path = path.decode('utf-8')
path = unicodedata.normalize('NFD', path)
path = path.encode('utf-8')
except UnicodeError:
pass # Not UTF-8
return path
def filesys_decode(path):
"""
Ensure that the given path is decoded,
NONE when no expected encoding works
"""
if isinstance(path, six.text_type):
return path
fs_enc = sys.getfilesystemencoding() or 'utf-8'
candidates = fs_enc, 'utf-8'
for enc in candidates:
try:
return path.decode(enc)
except UnicodeDecodeError:
continue
def try_encode(string, enc):
"turn unicode encoding into a functional routine"
try:
return string.encode(enc)
except UnicodeEncodeError:
return None
|
Django-locallibrary/env/Lib/site-packages/setuptools/unicode_utils.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/setuptools/unicode_utils.py",
"repo_id": "Django-locallibrary",
"token_count": 412
}
| 29 |
The following code was used in an MQL5 article about gradient descent, linked here:
https://www.mql5.com/en/articles/11200
### Support the project
Any thoughts and contributions will be appreciated. Buy me a coffee if you appreciate the effort: https://www.buymeacoffee.com/omegajoctan
### Hire me in MQL5
Commission custom work from me using this link https://www.mql5.com/en/job/new?prefered=omegajoctan for any of the following projects:
* Support Vector Machine
* Linear Regression
* Logistic Regression
* Neural Network
* And many more machine learning aspects
|
Gradient-Descent-MQL5/Readme.md/0
|
{
"file_path": "Gradient-Descent-MQL5/Readme.md",
"repo_id": "Gradient-Descent-MQL5",
"token_count": 171
}
| 30 |
//+------------------------------------------------------------------+
//| CSimpleLinearRegression.mqh |
//| Copyright 2021, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2021, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CSimpleLinearRegression
{
private:
int m_handle;
string m_filename;
string m_delimiter;
double m_ypredicted[];
double x_values[];
double y_values[];
protected:
int m_rows, m_columns;
double mean(double& data[]);
void fileopen();
public:
CSimpleLinearRegression(void);
~CSimpleLinearRegression(void);
//---
void GetDataToArray(double& array[],string filename, string delimiter, int column_number);
void Init(double& x[], double & y[]);
void PrintFileDetails();
double y_intercept();
double coefficient_of_X();
void LinearRegressionMain(double& predict_y[]);
double r_squared(); //also known as coefficient of determinant
double corrcoef(double& x[], double &y[]); //also known as correlation coefficient
double corrcoef(); //also known as correlation coefficient
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CSimpleLinearRegression::CSimpleLinearRegression(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CSimpleLinearRegression::~CSimpleLinearRegression(void)
{
Print(" Simple Linear Regression Library ");
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CSimpleLinearRegression::fileopen(void)
{
m_handle = FileOpen(m_filename,FILE_READ|FILE_WRITE|FILE_CSV,m_delimiter);
if (m_handle==INVALID_HANDLE)
{
Print("Data to work with is nowhere to be found, Error = ",GetLastError()," ", __FUNCTION__);
}
//---
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CSimpleLinearRegression::GetDataToArray(double &array[],string file_name,string delimiter,int column_number)
{
m_filename = file_name;
m_delimiter = delimiter;
int column=0, columns_total=0;
int rows=0;
fileopen();
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
if (rows==0)
{
columns_total++;
}
column++;
//Get data by each Column
if (column==column_number) //if we are on the specific column that we want
{
ArrayResize(array,rows+1);
if (rows==0)
{
                   if ((double(data))!=0) //in case the first line of the CSV column holds a header (column name) rather than a numeric value
{
array[rows]= NormalizeDouble((double)data,Digits());
}
else { ArrayRemove(array,0,1); }
}
else
{
array[rows-1]= StringToDouble(data);
}
//Print("column ",column," "," Value ",(double)data);
}
//---
if (FileIsLineEnding(m_handle))
{
rows++;
column=0;
}
}
FileClose(m_handle);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CSimpleLinearRegression::Init(double& x[], double& y[])
{
ArrayCopy(x_values,x);
ArrayCopy(y_values,y);
//---
if (ArraySize(x_values)!=ArraySize(y_values))
Print(" Two of your Arrays seems to vary In Size, This could lead to inaccurate calculations ",__FUNCTION__);
int columns=0, columns_total=0;
int rows=0;
fileopen();
while (!FileIsEnding(m_handle))
{
string data = FileReadString(m_handle);
if (rows==0)
{
columns_total++;
}
columns++;
if (FileIsLineEnding(m_handle))
{
rows++;
columns=0;
}
}
m_rows = rows;
m_columns = columns;
FileClose(m_handle);
//---
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CSimpleLinearRegression::PrintFileDetails()
{
Print("Lines Total ", m_rows," Columns Total ",m_columns);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CSimpleLinearRegression::LinearRegressionMain(double &predict_y[])
{
double slope = coefficient_of_X();
double constant_y_intercept= y_intercept();
Print("The Linear Regression Model is "," Y =",DoubleToString(slope,5),"x+",DoubleToString(constant_y_intercept,5));
ArrayResize(predict_y,ArraySize(y_values));
    for (int i=0; i<ArraySize(x_values); i++)
        predict_y[i] = slope*x_values[i]+constant_y_intercept; //reuse the coefficients computed above
//---
// Copy the predicted values to m_ypredicted[], to be Accessed inside the library
ArrayCopy(m_ypredicted,predict_y);
}
//+------------------------------------------------------------------+
//| Slope of our model |
//+------------------------------------------------------------------+
double CSimpleLinearRegression::coefficient_of_X()
{
double m=0;
double x_mean=mean(x_values);
    double y_mean=mean(y_values);
//---
{
double x__x=0, y__y=0;
double numerator=0, denominator=0;
for (int i=0; i<(ArraySize(x_values)+ArraySize(y_values))/2; i++)
{
x__x = x_values[i] - x_mean; //right side of the numerator (x-side)
y__y = y_values[i] - y_mean; //left side of the numerator (y-side)
numerator += x__x * y__y; //summation of the product two sides of the numerator
denominator += MathPow(x__x,2);
}
m = numerator/denominator;
}
return (m);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CSimpleLinearRegression::y_intercept()
{
// c = y - mx
return (mean(y_values)-coefficient_of_X()*mean(x_values));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CSimpleLinearRegression::r_squared()
{
double error=0;
double numerator =0, denominator=0;
double y_mean = mean(y_values);
//---
if (ArraySize(m_ypredicted)==0)
Print("The Predicted values Array seems to have no values, Call the main Simple Linear Regression Funtion before any use of this function = ",__FUNCTION__);
else
{
for (int i=0; i<ArraySize(y_values); i++)
{
numerator += MathPow((y_values[i]-m_ypredicted[i]),2);
denominator += MathPow((y_values[i]-y_mean),2);
}
error = 1 - (numerator/denominator);
}
return(error);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CSimpleLinearRegression::corrcoef(void)
{
double r=0;
double numerator =0, denominator =0;
double x__x =0, y__y=0;
for(int i=0; i<ArraySize(x_values); i++)
{
numerator += (x_values[i]-mean(x_values))*(y_values[i]-mean(y_values));
x__x += MathPow((x_values[i]-mean(x_values)),2); //summation of x values minus it's mean squared
y__y += MathPow((y_values[i]-mean(y_values)),2); //summation of y values minus it's mean squared
}
    denominator = MathSqrt(x__x)*MathSqrt(y__y); //square root of the x summation times square root of the y summation (denominator of Pearson's r)
r = numerator/denominator;
return(r);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CSimpleLinearRegression::corrcoef(double &x[],double &y[])
{
double r=0;
double numerator =0, denominator =0;
double x__x =0, y__y=0;
for(int i=0; i<ArraySize(x); i++)
{
numerator += (x[i]-mean(x))*(y[i]-mean(y));
x__x += MathPow((x[i]-mean(x)),2); //summation of x values minus it's mean squared
y__y += MathPow((y[i]-mean(y)),2); //summation of y values minus it's mean squared
}
    denominator = MathSqrt(x__x)*MathSqrt(y__y); //square root of the x summation times square root of the y summation (denominator of Pearson's r)
r = numerator/denominator;
return(r);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CSimpleLinearRegression::mean(double &data[])
{
double x_y__bar=0;
for (int i=0; i<ArraySize(data); i++)
{
x_y__bar += data[i]; // all values summation
}
x_y__bar = x_y__bar/ArraySize(data); //total value after summation divided by total number of elements
return(x_y__bar);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
class CMultipleLinearRegression: public CSimpleLinearRegression
{
private:
int m_independent_vars;
public:
CMultipleLinearRegression(void);
~CMultipleLinearRegression(void);
double coefficient_of_X(double& x_arr[],double& y_arr[]);
void MultipleRegressionMain(double& predicted_y[],double& Y[],double& A[],double& B[]);
double y_interceptforMultiple(double& Y[],double& A[],double& B[]);
void MultipleRegressionMain(double& predicted_y[],double& Y[],double& A[],double& B[],double& C[],double& D[]);
double y_interceptforMultiple(double& Y[],double& A[],double& B[],double& C[],double& D[]);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMultipleLinearRegression::CMultipleLinearRegression(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMultipleLinearRegression::~CMultipleLinearRegression(void)
{
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMultipleLinearRegression::MultipleRegressionMain(double &predicted_y[],double &Y[],double &A[],double &B[])
{
// Multiple regression formula = y = M1X1+M2X2+M3X3+...+C
double constant_y_intercept=y_interceptforMultiple(Y,A,B);
double slope1 = coefficient_of_X(A,Y);
double slope2 = coefficient_of_X(B,Y);
Print("Multiple Regression Model is ","Y="+DoubleToString(slope1,2)+"A+"+DoubleToString(slope2,2)+"B+"+
DoubleToString(constant_y_intercept,2));
int ArrSize = (ArraySize(A)+ArraySize(B))/2;
ArrayResize(predicted_y,ArrSize);
for (int i=0; i<ArrSize; i++)
predicted_y[i] = slope1*A[i]+slope2*B[i]+constant_y_intercept;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CMultipleLinearRegression::coefficient_of_X(double &x_arr[],double &y_arr[])
{
double m=0;
double x_mean=mean(x_arr);
   double y_mean=mean(y_arr);
//---
//Print("mean x ",x_mean," y_mean ",y_mean," x arraysize ",ArraySize(x_arr));
if (ArraySize(x_arr)!=ArraySize(y_arr))
Print(" Two of your datasets seems to vary In Size, This could lead to inacurate results ",__FUNCTION__);
else
{
double x__x=0, y__y=0;
double numerator=0, denominator=0;
for (int i=0; i<(ArraySize(x_arr)+ArraySize(y_arr))/2; i++)
{
x__x = x_arr[i] - x_mean;
y__y = y_arr[i] - y_mean;
numerator += x__x * y__y;
denominator += MathPow(x__x,2);
}
m = numerator/denominator;
}
return (m);
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CMultipleLinearRegression::y_interceptforMultiple(double &Y[],double &A[],double &B[])
{
//formula c=Y-M1X1-M2X2-....;
return(mean(Y)-coefficient_of_X(A,Y)*mean(A)-coefficient_of_X(B,Y)*mean(B));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
void CMultipleLinearRegression::MultipleRegressionMain(double &predicted_y[],double &Y[],double &A[],double &B[],double &C[],double &D[])
{
double constant_y_intercept = y_interceptforMultiple(Y,A,B,C,D);
double slope1 = coefficient_of_X(A,Y);
double slope2 = coefficient_of_X(B,Y);
double slope3 = coefficient_of_X(C,Y);
double slope4 = coefficient_of_X(D,Y);
//---
Print("Multiple Regression Model is ","Y="+DoubleToString(slope1,2),"A+"+DoubleToString(slope2,2)+"B+"+
DoubleToString(slope3,2)+"C"+DoubleToString(slope4,2)+"D"+DoubleToString(constant_y_intercept,2));
//---
int ArrSize = (ArraySize(A)+ArraySize(B))/2;
ArrayResize(predicted_y,ArrSize);
for (int i=0; i<ArrSize; i++)
predicted_y[i] = slope1*A[i]+slope2*B[i]+slope3*C[i]+slope4*D[i]+constant_y_intercept;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
double CMultipleLinearRegression::y_interceptforMultiple(double &Y[],double &A[],double &B[],double &C[],double &D[])
{
return (mean(Y)-coefficient_of_X(A,Y)*mean(A)-coefficient_of_X(B,Y)*mean(B)-coefficient_of_X(C,Y)*mean(C)-coefficient_of_X(D,Y)*mean(D));
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
|
Linear-Regression-python-and-MQL5/LinearRegressionLib.mqh/0
|
{
"file_path": "Linear-Regression-python-and-MQL5/LinearRegressionLib.mqh",
"repo_id": "Linear-Regression-python-and-MQL5",
"token_count": 7518
}
| 31 |
//+------------------------------------------------------------------+
//| BaseClustering.mqh |
//| Copyright 2023, Omega Joctan |
//| https://www.mql5.com/en/users/omegajoctan |
//+------------------------------------------------------------------+
#property copyright "Copyright 2023, Omega Joctan"
#property link "https://www.mql5.com/en/users/omegajoctan"
#include <MALE5\MatrixExtend.mqh>
#include <MALE5\linalg.mqh>
#include <MALE5\MqPlotLib\plots.mqh>
//+------------------------------------------------------------------+
//| defines |
//+------------------------------------------------------------------+
class BaseClustering
{
public:
BaseClustering(void);
~BaseClustering(void);
static matrix Get(matrix &X, vector &index, int axis=0);
};
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
matrix BaseClustering::Get(matrix &X, vector &index, int axis=0)
{
matrix ret_matrix = {};
ulong row_col=0;
bool isRows = true;
switch(axis)
{
case 0:
row_col = X.Rows();
isRows = true;
break;
case 1:
row_col = X.Cols();
isRows = false;
break;
default:
printf("%s Invalid axis %d ",__FUNCTION__,axis);
return ret_matrix;
break;
}
//---
ret_matrix.Resize(isRows?index.Size():X.Rows(), isRows?X.Cols(): index.Size());
   for (ulong i=0, count=0; i<row_col; i++)
     for (ulong j=0; j<index.Size(); j++)
       {
        if (i==index[j]) //this row/column was selected by the index vector
          {
           if (isRows)
              ret_matrix.Row(X.Row(i), count);
           else
              ret_matrix.Col(X.Col(i), count);
           count++;
          }
       }
return ret_matrix;
}
//+------------------------------------------------------------------+
//| |
//+------------------------------------------------------------------+
|
MALE5/Clustering/base.mqh/0
|
{
"file_path": "MALE5/Clustering/base.mqh",
"repo_id": "MALE5",
"token_count": 1242
}
| 32 |
## Theory Overview: Linear Regression
Linear regression is a statistical method for modeling the relationship between a dependent variable (what you want to predict) and one or more independent variables (what you are using to predict). It assumes a linear relationship between the variables, meaning the change in the dependent variable is proportional to the change in the independent variable(s).
The goal of linear regression is to find a line (for a single independent variable) or a hyperplane (for multiple independent variables) that best fits the data points. This line/hyperplane is represented by the following equation:
`y = β₀ + β₁x₁ + β₂x₂ + ... + βₙxₙ`
where:
* `y` is the dependent variable
* `x₁`, `x₂`, ..., `xₙ` are the independent variables
* `β₀` is the intercept (the y-value where the line/hyperplane crosses the y-axis)
* `β₁`, `β₂`, ..., `βₙ` are the coefficients (slopes) of the independent variables
By fitting the line/hyperplane to the data, linear regression allows us to:
* **Make predictions:** We can use the fitted model to predict the dependent variable for new data points based on their independent variable values.
* **Understand relationships:** The coefficients represent the strength and direction of the relationships between the independent and dependent variables.
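As a small worked example (with made-up coefficients), suppose a fitted model with one independent variable has `β₀ = 2.0` and `β₁ = 3.0`. A new data point with `x₁ = 4` is then predicted as:
```
y = β₀ + β₁x₁ = 2.0 + 3.0 × 4 = 14.0
```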
## CLinearRegression Class Documentation
The `CLinearRegression` class implements linear regression functionality. It provides methods for both training and prediction:
**Public Functions:**
* `CLinearRegression(void)` Default constructor.
* `~CLinearRegression(void)` Destructor.
* `void fit_LeastSquare(matrix &x, vector &y)` Fits the model using the least squares method.
* This method trains the model by finding the coefficients (Betas) that minimize the sum of squared errors between the predicted and actual values.
* Requires `x` (matrix of independent variables) and `y` (vector of dependent variables) as input.
* `void fit_GradDescent(matrix &x, vector &y, double alpha, uint epochs = 1000)` Fits the model using gradient descent.
* This method trains the model iteratively, updating the coefficients based on the calculated gradients (slopes of the error function).
* Requires `x` (matrix of independent variables), `y` (vector of dependent variables), `alpha` (learning rate), and optional `epochs` (number of iterations) as input.
* `double predict(vector &x)` Predicts the dependent variable for a new data point represented by the input vector `x`.
* Requires a vector containing the values for the independent variables of the new data point.
* Assumes the model is already trained (trained flag checked internally).
* `vector predict(matrix &x)` Predicts the dependent variables for multiple new data points represented by the input matrix `x`.
* Requires a matrix where each row represents a new data point with its independent variable values.
* Assumes the model is already trained (trained flag checked internally).
**Additional Notes:**
* The class uses internal member variables to store the trained coefficients (`Betas` and `Betas_v`).
* Internal helper functions (`checkIsTrained`, `TrimNumber`, `dx_wrt_bo`, `dx_wrt_b1`) are not directly accessible from the user but support the core functionalities.
* The class checks if the model is trained before allowing predictions using the `checkIsTrained` function.
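Below is a minimal usage sketch. The include path and the sample data are assumptions for illustration, not part of the library; only the class and method names documented above are taken from the source.
```
#include <LinearRegression.mqh> // assumed header name

void OnStart()
  {
   matrix x = {{1},{2},{3},{4},{5}};       // one independent variable
   vector y = {2.1, 3.9, 6.2, 8.1, 9.8};   // roughly y = 2x

   CLinearRegression lr;
   lr.fit_LeastSquare(x, y);               // or: lr.fit_GradDescent(x, y, 0.01, 1000);

   vector x_new = {6};
   Print("prediction for x=6: ", lr.predict(x_new)); // expect a value near 12
  }
```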
## CLogisticRegression Class: Logistic Regression
The `CLogisticRegression` class provides functionalities for implementing logistic regression in MQL5. This statistical method allows you to model the probability of a binary outcome (belonging to one of two classes) based on independent variables.
**Public Functions:**
* `CLogisticRegression(uint epochs=10, double alpha=0.01, double tol=1e-8)` Constructor, allows setting hyperparameters (epochs, learning rate, tolerance) for training.
* `~CLogisticRegression(void)` Destructor.
* `void fit(matrix &x, vector &y)` Trains the model using the provided training data (`x` - independent variables, `y` - binary classification labels).
* Internally performs gradient descent optimization to find the optimal weights and bias for the model.
* `int predict(vector &x)` Predicts the class label (0 or 1) for a new data point represented by the input vector `x`.
* Assumes the model is already trained (checked internally).
* `vector predict(matrix &x)` Predicts class labels for multiple new data points represented by the input matrix `x`.
* Each row in the matrix represents a new data point.
* Assumes the model is already trained (checked internally).
* `double predict_proba(vector &x)` Predicts the probability of belonging to class 1 for a new data point represented by the input vector `x`.
* Assumes the model is already trained (checked internally).
* `vector predict_proba(matrix &x)` Predicts the probabilities of belonging to class 1 for multiple new data points represented by the input matrix `x`.
* Each row in the matrix represents a new data point.
* Assumes the model is already trained (checked internally).
**Mathematical Theory (Basic Overview):**
Logistic regression uses the sigmoid function to map the linear combination of the input features (weighted sum) to a probability between 0 and 1.
The mathematical formula for the logistic function is:
```
f(z) = 1 / (1 + exp(-z))
```
where:
* `z` is the linear combination of weights and features (z = w₁x₁ + w₂x₂ + ... + wₙxₙ + b)
* `w_i` are the weights for each feature
* `b` is the bias term
* `f(z)` is the predicted probability of belonging to class 1
By adjusting the weights and bias through training (minimizing the error between predicted and actual labels), the logistic regression model learns to distinguish between the two classes based on the input features and predict probabilities accordingly.
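As a quick illustration of the formula above (plain Python, not the MQL5 API; the weights shown are arbitrary values, not learned ones):

```python
import math

def sigmoid(z):
    # Maps any real z to a probability in (0, 1).
    return 1.0 / (1.0 + math.exp(-z))

def predict_proba(x, w, b):
    # z = w1*x1 + ... + wn*xn + b, then squash through the sigmoid.
    z = sum(wi * xi for wi, xi in zip(w, x)) + b
    return sigmoid(z)

def predict(x, w, b, threshold=0.5):
    # Class label: 1 if the probability of class 1 reaches the threshold.
    return int(predict_proba(x, w, b) >= threshold)

w, b = [0.8, -0.4], -0.1
print(predict_proba([1.0, 2.0], w, b))  # ~0.475
print(predict([1.0, 2.0], w, b))        # 0
```

Training only adjusts `w` and `b`; the prediction path stays exactly this simple.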
**Additional Notes:**
* The class utilizes gradient descent to optimize the weights and bias during training.
* Hyperparameter tuning (epochs, learning rate, tolerance) can significantly impact model performance and should be considered based on the specific data and task.
## CPolynomialRegression Class: Polynomial Regression
The `CPolynomialRegression` class provides functionalities for implementing polynomial regression in MQL5. This technique extends linear regression by fitting a higher-degree polynomial function to the data, allowing for more complex relationships between the independent and dependent variables.
**Public Functions:**
* `CPolynomialRegression(int degree=2)` Constructor, allows setting the degree of the polynomial (default is 2).
* `~CPolynomialRegression(void)` Destructor.
* `void BIC(ulong k, vector &bic, int &best_degree)` Calculates the Bayesian Information Criterion (BIC) for candidate polynomial degrees and recommends the degree with the lowest BIC value (stored in `best_degree`).
* Requires `k` (the number of candidate degrees to evaluate), `bic` (output vector that receives the BIC value for each degree), and `best_degree` (output variable that receives the recommended degree).
* `void fit(matrix &x, vector &y)` Trains the model using the provided training data (`x` - independent variables, `y` - dependent variables).
* Internally fits the polynomial function to the data and stores the coefficients in the `Betas` and `Betas_v` member variables.
* `double predict(vector &x)` Predicts the dependent variable for a new data point represented by the input vector `x`.
* Assumes the model is already trained (a trained flag is not explicitly documented for this method, but may be checked internally).
* `vector predict(matrix &x)` Predicts the dependent variables for multiple new data points represented by the input matrix `x`.
* Each row in the matrix represents a new data point.
* Assumes the model is already trained (a trained flag is not explicitly documented for this method, but may be checked internally).
**Mathematical Theory (Basic Overview):**
Polynomial regression models the relationship between the independent and dependent variables using a polynomial function of the form:
```
y = β₀ + β₁x + β₂x² + ... + βₙxⁿ
```
where:
* `y` is the dependent variable
* `x` is the independent variable
* `β₀`, `β₁`, ..., `βₙ` are the coefficients of the polynomial (model parameters)
* `n` is the degree of the polynomial (set during object creation or through the `BIC` function)
By increasing the degree of the polynomial, the model can capture more complex non-linear relationships in the data. However, it is crucial to balance model complexity against overfitting (memorizing the training data while generalizing poorly to unseen data).
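The following NumPy sketch shows the standard way such a fit is computed: build a design matrix of powers of `x` and solve an ordinary least-squares problem. It illustrates the math, not the MQL5 implementation.

```python
import numpy as np

def fit_polynomial(x, y, degree):
    # Design matrix [1, x, x^2, ..., x^degree], one row per sample.
    X = np.vander(x, degree + 1, increasing=True)
    betas, *_ = np.linalg.lstsq(X, y, rcond=None)
    return betas

def predict_polynomial(x, betas):
    # y_hat = beta0 + beta1*x + beta2*x^2 + ...
    return sum(b * x**i for i, b in enumerate(betas))

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = x**2 - 2*x + 1                      # exactly quadratic data
betas = fit_polynomial(x, y, degree=2)
print(betas)                             # ~[1, -2, 1]
print(predict_polynomial(5.0, betas))    # ~16 (= 5^2 - 2*5 + 1)
```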
**Additional Notes:**
* The `BIC` function can be used to help select an appropriate polynomial degree by balancing model complexity and goodness-of-fit (a sketch of this selection follows this list).
* Choosing a high polynomial degree without sufficient data can lead to overfitting, so careful consideration and potentially additional techniques like regularization might be necessary.
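For reference, a common least-squares form of the criterion is `BIC = n*ln(RSS/n) + k*ln(n)`, with `n` samples, `k` fitted parameters, and `RSS` the residual sum of squares; the exact constants used by this class's `BIC` method may differ. A self-contained sketch of degree selection along these lines:

```python
import numpy as np

def bic_least_squares(y, y_pred, k):
    # BIC = n*ln(RSS/n) + k*ln(n); lower is better.
    # The k*ln(n) term penalizes every extra polynomial coefficient.
    n = len(y)
    rss = float(np.sum((y - y_pred) ** 2))
    return n * np.log(rss / n) + k * np.log(n)

rng = np.random.default_rng(0)
x = np.linspace(0.0, 4.0, 30)
y = x**2 - 2.0 * x + 1.0 + rng.normal(0.0, 0.3, size=x.size)  # noisy quadratic

scores = []
for degree in range(1, 6):
    coeffs = np.polyfit(x, y, degree)   # least-squares polynomial fit
    y_hat = np.polyval(coeffs, x)
    scores.append(bic_least_squares(y, y_hat, k=degree + 1))
best_degree = 1 + int(np.argmin(scores))
print(best_degree)                       # typically 2 for this data
```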
**References**
* [Data Science and Machine Learning (Part 01): Linear Regression](https://www.mql5.com/en/articles/10459)
* [Data Science and Machine Learning (Part 02): Logistic Regression](https://www.mql5.com/en/articles/10626)
* [Data Science and Machine Learning (Part 07): Polynomial Regression](https://www.mql5.com/en/articles/11477)
* [Data Science and Machine Learning (Part 10): Ridge Regression](https://www.mql5.com/en/articles/11735)
|
MALE5/Linear Models/README.md/0
|
{
"file_path": "MALE5/Linear Models/README.md",
"repo_id": "MALE5",
"token_count": 2442
}
| 33 |
{% extends "base_template.html" %}
{% block content %}
<h1>All Authors</h1>
{% if authors_list %}
<ul>
{% for author in authors_list %}
<li><a href="{{author.get_absolute_url}}">{{author.first_name}} {{author.last_name}}</a></li>
{% endfor %}
</ul>
{% endif %}
{% endblock %}
|
Django-locallibrary/LocalLibrary/catalog/Templates/authors.html/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/catalog/Templates/authors.html",
"repo_id": "Django-locallibrary",
"token_count": 182
}
| 0 |
from django.urls import path
from . import views
urlpatterns = [
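    # Each route maps a URL pattern to a view; <int:pk> and <uuid:pk> capture
    # the object's primary key and pass it to the view as the 'pk' argument.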
path('',views.index,name='index'),
path('books/',views.BookListView.as_view(),name='books_page'),
path('book/<int:pk>/',views.book_detail_view,name='book-detail'),
path('authors/',views.AuthorListView.as_view(),name='authors_page'),
path('author/<int:pk>/',views.author_detail_view,name='author-detail'),
path('author/create/',views.AuthorCreate.as_view(),name='author-create'),
path('author/<int:pk>/delete/',views.AuthorDelete.as_view(),name='author-delete'),
path('author/<int:pk>/modify/',views.AuthorUpdate.as_view(),name='author-update'),
path('mybooks/',views.LoanedBooksByUser.as_view(),name='my-borrowed'),
path('book/<uuid:pk>/renew/',views.renew_book_librarian,name='renew_book_librarian'),
]
|
Django-locallibrary/LocalLibrary/catalog/urls.py/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/catalog/urls.py",
"repo_id": "Django-locallibrary",
"token_count": 321
}
| 1 |
{% extends "base_template.html" %}
{% block content %}
<h1>The password has been successfully changed</h1>
{% endblock %}
|
Django-locallibrary/LocalLibrary/templates/registration/password_reset_complete.html/0
|
{
"file_path": "Django-locallibrary/LocalLibrary/templates/registration/password_reset_complete.html",
"repo_id": "Django-locallibrary",
"token_count": 54
}
| 2 |
from __future__ import absolute_import
import os
import sys
# Remove '' and current working directory from the first entry
# of sys.path, if present to avoid using current directory
# in pip commands check, freeze, install, list and show,
# when invoked as python -m pip <command>
if sys.path[0] in ('', os.getcwd()):
sys.path.pop(0)
# If we are running from a wheel, add the wheel to sys.path
# This allows invocations like: python pip-*.whl/pip install pip-*.whl
if __package__ == '':
# __file__ is pip-*.whl/pip/__main__.py
# first dirname call strips off '/__main__.py', second strips off '/pip'
# Resulting path is the name of the wheel itself
# Add that to sys.path so we can import pip
path = os.path.dirname(os.path.dirname(__file__))
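    # e.g. path == '/tmp/pip-20.2.3-py2.py3-none-any.whl' when run from that wheel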
sys.path.insert(0, path)
from pip._internal.cli.main import main as _main # isort:skip # noqa
if __name__ == '__main__':
sys.exit(_main())
|
Django-locallibrary/env/Lib/site-packages/pip/__main__.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/__main__.py",
"repo_id": "Django-locallibrary",
"token_count": 310
}
| 3 |
"""Subpackage containing all of pip's command line interface related code
"""
# This file intentionally does not import submodules
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/__init__.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/__init__.py",
"repo_id": "Django-locallibrary",
"token_count": 27
}
| 4 |
from contextlib import contextmanager
from pip._vendor.contextlib2 import ExitStack
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Iterator, ContextManager, TypeVar
_T = TypeVar('_T', covariant=True)
class CommandContextMixIn(object):
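    """Mix-in that manages a single ``ExitStack``-backed "main" context.

    ``main_context()`` is entered once per command run; ``enter_context()``
    registers resources whose cleanup should run when that context exits.
    """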
def __init__(self):
# type: () -> None
super(CommandContextMixIn, self).__init__()
self._in_main_context = False
self._main_context = ExitStack()
@contextmanager
def main_context(self):
# type: () -> Iterator[None]
assert not self._in_main_context
self._in_main_context = True
try:
with self._main_context:
yield
finally:
self._in_main_context = False
def enter_context(self, context_provider):
# type: (ContextManager[_T]) -> _T
assert self._in_main_context
return self._main_context.enter_context(context_provider)
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/command_context.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/cli/command_context.py",
"repo_id": "Django-locallibrary",
"token_count": 401
}
| 5 |
from __future__ import absolute_import
import hashlib
import logging
import sys
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
from pip._internal.utils.misc import read_chunks, write_output
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import List
logger = logging.getLogger(__name__)
class HashCommand(Command):
"""
Compute a hash of a local package archive.
These can be used with --hash in a requirements file to do repeatable
installs.
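
    Example requirements line (the digest shown is a placeholder, not a real
    hash):

        SomePackage==1.0 --hash=sha256:<64-hex-character-digest>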
"""
usage = '%prog [options] <file> ...'
ignore_require_venv = True
def add_options(self):
# type: () -> None
self.cmd_opts.add_option(
'-a', '--algorithm',
dest='algorithm',
choices=STRONG_HASHES,
action='store',
default=FAVORITE_HASH,
help='The hash algorithm to use: one of {}'.format(
', '.join(STRONG_HASHES)))
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
# type: (Values, List[str]) -> int
if not args:
self.parser.print_usage(sys.stderr)
return ERROR
algorithm = options.algorithm
for path in args:
write_output('%s:\n--hash=%s:%s',
path, algorithm, _hash_of_file(path, algorithm))
return SUCCESS
def _hash_of_file(path, algorithm):
# type: (str, str) -> str
"""Return the hash digest of a file."""
with open(path, 'rb') as archive:
hash = hashlib.new(algorithm)
for chunk in read_chunks(archive):
hash.update(chunk)
return hash.hexdigest()
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/hash.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/commands/hash.py",
"repo_id": "Django-locallibrary",
"token_count": 784
}
| 6 |
from pip._internal.distributions.base import AbstractDistribution
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional
from pip._vendor.pkg_resources import Distribution
from pip._internal.index.package_finder import PackageFinder
class InstalledDistribution(AbstractDistribution):
"""Represents an installed package.
This does not need any preparation as the required information has already
been computed.
"""
def get_pkg_resources_distribution(self):
# type: () -> Optional[Distribution]
return self.req.satisfied_by
def prepare_distribution_metadata(self, finder, build_isolation):
# type: (PackageFinder, bool) -> None
pass
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/distributions/installed.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/distributions/installed.py",
"repo_id": "Django-locallibrary",
"token_count": 243
}
| 7 |
import sys
from pip._internal.utils.compatibility_tags import (
get_supported,
version_info_to_nodot,
)
from pip._internal.utils.misc import normalize_version_info
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Tuple
from pip._vendor.packaging.tags import Tag
class TargetPython(object):
"""
Encapsulates the properties of a Python interpreter one is targeting
for a package install, download, etc.
"""
__slots__ = [
"_given_py_version_info",
"abi",
"implementation",
"platform",
"py_version",
"py_version_info",
"_valid_tags",
]
def __init__(
self,
platform=None, # type: Optional[str]
py_version_info=None, # type: Optional[Tuple[int, ...]]
abi=None, # type: Optional[str]
implementation=None, # type: Optional[str]
):
# type: (...) -> None
"""
:param platform: A string or None. If None, searches for packages
that are supported by the current system. Otherwise, will find
packages that can be built on the platform passed in. These
packages will only be downloaded for distribution: they will
not be built locally.
:param py_version_info: An optional tuple of ints representing the
Python version information to use (e.g. `sys.version_info[:3]`).
This can have length 1, 2, or 3 when provided.
:param abi: A string or None. This is passed to compatibility_tags.py's
get_supported() function as is.
:param implementation: A string or None. This is passed to
compatibility_tags.py's get_supported() function as is.
"""
# Store the given py_version_info for when we call get_supported().
self._given_py_version_info = py_version_info
if py_version_info is None:
py_version_info = sys.version_info[:3]
else:
py_version_info = normalize_version_info(py_version_info)
py_version = '.'.join(map(str, py_version_info[:2]))
self.abi = abi
self.implementation = implementation
self.platform = platform
self.py_version = py_version
self.py_version_info = py_version_info
# This is used to cache the return value of get_tags().
self._valid_tags = None # type: Optional[List[Tag]]
def format_given(self):
# type: () -> str
"""
Format the given, non-None attributes for display.
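
        For example: "platform='manylinux1_x86_64' version_info='3.7'"
        (attributes that were not given are omitted).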
"""
display_version = None
if self._given_py_version_info is not None:
display_version = '.'.join(
str(part) for part in self._given_py_version_info
)
key_values = [
('platform', self.platform),
('version_info', display_version),
('abi', self.abi),
('implementation', self.implementation),
]
return ' '.join(
'{}={!r}'.format(key, value) for key, value in key_values
if value is not None
)
def get_tags(self):
# type: () -> List[Tag]
"""
Return the supported PEP 425 tags to check wheel candidates against.
The tags are returned in order of preference (most preferred first).
"""
if self._valid_tags is None:
# Pass versions=None if no py_version_info was given since
# versions=None uses special default logic.
py_version_info = self._given_py_version_info
if py_version_info is None:
version = None
else:
version = version_info_to_nodot(py_version_info)
tags = get_supported(
version=version,
platform=self.platform,
abi=self.abi,
impl=self.implementation,
)
self._valid_tags = tags
return self._valid_tags
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/models/target_python.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/models/target_python.py",
"repo_id": "Django-locallibrary",
"token_count": 1757
}
| 8 |
from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
from pip._internal.exceptions import NetworkConnectionError
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Dict, Iterator
# The following comments and HTTP headers were originally added by
# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03.
#
# We use Accept-Encoding: identity here because requests defaults to
# accepting compressed responses. This breaks in a variety of ways
# depending on how the server is configured.
# - Some servers will notice that the file isn't a compressible file
# and will leave the file alone and with an empty Content-Encoding
# - Some servers will notice that the file is already compressed and
# will leave the file alone, adding a Content-Encoding: gzip header
# - Some servers won't notice anything at all and will take a file
# that's already been compressed and compress it again, and set
# the Content-Encoding: gzip header
# By setting this to request only the identity encoding, we hope to
# eliminate the third case. Hopefully no server exists that, when given a
# file it detects as already compressed and a request that does not ask
# for compression, decompresses it before sending -- if such a server
# exists, it will never be possible to make this work.
HEADERS = {'Accept-Encoding': 'identity'} # type: Dict[str, str]
def raise_for_status(resp):
# type: (Response) -> None
http_error_msg = u''
if isinstance(resp.reason, bytes):
# We attempt to decode utf-8 first because some servers
# choose to localize their reason strings. If the string
# isn't utf-8, we fall back to iso-8859-1 for all other
# encodings.
try:
reason = resp.reason.decode('utf-8')
except UnicodeDecodeError:
reason = resp.reason.decode('iso-8859-1')
else:
reason = resp.reason
if 400 <= resp.status_code < 500:
http_error_msg = u'%s Client Error: %s for url: %s' % (
resp.status_code, reason, resp.url)
elif 500 <= resp.status_code < 600:
http_error_msg = u'%s Server Error: %s for url: %s' % (
resp.status_code, reason, resp.url)
if http_error_msg:
raise NetworkConnectionError(http_error_msg, response=resp)
def response_chunks(response, chunk_size=CONTENT_CHUNK_SIZE):
# type: (Response, int) -> Iterator[bytes]
"""Given a requests Response, provide the data chunks.
"""
try:
# Special case for urllib3.
for chunk in response.raw.stream(
chunk_size,
# We use decode_content=False here because we don't
# want urllib3 to mess with the raw bytes we get
# from the server. If we decompress inside of
# urllib3 then we cannot verify the checksum
# because the checksum will be of the compressed
# file. This breakage will only occur if the
# server adds a Content-Encoding header, which
# depends on how the server was configured:
# - Some servers will notice that the file isn't a
# compressible file and will leave the file alone
# and with an empty Content-Encoding
# - Some servers will notice that the file is
# already compressed and will leave the file
# alone and will add a Content-Encoding: gzip
# header
# - Some servers won't notice anything at all and
# will take a file that's already been compressed
# and compress it again and set the
# Content-Encoding: gzip header
#
# By setting this not to decode automatically we
# hope to eliminate problems with the second case.
decode_content=False,
):
yield chunk
except AttributeError:
# Standard file-like object.
while True:
chunk = response.raw.read(chunk_size)
if not chunk:
break
yield chunk
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/network/utils.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/network/utils.py",
"repo_id": "Django-locallibrary",
"token_count": 1552
}
| 9 |
import logging
import os.path
from pip._internal.cli.spinners import open_spinner
from pip._internal.utils.setuptools_build import (
make_setuptools_bdist_wheel_args,
)
from pip._internal.utils.subprocess import (
LOG_DIVIDER,
call_subprocess,
format_command_args,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional, Text
logger = logging.getLogger(__name__)
def format_command_result(
command_args, # type: List[str]
command_output, # type: Text
):
# type: (...) -> str
"""Format command information for logging."""
command_desc = format_command_args(command_args)
text = 'Command arguments: {}\n'.format(command_desc)
if not command_output:
text += 'Command output: None'
elif logger.getEffectiveLevel() > logging.DEBUG:
text += 'Command output: [use --verbose to show]'
else:
if not command_output.endswith('\n'):
command_output += '\n'
text += 'Command output:\n{}{}'.format(command_output, LOG_DIVIDER)
return text
def get_legacy_build_wheel_path(
names, # type: List[str]
temp_dir, # type: str
name, # type: str
command_args, # type: List[str]
command_output, # type: Text
):
# type: (...) -> Optional[str]
"""Return the path to the wheel in the temporary build directory."""
# Sort for determinism.
names = sorted(names)
if not names:
msg = (
'Legacy build of wheel for {!r} created no files.\n'
).format(name)
msg += format_command_result(command_args, command_output)
logger.warning(msg)
return None
if len(names) > 1:
msg = (
'Legacy build of wheel for {!r} created more than one file.\n'
'Filenames (choosing first): {}\n'
).format(name, names)
msg += format_command_result(command_args, command_output)
logger.warning(msg)
return os.path.join(temp_dir, names[0])
def build_wheel_legacy(
name, # type: str
setup_py_path, # type: str
source_dir, # type: str
global_options, # type: List[str]
build_options, # type: List[str]
tempd, # type: str
):
# type: (...) -> Optional[str]
"""Build one unpacked package using the "legacy" build process.
Returns path to wheel if successfully built. Otherwise, returns None.
"""
wheel_args = make_setuptools_bdist_wheel_args(
setup_py_path,
global_options=global_options,
build_options=build_options,
destination_dir=tempd,
)
spin_message = 'Building wheel for {} (setup.py)'.format(name)
with open_spinner(spin_message) as spinner:
logger.debug('Destination directory: %s', tempd)
try:
output = call_subprocess(
wheel_args,
cwd=source_dir,
spinner=spinner,
)
except Exception:
spinner.finish("error")
logger.error('Failed building wheel for %s', name)
return None
names = os.listdir(tempd)
wheel_path = get_legacy_build_wheel_path(
names=names,
temp_dir=tempd,
name=name,
command_args=wheel_args,
command_output=output,
)
return wheel_path
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/operations/build/wheel_legacy.py",
"repo_id": "Django-locallibrary",
"token_count": 1436
}
| 10 |
import functools
import logging
from pip._vendor import six
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
from pip._vendor.resolvelib import Resolver as RLResolver
from pip._internal.exceptions import InstallationError
from pip._internal.req.req_install import check_invalid_constraint_type
from pip._internal.req.req_set import RequirementSet
from pip._internal.resolution.base import BaseResolver
from pip._internal.resolution.resolvelib.provider import PipProvider
from pip._internal.utils.misc import dist_is_editable
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from .factory import Factory
if MYPY_CHECK_RUNNING:
from typing import Dict, List, Optional, Set, Tuple
from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.resolvelib.resolvers import Result
from pip._vendor.resolvelib.structs import Graph
from pip._internal.cache import WheelCache
from pip._internal.index.package_finder import PackageFinder
from pip._internal.operations.prepare import RequirementPreparer
from pip._internal.req.req_install import InstallRequirement
from pip._internal.resolution.base import InstallRequirementProvider
logger = logging.getLogger(__name__)
class Resolver(BaseResolver):
_allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
def __init__(
self,
preparer, # type: RequirementPreparer
finder, # type: PackageFinder
wheel_cache, # type: Optional[WheelCache]
make_install_req, # type: InstallRequirementProvider
use_user_site, # type: bool
ignore_dependencies, # type: bool
ignore_installed, # type: bool
ignore_requires_python, # type: bool
force_reinstall, # type: bool
upgrade_strategy, # type: str
py_version_info=None, # type: Optional[Tuple[int, ...]]
lazy_wheel=False, # type: bool
):
super(Resolver, self).__init__()
if lazy_wheel:
logger.warning(
'pip is using lazily downloaded wheels using HTTP '
'range requests to obtain dependency information. '
'This experimental feature is enabled through '
'--use-feature=fast-deps and it is not ready for production.'
)
assert upgrade_strategy in self._allowed_strategies
self.factory = Factory(
finder=finder,
preparer=preparer,
make_install_req=make_install_req,
wheel_cache=wheel_cache,
use_user_site=use_user_site,
force_reinstall=force_reinstall,
ignore_installed=ignore_installed,
ignore_requires_python=ignore_requires_python,
py_version_info=py_version_info,
lazy_wheel=lazy_wheel,
)
self.ignore_dependencies = ignore_dependencies
self.upgrade_strategy = upgrade_strategy
self._result = None # type: Optional[Result]
def resolve(self, root_reqs, check_supported_wheels):
# type: (List[InstallRequirement], bool) -> RequirementSet
constraints = {} # type: Dict[str, SpecifierSet]
user_requested = set() # type: Set[str]
requirements = []
for req in root_reqs:
if req.constraint:
# Ensure we only accept valid constraints
problem = check_invalid_constraint_type(req)
if problem:
raise InstallationError(problem)
if not req.match_markers():
continue
name = canonicalize_name(req.name)
if name in constraints:
constraints[name] = constraints[name] & req.specifier
else:
constraints[name] = req.specifier
else:
if req.user_supplied and req.name:
user_requested.add(canonicalize_name(req.name))
r = self.factory.make_requirement_from_install_req(
req, requested_extras=(),
)
if r is not None:
requirements.append(r)
provider = PipProvider(
factory=self.factory,
constraints=constraints,
ignore_dependencies=self.ignore_dependencies,
upgrade_strategy=self.upgrade_strategy,
user_requested=user_requested,
)
reporter = BaseReporter()
resolver = RLResolver(provider, reporter)
try:
try_to_avoid_resolution_too_deep = 2000000
self._result = resolver.resolve(
requirements, max_rounds=try_to_avoid_resolution_too_deep,
)
except ResolutionImpossible as e:
error = self.factory.get_installation_error(e)
six.raise_from(error, e)
req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
for candidate in self._result.mapping.values():
ireq = candidate.get_install_requirement()
if ireq is None:
continue
# Check if there is already an installation under the same name,
# and set a flag for later stages to uninstall it, if needed.
            # * There isn't, good -- no uninstallation needed.
# * The --force-reinstall flag is set. Always reinstall.
# * The installation is different in version or editable-ness, so
# we need to uninstall it to install the new distribution.
# * The installed version is the same as the pending distribution.
            #   Skip this distribution altogether to save work.
installed_dist = self.factory.get_dist_to_uninstall(candidate)
if installed_dist is None:
ireq.should_reinstall = False
elif self.factory.force_reinstall:
ireq.should_reinstall = True
elif installed_dist.parsed_version != candidate.version:
ireq.should_reinstall = True
elif dist_is_editable(installed_dist) != candidate.is_editable:
ireq.should_reinstall = True
else:
continue
link = candidate.source_link
if link and link.is_yanked:
# The reason can contain non-ASCII characters, Unicode
# is required for Python 2.
msg = (
u'The candidate selected for download or install is a '
u'yanked version: {name!r} candidate (version {version} '
u'at {link})\nReason for being yanked: {reason}'
).format(
name=candidate.name,
version=candidate.version,
link=link,
reason=link.yanked_reason or u'<none given>',
)
logger.warning(msg)
req_set.add_named_requirement(ireq)
return req_set
def get_installation_order(self, req_set):
# type: (RequirementSet) -> List[InstallRequirement]
"""Get order for installation of requirements in RequirementSet.
The returned list contains a requirement before another that depends on
it. This helps ensure that the environment is kept consistent as they
get installed one-by-one.
The current implementation creates a topological ordering of the
dependency graph, while breaking any cycles in the graph at arbitrary
points. We make no guarantees about where the cycle would be broken,
other than they would be broken.
"""
assert self._result is not None, "must call resolve() first"
graph = self._result.graph
weights = get_topological_weights(graph)
sorted_items = sorted(
req_set.requirements.items(),
key=functools.partial(_req_set_item_sorter, weights=weights),
reverse=True,
)
return [ireq for _, ireq in sorted_items]
def get_topological_weights(graph):
# type: (Graph) -> Dict[Optional[str], int]
"""Assign weights to each node based on how "deep" they are.
This implementation may change at any point in the future without prior
notice.
    We take the length of the longest path to any node from the root,
    ignoring any paths that contain a single node twice (i.e. cycles). This
    is done through a depth-first search of the graph, while keeping track
    of the path to the node.
    A cycle in the graph would result in a node being revisited while it is
    already on the current path. In that case, we take no action, which
    ensures we don't get stuck in the cycle.
    When assigning weights, the longer path (i.e. the larger length) is
    preferred.
"""
path = set() # type: Set[Optional[str]]
weights = {} # type: Dict[Optional[str], int]
def visit(node):
# type: (Optional[str]) -> None
if node in path:
# We hit a cycle, so we'll break it here.
return
# Time to visit the children!
path.add(node)
for child in graph.iter_children(node):
visit(child)
path.remove(node)
last_known_parent_count = weights.get(node, 0)
weights[node] = max(last_known_parent_count, len(path))
# `None` is guaranteed to be the root node by resolvelib.
visit(None)
# Sanity checks
assert weights[None] == 0
assert len(weights) == len(graph)
return weights
def _req_set_item_sorter(
item, # type: Tuple[str, InstallRequirement]
weights, # type: Dict[Optional[str], int]
):
# type: (...) -> Tuple[int, str]
"""Key function used to sort install requirements for installation.
Based on the "weight" mapping calculated in ``get_installation_order()``.
The canonical package name is returned as the second member as a tie-
breaker to ensure the result is predictable, which is useful in tests.
"""
name = canonicalize_name(item[0])
return weights[name], name
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/resolution/resolvelib/resolver.py",
"repo_id": "Django-locallibrary",
"token_count": 4229
}
| 11 |
"""
This code wraps the vendored appdirs module so that the return values are
compatible with the current pip code base.
The intention is to rewrite current usages gradually, keeping the tests
passing, and eventually drop this module after all usages are changed.
"""
from __future__ import absolute_import
import os
from pip._vendor import appdirs as _appdirs
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List
def user_cache_dir(appname):
# type: (str) -> str
return _appdirs.user_cache_dir(appname, appauthor=False)
def user_config_dir(appname, roaming=True):
# type: (str, bool) -> str
path = _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming)
if _appdirs.system == "darwin" and not os.path.isdir(path):
path = os.path.expanduser('~/.config/')
if appname:
path = os.path.join(path, appname)
return path
# for the discussion regarding site_config_dir locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
# type: (str) -> List[str]
dirval = _appdirs.site_config_dir(appname, appauthor=False, multipath=True)
if _appdirs.system not in ["win32", "darwin"]:
# always look in /etc directly as well
return dirval.split(os.pathsep) + ['/etc']
return [dirval]
|
Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/appdirs.py/0
|
{
"file_path": "Django-locallibrary/env/Lib/site-packages/pip/_internal/utils/appdirs.py",
"repo_id": "Django-locallibrary",
"token_count": 498
}
| 12 |