code
stringlengths 1
5.19M
| package
stringlengths 1
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
"""
Base classes for features that are backwards-incompatible.
Usage:
features = Features()
features.add(Feature("py3k_feature", "power< 'py3k' any* >", "2.7"))
PATTERN = features.PATTERN
"""
# Template for one "name=pattern" alternative; the name doubles as the
# dict-lookup key in match results.
pattern_unformatted = "%s=%s" # name=pattern, for dict lookups
# Warning text; filled in with (feature name, minimum version).
message_unformatted = """
%s is only supported in Python %s and above."""


class Feature(object):
    """A named lib2to3 pattern tied to the minimum Python 2.x release
    (or 3.x, when no backwards-compatible 2.x version exists) that
    supports the feature it detects.
    """

    def __init__(self, name, PATTERN, version):
        self.name = name
        self._pattern = PATTERN
        self.version = version

    def message_text(self):
        """Return the warning message for this feature, naming the
        minimum Python version required to use it.
        """
        return message_unformatted % (self.name, self.version)
class Features(set):
    """A set of Feature objects that can render itself as one combined
    lib2to3 pattern.  It also acts like a mapping from feature name to
    the Feature object itself.
    """

    # Class-level default; update_mapping() rebinds a fresh instance dict.
    mapping = {}

    def update_mapping(self):
        """Rebuild the name -> Feature lookup table from current members.

        Called whenever the mapping needs to reflect the set's contents.
        """
        self.mapping = {feature.name: feature for feature in self}

    @property
    def PATTERN(self):
        """Render each member as "name=pattern" and join the alternatives
        into one pattern string suitable for lib2to3's patcomp.
        """
        self.update_mapping()
        return " |\n".join(pattern_unformatted % (feature.name, feature._pattern)
                           for feature in self)

    def __getitem__(self, key):
        """Mapping-style access: return the Feature registered under *key*."""
        return self.mapping[key]
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/feature_base.py | feature_base.py |
"""
Fixer for complicated imports
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, String, FromImport, Newline, Comma
from ..fixer_util import token, syms, Leaf, Node, Star, indentation, ImportAsName
# Names that Python 2's Tkinter-based dialog modules re-export from Tkinter
# via star-imports; shared by several PY2MODULES entries below.
TK_BASE_NAMES = ('ACTIVE', 'ALL', 'ANCHOR', 'ARC','BASELINE', 'BEVEL', 'BOTH',
                 'BOTTOM', 'BROWSE', 'BUTT', 'CASCADE', 'CENTER', 'CHAR',
                 'CHECKBUTTON', 'CHORD', 'COMMAND', 'CURRENT', 'DISABLED',
                 'DOTBOX', 'E', 'END', 'EW', 'EXCEPTION', 'EXTENDED', 'FALSE',
                 'FIRST', 'FLAT', 'GROOVE', 'HIDDEN', 'HORIZONTAL', 'INSERT',
                 'INSIDE', 'LAST', 'LEFT', 'MITER', 'MOVETO', 'MULTIPLE', 'N',
                 'NE', 'NO', 'NONE', 'NORMAL', 'NS', 'NSEW', 'NUMERIC', 'NW',
                 'OFF', 'ON', 'OUTSIDE', 'PAGES', 'PIESLICE', 'PROJECTING',
                 'RADIOBUTTON', 'RAISED', 'READABLE', 'RIDGE', 'RIGHT',
                 'ROUND', 'S', 'SCROLL', 'SE', 'SEL', 'SEL_FIRST', 'SEL_LAST',
                 'SEPARATOR', 'SINGLE', 'SOLID', 'SUNKEN', 'SW', 'StringTypes',
                 'TOP', 'TRUE', 'TclVersion', 'TkVersion', 'UNDERLINE',
                 'UNITS', 'VERTICAL', 'W', 'WORD', 'WRITABLE', 'X', 'Y', 'YES',
                 'wantobjects')

# Maps each candidate Python 2 module to the public names it provides.
# new_package() consults this to decide which py2k module supplies a given
# imported name.
PY2MODULES = {
    'urllib2' : (
        'AbstractBasicAuthHandler', 'AbstractDigestAuthHandler',
        'AbstractHTTPHandler', 'BaseHandler', 'CacheFTPHandler',
        'FTPHandler', 'FileHandler', 'HTTPBasicAuthHandler',
        'HTTPCookieProcessor', 'HTTPDefaultErrorHandler',
        'HTTPDigestAuthHandler', 'HTTPError', 'HTTPErrorProcessor',
        'HTTPHandler', 'HTTPPasswordMgr',
        'HTTPPasswordMgrWithDefaultRealm', 'HTTPRedirectHandler',
        'HTTPSHandler', 'OpenerDirector', 'ProxyBasicAuthHandler',
        'ProxyDigestAuthHandler', 'ProxyHandler', 'Request',
        'StringIO', 'URLError', 'UnknownHandler', 'addinfourl',
        'build_opener', 'install_opener', 'parse_http_list',
        'parse_keqv_list', 'randombytes', 'request_host', 'urlopen'),
    'urllib' : (
        'ContentTooShortError', 'FancyURLopener','URLopener',
        'basejoin', 'ftperrors', 'getproxies',
        'getproxies_environment', 'localhost', 'pathname2url',
        'quote', 'quote_plus', 'splitattr', 'splithost',
        'splitnport', 'splitpasswd', 'splitport', 'splitquery',
        'splittag', 'splittype', 'splituser', 'splitvalue',
        'thishost', 'unquote', 'unquote_plus', 'unwrap',
        'url2pathname', 'urlcleanup', 'urlencode', 'urlopen',
        'urlretrieve',),
    'urlparse' : (
        'parse_qs', 'parse_qsl', 'urldefrag', 'urljoin',
        'urlparse', 'urlsplit', 'urlunparse', 'urlunsplit'),
    'dbm' : (
        'ndbm', 'gnu', 'dumb'),
    'anydbm' : (
        'error', 'open'),
    'whichdb' : (
        'whichdb',),
    'BaseHTTPServer' : (
        'BaseHTTPRequestHandler', 'HTTPServer'),
    'CGIHTTPServer' : (
        'CGIHTTPRequestHandler',),
    'SimpleHTTPServer' : (
        'SimpleHTTPRequestHandler',),
    'FileDialog' : TK_BASE_NAMES + (
        'FileDialog', 'LoadFileDialog', 'SaveFileDialog',
        'dialogstates', 'test'),
    'tkFileDialog' : (
        'Directory', 'Open', 'SaveAs', '_Dialog', 'askdirectory',
        'askopenfile', 'askopenfilename', 'askopenfilenames',
        'askopenfiles', 'asksaveasfile', 'asksaveasfilename'),
    'SimpleDialog' : TK_BASE_NAMES + (
        'SimpleDialog',),
    'tkSimpleDialog' : TK_BASE_NAMES + (
        'askfloat', 'askinteger', 'askstring', 'Dialog'),
    'SimpleXMLRPCServer' : (
        'CGIXMLRPCRequestHandler', 'SimpleXMLRPCDispatcher',
        'SimpleXMLRPCRequestHandler', 'SimpleXMLRPCServer',
        'list_public_methods', 'remove_duplicates',
        'resolve_dotted_attribute'),
    'DocXMLRPCServer' : (
        'DocCGIXMLRPCRequestHandler', 'DocXMLRPCRequestHandler',
        'DocXMLRPCServer', 'ServerHTMLDoc','XMLRPCDocGenerator'),
    }

# Maps a dotted py3k module name to the tuple of py2k modules whose contents
# were merged into it.  A key ending in ".__init__" marks a py3k package
# whose py2k ancestors include a plain module of the package's own name.
MAPPING = { 'urllib.request' :
                ('urllib2', 'urllib'),
            'urllib.error' :
                ('urllib2', 'urllib'),
            'urllib.parse' :
                ('urllib2', 'urllib', 'urlparse'),
            'dbm.__init__' :
                ('anydbm', 'whichdb'),
            'http.server' :
                ('CGIHTTPServer', 'SimpleHTTPServer', 'BaseHTTPServer'),
            'tkinter.filedialog' :
                ('tkFileDialog', 'FileDialog'),
            'tkinter.simpledialog' :
                ('tkSimpleDialog', 'SimpleDialog'),
            'xmlrpc.server' :
                ('DocXMLRPCServer', 'SimpleXMLRPCServer'),
            }
# Template fragments below are assembled by build_import_pattern() into one
# big lib2to3 pattern; each "{...}" slot is filled via str.format.

# helps match 'http', as in 'from http.server import ...'
simple_name = "name='{name}'"
# helps match 'server', as in 'from http.server import ...'
simple_attr = "attr='{attr}'"
# helps match 'HTTPServer', as in 'from http.server import HTTPServer'
simple_using = "using='{using}'"
# helps match 'urllib.request', as in 'import urllib.request'
dotted_name = "dotted_name=dotted_name< {fmt_name} '.' {fmt_attr} >"
# helps match 'http.server', as in 'http.server.HTTPServer(...)'
power_twoname = "pow=power< {fmt_name} trailer< '.' {fmt_attr} > trailer< '.' using=any > any* >"
# helps match 'dbm.whichdb', as in 'dbm.whichdb(...)'
power_onename = "pow=power< {fmt_name} trailer< '.' using=any > any* >"
# helps match 'from http.server import HTTPServer'
# also helps match 'from http.server import HTTPServer, SimpleHTTPRequestHandler'
# also helps match 'from http.server import *'
from_import = "from_import=import_from< 'from' {modules} 'import' (import_as_name< using=any 'as' renamed=any> | in_list=import_as_names< using=any* > | using='*' | using=NAME) >"
# helps match 'import urllib.request'
name_import = "name_import=import_name< 'import' ({fmt_name} | in_list=dotted_as_names< imp_list=any* >) >"

#############
# WON'T FIX #
#############
# The two templates below are matched only so transform() can emit a
# cannot_convert() warning; such imports bind a single name and have no
# unambiguous multi-module py2k equivalent.

# helps match 'import urllib.request as name'
name_import_rename = "name_import_rename=dotted_as_name< {fmt_name} 'as' renamed=any >"
# helps match 'from http import server'
from_import_rename = "from_import_rename=import_from< 'from' {fmt_name} 'import' ({fmt_attr} | import_as_name< {fmt_attr} 'as' renamed=any > | in_list=import_as_names< any* ({fmt_attr} | import_as_name< {fmt_attr} 'as' renamed=any >) any* >) >"
def all_modules_subpattern():
    """
    Build one parenthesized alternative matching every toplevel py3k name
    known to MAPPING (urllib, http, etc), in dotted form, plus the bare
    package name for every "pkg.__init__" entry.
    """
    split_mods = [mod.split(".") for mod in MAPPING]
    dotted_alts = [dotted_name.format(fmt_name=simple_name.format(name=head),
                                      fmt_attr=simple_attr.format(attr=tail))
                   for head, tail in split_mods]
    # "pkg.__init__" entries also match a bare "import pkg".
    bare_alts = [simple_name.format(name=head)
                 for head, tail in split_mods if tail == "__init__"]
    return "( " + " | ".join(dotted_alts) + " | " + " | ".join(bare_alts) + " )"
def all_candidates(name, attr, MAPPING=MAPPING):
    """
    Return every candidate py2k package for the py3k module *name*.*attr*.

    For a "pkg.__init__" entry, the bare py3k package name itself is also
    a valid candidate, so it is appended to the tuple.
    """
    dotted = name + '.' + attr
    assert dotted in MAPPING, "No matching package found."
    candidates = MAPPING[dotted]
    if attr == '__init__':
        candidates = candidates + (name,)
    return candidates
def new_package(name, attr, using, MAPPING=MAPPING, PY2MODULES=PY2MODULES):
    """
    Return the candidate py2k package for name.attr that provides the name
    *using*, or None when no candidate exports it.
    """
    for pkg in all_candidates(name, attr, MAPPING):
        if using in PY2MODULES[pkg]:
            return pkg
    return None
def build_import_pattern(mapping1, mapping2):
    """
    Yield the alternatives of a HUGE pattern matching every way the
    modules in *mapping1* can be imported or used.

    mapping1: dict mapping py3k modules to possible py2k replacements
    mapping2: dict mapping py2k modules to the names they provide
              (unused here; kept for interface compatibility)
    """
    # One catch-all "from <any known module> import ..." alternative first.
    yield from_import.format(modules=all_modules_subpattern())
    for py3k_mod in mapping1:
        # py3k: urllib.request, py2k: ('urllib2', 'urllib')
        head, tail = py3k_mod.split('.')
        fmt_head = simple_name.format(name=head)
        fmt_tail = simple_attr.format(attr=tail)
        fmt_dotted = dotted_name.format(fmt_name=fmt_head, fmt_attr=fmt_tail)
        yield name_import.format(fmt_name=fmt_dotted)
        yield power_twoname.format(fmt_name=fmt_head, fmt_attr=fmt_tail)
        if tail == '__init__':
            # Bare package imports/uses are only valid for __init__ entries.
            yield name_import.format(fmt_name=fmt_head)
            yield power_onename.format(fmt_name=fmt_head)
        yield name_import_rename.format(fmt_name=fmt_dotted)
        yield from_import_rename.format(fmt_name=fmt_head, fmt_attr=fmt_tail)
def name_import_replacement(name, attr):
    """
    Build an "import mod1, mod2, ..." node importing every py2k candidate
    package for name.attr.
    """
    children = [Name("import")]
    for candidate in all_candidates(name.value, attr.value):
        children.extend([Name(candidate, prefix=" "), Comma()])
    children.pop()  # drop the trailing comma
    return Node(syms.import_name, children)
class FixImports2(fixer_base.BaseFix):
    """Rewrite imports and attribute uses of merged py3k modules
    (urllib.request, http.server, ...) into the py2k modules that provide
    the same names, per MAPPING/PY2MODULES.
    """

    run_order = 4
    PATTERN = " | \n".join(build_import_pattern(MAPPING, PY2MODULES))

    def transform(self, node, results):
        # The patterns dictate which of these names will be defined
        name = results.get("name")
        attr = results.get("attr")
        if attr is None:
            # Bare package match ("dbm"); treat it as the __init__ entry.
            attr = Name("__init__")
        using = results.get("using")
        in_list = results.get("in_list")
        imp_list = results.get("imp_list")
        power = results.get("pow")
        # NOTE(review): "before"/"after" are never bound by any pattern
        # above and are unused below.
        before = results.get("before")
        after = results.get("after")
        d_name = results.get("dotted_name")
        # An import_stmt is always contained within a simple_stmt
        simple_stmt = node.parent
        # The parent is useful for adding new import_stmts
        parent = simple_stmt.parent
        idx = parent.children.index(simple_stmt)
        if any((results.get("from_import_rename") is not None,
                results.get("name_import_rename") is not None)):
            # "import x.y as z" / "from x import y": binds one name to a
            # module that maps to several py2k modules -- not convertible.
            self.cannot_convert(node, reason="ambiguity: import binds a single name")
        elif using is None and not in_list:
            # import urllib.request, single-name import
            replacement = name_import_replacement(name, attr)
            replacement.prefix = node.prefix
            node.replace(replacement)
        elif using is None:
            # import ..., urllib.request, math, http.sever, ...
            # Walk the dotted_as_names list, replacing each mapped entry
            # with a new import statement inserted after this one.
            for d_name in imp_list:
                if d_name.type == syms.dotted_name:
                    name = d_name.children[0]
                    attr = d_name.children[2]
                elif d_name.type == token.NAME and d_name.value + ".__init__" in MAPPING:
                    name = d_name
                    attr = Name("__init__")
                else:
                    continue
                if name.value + "." + attr.value not in MAPPING:
                    continue
                candidates = all_candidates(name.value, attr.value)
                children = [Name("import")]
                for c in candidates:
                    children.append(Name(c, prefix=" "))
                    children.append(Comma())
                children.pop()
                # Put in the new statement.
                indent = indentation(simple_stmt)
                next_stmt = Node(syms.simple_stmt, [Node(syms.import_name, children), Newline()])
                parent.insert_child(idx+1, next_stmt)
                parent.insert_child(idx+1, Leaf(token.INDENT, indent))
                # Remove the old imported name together with an adjacent
                # comma (following comma preferred, preceding as fallback).
                test_comma = d_name.next_sibling
                if test_comma and test_comma.type == token.COMMA:
                    test_comma.remove()
                elif test_comma is None:
                    test_comma = d_name.prev_sibling
                    if test_comma and test_comma.type == token.COMMA:
                        test_comma.remove()
                d_name.remove()
            # If every name was replaced, the original statement is empty.
            if not in_list.children:
                simple_stmt.remove()
        elif in_list is not None:
            ##########################################################
            # "from urllib.request import urlopen, urlretrieve, ..." #
            # Replace one import statement with potentially many.    #
            ##########################################################
            packages = dict([(n,[]) for n in all_candidates(name.value,
                                                            attr.value)])
            # Figure out what names need to be imported from what
            # Add them to a dict to be parsed once we're completely done
            for imported in using:
                if imported.type == token.COMMA:
                    continue
                if imported.type == syms.import_as_name:
                    test_name = imported.children[0].value
                    if len(imported.children) > 2:
                        # 'as' whatever
                        rename = imported.children[2].value
                    else:
                        rename = None
                elif imported.type == token.NAME:
                    test_name = imported.value
                    rename = None
                pkg = new_package(name.value, attr.value, test_name)
                packages[pkg].append((test_name, rename))
            # Parse the dict to create new import statements to replace this one
            imports = []
            for new_pkg, names in packages.items():
                if not names:
                    # Didn't import anything from that package, move along
                    continue
                new_names = []
                for test_name, rename in names:
                    if rename is None:
                        new_names.append(Name(test_name, prefix=" "))
                    else:
                        new_names.append(ImportAsName(test_name, rename, prefix=" "))
                    new_names.append(Comma())
                new_names.pop()
                imports.append(FromImport(new_pkg, new_names))
            # Replace this import statement with one of the others
            replacement = imports.pop()
            replacement.prefix = node.prefix
            node.replace(replacement)
            indent = indentation(simple_stmt)
            # Add the remainder of the imports as new statements.
            while imports:
                next_stmt = Node(syms.simple_stmt, [imports.pop(), Newline()])
                parent.insert_child(idx+1, next_stmt)
                parent.insert_child(idx+1, Leaf(token.INDENT, indent))
        elif using.type == token.STAR:
            # from urllib.request import *
            # Emit one star-import per candidate py2k package.
            nodes = [FromImport(pkg, [Star(prefix=" ")]) for pkg in
                     all_candidates(name.value, attr.value)]
            replacement = nodes.pop()
            replacement.prefix = node.prefix
            node.replace(replacement)
            indent = indentation(simple_stmt)
            while nodes:
                next_stmt = Node(syms.simple_stmt, [nodes.pop(), Newline()])
                parent.insert_child(idx+1, next_stmt)
                parent.insert_child(idx+1, Leaf(token.INDENT, indent))
        elif power is not None:
            # urllib.request.urlopen
            # Replace it with urllib2.urlopen
            pkg = new_package(name.value, attr.value, using.value)
            # Remove the trailer node that contains attr.
            if pkg:
                if attr.parent:
                    attr.parent.remove()
                name.replace(Name(pkg, prefix=name.prefix))
        elif using.type == token.NAME:
            # from urllib.request import urlopen
            pkg = new_package(name.value, attr.value, using.value)
            if attr.value == "__init__" and pkg == name.value:
                # Replacing "from abc import xyz" with "from abc import xyz"
                # Just leave it alone so as not to mess with other fixers
                return
            else:
                node.replace(FromImport(pkg, [using]))
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_imports2.py | fix_imports2.py |
"""
Fixer for "class Foo: ..." -> "class Foo(object): ..."
"""
from lib2to3 import fixer_base
from ..fixer_util import Node, Leaf, token, syms, LParen, RParen, Name
def insert_object(node, idx):
    """Insert "(object)" into *node*'s children at position *idx*.

    Each leaf is inserted at the same index, so inserting in reverse
    yields the final order: LParen, Name("object"), RParen.
    """
    for leaf in (RParen(), Name("object"), LParen()):
        node.insert_child(idx, leaf)
class FixNewstyle(fixer_base.BaseFix):
    """Turn old-style "class Foo:" into new-style "class Foo(object):"."""

    PATTERN = "classdef< 'class' NAME colon=':' any >"

    def transform(self, node, results):
        # Splice "(object)" in immediately before the colon.
        colon_leaf = results["colon"]
        insert_object(node, node.children.index(colon_leaf))
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_newstyle.py | fix_newstyle.py |
"""
Fixer for (metaclass=X) -> __metaclass__ = X
Some semantics (see PEP 3115) may be altered in the translation."""
from lib2to3 import fixer_base
from ..fixer_util import Name, syms, Node, Leaf, Newline, find_root, indentation, suitify
from lib2to3.pygram import token
def has_metaclass(parent):
    """Scan a classdef node for a "metaclass=X" keyword argument.

    Returns None when absent.  When the metaclass is the *only* argument
    (syms.argument), returns [argument_node, name_leaf, equal_leaf,
    value_node]; when it sits in an arglist, returns (preceding_comma,
    name_leaf, equal_leaf, value_node).  Either way the caller can remove
    every returned node and the last item is the metaclass value.
    """
    results = None
    for node in parent.children:
        kids = node.children
        if node.type == syms.argument:
            # "class X(metaclass=Y)": metaclass is the sole argument.
            if kids[0] == Leaf(token.NAME, "metaclass") and \
               kids[1] == Leaf(token.EQUAL, "=") and \
               kids[2]:
                #Hack to avoid "class X(=):" with this case.
                results = [node] + kids
                break
        elif node.type == syms.arglist:
            # Argument list... loop through it looking for:
            # Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)]
            for child in node.children:
                if results: break
                if child.type == token.COMMA:
                    #Store the last comma, which precedes the metaclass
                    comma = child
                elif type(child) == Node:
                    meta = equal = name = None
                    for arg in child.children:
                        if arg == Leaf(token.NAME, "metaclass"):
                            #We have the (metaclass) part
                            meta = arg
                        elif meta and arg == Leaf(token.EQUAL, "="):
                            #We have the (metaclass=) part
                            equal = arg
                        elif meta and equal:
                            #Here we go, we have (metaclass=X)
                            name = arg
                            # NOTE(review): "comma" is only bound if a COMMA
                            # was seen earlier in the arglist; a metaclass
                            # kwarg appearing first would raise
                            # UnboundLocalError here -- TODO confirm the
                            # grammar makes that impossible.
                            results = (comma, meta, equal, name)
                            break
    return results
class FixMetaclass(fixer_base.BaseFix):
    """Rewrite "class X(metaclass=Y)" as a "__metaclass__ = Y" statement
    at the top of the class body (see PEP 3115 for semantic caveats).
    """

    PATTERN = """
    classdef<any*>
    """

    def transform(self, node, results):
        meta_results = has_metaclass(node)
        if not meta_results: return
        # Remove every node that made up the keyword argument.
        for meta in meta_results:
            meta.remove()
        target = Leaf(token.NAME, "__metaclass__")
        equal = Leaf(token.EQUAL, "=", prefix=" ")
        # meta is the last item in what was returned by has_metaclass(): name
        name = meta
        name.prefix = " "
        stmt_node = Node(syms.atom, [target, equal, name])
        # Ensure the class body is an indented suite we can insert into.
        suitify(node)
        for item in node.children:
            if item.type == syms.suite:
                for stmt in item.children:
                    if stmt.type == token.INDENT:
                        # Insert, in reverse order, the statement, a newline,
                        # and an indent right after the first indented line
                        loc = item.children.index(stmt) + 1
                        # Keep consistent indentation form
                        ident = Leaf(token.INDENT, stmt.value)
                        item.insert_child(loc, ident)
                        item.insert_child(loc, Newline())
                        item.insert_child(loc, stmt_node)
                        break
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_metaclass.py | fix_metaclass.py |
"""
Fixer for:
(a,)* *b (,c)* [,] = s
for (a,)* *b (,c)* [,] in d: ...
"""
from lib2to3 import fixer_base
from itertools import count
from ..fixer_util import Assign, Comma, Call, Newline, Name, Number, indentation, suitify, commatize, token, syms, Node, Leaf
def assignment_source(num_pre, num_post, LISTNAME, ITERNAME):
    """
    Accepts num_pre and num_post, which are counts of values
    before and after the starg (not including the starg)
    Returns a source fit for Assign() from fixer_util

    The produced expression is (textually):
        LISTNAME[:num_pre] + [LISTNAME[num_pre:-num_post]] + LISTNAME[-num_post:]
    with the first/last terms omitted when the respective count is zero,
    and empty subscript bounds when pre/post are zero.
    """
    children = []
    pre = str(num_pre)
    post = str(num_post)
    # This code builds the assignment source from lib2to3 tree primitives.
    # It's not very readable, but it seems like the most correct way to do it.
    if num_pre > 0:
        # LISTNAME[:num_pre]
        pre_part = Node(syms.power, [Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, "["), Node(syms.subscript, [Leaf(token.COLON, ":"), Number(pre)]), Leaf(token.RSQB, "]")])])
        children.append(pre_part)
        children.append(Leaf(token.PLUS, "+", prefix=" "))
    # [LISTNAME[num_pre:-num_post]] -- the starred target gets a list slice;
    # Leaf(1, "") stands in for an omitted bound.
    main_part = Node(syms.power, [Leaf(token.LSQB, "[", prefix=" "), Name(LISTNAME), Node(syms.trailer, [Leaf(token.LSQB, "["), Node(syms.subscript, [Number(pre) if num_pre > 0 else Leaf(1, ""), Leaf(token.COLON, ":"), Node(syms.factor, [Leaf(token.MINUS, "-"), Number(post)]) if num_post > 0 else Leaf(1, "")]), Leaf(token.RSQB, "]"), Leaf(token.RSQB, "]")])])
    children.append(main_part)
    if num_post > 0:
        children.append(Leaf(token.PLUS, "+", prefix=" "))
        # LISTNAME[-num_post:]
        post_part = Node(syms.power, [Name(LISTNAME, prefix=" "), Node(syms.trailer, [Leaf(token.LSQB, "["), Node(syms.subscript, [Node(syms.factor, [Leaf(token.MINUS, "-"), Number(post)]), Leaf(token.COLON, ":")]), Leaf(token.RSQB, "]")])])
        children.append(post_part)
    source = Node(syms.arith_expr, children)
    return source
class FixUnpacking(fixer_base.BaseFix):
    """Rewrite py3k extended iterable unpacking ("a, *b, c = ...") into
    py2k-compatible slicing over a temporary list, both in assignments
    (explicit context) and for-loop targets (implicit context).
    """

    PATTERN = """
    expl=expr_stmt< testlist_star_expr<
        pre=(any ',')*
            star_expr< '*' name=NAME >
        post=(',' any)* [','] > '=' source=any > |
    impl=for_stmt< 'for' lst=exprlist<
        pre=(any ',')*
            star_expr< '*' name=NAME >
        post=(',' any)* [','] > 'in' it=any ':' suite=any>"""

    def fix_explicit_context(self, node, results):
        """Build the two statements replacing "pre, *name, post = source"."""
        pre, name, post, source = (results.get(n) for n in ("pre", "name", "post", "source"))
        pre = [n.clone() for n in pre if n.type == token.NAME]
        name.prefix = " "
        post = [n.clone() for n in post if n.type == token.NAME]
        target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
        # to make the special-case fix for "*z, = ..." correct with the least
        # amount of modification, make the left-side into a guaranteed tuple
        target.append(Comma())
        source.prefix = ""
        # _3to2list = list(source)
        setup_line = Assign(Name(self.LISTNAME), Call(Name("list"), [source.clone()]))
        # pre..., name, post... = slices of _3to2list
        power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
        return setup_line, power_line

    def fix_implicit_context(self, node, results):
        """
        Only example of the implicit context is
        a for loop, so only fix that.
        """
        pre, name, post, it = (results.get(n) for n in ("pre", "name", "post", "it"))
        pre = [n.clone() for n in pre if n.type == token.NAME]
        name.prefix = " "
        post = [n.clone() for n in post if n.type == token.NAME]
        target = [n.clone() for n in commatize(pre + [name.clone()] + post)]
        # to make the special-case fix for "*z, = ..." correct with the least
        # amount of modification, make the left-side into a guaranteed tuple
        target.append(Comma())
        source = it.clone()
        source.prefix = ""
        # _3to2list = list(_3to2iter); the loop variable becomes _3to2iter.
        setup_line = Assign(Name(self.LISTNAME), Call(Name("list"), [Name(self.ITERNAME)]))
        power_line = Assign(target, assignment_source(len(pre), len(post), self.LISTNAME, self.ITERNAME))
        return setup_line, power_line

    def transform(self, node, results):
        """
        a,b,c,d,e,f,*g,h,i = range(100) changes to
        _3to2list = list(range(100))
        a,b,c,d,e,f,g,h,i, = _3to2list[:6] + [_3to2list[6:-2]] + _3to2list[-2:]
        and
        for a,b,*c,d,e in iter_of_iters: do_stuff changes to
        for _3to2iter in iter_of_iters:
            _3to2list = list(_3to2iter)
            a,b,c,d,e, = _3to2list[:2] + [_3to2list[2:-2]] + _3to2list[-2:]
            do_stuff
        """
        self.LISTNAME = self.new_name("_3to2list")
        self.ITERNAME = self.new_name("_3to2iter")
        expl, impl = results.get("expl"), results.get("impl")
        if expl is not None:
            # Assignment context: replace the statement with setup + unpack.
            setup_line, power_line = self.fix_explicit_context(node, results)
            setup_line.prefix = expl.prefix
            power_line.prefix = indentation(expl.parent)
            setup_line.append_child(Newline())
            parent = node.parent
            # node.remove() returns the removed child's former index.
            i = node.remove()
            parent.insert_child(i, power_line)
            parent.insert_child(i, setup_line)
        elif impl is not None:
            # For-loop context: push setup + unpack into the loop body.
            setup_line, power_line = self.fix_implicit_context(node, results)
            suitify(node)
            suite = [k for k in node.children if k.type == syms.suite][0]
            setup_line.prefix = ""
            # Reuse the suite's INDENT text so indentation stays consistent.
            power_line.prefix = suite.children[1].value
            suite.children[2].prefix = indentation(suite.children[2])
            suite.insert_child(2, Newline())
            suite.insert_child(2, power_line)
            suite.insert_child(2, Newline())
            suite.insert_child(2, setup_line)
            results.get("lst").replace(Name(self.ITERNAME, prefix=" "))
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_unpacking.py | fix_unpacking.py |
"""
Fixer for division: from __future__ import division if needed
"""
from lib2to3 import fixer_base
from lib3to2.fixer_util import token, future_import
def match_division(node):
    """
    Return True when *node* is a lone division SLASH leaf.

    __future__.division redefines the meaning of a single slash for division,
    so we match that and only that: a SLASH leaf with no adjacent SLASH
    sibling.  A sibling may be None when the leaf sits at the edge of its
    parent node, so guard the type checks instead of dereferencing
    unconditionally (the original crashed with AttributeError there).
    """
    slash = token.SLASH
    if node.type != slash:
        return False
    next_sib = node.next_sibling
    prev_sib = node.prev_sibling
    if next_sib is not None and next_sib.type == slash:
        return False
    if prev_sib is not None and prev_sib.type == slash:
        return False
    return True
class FixDivision(fixer_base.BaseFix):
    def match(self, node):
        """
        Match any single '/' division operator; transform() then adds the
        __future__ import.  NOTE(review): the original docstring claimed
        matches are discarded after the first, but no such short-circuit
        exists here -- presumably future_import() is idempotent; confirm
        in fixer_util.
        """
        return match_division(node)

    def transform(self, node, results):
        # Ensure "from __future__ import division" is present in the file.
        future_import("division", node)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_division.py | fix_division.py |
"""Fixer for 'raise E(V).with_traceback(T)' -> 'raise E, V, T'"""
from lib2to3 import fixer_base
from ..fixer_util import Comma, Node, Leaf, token, syms
class FixRaise(fixer_base.BaseFix):
    """Rewrite "raise E(V).with_traceback(T)" as "raise E, V, T" and strip
    unsupported "raise ... from ..." chaining (with a warning).
    """

    PATTERN = """
    raise_stmt< 'raise' (power< name=any [trailer< '(' val=any* ')' >]
        [trailer< '.' 'with_traceback' > trailer< '(' trc=any ')' >] > | any) ['from' chain=any] >"""

    def transform(self, node, results):
        name, val, trc = (results.get("name"), results.get("val"), results.get("trc"))
        chain = results.get("chain")
        if chain is not None:
            # Python 2 has no "raise ... from ...": drop the 'from' keyword
            # (the chain's previous sibling) and the chained expression.
            self.warning(node, "explicit exception chaining is not supported in Python 2")
            chain.prev_sibling.remove()
            chain.remove()
        if trc is not None:
            # val is a (possibly empty) list of call arguments; default the
            # value slot to None when E was called with no arguments.
            val = val[0] if val else Leaf(token.NAME, "None")
            val.prefix = trc.prefix = " "
            kids = [Leaf(token.NAME, "raise"), name.clone(), Comma(),
                    val.clone(), Comma(), trc.clone()]
            raise_stmt = Node(syms.raise_stmt, kids)
            node.replace(raise_stmt)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_raise.py | fix_raise.py |
"""
Fixer to remove function annotations
"""
from lib2to3 import fixer_base
from lib2to3.pgen2 import token
from lib2to3.fixer_util import syms
# One-time warning emitted whenever an annotation is stripped.
warning_text = "Removing function annotations completely."


def param_without_annotations(node):
    """Return the bare parameter name: the first child of an annotated
    (tname) parameter node, i.e. "x" out of "x: int".
    """
    return node.children[0]
class FixAnnotations(fixer_base.BaseFix):
    """Strip parameter and return annotations from function definitions,
    warning (once per run) that they are removed completely.
    """

    # Tracks whether the removal warning has already been emitted.
    warned = False

    def warn_once(self, node, reason):
        """Emit the removal warning only the first time it is needed."""
        if not self.warned:
            self.warned = True
            self.warning(node, reason=reason)

    PATTERN = """
    funcdef< 'def' any parameters< '(' [params=any] ')' > ['->' ret=any] ':' any* >
    """

    def transform(self, node, results):
        """
        This just strips annotations from the funcdef completely.
        """
        params = results.get("params")
        ret = results.get("ret")
        if ret is not None:
            # Drop the "-> type" return annotation (arrow leaf + type node).
            assert ret.prev_sibling.type == token.RARROW, "Invalid return annotation"
            self.warn_once(node, reason=warning_text)
            ret.prev_sibling.remove()
            ret.remove()
        if params is None: return
        if params.type == syms.typedargslist:
            # more than one param in a typedargslist
            for param in params.children:
                if param.type == syms.tname:
                    self.warn_once(node, reason=warning_text)
                    param.replace(param_without_annotations(param))
        elif params.type == syms.tname:
            # one param
            self.warn_once(node, reason=warning_text)
            params.replace(param_without_annotations(params))
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_annotations.py | fix_annotations.py |
"""
Warn about features that are not present in Python 2.5, giving a message that
points to the earliest version of Python 2.x (or 3.x, if none) that supports it
"""
from .feature_base import Feature, Features
from lib2to3 import fixer_base
FEATURES = [
#(FeatureName,
# FeaturePattern,
# FeatureMinVersion,
#),
("memoryview",
"power < 'memoryview' trailer < '(' any* ')' > any* >",
"2.7",
),
("numbers",
"""import_from< 'from' 'numbers' 'import' any* > |
import_name< 'import' ('numbers' dotted_as_names< any* 'numbers' any* >) >""",
"2.6",
),
("abc",
"""import_name< 'import' ('abc' dotted_as_names< any* 'abc' any* >) > |
import_from< 'from' 'abc' 'import' any* >""",
"2.6",
),
("io",
"""import_name< 'import' ('io' dotted_as_names< any* 'io' any* >) > |
import_from< 'from' 'io' 'import' any* >""",
"2.6",
),
("bin",
"power< 'bin' trailer< '(' any* ')' > any* >",
"2.6",
),
("formatting",
"power< any trailer< '.' 'format' > trailer< '(' any* ')' > >",
"2.6",
),
("nonlocal",
"global_stmt< 'nonlocal' any* >",
"3.0",
),
("with_traceback",
"trailer< '.' 'with_traceback' >",
"3.0",
),
]
class FixFeatures(fixer_base.BaseFix):
    """Warn about uses of features absent from Python 2.5, pointing at the
    earliest 2.x (or 3.x) version that supports each one.  Never rewrites
    the tree.
    """

    run_order = 9 # Wait until all other fixers have run to check for these

    # To avoid spamming, we only want to warn for each feature once.
    # (Class-level set: shared across instances of this fixer.)
    features_warned = set()

    # Build features from the list above
    features = Features([Feature(name, pattern, version) for \
                         name, pattern, version in FEATURES])

    PATTERN = features.PATTERN

    def match(self, node):
        to_ret = super(FixFeatures, self).match(node)
        # We want the mapping only to tell us the node's specific information.
        try:
            del to_ret['node']
        except Exception:
            # We want it to delete the 'node' from the results
            # if it's there, so we don't care if it fails for normal reasons.
            pass
        return to_ret

    def transform(self, node, results):
        # Each remaining result key is a feature name bound by PATTERN.
        for feature_name in results:
            if feature_name in self.features_warned:
                continue
            else:
                curr_feature = self.features[feature_name]
                # String comparison: "3.0" >= "3" marks py3k-only features,
                # which cannot be converted at all.
                if curr_feature.version >= "3":
                    fail = self.cannot_convert
                else:
                    fail = self.warning
                fail(node, reason=curr_feature.message_text())
                self.features_warned.add(feature_name)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_features.py | fix_features.py |
"""
Fixer for from __future__ import with_statement
"""
from lib2to3 import fixer_base
from ..fixer_util import future_import
class FixWith(fixer_base.BaseFix):
    """Add "from __future__ import with_statement" whenever a with
    statement appears, for Python 2.5 compatibility.
    """

    PATTERN = "with_stmt"

    def transform(self, node, results):
        future_import("with_statement", node)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_with.py | fix_with.py |
"""Fixer for 'g.throw(E(V).with_traceback(T))' -> 'g.throw(E, V, T)'"""
from lib2to3 import fixer_base
from lib2to3.pytree import Node, Leaf
from lib2to3.pgen2 import token
from lib2to3.fixer_util import Comma
class FixThrow(fixer_base.BaseFix):
    """Rewrite "g.throw(E(V).with_traceback(T))" as "g.throw(E, V, T)"."""

    PATTERN = """
    power< any trailer< '.' 'throw' >
        trailer< '(' args=power< exc=any trailer< '(' val=any* ')' >
            trailer< '.' 'with_traceback' > trailer< '(' trc=any ')' > > ')' > >
    """

    def transform(self, node, results):
        syms = self.syms
        exc, val, trc = (results["exc"], results["val"], results["trc"])
        # val is a (possibly empty) list of E's call arguments; default the
        # value slot to None when E was called with no arguments.
        val = val[0] if val else Leaf(token.NAME, "None")
        val.prefix = trc.prefix = " "
        kids = [exc.clone(), Comma(), val.clone(), Comma(), trc.clone()]
        args = results["args"]
        # NOTE(review): assigning .children directly bypasses pytree's
        # parent bookkeeping (unlike replace()/insert_child) -- presumably
        # acceptable because the clones are freshly made; confirm.
        args.children = kids
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_throw.py | fix_throw.py |
"""
Fixer for except E as T -> except E, T
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Comma
class FixExcept(fixer_base.BaseFix):
    """Rewrite "except E as T" as the Python 2 form "except E, T" by
    replacing the 'as' keyword with a comma.
    """

    PATTERN = """except_clause< 'except' any as='as' any >"""

    def transform(self, node, results):
        results["as"].replace(Comma())
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_except.py | fix_except.py |
#empty
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/__init__.py | __init__.py |
"""
Fixer for getfullargspec -> getargspec
"""
from lib2to3 import fixer_base
from ..fixer_util import Name
# Warning attached to every rewritten call site.
warn_msg = "some of the values returned by getfullargspec are not valid in Python 2 and have no equivalent."

class FixFullargspec(fixer_base.BaseFix):
    """Rename inspect.getfullargspec to getargspec, warning that the two
    return values are not fully equivalent.
    """

    PATTERN = "'getfullargspec'"

    def transform(self, node, results):
        self.warning(node, warn_msg)
        # Returning a node replaces the matched leaf in the tree.
        return Name("getargspec", prefix=node.prefix)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_fullargspec.py | fix_fullargspec.py |
"""
Fixer for:
def something(self):
super()
->
def something(self):
super(self.__class__, self)
"""
from lib2to3 import fixer_base
from ..fixer_util import Node, Leaf, token, syms, Name, Comma, Dot
# Shared ".__class__" trailer template; always clone()d before insertion.
dot_class = Node(syms.trailer, [Dot(), Name("__class__")])

def get_firstparam(super_node):
    """Return the name of the first parameter of the function enclosing a
    zero-argument super() call, or None when no suitable parameter exists.
    """
    parent = super_node.parent
    # Climb to the nearest enclosing funcdef (or the tree root).
    while parent.type != syms.funcdef and parent.parent:
        parent = parent.parent
    if parent.type != syms.funcdef:
        # super() called without arguments outside of a funcdef
        return None
    children = parent.children
    assert len(children) > 2
    # funcdef children: 'def', name, parameters, ...
    params = children[2]
    assert params.type == syms.parameters
    if len(params.children) < 3:
        # Function has no parameters, therefore super() makes no sense here...
        return None
    args = params.children[1]
    if args.type == token.NAME:
        # Exactly one parameter.
        return args.value
    elif args.type == syms.typedargslist:
        assert len(args.children) > 0
        if args.children[0].type == token.NAME:
            return args.children[0].value
        else:
            # Probably a '*'
            return None
    # Any other node type falls through and implicitly returns None.
def insert_args(name, rparen):
    """Insert "name.__class__, name" before the closing paren of the empty
    super() call.

    Each insert_child goes to the same index, so inserting in reverse
    yields the final order: <name>.__class__, <name>, ')'.
    """
    parent = rparen.parent
    idx = parent.children.index(rparen)
    parent.insert_child(idx, Name(name, prefix=" "))
    parent.insert_child(idx, Comma())
    parent.insert_child(idx, Node(syms.power, [Name(name), dot_class.clone()]))
class FixSuper(fixer_base.BaseFix):
    """Rewrite zero-argument super() inside a method as
    super(<first_param>.__class__, <first_param>).
    """

    PATTERN = "power< 'super' trailer< '(' rparen=')' > any* >"

    def transform(self, node, results):
        param = get_firstparam(node)
        if param is None:
            self.cannot_convert(node, "super() with no arguments must be called inside a function that has at least one parameter")
            return
        rparen = results["rparen"]
        insert_args(param, rparen)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_super.py | fix_super.py |
"""
Fixer for input(s) -> raw_input(s).
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name
class FixInput(fixer_base.BaseFix):
    """Rewrite calls to input(...) as raw_input(...), since Python 2's
    input() evaluates what it reads.
    """

    PATTERN = """
    power< name='input' trailer< '(' [any] ')' > any* >
    """

    def transform(self, node, results):
        name = results["name"]
        name.replace(Name("raw_input", prefix=name.prefix))
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_input.py | fix_input.py |
"""
Fixer for:
str -> unicode
chr -> unichr
"spam" -> u"spam"
"""
import re
from lib2to3.pgen2 import token
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name
_mapping = {"chr": "unichr", "str": "unicode"}
_literal_re = re.compile(r"[rR]?[\'\"]")
class FixStr(fixer_base.BaseFix):
    """Map 3.x text handling back to 2.x: str -> unicode, chr -> unichr,
    and plain string literals gain a u prefix."""

    order = "pre"
    run_order = 4 # Run this before bytes objects are converted to str objects

    PATTERN = "STRING | 'str' | 'chr'"

    def transform(self, node, results):
        replacement = node.clone()
        if node.type == token.STRING:
            # Only plain or raw literals (per _literal_re) get the u
            # prefix; anything else is returned untouched.
            if _literal_re.match(replacement.value):
                replacement.value = "u" + replacement.value
            return replacement
        elif node.type == token.NAME:
            # 'str' -> 'unicode', 'chr' -> 'unichr'
            assert replacement.value in _mapping
            replacement.value = _mapping[replacement.value]
            return replacement
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_str.py | fix_str.py |
"""
Fixer for:
int -> long
123 -> 123L
"""
import re
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, is_probably_builtin, Number
from lib2to3.pgen2 import token
baseMAPPING = {'b':2, 'o':8, 'x':16}
class FixInt(fixer_base.BaseFix):
    """Rewrite the builtin name int as long and decimal integer literals
    as long literals (123 -> 123L)."""

    explicit = True # In most cases, 3.x ints will work just like 2.x ints.

    PATTERN = "'int' | NUMBER"

    # Shared template leaf; cloned per use so prefixes stay independent.
    static_long = Name("long")

    def base(self, literal):
        """Returns the base of a valid py3k numeric literal."""
        literal = literal.strip()
        # "0", "00", ... and anything not starting with 0 are decimal.
        if not literal.startswith("0") or re.match(r"0+$",literal):
            return 10
        elif literal[1] not in "box":
            # Starts with 0 but is not a recognized 0b/0o/0x literal.
            return 0
        return baseMAPPING[literal[1]]

    def unmatch(self, node):
        """Don't match complex numbers, floats, or longs"""
        val = node.value
        #For whatever reason, some ints are being matched after we fix them.
        if val.endswith("L"):
            return "L"
        for bad in "jJ+-.":
            if bad in val: return bad

    def match(self, node):
        # Accept the normal pattern match only when unmatch() finds no
        # disqualifying character (float/complex/already-long).
        return super(FixInt, self).match(node) and not self.unmatch(node)

    def transform(self, node, results):
        val = node.value
        if node.type == token.NUMBER and self.base(val) == 10:
            # Decimal literal: append the 2.x long suffix.
            assert not val[-1] in "lL", "Invalid py3k literal: " + str(val)
            val += "L"
            return Number(val, prefix=node.prefix)
        elif is_probably_builtin(node):
            # The name 'int' used as the builtin: replace with 'long'.
            assert node.type == token.NAME, "Sanity check failed: " + str(val)
            new = self.static_long.clone()
            new.prefix = node.prefix
            return new
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_int.py | fix_int.py |
"""
Fixer for:
functools.reduce(f, it) -> reduce(f, it)
from functools import reduce -> (remove this line)
"""
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call
from lib2to3.pytree import Node, Leaf
from lib2to3.pgen2 import token
class FixReduce(fixer_base.BaseFix):
    """Turn functools.reduce(...) back into the builtin reduce() and
    remove "from functools import reduce" imports, which are unneeded
    (and would shadow nothing) in Python 2."""

    PATTERN = """
power< 'functools' trailer< '.' 'reduce' >
args=trailer< '(' arglist< any* > ')' > > |
imported=import_from< 'from' 'functools' 'import' 'reduce' > |
import_from< 'from' 'functools' 'import' import_as_names< any* in_list='reduce' any* > >
"""

    def transform(self, node, results):
        syms = self.syms
        args, imported = (results.get("args"), results.get("imported"))
        in_list = results.get("in_list")
        if imported:
            # Whole statement is "from functools import reduce": delete
            # it, tidying up any neighboring semicolons/newlines.
            next = imported.next_sibling
            prev = imported.prev_sibling
            parent = imported.parent
            if next and next.type == token.SEMI:
                # "from functools import reduce; other" -- drop the ';'.
                next.remove()
                next = imported.next_sibling
            imported.remove()
            if next is not None and next.type == token.NEWLINE:
                # nothing after from_import on the line
                if prev is not None:
                    if prev.type == token.SEMI:
                        prev.remove()
                elif parent.next_sibling is not None:
                    # nothing before from_import either
                    parent.next_sibling.prefix = imported.prefix
                    parent.remove()
        elif args:
            # Call site: functools.reduce(f, it) -> reduce(f, it).
            args = args.clone()
            prefix = node.prefix
            return Node(syms.power, [Leaf(token.NAME, "reduce"), args],
                        prefix=prefix)
        elif in_list:
            # "reduce" is one name in a longer import list: remove just
            # that name plus the comma beside it.
            next = in_list.next_sibling
            if next is not None:
                if next.type == token.COMMA:
                    next.remove()
            else:
                prev = in_list.prev_sibling
                if prev is not None:
                    if prev.type == token.COMMA:
                        prev.remove()
            in_list.remove()
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_reduce.py | fix_reduce.py |
"""
Fixer for:
it.__next__() -> it.next().
next(it) -> it.next().
"""
from lib2to3.pgen2 import token
from lib2to3.pygram import python_symbols as syms
from lib2to3 import fixer_base
from lib2to3.fixer_util import Name, Call, find_binding, Attr
bind_warning = "Calls to builtin next() possibly shadowed by global binding"
class FixNext(fixer_base.BaseFix):
    """Rewrite the 3.x iterator protocol back to 2.x spelling:
    it.__next__() -> it.next(), next(it) -> it.next(), and
    "def __next__" methods are renamed to "def next"."""

    PATTERN = """
power< base=any+ trailer< '.' attr='__next__' > any* >
|
power< head='next' trailer< '(' arg=any ')' > any* >
|
classdef< 'class' base=any+ ':'
suite< any*
funcdef< 'def'
attr='__next__'
parameters< '(' NAME ')' > any+ >
any* > >
"""

    def transform(self, node, results):
        assert results
        base = results.get("base")
        attr = results.get("attr")
        head = results.get("head")
        arg_ = results.get("arg")
        if arg_:
            # next(it) -> it.next(); the original prefix of "next" is
            # moved onto the new receiver expression.
            arg = arg_.clone()
            head.replace(Attr(Name(str(arg),prefix=head.prefix),
                              Name("next")))
            arg_.remove()
        elif base:
            # x.__next__ or "def __next__": just rename the attribute.
            attr.replace(Name("next", prefix=attr.prefix))
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/fixes/fix_next.py | fix_next.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_annotations(lib3to2FixerTestCase):
    """Tests for the fixer that strips 3.x parameter and return
    annotations (warnings about dropped annotations are ignored)."""
    fixer = "annotations"

    def test_return_annotations_alone(self):
        b = "def foo() -> 'bar': pass"
        a = "def foo(): pass"
        self.check(b, a, ignore_warnings=True)

        b = """
def foo() -> "bar":
print "baz"
print "what's next, again?"
"""
        a = """
def foo():
print "baz"
print "what's next, again?"
"""
        self.check(b, a, ignore_warnings=True)

    def test_single_param_annotations(self):
        b = "def foo(bar:'baz'): pass"
        a = "def foo(bar): pass"
        self.check(b, a, ignore_warnings=True)

        b = """
def foo(bar:"baz"="spam"):
print "what's next, again?"
print "whatever."
"""
        a = """
def foo(bar="spam"):
print "what's next, again?"
print "whatever."
"""
        self.check(b, a, ignore_warnings=True)

    def test_multiple_param_annotations(self):
        b = "def foo(bar:'spam'=False, baz:'eggs'=True, ham:False='spaghetti'): pass"
        a = "def foo(bar=False, baz=True, ham='spaghetti'): pass"
        self.check(b, a, ignore_warnings=True)

        b = """
def foo(bar:"spam"=False, baz:"eggs"=True, ham:False="spam"):
print "this is filler, just doing a suite"
print "suites require multiple lines."
"""
        a = """
def foo(bar=False, baz=True, ham="spam"):
print "this is filler, just doing a suite"
print "suites require multiple lines."
"""
        self.check(b, a, ignore_warnings=True)

    def test_mixed_annotations(self):
        b = "def foo(bar=False, baz:'eggs'=True, ham:False='spaghetti') -> 'zombies': pass"
        a = "def foo(bar=False, baz=True, ham='spaghetti'): pass"
        self.check(b, a, ignore_warnings=True)

        b = """
def foo(bar:"spam"=False, baz=True, ham:False="spam") -> 'air':
print "this is filler, just doing a suite"
print "suites require multiple lines."
"""
        a = """
def foo(bar=False, baz=True, ham="spam"):
print "this is filler, just doing a suite"
print "suites require multiple lines."
"""
        self.check(b, a, ignore_warnings=True)

        b = "def foo(bar) -> 'brains': pass"
        a = "def foo(bar): pass"
        self.check(b, a, ignore_warnings=True)

    def test_unchanged(self):
        # Functions without annotations must be left alone.
        s = "def foo(): pass"
        self.unchanged(s)

        s = """
def foo():
pass
pass
"""
        self.unchanged(s)

        s = """
def foo(bar=baz):
pass
pass
"""
        self.unchanged(s)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_annotations.py | test_annotations.py |
#!/usr/bin/env python3.1
"""
Runs all tests in the same directory named test_*.py
"""
import os.path
import os
from lib2to3 import pygram
from lib2to3.tests import support
from lib2to3.tests.test_fixers import FixerTestCase
class lib3to2FixerTestCase(FixerTestCase):
    """Base class for all 3to2 fixer tests: reuses lib2to3's FixerTestCase
    machinery but points it at the lib3to2 fixer package."""
    def setUp(self, fix_list=None, fixer_pkg="lib3to2"):
        super(lib3to2FixerTestCase, self).setUp(fixer_pkg=fixer_pkg)
        # Test input is Python 3 code, so parse with the grammar in which
        # print is a function rather than a statement.
        self.refactor.driver.grammar = pygram.python_grammar_no_print_statement
if __name__ == "__main__":
    # Discover and run every sibling test_*.py module, skipping this file
    # itself to avoid importing/running it recursively.
    for module in os.listdir(os.path.split(__file__)[0]):
        if module.endswith('.py') and module.startswith('test_'):
            module = os.path.split(module)[1][:-3]
            if module != os.path.split(__file__)[1][:-3]:
                _module = __import__(module)
                support.run_all_tests(_module)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_all_fixers.py | test_all_fixers.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_dctsetcomp(lib3to2FixerTestCase):
    """Tests for rewriting dict/set comprehensions as dict()/set() calls
    wrapping generator expressions (2.5-compatible)."""
    fixer = "dctsetcomp"

    def test_dictcomp_straightforward(self):
        b = "{key:val for (key, val) in tuple_of_stuff}"
        a = "dict((key, val) for (key, val) in tuple_of_stuff)"
        self.check(b, a)

    def test_dictcomp_nestedstuff_noif(self):
        b = "{hashlify(spam):valuate(ham).whatsthis(eggs) for \
(spam, ham, eggs) in spam_iterator}"
        a = "dict((hashlify(spam), valuate(ham).whatsthis(eggs)) for \
(spam, ham, eggs) in spam_iterator)"
        self.check(b, a)

    def test_dictcomp_nestedstuff_withif(self):
        b = "{moo:(lambda new: None)(cow) for (moo, cow) in \
farm_animal['cow'] if has_milk()}"
        a = "dict((moo, (lambda new: None)(cow)) for (moo, cow) in \
farm_animal['cow'] if has_milk())"
        self.check(b, a)

    def test_setcomps(self):
        """
setcomp fixer should keep everything inside the same
and only replace the {} with a set() call on a gencomp
"""
        tests = []
        tests.append("milk.price for milk in find_milk(store)")
        tests.append("compute_nth_prime(generate_complicated_thing(\
n.value(hashlifier))) for n in my_range_func(1, (how_far+offset))")
        tests.append("compute_nth_prime(generate_complicated_thing(\
n.value(hashlifier))) for n in my_range_func(1, (how_far+offset))\
if a==b.spam()")

        for comp in tests:
            b = "{%s}" % comp
            a = "set(%s)" % comp
            self.check(b, a)

    def test_prefixes(self):
        # Whitespace/prefix before the comprehension must be preserved.
        b = "spam = {foo for foo in bar}"
        a = "spam = set(foo for foo in bar)"
        self.check(b, a)

        b = "spam = {foo:bar for (foo, bar) in baz}"
        a = "spam = dict((foo, bar) for (foo, bar) in baz)"
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_dctsetcomp.py | test_dctsetcomp.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_collections(lib3to2FixerTestCase):
    """Tests for mapping collections.UserDict/UserList/UserString back to
    their Python 2 standalone modules."""
    fixer = "collections"

    def test_from_UserDict(self):
        b = """
from collections import UserDict"""
        a = """
from UserDict import UserDict"""
        self.check(b, a)

    def test_from_UserList(self):
        b = """
from collections import UserList"""
        a = """
from UserList import UserList"""
        self.check(b, a)

    def test_from_UserString(self):
        b = """
from collections import UserString"""
        a = """
from UserString import UserString"""
        self.check(b, a)

    def test_using_UserDict(self):
        # Attribute usage forces an import of the 2.x module to be added.
        b = """
class Scapegoat(collections.UserDict):
pass"""
        a = """import UserDict
class Scapegoat(UserDict.UserDict):
pass"""
        self.check(b, a)

    def test_using_UserList(self):
        b = """
class Scapegoat(collections.UserList):
pass"""
        a = """import UserList
class Scapegoat(UserList.UserList):
pass"""
        self.check(b, a)

    def test_using_UserString(self):
        b = """
class Scapegoat(collections.UserString):
pass"""
        a = """import UserString
class Scapegoat(UserString.UserString):
pass"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_collections.py | test_collections.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_unpacking(lib3to2FixerTestCase):
    """Tests for rewriting 3.x starred unpacking (a, *b, c = ...) into
    equivalent 2.x slicing via _3to2list/_3to2iter temporaries."""
    fixer = 'unpacking'

    def test_unchanged(self):
        # Star in a parameter list, plain loops, and plain unpacking are
        # all valid 2.x already.
        s = "def f(*args): pass"
        self.unchanged(s)

        s = "for i in range(s): pass"
        self.unchanged(s)

        s = "a, b, c = range(100)"
        self.unchanged(s)

    def test_forloop(self):
        b = """
for a, b, c, *d, e in two_dim_array: pass"""
        a = """
for _3to2iter in two_dim_array:
_3to2list = list(_3to2iter)
a, b, c, d, e, = _3to2list[:3] + [_3to2list[3:-1]] + _3to2list[-1:]
pass"""
        self.check(b, a)

        b = """
for a, b, *c in some_thing:
do_stuff"""
        a = """
for _3to2iter in some_thing:
_3to2list = list(_3to2iter)
a, b, c, = _3to2list[:2] + [_3to2list[2:]]
do_stuff"""
        self.check(b, a)

        b = """
for *a, b, c, d, e, f, g in some_thing:
pass"""
        a = """
for _3to2iter in some_thing:
_3to2list = list(_3to2iter)
a, b, c, d, e, f, g, = [_3to2list[:-6]] + _3to2list[-6:]
pass"""
        self.check(b, a)

    def test_assignment(self):
        b = """
a, *b, c = range(100)"""
        a = """
_3to2list = list(range(100))
a, b, c, = _3to2list[:1] + [_3to2list[1:-1]] + _3to2list[-1:]"""
        self.check(b, a)

        b = """
a, b, c, d, *e, f, g = letters"""
        a = """
_3to2list = list(letters)
a, b, c, d, e, f, g, = _3to2list[:4] + [_3to2list[4:-2]] + _3to2list[-2:]"""
        self.check(b, a)

        b = """
*e, f, g = letters"""
        a = """
_3to2list = list(letters)
e, f, g, = [_3to2list[:-2]] + _3to2list[-2:]"""
        self.check(b, a)

        b = """
a, b, c, d, *e = stuff"""
        a = """
_3to2list = list(stuff)
a, b, c, d, e, = _3to2list[:4] + [_3to2list[4:]]"""
        self.check(b, a)

        b = """
*z, = stuff"""
        a = """
_3to2list = list(stuff)
z, = [_3to2list[:]]"""
        self.check(b, a)

        b = """
while True:
a, *b, c = stuff
other_stuff = make_more_stuff(a, b, c)"""
        a = """
while True:
_3to2list = list(stuff)
a, b, c, = _3to2list[:1] + [_3to2list[1:-1]] + _3to2list[-1:]
other_stuff = make_more_stuff(a, b, c)"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_unpacking.py | test_unpacking.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_printfunction(lib3to2FixerTestCase):
    """Tests for the fixer that prepends "from __future__ import
    print_function" whenever the builtin print() is called."""
    fixer = "printfunction"

    def test_generic(self):
        b = """print()"""
        a = """from __future__ import print_function\nprint()"""
        self.check(b,a)

    def test_literal(self):
        b = """print('spam')"""
        a = """from __future__ import print_function\nprint('spam')"""
        self.check(b,a)

    def test_not_builtin_unchanged(self):
        # A method named print on another object is not the builtin.
        s = "this.shouldnt.be.changed.because.it.isnt.builtin.print()"
        self.unchanged(s)

    #XXX: Quoting this differently than triple-quotes, because with newline
    #XXX: setting, I can't quite get the triple-quoted versions to line up.
    def test_arbitrary_printing(self):
        b = "import dinosaur.skull\nimport sys\nprint"\
"(skull.jaw, skull.jaw.biteforce, file=sys.stderr)"
        a = "from __future__ import print_function\n"\
"import dinosaur.skull\nimport sys\nprint"\
"(skull.jaw, skull.jaw.biteforce, file=sys.stderr)"
        self.check(b, a)

    def test_long_arglist(self):
        b = "print(spam, spam, spam, spam, spam, baked_beans, spam, spam,"\
"spam, spam, sep=', spam, ', end=wonderful_spam)\nprint()"
        a = "from __future__ import print_function\n"\
"print(spam, spam, spam, spam, spam, baked_beans, spam, spam,"\
"spam, spam, sep=', spam, ', end=wonderful_spam)\nprint()"
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_printfunction.py | test_printfunction.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_memoryview(lib3to2FixerTestCase):
    """Tests for rewriting memoryview(...) as Python 2's buffer(...)."""
    fixer = "memoryview"

    def test_simple(self):
        b = """x = memoryview(y)"""
        a = """x = buffer(y)"""
        self.check(b, a)

    def test_slicing(self):
        b = """x = memoryview(y)[1:4]"""
        a = """x = buffer(y)[1:4]"""
        self.check(b, a)

    def test_prefix_preservation(self):
        # Whitespace inside the call must survive the rename.
        b = """x = memoryview( y )[1:4]"""
        a = """x = buffer( y )[1:4]"""
        self.check(b, a)

    def test_nested(self):
        b = """x = list(memoryview(y)[1:4])"""
        a = """x = list(buffer(y)[1:4])"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_memoryview.py | test_memoryview.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_open(lib3to2FixerTestCase):
    """Tests for adding "from io import open" when builtin open() is used
    with 3.x-only keyword arguments."""
    fixer = "open"

    def test_imports(self):
        b = """new_file = open("some_filename", newline="\\r")"""
        a = """from io import open\nnew_file = open("some_filename", newline="\\r")"""
        self.check(b, a)

    def test_doesnt_import(self):
        # open as an attribute of another object is not the builtin.
        s = """new_file = nothing.open("some_filename")"""
        self.unchanged(s)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_open.py | test_open.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_throw(lib3to2FixerTestCase):
    """Tests for rewriting generator.throw(E(V).with_traceback(T)) as the
    2.x three-argument form g.throw(E, V, T)."""
    fixer = 'throw'

    def test_unchanged(self):
        """
Due to g.throw(E(V)) being valid in 2.5, this fixer fortunately doesn't
need to touch code that constructs exception objects without explicit
tracebacks.
"""
        s = """g.throw(E(V))"""
        self.unchanged(s)

        s = """omg.throw(E("What?"))"""
        self.unchanged(s)

    def test_what_doesnt_work(self):
        """
These tests should fail, but don't. TODO: Uncomment successfully.

One potential way of making these work is a separate fix_exceptions
with a lower run order than fix_throw, to communicate to fix_throw how
to sort out that third argument.

These items are currently outside the scope of 3to2.
"""
        b = """
E = BaseException(V).with_traceback(T)
gen.throw(E)
"""
        #a = """
        #E = BaseException(V)
        #gen.throw(E, V, T)
        #"""
        #self.check(b, a)
        self.unchanged(b)

        b = """
E = BaseException(V)
E.__traceback__ = S
E.__traceback__ = T
gen.throw(E)
"""
        #a = """
        #E = BaseException(V)
        #gen.throw(E, V, T)
        #self.check(b, a)
        self.unchanged(b)

    def test_traceback(self):
        """
This stuff currently works, and is the opposite counterpart to the
2to3 version of fix_throw.
"""
        b = """myGen.throw(E(V).with_traceback(T))"""
        a = """myGen.throw(E, V, T)"""
        self.check(b, a)

        b = """fling.throw(E().with_traceback(T))"""
        a = """fling.throw(E, None, T)"""
        self.check(b, a)

        b = """myVar.throw(E("Sorry, you cannot do that.").with_traceback(T))"""
        a = """myVar.throw(E, "Sorry, you cannot do that.", T)"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_throw.py | test_throw.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_classdecorator(lib3to2FixerTestCase):
    """Tests for rewriting 3.x/2.6 class decorators as an explicit
    reassignment after the class definition."""
    fixer = "classdecorator"

    def test_basic_functionality(self):
        b = """
@decor
class decorated(object):
pass"""
        a = """
class decorated(object):
pass
decorated = decor(decorated)"""
        self.check(b, a)

    def test_whitespace(self):
        b = """
@decor
class decorated(object):
pass
print("hello, there!")"""
        a = """
class decorated(object):
pass
decorated = decor(decorated)

print("hello, there!")"""
        self.check(b, a)

    def test_chained(self):
        # Stacked decorators apply bottom-up, so they nest innermost-last.
        b = """
@f1
@f2
@f3
class wow(object):
do_cool_stuff_here()"""
        a = """
class wow(object):
do_cool_stuff_here()
wow = f1(f2(f3(wow)))"""
        self.check(b, a)

    def test_dots_and_parens(self):
        b = """
@should_work.with_dots(and_parens)
@dotted.name
@with_args(in_parens)
class awesome(object):
inconsequential_stuff()"""
        a = """
class awesome(object):
inconsequential_stuff()
awesome = should_work.with_dots(and_parens)(dotted.name(with_args(in_parens)(awesome)))"""
        self.check(b, a)

    def test_indentation(self):
        b = """
if 1:
if 2:
if 3:
@something
@something_else
class foo(bar):
do_stuff()
elif 4:
pass"""
        a = """
if 1:
if 2:
if 3:
class foo(bar):
do_stuff()
foo = something(something_else(foo))
elif 4:
pass"""
        # BUG FIX: this assertion was missing, so the test built its
        # fixtures but never verified anything.
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_classdecorator.py | test_classdecorator.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_intern(lib3to2FixerTestCase):
    """Tests for mapping sys.intern(...) back to the 2.x builtin
    intern(...) and pruning the corresponding imports."""
    fixer = "intern"
    #XXX: Does not remove unused "import sys" lines.

    def test_prefix_preservation(self):
        b = """import sys\nx = sys.intern( a )"""
        a = """import sys\nx = intern( a )"""
        self.check(b, a)

        b = """import sys\ny = sys.intern("b" # test
)"""
        a = """import sys\ny = intern("b" # test
)"""
        self.check(b, a)

        b = """import sys\nz = sys.intern(a+b+c.d, )"""
        a = """import sys\nz = intern(a+b+c.d, )"""
        self.check(b, a)

    def test(self):
        b = """from sys import intern\nx = intern(a)"""
        a = """\nx = intern(a)"""
        self.check(b, a)

        b = """import sys\nz = sys.intern(a+b+c.d,)"""
        a = """import sys\nz = intern(a+b+c.d,)"""
        self.check(b, a)

        b = """import sys\nsys.intern("y%s" % 5).replace("y", "")"""
        a = """import sys\nintern("y%s" % 5).replace("y", "")"""
        self.check(b, a)

    # These should not be refactored

    def test_multimports(self):
        # Only the intern name is dropped from a multi-name import.
        b = """from sys import intern, path"""
        a = """from sys import path"""
        self.check(b, a)

        b = """from sys import path, intern"""
        a = """from sys import path"""
        self.check(b, a)

        b = """from sys import argv, intern, path"""
        a = """from sys import argv, path"""
        self.check(b, a)

    def test_unchanged(self):
        # Calls whose signature cannot be the builtin intern are left be.
        s = """intern(a=1)"""
        self.unchanged(s)

        s = """intern(f, g)"""
        self.unchanged(s)

        s = """intern(*h)"""
        self.unchanged(s)

        s = """intern(**i)"""
        self.unchanged(s)

        s = """intern()"""
        self.unchanged(s)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_intern.py | test_intern.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_next(lib3to2FixerTestCase):
    """Tests for rewriting next(it)/it.__next__() as it.next() and
    renaming __next__ methods, including prefix/comment preservation."""
    fixer = "next"

    def test_1(self):
        b = """next(it)"""
        a = """it.next()"""
        self.check(b, a)

    def test_2(self):
        b = """next(a.b.c.d)"""
        a = """a.b.c.d.next()"""
        self.check(b, a)

    def test_3(self):
        b = """next((a + b))"""
        a = """(a + b).next()"""
        self.check(b, a)

    def test_4(self):
        b = """next(a())"""
        a = """a().next()"""
        self.check(b, a)

    def test_5(self):
        b = """next(a()) + b"""
        a = """a().next() + b"""
        self.check(b, a)

    def test_6(self):
        b = """c( next(a()) + b)"""
        a = """c( a().next() + b)"""
        self.check(b, a)

    def test_prefix_preservation_1(self):
        b = """
for a in b:
foo(a)
next(a)
"""
        a = """
for a in b:
foo(a)
a.next()
"""
        self.check(b, a)

    def test_prefix_preservation_2(self):
        # Comments preceding the call must survive the rewrite.
        b = """
for a in b:
foo(a) # abc
# def
next(a)
"""
        a = """
for a in b:
foo(a) # abc
# def
a.next()
"""
        self.check(b, a)

    def test_prefix_preservation_3(self):
        b = """
next = 5
for a in b:
foo(a)
a.__next__()
"""
        a = """
next = 5
for a in b:
foo(a)
a.next()
"""
        self.check(b, a)

    def test_prefix_preservation_4(self):
        b = """
next = 5
for a in b:
foo(a) # abc
# def
a.__next__()
"""
        a = """
next = 5
for a in b:
foo(a) # abc
# def
a.next()
"""
        self.check(b, a)

    def test_prefix_preservation_5(self):
        b = """
next = 5
for a in b:
foo(foo(a), # abc
a.__next__())
"""
        a = """
next = 5
for a in b:
foo(foo(a), # abc
a.next())
"""
        self.check(b, a)

    def test_prefix_preservation_6(self):
        b = """
for a in b:
foo(foo(a), # abc
next(a))
"""
        a = """
for a in b:
foo(foo(a), # abc
a.next())
"""
        self.check(b, a)

    def test_method_1(self):
        # "def __next__" is renamed to "def next" inside classes.
        b = """
class A:
def __next__(self):
pass
"""
        a = """
class A:
def next(self):
pass
"""
        self.check(b, a)

    def test_method_2(self):
        b = """
class A(object):
def __next__(self):
pass
"""
        a = """
class A(object):
def next(self):
pass
"""
        self.check(b, a)

    def test_method_3(self):
        b = """
class A:
def __next__(x):
pass
"""
        a = """
class A:
def next(x):
pass
"""
        self.check(b, a)

    def test_method_4(self):
        b = """
class A:
def __init__(self, foo):
self.foo = foo

def __next__(self):
pass

def __iter__(self):
return self
"""
        a = """
class A:
def __init__(self, foo):
self.foo = foo

def next(self):
pass

def __iter__(self):
return self
"""
        self.check(b, a)

    def test_noncall_access_1(self):
        # Bare attribute access (no call) is also renamed.
        b = """gnext = g.__next__"""
        a = """gnext = g.next"""
        self.check(b, a)

    def test_noncall_access_2(self):
        b = """f(g.__next__ + 5)"""
        a = """f(g.next + 5)"""
        self.check(b, a)

    def test_noncall_access_3(self):
        b = """f(g().__next__ + 5)"""
        a = """f(g().next + 5)"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_next.py | test_next.py |
from test_all_fixers import lib3to2FixerTestCase
from itertools import count
class Test_division(lib3to2FixerTestCase):
fixer = "division"
counter = count(1)
divisions = [("1", "2"),
("spam","eggs"),
("lambda a: a(4)", "my_foot(your_face)"),
("temp(bob)", "4"),
("29.4", "green()")]
for top,bottom in divisions:
exec("def test_%d(self):\n b = \"%s/%s\"\n a = \"from __future__ import division\\n%s/%s\"\n self.check(b, a)" % (next(counter), top, bottom, top, bottom))
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_division.py | test_division.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_raise(lib3to2FixerTestCase):
    """Tests for rewriting raise E(V).with_traceback(T) as the 2.x
    three-expression raise E, V, T and dropping "from exc" chaining."""
    fixer = 'raise'

    def test_unchanged(self):
        """
Due to raise E(V) being valid in 2.5, this fixer fortunately doesn't
need to touch code that constructs exception objects without explicit
tracebacks.
"""
        s = """raise E(V)"""
        self.unchanged(s)

        s = """raise E("What?")"""
        self.unchanged(s)

        s = """raise"""
        self.unchanged(s)

    def test_what_doesnt_work(self):
        """
These tests should fail, but don't. TODO: Uncomment successfully.

One potential way of making these work is a separate fix_exceptions
with a lower run order than fix_raise, to communicate to fix_raise how
to sort out that third argument.

These items are currently outside the scope of 3to2.
"""
        b = """
E = BaseException(V).with_traceback(T)
raise E
"""
        #a = """
        #E = BaseException(V)
        #raise E, V, T
        #"""
        #self.check(b, a)
        self.unchanged(b)

        b = """
E = BaseException(V)
E.__traceback__ = S
E.__traceback__ = T
raise E
"""
        #a = """
        #E = BaseException(V)
        #raise E, V, T
        #self.check(b, a)
        self.unchanged(b)

    def test_traceback(self):
        """
This stuff currently works, and is the opposite counterpart to the
2to3 version of fix_raise.
"""
        b = """raise E(V).with_traceback(T)"""
        a = """raise E, V, T"""
        self.check(b, a)

        b = """raise E().with_traceback(T)"""
        a = """raise E, None, T"""
        self.check(b, a)

        b = """raise E("Sorry, you cannot do that.").with_traceback(T)"""
        a = """raise E, "Sorry, you cannot do that.", T"""
        self.check(b, a)

    def test_chain(self):
        # "from exc" has no 2.x equivalent, so it is dropped (warned).
        b = "raise E(V).with_traceback(t) from exc"
        a = "raise E, V, t"
        self.check(b, a, ignore_warnings=True)

        b = "raise E(V) from exc"
        a = "raise E(V)"
        self.check(b, a, ignore_warnings=True)

        b = "raise eBob.exception from exc"
        a = "raise eBob.exception"
        self.check(b, a, ignore_warnings=True)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_raise.py | test_raise.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_fullargspec(lib3to2FixerTestCase):
    """Tests for mapping inspect.getfullargspec back to getargspec,
    which always emits a lossiness warning."""
    fixer = "fullargspec"

    def test_import(self):
        b = "from inspect import blah, blah, getfullargspec, blah, blah"
        a = "from inspect import blah, blah, getargspec, blah, blah"
        self.warns(b, a, "some of the values returned by getfullargspec are not valid in Python 2 and have no equivalent.")

    def test_usage(self):
        b = "argspec = inspect.getfullargspec(func)"
        a = "argspec = inspect.getargspec(func)"
        self.warns(b, a, "some of the values returned by getfullargspec are not valid in Python 2 and have no equivalent.")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_fullargspec.py | test_fullargspec.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_range(lib3to2FixerTestCase):
    """Tests for mapping range() to xrange() and unwrapping
    list(range(...)) to a plain range() call."""
    fixer = "range"

    def test_notbuiltin_list(self):
        b = "x.list(range(10))"
        a = "x.list(xrange(10))"
        self.check(b, a)

    def test_prefix_preservation(self):
        b = """x = range( 10 )"""
        a = """x = xrange( 10 )"""
        self.check(b, a)

        b = """x = range( 1 , 10 )"""
        a = """x = xrange( 1 , 10 )"""
        self.check(b, a)

        b = """x = range( 0 , 10 , 2 )"""
        a = """x = xrange( 0 , 10 , 2 )"""
        self.check(b, a)

    def test_single_arg(self):
        b = """x = range(10)"""
        a = """x = xrange(10)"""
        self.check(b, a)

    def test_two_args(self):
        b = """x = range(1, 10)"""
        a = """x = xrange(1, 10)"""
        self.check(b, a)

    def test_three_args(self):
        b = """x = range(0, 10, 2)"""
        a = """x = xrange(0, 10, 2)"""
        self.check(b, a)

    def test_wrapped_in_list(self):
        # list(range(...)) is exactly 2.x range(...), so the wrapper is
        # stripped instead of converting to xrange.
        b = """x = list(range(10, 3, 9))"""
        a = """x = range(10, 3, 9)"""
        self.check(b, a)

        b = """x = foo(list(range(10, 3, 9)))"""
        a = """x = foo(range(10, 3, 9))"""
        self.check(b, a)

        b = """x = list(range(10, 3, 9)) + [4]"""
        a = """x = range(10, 3, 9) + [4]"""
        self.check(b, a)

        b = """x = list(range(10))[::-1]"""
        a = """x = range(10)[::-1]"""
        self.check(b, a)

        b = """x = list(range(10)) [3]"""
        a = """x = range(10) [3]"""
        self.check(b, a)

    def test_range_in_for(self):
        b = """for i in range(10):\n j=i"""
        a = """for i in xrange(10):\n j=i"""
        self.check(b, a)

        b = """[i for i in range(10)]"""
        a = """[i for i in xrange(10)]"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_range.py | test_range.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_str(lib3to2FixerTestCase):
    """Tests for the str fixer: str -> unicode, chr -> unichr, and string
    literals gaining a u prefix."""
    fixer = "str"

    def test_str_call(self):
        b = """str(x, y, z)"""
        a = """unicode(x, y, z)"""
        self.check(b, a)

    def test_chr_call(self):
        b = """chr(a, t, m)"""
        a = """unichr(a, t, m)"""
        self.check(b, a)

    def test_str_literal_1(self):
        b = '''"x"'''
        a = '''u"x"'''
        self.check(b, a)

    def test_str_literal_2(self):
        # Raw literals keep their r flag after the u prefix.
        b = """r'x'"""
        a = """ur'x'"""
        self.check(b, a)

    def test_str_literal_3(self):
        b = """R'''x'''"""
        a = """uR'''x'''"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_str.py | test_str.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_numliterals(lib3to2FixerTestCase):
    """Tests for converting 3.x numeric literals: 0o755 -> 0755 and
    binary literals to an explicit long(..., 2) call."""
    fixer = "numliterals"

    def test_octal_1(self):
        b = """0o755"""
        a = """0755"""
        self.check(b, a)

    def test_octal_2(self):
        b = """0o777"""
        a = """0777"""
        self.check(b, a)

    def test_bin_1(self):
        # 2.5 has no 0b literals, so they become a base-2 long() call.
        b = """0b10010110"""
        a = """__builtins__.long("10010110", 2)"""
        self.check(b, a)

    def test_bin_2(self):
        b = """spam(0b1101011010110)"""
        a = """spam(__builtins__.long("1101011010110", 2))"""
        self.check(b, a)

    def test_comments_and_spacing_2(self):
        b = """b = 0o755 # spam"""
        a = """b = 0755 # spam"""
        self.check(b, a)

    def test_unchanged_str(self):
        # Literal-looking text inside strings must not be rewritten.
        s = """'0x1400'"""
        self.unchanged(s)

        s = """'0b011000'"""
        self.unchanged(s)

        s = """'0o755'"""
        self.unchanged(s)

    def test_unchanged_other(self):
        s = """5.0"""
        self.unchanged(s)

        s = """5.0e10"""
        self.unchanged(s)

        s = """5.4 + 4.9j"""
        self.unchanged(s)

        s = """4j"""
        self.unchanged(s)

        s = """4.4j"""
        self.unchanged(s)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_numliterals.py | test_numliterals.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_newstyle(lib3to2FixerTestCase):
    """Tests for making bare 3.x classes explicitly inherit object so
    they remain new-style classes under Python 2."""
    fixer = "newstyle"

    def test_oneline(self):
        b = """class Foo: pass"""
        a = """class Foo(object): pass"""
        self.check(b, a)

    def test_suite(self):
        b = """
class Foo:
do_stuff()"""
        a = """
class Foo(object):
do_stuff()"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_newstyle.py | test_newstyle.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_reduce(lib3to2FixerTestCase):
    """Tests for mapping functools.reduce back to the builtin reduce and
    removing "from functools import reduce" lines."""
    fixer = "reduce"

    def test_functools_import(self):
        b = """
from functools import reduce
reduce(f, it)"""
        a = """
reduce(f, it)"""
        self.check(b, a)

        # Semicolon-separated statements around the import are preserved.
        b = """
do_other_stuff; from functools import reduce
reduce(f, it)"""
        a = """
do_other_stuff
reduce(f, it)"""
        self.check(b, a)

        b = """
do_other_stuff; from functools import reduce; do_more_stuff
reduce(f, it)"""
        a = """
do_other_stuff; do_more_stuff
reduce(f, it)"""
        self.check(b, a)

    def test_functools_reduce(self):
        b = """
import functools

functools.reduce(spam, ['spam', 'spam', 'baked beans', 'spam'])
"""
        a = """
import functools

reduce(spam, ['spam', 'spam', 'baked beans', 'spam'])
"""
        self.check(b, a)

    def test_prefix(self):
        # Odd internal spacing must be preserved verbatim.
        b = """
a = functools.reduce( self.thing, self.children , f( 3 ))
"""
        a = """
a = reduce( self.thing, self.children , f( 3 ))
"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_reduce.py | test_reduce.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_imports(lib3to2FixerTestCase):
fixer = "imports"
def test_various_unchanged(self):
# Enclosed in a string
s = "'import queue'"
self.unchanged(s)
# Never was imported
s = "print(queue)"
self.unchanged(s)
def test_all_nodotted_names_solo(self):
b = "import configparser"
a = "import ConfigParser"
self.check(b, a)
b = "from winreg import *"
a = "from _winreg import *"
self.check(b, a)
b = "import copyreg"
a = "import copy_reg"
self.check(b, a)
b = "import queue"
a = "import Queue"
self.check(b, a)
b = "import socketserver"
a = "import SocketServer"
self.check(b, a)
b = "import _markupbase"
a = "import markupbase"
self.check(b, a)
b = "import builtins"
a = "import __builtin__"
self.check(b, a)
def test_nodotted_names_duo(self):
b = "import configparser, copyreg"
a = "import ConfigParser, copy_reg"
self.check(b, a)
b = "import _markupbase, queue as bob"
a = "import markupbase, Queue as bob"
self.check(b, a)
b = "import socketserver, builtins"
a = "import SocketServer, __builtin__"
self.check(b, a)
def test_nodotted_names_quad(self):
b = "import configparser, winreg, socketserver, _markupbase"
a = "import ConfigParser, _winreg, SocketServer, markupbase"
self.check(b, a)
b = "import queue, math, _markupbase, copyreg"
a = "import Queue, math, markupbase, copy_reg"
self.check(b, a)
def test_all_dotted_names_solo(self):
    """One dotted (package.module) rename per statement.

    Covers the dbm, html, http, tkinter, urllib, test and xmlrpc
    package splits, in both ``import x.y`` and ``from x import y``
    forms, including ``as``-aliases.
    """
    b = "import dbm.bsd as bsd"
    a = "import dbhash as bsd"
    self.check(b, a)
    b = "import dbm.ndbm"
    a = "import dbm"
    self.check(b, a)
    b = "import dbm.dumb"
    a = "import dumbdbm"
    self.check(b, a)
    # a from-import of a renamed submodule becomes an aliased import
    b = "from dbm import gnu"
    a = "import gdbm as gnu"
    self.check(b, a)
    b = "import html.parser"
    a = "import HTMLParser"
    self.check(b, a)
    b = "import html.entities"
    a = "import htmlentitydefs"
    self.check(b, a)
    b = "from http import client"
    a = "import httplib as client"
    self.check(b, a)
    b = "import http.cookies"
    a = "import Cookie"
    self.check(b, a)
    b = "import http.cookiejar"
    a = "import cookielib"
    self.check(b, a)
    b = "import tkinter.dialog"
    a = "import Dialog"
    self.check(b, a)
    b = "import tkinter._fix"
    a = "import FixTk"
    self.check(b, a)
    b = "import tkinter.scrolledtext"
    a = "import ScrolledText"
    self.check(b, a)
    b = "import tkinter.tix"
    a = "import Tix"
    self.check(b, a)
    b = "import tkinter.constants"
    a = "import Tkconstants"
    self.check(b, a)
    b = "import tkinter.dnd"
    a = "import Tkdnd"
    self.check(b, a)
    b = "import tkinter.__init__"
    a = "import Tkinter"
    self.check(b, a)
    #TODO: Make this work (see the fix_imports)
    #b = "import tkinter"
    #a = "import Tkinter"
    #self.check(b, a)
    b = "import tkinter.colorchooser"
    a = "import tkColorChooser"
    self.check(b, a)
    b = "import tkinter.commondialog"
    a = "import tkCommonDialog"
    self.check(b, a)
    b = "from tkinter.font import *"
    a = "from tkFont import *"
    self.check(b, a)
    b = "import tkinter.messagebox"
    a = "import tkMessageBox"
    self.check(b, a)
    b = "import tkinter.turtle"
    a = "import turtle"
    self.check(b, a)
    b = "import urllib.robotparser"
    a = "import robotparser"
    self.check(b, a)
    b = "import test.support"
    a = "import test.test_support"
    self.check(b, a)
    b = "from test import support"
    a = "from test import test_support as support"
    self.check(b, a)
    b = "import xmlrpc.client"
    a = "import xmlrpclib"
    self.check(b, a)
    # only the renamed name in a from-import list is touched
    b = "from test import support as spam, not_support as not_spam"
    a = "from test import test_support as spam, not_support as not_spam"
    self.check(b, a)
def test_dotted_names_duo(self):
    """Two dotted renames in a single import statement."""
    for before, after in (
        ("import tkinter.font, dbm.bsd",
         "import tkFont, dbhash"),
        ("import test.support, http.cookies",
         "import test.test_support, Cookie"),
    ):
        self.check(before, after)
def test_from_import(self):
    """from-imports out of a renamed module keep their imported names."""
    for before, after in (
        ("from test.support import things",
         "from test.test_support import things"),
        ("from builtins import open",
         "from __builtin__ import open"),
    ):
        self.check(before, after)
def test_dotted_names_quad(self):
    """Four-name import statements with aliases, duplicates and comments."""
    for before, after in (
        ("import html.parser as spam, math, tkinter.__init__, dbm.gnu #comment!",
         "import HTMLParser as spam, math, Tkinter, gdbm #comment!"),
        ("import math, tkinter.dnd, dbm.ndbm as one, dbm.ndbm as two, urllib",
         "import math, Tkdnd, dbm as one, dbm as two, urllib"),
    ):
        self.check(before, after)
def test_usage(self):
    """Renamed modules must also be renamed at their points of use."""
    b = """
import queue as james
james.do_stuff()"""
    a = """
import Queue as james
james.do_stuff()"""
    self.check(b, a)
    b = """
import queue
queue.do_stuff()"""
    a = """
import Queue
Queue.do_stuff()"""
    self.check(b, a)
    b = """
import dbm.gnu
dbm.gnu.open('generic_file')"""
    a = """
import gdbm
gdbm.open('generic_file')"""
    self.check(b, a)
    # a local binding named 'tkinter' must not be confused with the module
    b = """
import tkinter.dialog, tkinter.colorchooser
tkinter = tkinter.dialog(tkinter.colorchooser("Just messing around"))
tkinter.test_should_work = True
tkinter.dialog.dont.code.like.this = True"""
    a = """
import Dialog, tkColorChooser
tkinter = Dialog(tkColorChooser("Just messing around"))
tkinter.test_should_work = True
Dialog.dont.code.like.this = True"""
    self.check(b, a)
    # a rebound builtin name is distinct from the builtins module attribute
    b = """
open = bob
import builtins
myOpen = builtins.open"""
    a = """
open = bob
import __builtin__
myOpen = __builtin__.open"""
    self.check(b, a)
def test_bare_usage(self):
    """A renamed module used as a bare name (e.g. as a call argument)."""
    b = """
import builtins
hasattr(builtins, "quit")"""
    a = """
import __builtin__
hasattr(__builtin__, "quit")"""
    self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_imports.py | test_imports.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_bytes(lib3to2FixerTestCase):
    """Tests for the 'bytes' fixer: bytes() calls become str() calls and
    b''-prefixed literals become plain string literals under Python 2."""
    fixer = "bytes"

    def test_bytes_call_1(self):
        b = """bytes(x)"""
        a = """str(x)"""
        self.check(b, a)

    def test_bytes_call_2(self):
        b = """a = bytes(x) + b"florist" """
        a = """a = str(x) + "florist" """
        self.check(b, a)

    def test_bytes_call_noargs(self):
        b = """bytes()"""
        a = """str()"""
        self.check(b, a)

    def test_bytes_call_args_1(self):
        # bytes(source, encoding, errors) -> str(source).encode(encoding, errors)
        b = """bytes(x, y, z)"""
        a = """str(x).encode(y, z)"""
        self.check(b, a)

    def test_bytes_call_args_2(self):
        # keyword arguments are reordered into the positional encode() form
        b = """bytes(encoding="utf-8", source="dinosaur", errors="dont-care")"""
        a = """str("dinosaur").encode("utf-8", "dont-care")"""
        self.check(b, a)

    def test_bytes_literal_1(self):
        b = '''b"\x41"'''
        a = '''"\x41"'''
        self.check(b, a)

    def test_bytes_literal_2(self):
        b = """b'x'"""
        a = """'x'"""
        self.check(b, a)

    def test_bytes_literal_3(self):
        # the raw-bytes prefix keeps its raw marker, dropping only the B
        b = """BR'''\x13'''"""
        a = """R'''\x13'''"""
        self.check(b, a)

    def test_bytes_concatenation(self):
        b = """b'bytes' + b'bytes'"""
        a = """'bytes' + 'bytes'"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_bytes.py | test_bytes.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_funcattrs(lib3to2FixerTestCase):
    """Tests for the 'funcattrs' fixer: __defaults__/__closure__/__globals__
    are mapped back to Python 2's func_defaults/func_closure/func_globals."""
    fixer = "funcattrs"

    def test_doc_unchanged(self):
        # __doc__ is spelled the same way in both Python 2 and 3
        self.unchanged("""whats.up.__doc__""")

    def test_defaults(self):
        self.check("""myFunc.__defaults__""", """myFunc.func_defaults""")

    def test_closure(self):
        self.check("""fore.__closure__""", """fore.func_closure""")

    def test_globals(self):
        self.check("""funkFunc.__globals__""", """funkFunc.func_globals""")

    def test_dict_unchanged(self):
        # __dict__ exists under both versions
        self.unchanged("""tricky.__dict__""")

    def test_name_unchanged(self):
        # __name__ exists under both versions
        self.unchanged("""sayMy.__name__""")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_funcattrs.py | test_funcattrs.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_methodattrs(lib3to2FixerTestCase):
    """Tests for the 'methodattrs' fixer: bound-method attributes
    __func__/__self__ are renamed to Python 2's im_func/im_self."""
    fixer = "methodattrs"

    # the attribute stems whose __X__ forms map to im_X forms
    attrs = ["func", "self"]

    def test_methodattrs(self):
        for attr in self.attrs:
            b = "a.__%s__" % attr
            a = "a.im_%s" % attr
            self.check(b, a)
            b = "self.foo.__%s__.foo_bar" % attr
            a = "self.foo.im_%s.foo_bar" % attr
            self.check(b, a)
        # __self__ in the middle of a longer attribute chain
        # NOTE(review): loop nesting reconstructed from mangled indentation;
        # this attr-independent case is placed after the loop — confirm.
        b = "dir(self.foo.__self__.__class__)"
        a = "dir(self.foo.im_self.__class__)"
        self.check(b, a)

    def test_unchanged(self):
        for attr in self.attrs:
            # a bare __func__/__self__ name (not an attribute) is left alone
            s = "foo(__%s__ + 5)" % attr
            self.unchanged(s)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_methodattrs.py | test_methodattrs.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_itertoools(lib3to2FixerTestCase):
    """Tests for the 'itertools' fixer: map/filter/zip/filterfalse become
    the itertools imap/ifilter/izip/ifilterfalse equivalents.

    NOTE(review): the class name misspells 'itertools' ('itertoools');
    it is kept unchanged so external references keep working.
    """
    fixer = "itertools"

    def test_map(self):
        self.check("""map(a, b)""",
                   """from itertools import imap\nimap(a, b)""")

    def test_unchanged_nobuiltin(self):
        # a method call is not the builtin ...
        self.unchanged("""obj.filter(a, b)""")
        # ... nor is a user-defined function of the same name
        self.unchanged("""
def map():
    pass
""")

    def test_filter(self):
        self.check("a = filter( a, b)",
                   "from itertools import ifilter\na = ifilter( a, b)")

    def test_zip(self):
        self.check("""for key, val in zip(a, b):\n\tdct[key] = val""",
                   """from itertools import izip\nfor key, val in izip(a, b):\n\tdct[key] = val""")

    def test_filterfalse(self):
        self.check("""from itertools import function, filterfalse, other_function""",
                   """from itertools import function, ifilterfalse, other_function""")
        self.check("""filterfalse(a, b)""",
                   """ifilterfalse(a, b)""")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_itertools.py | test_itertools.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_bool(lib3to2FixerTestCase):
    """Tests for the 'bool' fixer: the __bool__ special method is renamed
    to Python 2's __nonzero__."""
    fixer = "bool"

    def test_1(self):
        b = """
class A:
    def __bool__(self):
        pass
"""
        a = """
class A:
    def __nonzero__(self):
        pass
"""
        self.check(b, a)

    def test_2(self):
        b = """
class A(object):
    def __bool__(self):
        pass
"""
        a = """
class A(object):
    def __nonzero__(self):
        pass
"""
        self.check(b, a)

    def test_unchanged_1(self):
        # already the Python 2 spelling
        s = """
class A(object):
    def __nonzero__(self):
        pass
"""
        self.unchanged(s)

    def test_unchanged_2(self):
        # extra parameter: not the special-method signature
        s = """
class A(object):
    def __bool__(self, a):
        pass
"""
        self.unchanged(s)

    def test_unchanged_func(self):
        # a module-level function, not a method, must not be renamed
        s = """
def __bool__(thing):
    pass
"""
        self.unchanged(s)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_bool.py | test_bool.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_int(lib3to2FixerTestCase):
    """Tests for the 'int' fixer: the int builtin becomes long, and
    integer literals gain Python 2's L suffix."""
    fixer = "int"

    def test_1(self):
        self.check("""x = int(x)""", """x = long(x)""")

    def test_2(self):
        self.check("""y = isinstance(x, int)""", """y = isinstance(x, long)""")

    def test_unchanged(self):
        # 'int' used as a binding name rather than the builtin type
        for src in (
            """int = True""",
            """s.int = True""",
            """def int(): pass""",
            """class int(): pass""",
            """def f(int): pass""",
            """def f(g, int): pass""",
            """def f(x, int=True): pass""",
        ):
            self.unchanged(src)

    def test_prefix_preservation(self):
        self.check("""x = int( x )""", """x = long( x )""")

    def test_literal_1(self):
        self.check("""5""", """5L""")

    def test_literal_2(self):
        self.check("""a = 12""", """a = 12L""")

    def test_literal_3(self):
        self.check("""0""", """0L""")

    def test_complex_1(self):
        # only the real integer part gets the suffix; imaginary parts do not
        self.check("""5 + 4j""", """5L + 4j""")

    def test_complex_2(self):
        self.check("""35 + 2j""", """35L + 2j""")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_int.py | test_int.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_super(lib3to2FixerTestCase):
    """Tests for the 'super' fixer: zero-argument super() becomes the
    explicit two-argument Python 2 form built from the first parameter."""
    fixer = "super"

    def test_noargs(self):
        self.check("def m(self):\n    super()",
                   "def m(self):\n    super(self.__class__, self)")

    def test_other_params(self):
        # the first positional parameter is used, whatever it is named
        self.check("def m(a, self=None):\n    super()",
                   "def m(a, self=None):\n    super(a.__class__, a)")

    def test_no_with_stars(self):
        # no plain first parameter to build the call from: warn, leave alone
        self.unchanged("def m(*args, **kwargs):\n    super()",
                       ignore_warnings=True)

    def test_no_with_noargs(self):
        # no parameters at all: warn, leave alone
        self.unchanged("def m():\n    super()", ignore_warnings=True)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_super.py | test_super.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_bitlength(lib3to2FixerTestCase):
    """Tests for the 'bitlength' fixer: int.bit_length() is rewritten as
    (len(bin(...)) - 2), which Python 2.5 can evaluate."""
    fixer = 'bitlength'

    def test_fixed(self):
        self.check("""a = something.bit_length()""",
                   """a = (len(bin(something)) - 2)""",
                   ignore_warnings=True)

    def test_unfixed(self):
        # a free function of the same name is not the method
        self.unchanged("""a = bit_length(fire)""")
        # bit_length() takes no arguments, so this cannot be the method
        self.unchanged("""a = s.bit_length('some_arg')""")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_bitlength.py | test_bitlength.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_kwargs(lib3to2FixerTestCase):
    """Tests for the 'kwargs' fixer, which rewrites Python 3 keyword-only
    arguments (those after '*' or '*args') into a **kwargs catch-all plus
    explicit unpacking statements at the top of the function body. When no
    catch-all exists, the synthetic name _3to2kwargs is introduced."""
    fixer = 'kwargs'

    def test_basic_unchanged(self):
        s = """
def spam(ham, eggs): funky()"""
        self.unchanged(s)

    def test_args_kwargs_unchanged(self):
        s = """
def spam(ham, *args, **kwargs): funky()"""
        self.unchanged(s)

    def test_args_named_pos(self):
        b = """
def spam(ham, *args, eggs, monkeys): funky()"""
        a = """
def spam(ham, *args, **_3to2kwargs):
    monkeys = _3to2kwargs['monkeys']; del _3to2kwargs['monkeys']
    eggs = _3to2kwargs['eggs']; del _3to2kwargs['eggs']
    funky()"""
        self.check(b, a)

    def test_args_named_pos_catchall(self):
        # an existing **catch-all is reused instead of _3to2kwargs
        b = """
def spam(ham, *args, eggs, monkeys, **stuff): funky()"""
        a = """
def spam(ham, *args, **stuff):
    monkeys = stuff['monkeys']; del stuff['monkeys']
    eggs = stuff['eggs']; del stuff['eggs']
    funky()"""
        self.check(b, a)

    def test_bare_star_named(self):
        b = """
def spam(ham, *, eggs, monkeys):
    funky()"""
        a = """
def spam(ham, **_3to2kwargs):
    monkeys = _3to2kwargs['monkeys']; del _3to2kwargs['monkeys']
    eggs = _3to2kwargs['eggs']; del _3to2kwargs['eggs']
    funky()"""
        self.check(b, a)

    def test_bare_star_named_simple_defaults(self):
        # defaulted keyword-only args unpack via an in-test with fallback
        b = """
def spam(ham, *, dinosaurs, eggs=3, monkeys=2):
    funky()"""
        a = """
def spam(ham, **_3to2kwargs):
    if 'monkeys' in _3to2kwargs: monkeys = _3to2kwargs['monkeys']; del _3to2kwargs['monkeys']
    else: monkeys = 2
    if 'eggs' in _3to2kwargs: eggs = _3to2kwargs['eggs']; del _3to2kwargs['eggs']
    else: eggs = 3
    dinosaurs = _3to2kwargs['dinosaurs']; del _3to2kwargs['dinosaurs']
    funky()"""
        self.check(b, a)

    def test_bare_star_named_simple_defaults_catchall(self):
        b = """
def spam(ham, *, dinosaurs, eggs=3, monkeys=2, **stuff):
    funky()"""
        a = """
def spam(ham, **stuff):
    if 'monkeys' in stuff: monkeys = stuff['monkeys']; del stuff['monkeys']
    else: monkeys = 2
    if 'eggs' in stuff: eggs = stuff['eggs']; del stuff['eggs']
    else: eggs = 3
    dinosaurs = stuff['dinosaurs']; del stuff['dinosaurs']
    funky()"""
        self.check(b, a)

    def test_bare_star_named_complicated_defaults(self):
        # arbitrary default expressions are carried over verbatim
        b = """
def spam(ham, *, dinosaurs, eggs=call_fn(lambda a: b), monkeys=[i.split() for i in something(args)]):
    funky()"""
        a = """
def spam(ham, **_3to2kwargs):
    if 'monkeys' in _3to2kwargs: monkeys = _3to2kwargs['monkeys']; del _3to2kwargs['monkeys']
    else: monkeys = [i.split() for i in something(args)]
    if 'eggs' in _3to2kwargs: eggs = _3to2kwargs['eggs']; del _3to2kwargs['eggs']
    else: eggs = call_fn(lambda a: b)
    dinosaurs = _3to2kwargs['dinosaurs']; del _3to2kwargs['dinosaurs']
    funky()"""
        self.check(b, a)

    def test_bare_star_named_complicated_defaults_catchall(self):
        b = """
def spam(ham, *, dinosaurs, eggs=call_fn(lambda a: b), monkeys=[i.split() for i in something(args)], **stuff):
    funky()"""
        a = """
def spam(ham, **stuff):
    if 'monkeys' in stuff: monkeys = stuff['monkeys']; del stuff['monkeys']
    else: monkeys = [i.split() for i in something(args)]
    if 'eggs' in stuff: eggs = stuff['eggs']; del stuff['eggs']
    else: eggs = call_fn(lambda a: b)
    dinosaurs = stuff['dinosaurs']; del stuff['dinosaurs']
    funky()"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_kwargs.py | test_kwargs.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_imports2(lib3to2FixerTestCase):
    """Tests for the 'imports2' fixer, which splits Python 3 merged
    packages (urllib.request, http.server, dbm, xmlrpc.server, ...) back
    into the multiple Python 2 modules they came from, and rewrites each
    point of use to the specific 2.x module that provides the name."""
    fixer = "imports2"

    def test_name_usage_simple(self):
        b = """
import urllib.request
urllib.request.urlopen(spam)"""
        a = """
import urllib2, urllib
urllib2.urlopen(spam)"""
        self.check(b, a)
        # usages in nested suites are also rewritten
        b = """
if True:
    import http.server
else:
    import this
while True:
    http.server.HTTPServer(('localhost', 80), http.server.SimpleHTTPRequestHandler)
else:
    import urllib.request"""
        a = """
if True:
    import CGIHTTPServer, SimpleHTTPServer, BaseHTTPServer
else:
    import this
while True:
    BaseHTTPServer.HTTPServer(('localhost', 80), SimpleHTTPServer.SimpleHTTPRequestHandler)
else:
    import urllib2, urllib"""
        self.check(b, a)

    def test_name_scope_def(self):
        b = """
import urllib.request
def importing_stuff():
    import urllib.request
    urllib.request.urlopen(stuff)
    urllib.request.urlretrieve(stuff)"""
        a = """
import urllib2, urllib
def importing_stuff():
    import urllib2, urllib
    urllib2.urlopen(stuff)
    urllib.urlretrieve(stuff)"""
        self.check(b, a)
        # each dbm attribute resolves to a different 2.x module
        b = """
import math, urllib.request, http.server, dbm
w = dbm.whichdb()
g = dbm.gnu()
a = dbm.open()"""
        a = """
import math
import anydbm, whichdb, dbm
import CGIHTTPServer, SimpleHTTPServer, BaseHTTPServer
import urllib2, urllib
w = whichdb.whichdb()
g = dbm.gnu()
a = anydbm.open()"""
        self.check(b, a)

    def test_name_scope_if(self):
        b = """
if thing:
    import http.server
elif other_thing:
    import xmlrpc.server
if related_thing:
    myServ = http.server.HTTPServer(('localhost', '80'), http.server.CGIHTTPRequestHandler)
elif other_related_thing:
    myServ = xmlrpc.server.SimpleXMLRPCServer(('localhost', '80'), CGIXMLRPCRequestHandler)
# just for kicks...
monkey_wrench_in_the_works = http.server.SimpleHTTPRequestHandler"""
        a = """
if thing:
    import CGIHTTPServer, SimpleHTTPServer, BaseHTTPServer
elif other_thing:
    import DocXMLRPCServer, SimpleXMLRPCServer
if related_thing:
    myServ = BaseHTTPServer.HTTPServer(('localhost', '80'), CGIHTTPServer.CGIHTTPRequestHandler)
elif other_related_thing:
    myServ = SimpleXMLRPCServer.SimpleXMLRPCServer(('localhost', '80'), CGIXMLRPCRequestHandler)
# just for kicks...
monkey_wrench_in_the_works = SimpleHTTPServer.SimpleHTTPRequestHandler"""
        self.check(b, a)

    def test_name_scope_try_except(self):
        b = """
try:
    import http.server
except ImportError:
    import xmlrpc.server
# some time has passed, and we know that http.server was bad.
srv = xmlrpc.server.DocXMLRPCServer(addr, xmlrpc.server.DocCGIXMLRPCRequestHandler)
# some more time has passed, and we know that http.server is good.
srv = http.server.HTTPServer(addr, http.server.CGIHTTPRequestHandler)"""
        a = """
try:
    import CGIHTTPServer, SimpleHTTPServer, BaseHTTPServer
except ImportError:
    import DocXMLRPCServer, SimpleXMLRPCServer
# some time has passed, and we know that http.server was bad.
srv = DocXMLRPCServer.DocXMLRPCServer(addr, DocXMLRPCServer.DocCGIXMLRPCRequestHandler)
# some more time has passed, and we know that http.server is good.
srv = BaseHTTPServer.HTTPServer(addr, CGIHTTPServer.CGIHTTPRequestHandler)"""
        self.check(b, a)

    def test_name_multiple_imports(self):
        # split-package imports are pulled onto their own statements
        b = """
import math, http.server, urllib.request, string"""
        a = """
import math, string
import urllib2, urllib
import CGIHTTPServer, SimpleHTTPServer, BaseHTTPServer"""
        self.check(b, a)

    def test_name_mutiple_imports_indented(self):
        b = """
def indented():
    import math, http.server, urllib.request, string"""
        a = """
def indented():
    import math, string
    import urllib2, urllib
    import CGIHTTPServer, SimpleHTTPServer, BaseHTTPServer"""
        self.check(b, a)

    def test_from_single(self):
        b = "from urllib.request import urlopen"
        a = "from urllib2 import urlopen"
        self.check(b, a)
        b = "from urllib.request import urlopen\n"\
            "from urllib.parse import urlencode"
        a = "from urllib2 import urlopen\n"\
            "from urllib import urlencode"
        self.check(b, a)
        b = "from tkinter.simpledialog import SimpleDialog"
        a = "from SimpleDialog import SimpleDialog"
        self.check(b, a)

    def test_from_star(self):
        # a star-import from a split package fans out to every 2.x module
        b = """
def try_import(package):
    try:
        from http.server import *
        print('success')
    except ImportError:
        print('failure', end="")
        print('try again!')
"""
        a = """
def try_import(package):
    try:
        from BaseHTTPServer import *
        from CGIHTTPServer import *
        from SimpleHTTPServer import *
        print('success')
    except ImportError:
        print('failure', end="")
        print('try again!')
"""
        self.check(b, a, ignore_warnings=True)
        b = """
def testing_http_server():
    from http.server import *
    test_all_imports()
def testing_xmlrpc_server():
    from xmlrpc.server import *
    test_all_imports()
"""
        a = """
def testing_http_server():
    from BaseHTTPServer import *
    from CGIHTTPServer import *
    from SimpleHTTPServer import *
    test_all_imports()
def testing_xmlrpc_server():
    from SimpleXMLRPCServer import *
    from DocXMLRPCServer import *
    test_all_imports()
"""
        self.check(b, a, ignore_warnings=True)

    def test_from_list(self):
        # names in one from-import list may land in different 2.x modules
        b = """
with open('myFile', 'r') as myFile:
    from urllib.request import install_opener, urlretrieve, unquote as billybob
    fileList = [ln for ln in myFile]"""
        a = """
with open('myFile', 'r') as myFile:
    from urllib import urlretrieve, unquote as billybob
    from urllib2 import install_opener
    fileList = [ln for ln in myFile]"""
        self.check(b, a, ignore_warnings=True)

    # disabled: module-level "from package import module" is not yet handled
    if False:
        def test_modulefrom(self):
            b = """
if spam.is_good():
    from urllib import request, parse
    request.urlopen(spam_site)
    parse.urlencode(spam_site)"""
            a = """
if spam.is_good():
    import urllib
    import urllib2
    urllib2.urlopen(spam_site)
    urllib.urlencode(spam_site)"""
            self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_imports2.py | test_imports2.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_print(lib3to2FixerTestCase):
    """Tests for the 'print' fixer: print() function calls become Python 2
    print statements, including >>file redirection and sep/end handling."""
    fixer = "print"

    def test_generic(self):
        b = """print()"""
        a = """print"""
        self.check(b,a)

    def test_literal(self):
        b = """print('spam')"""
        a = """print 'spam'"""
        self.check(b,a)

    def test_not_builtin_unchanged(self):
        # an attribute named print is not the builtin
        s = "this.shouldnt.be.changed.because.it.isnt.builtin.print()"
        self.unchanged(s)

    #XXX: Quoting this differently than triple-quotes, because with newline
    #XXX: setting, I can't quite get the triple-quoted versions to line up.
    def test_arbitrary_printing(self):
        # file=sys.stderr becomes the >>sys.stderr redirection form
        b = "import dinosaur.skull\nimport sys\nprint"\
            "(skull.jaw, skull.jaw.biteforce, file=sys.stderr)"
        a = "import dinosaur.skull\nimport sys\nprint "\
            ">>sys.stderr, skull.jaw, skull.jaw.biteforce"
        self.check(b, a)

    def test_long_arglist(self):
        # sep= is emulated via str.join and end= via sys.stdout.write
        b = "print(spam, spam, spam, spam, spam, baked_beans, spam, spam,"\
            " spam, spam, sep=', spam, ', end=wonderful_spam)\nprint()"
        a = "import sys\nprint ', spam, '.join([unicode(spam), unicode(spam), unicode(spam), unicode(spam), unicode(spam), unicode(baked_beans),"\
            " unicode(spam), unicode(spam), unicode(spam), unicode(spam)]),; sys.stdout.write(wonderful_spam)\nprint"
        self.check(b, a, ignore_warnings=True)

    def test_nones(self):
        # explicit None keyword values are the defaults and are dropped
        b = "print(1,2,3,end=None, sep=None, file=None)"
        a = "print 1,2,3"
        self.check(b, a)

    def test_argument_unpacking(self):
        s = "print(*args)"
        self.warns_unchanged(s, "-fprint does not support argument unpacking. fix using -xprint and then again with -fprintfunction.")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_print.py | test_print.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_getcwd(lib3to2FixerTestCase):
    """Tests for the 'getcwd' fixer: os.getcwd() -> os.getcwdu(), which
    returns a unicode path under Python 2."""
    fixer = "getcwd"

    def test_basic(self):
        # the plain, unspaced call form was previously untested
        b = """wd = os.getcwd()"""
        a = """wd = os.getcwdu()"""
        self.check(b, a)

    def test_prefix_preservation(self):
        # whitespace inside and around the call must survive the rename
        b = """ls = os.listdir( os.getcwd() )"""
        a = """ls = os.listdir( os.getcwdu() )"""
        self.check(b, a)

        b = """whatdir = os.getcwd ( )"""
        a = """whatdir = os.getcwdu ( )"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_getcwd.py | test_getcwd.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_with(lib3to2FixerTestCase):
    """Tests for the 'with' fixer, which prepends the __future__ import
    required for the with-statement on Python 2.5."""
    fixer = "with"

    def test_with_oneline(self):
        self.check("with a as b: pass",
                   "from __future__ import with_statement\nwith a as b: pass")

    def test_with_suite(self):
        self.check("with a as b:\n    pass",
                   "from __future__ import with_statement\nwith a as b:\n    pass")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_with.py | test_with.py |
#empty
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/__init__.py | __init__.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_unittest(lib3to2FixerTestCase):
    """Tests for the 'unittest' fixer: the stdlib unittest module is
    redirected to the unittest2 backport."""
    fixer = 'unittest'

    def test_imported(self):
        self.check("import unittest", "import unittest2")

    def test_used(self):
        self.check("unittest.AssertStuff(True)", "unittest2.AssertStuff(True)")

    def test_from_import(self):
        self.check("from unittest import *", "from unittest2 import *")

    def test_imported_from(self):
        # 'unittest' here is a name imported from elsewhere, not the module
        self.unchanged("from whatever import unittest")

    def test_not_base(self):
        # attribute access on another object must not be rewritten
        self.unchanged("not_unittest.unittest.stuff()")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_unittest.py | test_unittest.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_setliteral(lib3to2FixerTestCase):
    """Tests for the 'setliteral' fixer: set literals and set
    comprehensions become set([...]) calls; dict literals are left alone."""
    fixer = "setliteral"

    def test_unchanged_dict(self):
        # anything with key: value pairs is a dict, not a set literal
        s = """{"ghoul": 100, "zombie": 50, "gremlin": 40}"""
        self.unchanged(s)
        s = """{1: "spider", 2: "hills", 3: "bologna", None: "tapeworm"}"""
        self.unchanged(s)
        # {} is an empty dict, never an empty set
        s = """{}"""
        self.unchanged(s)
        s = """{'a':'b'}"""
        self.unchanged(s)

    def test_simple_literal(self):
        b = """{'Rm 101'}"""
        a = """set(['Rm 101'])"""
        self.check(b, a)

    def test_multiple_items(self):
        b = """{'Rm 101', 'Rm 102', spam, ham, eggs}"""
        a = """set(['Rm 101', 'Rm 102', spam, ham, eggs])"""
        self.check(b, a)
        # interior whitespace is preserved inside the generated call
        b = """{ a, b, c, d, e}"""
        a = """set([ a, b, c, d, e])"""
        self.check(b, a)

    def test_simple_set_comprehension(self):
        b = """{x for x in range(256)}"""
        a = """set([x for x in range(256)])"""
        self.check(b, a)

    def test_complex_set_comprehension(self):
        b = """{F(x) for x in range(256) if x%2}"""
        a = """set([F(x) for x in range(256) if x%2])"""
        self.check(b, a)
        # nested set literals are converted recursively
        b = """{(lambda x: 2000 + x)(x) for x, y in {(5, 400), (6, 600), (7, 900), (8, 1125), (9, 1000)}}"""
        a = """set([(lambda x: 2000 + x)(x) for x, y in set([(5, 400), (6, 600), (7, 900), (8, 1125), (9, 1000)])])"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_setliteral.py | test_setliteral.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_input(lib3to2FixerTestCase):
    """Tests for the 'input' fixer: the builtin input() becomes Python 2's
    raw_input()."""
    fixer = "input"

    def test_prefix_preservation(self):
        # interior whitespace in the call must survive the rename
        self.check("""x = input( )""", """x = raw_input( )""")
        self.check("""x = input( '' )""", """x = raw_input( '' )""")

    def test_1(self):
        self.check("""x = input()""", """x = raw_input()""")

    def test_2(self):
        self.check("""x = input('a')""", """x = raw_input('a')""")

    def test_3(self):
        self.check("""x = input('prompt')""", """x = raw_input('prompt')""")

    def test_4(self):
        # an arbitrary expression as the prompt argument
        self.check("""x = input(foo(a) + 6)""", """x = raw_input(foo(a) + 6)""")

    def test_5(self):
        # trailing method calls on the result are untouched
        self.check("""x = input(invite).split()""",
                   """x = raw_input(invite).split()""")

    def test_6(self):
        self.check("""x = input(invite) . split ()""",
                   """x = raw_input(invite) . split ()""")

    def test_7(self):
        # input() nested inside another call
        self.check("x = int(input())", "x = int(raw_input())")
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_input.py | test_input.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_except(lib3to2FixerTestCase):
    """Tests for the 'except' fixer: 'except E as e' becomes Python 2's
    'except E, e'."""
    fixer = "except"

    def test_prefix_preservation(self):
        # note: unlike the sibling test files, 'a' (the expected 2.x
        # output) is assigned before 'b' (the 3.x input) here
        a = """
try:
    pass
except (RuntimeError, ImportError), e:
    pass"""
        b = """
try:
    pass
except (RuntimeError, ImportError) as e:
    pass"""
        self.check(b, a)

    def test_simple(self):
        a = """
try:
    pass
except Foo, e:
    pass"""
        b = """
try:
    pass
except Foo as e:
    pass"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_except.py | test_except.py |
from test_all_fixers import lib3to2FixerTestCase
class Test_metaclass(lib3to2FixerTestCase):
    """Tests for the 'metaclass' fixer: the metaclass=... class keyword
    becomes a __metaclass__ assignment at the top of the class body.

    As in test_except, 'a' (expected Python 2 output) is assigned before
    'b' (the Python 3 input) throughout this file.
    """
    fixer = 'metaclass'

    def test_unchanged(self):
        # classes without a metaclass keyword are left alone
        self.unchanged("class X(): pass")
        self.unchanged("class X(object): pass")
        self.unchanged("class X(object1, object2): pass")
        self.unchanged("class X(object1, object2, object3): pass")
        # a method merely named __metaclass__ is not the keyword
        s = """
class X():
    def __metaclass__(self): pass
"""
        self.unchanged(s)
        s = """
class X():
    a[23] = 74
"""
        self.unchanged(s)

    def test_comments(self):
        # comments in the body and after the class must be preserved
        a = """
class X():
    # hi
    __metaclass__ = AppleMeta
    pass
"""
        b = """
class X(metaclass=AppleMeta):
    # hi
    pass
"""
        self.check(b, a)
        a = """
class X():
    __metaclass__ = Meta
    pass
# Bedtime!
"""
        b = """
class X(metaclass=Meta):
    pass
# Bedtime!
"""
        self.check(b, a)

    def test_meta_noparent_odd_body(self):
        # no-parent class, odd body
        a = """
class X():
    __metaclass__ = Q
    pass
"""
        b = """
class X(metaclass=Q):
    pass
"""
        self.check(b, a)

    def test_meta_oneparent_no_body(self):
        # one parent class, no body
        a = """
class X(object):
    __metaclass__ = Q
    pass"""
        b = """
class X(object, metaclass=Q): pass"""
        self.check(b, a)

    def test_meta_oneparent_simple_body_1(self):
        # one parent, simple body
        a = """
class X(object):
    __metaclass__ = Meta
    bar = 7
"""
        b = """
class X(object, metaclass=Meta):
    bar = 7
"""
        self.check(b, a)

    def test_meta_oneparent_simple_body_2(self):
        a = """
class X():
    __metaclass__ = Meta
    x = 4; g = 23
"""
        b = """
class X(metaclass=Meta):
    x = 4; g = 23
"""
        self.check(b, a)

    def test_meta_oneparent_simple_body_3(self):
        a = """
class X(object):
    __metaclass__ = Meta
    bar = 7
"""
        b = """
class X(object, metaclass=Meta):
    bar = 7
"""
        self.check(b, a)

    def test_meta_multiparent_simple_body_1(self):
        # multiple inheritance, simple body
        a = """
class X(clsA, clsB):
    __metaclass__ = Meta
    bar = 7
"""
        b = """
class X(clsA, clsB, metaclass=Meta):
    bar = 7
"""
        self.check(b, a)

    def test_meta_multiparent_simple_body_2(self):
        # keywords in the class statement
        a = """
class m(a, arg=23):
    __metaclass__ = Meta
    pass"""
        b = """
class m(a, arg=23, metaclass=Meta):
    pass"""
        self.check(b, a)

    def test_meta_expression_simple_body_1(self):
        # an arbitrary expression as a base class
        a = """
class X(expression(2 + 4)):
    __metaclass__ = Meta
    pass
"""
        b = """
class X(expression(2 + 4), metaclass=Meta):
    pass
"""
        self.check(b, a)

    def test_meta_expression_simple_body_2(self):
        a = """
class X(expression(2 + 4), x**4):
    __metaclass__ = Meta
    pass
"""
        b = """
class X(expression(2 + 4), x**4, metaclass=Meta):
    pass
"""
        self.check(b, a)

    def test_meta_noparent_simple_body(self):
        a = """
class X():
    __metaclass__ = Meta
    save.py = 23
    out = 5
"""
        b = """
class X(metaclass=Meta):
    save.py = 23
    out = 5
"""
        self.check(b, a)
| 3to2_py3k | /3to2_py3k-1.0.tar.gz/3to2_py3k-1.0/lib3to2/tests/test_metaclass.py | test_metaclass.py |
## 3xsd
3xsd is a native epoll server serving TCP/UDP connections, a high performance static web server, a
failover dns server, a http-based distributed file server, a load-balance proxy-cache server, and
a 'warp drive' server. Written in python, take the full power of multi-cores.
## Features in detail:
###3wsd - web server
supporting: static files, event driven(epoll), using mmap & sendfile to send files,
in-mem xcache, transparent gzip content transfer with fixed length(small file) &
chunked(large file), persistent storage of gzip files,
partial support of WebDAV(PUT/DELETE), pipelining support
###3nsd - dns server
supporting: only A record resolution, domainname failover(refer to conf file),
ip icmp probe & hide when fail, round robin ip resolving
global DNS Left-Right Range Resolve(LRRR)(experimental)
###3zsd - proxy server
supporting: load balance backend servers, in-mem file caching &
persistent cache file storage
###3fsd - distribute web file system
supporting: mass unlimited file storage, easy to expand,
O(1) location algorithm, non-centralized, can work with standard web server(WebDAV)
in proxy mode, file redundancy, file persistent caching
###3wdd - 'warp drive' server
supporting: data tunneling over UDT and tun,
better congestion control than TCP/UDP over wan link,
better thoughput(above 80%) over wan link, refer to this report:
http://www.c-s-a.org.cn/ch/reader/create_pdf.aspx?file_no=20091035
tunnel ip/mtu/txqueuelen/route define, auto create/recreate/destroy
encrypt packages through AES-128-ECB/CBC/CFB/CTR and Blowfish-CBC/CFB/CTR
tunnel on-the-fly compress with zlib/lzo, tunnel data relaying
route metric, routing data through different path, depending on tunnel rtt(choose the best one)
More to find in .conf file.
## Performance:
###3wsd:
Small file under 1KB single process test(full in-mem), contrast with nginx configuring
accept_mutex off, 80% performance.
Multi processes test, with reuse_port enabling kernel, 95% performance of nginx(and beyond,
may be 105% or more, based on process number, I tested 2-4).
The tests above is not quite strict, but I just want to say that it's fast enough.
And with pipelining enabled, 3wsd will perform better with 3-4 requests/send(5%-10%
performance increase), while 2 requests/send has the same speed as non-pipelining.
###3zsd:
About 80% performance of 3wsd.
###3nsd:
Fast enough...about 2800-3000 queries/s per processes, with 1GHz bcm2709 4-cores ARMv7
cpu testing, better when multi-processes with reuse_port enabling kernel.
###3fsd:
Same with 3zsd.
###3wdd:
Early testing indicated that:
UDT tunnel(no encrypt) performing 50%-60% speed of direct TCP connection with ZetaTCP,
and package lost rate remaining below 0.6%, while direct connection has 1.4%-3%.
(Test CN-US WAN link with 150ms-280ms latency, through the always-jammed CUCN submarine cable)
However, UDT tunnel beats normal TCP connection without ZetaTCP, with 50% - 4 times
(commonly 1-2 times) outperforming. (Test link as above)
Update:
And an encrypted UDT tunnel with AES-CBC/CFB will has 50% performance decrease (because the
method itself processes doubled size of data, and extra iv/padding data transfer).
Now with a Blowfish-CTR method, tunnel data transfer performance is closed to raw non-encrypt
tunnel. I believe that with a intel AES-NI supported CPU(like XEON E3-1240/1270), AES-128-CTR
can also do it.
###More performance:
There are at lease two ways to increase the performance of 3xsd:
1.Install Cython, and rename _3xsd.py to _3xsd.pyx, run it.
Cython will compile _3xsd.py lib into a _3xsd.so file, using static type
declarations. This can gain about a 5%-6% performance increase.
2. Use PyPy. This can gain about a 10%-15% performance increase (or more).
# OS requirement & install:
CentOS 6/7 with python 2.6/2.7, Debian 6/7. Python 2.7 recommended.
Doing this before running the program(minimal requirement):
yum install python-gevent pysendfile python-setproctitle python-psutil python-pip
(python-pip is optional; it is only needed to install dpkt)
Dpkt module is also needed when running 3nsd DNS server, pip install it.
If you want to use 3wdd, python-pytun, pyudt4, pycrypto, python-lzo are also needed.
yum install python-crypto2.6 python-lzo (for centos6)
yum install python2-crypto (for centos7)
will quickly install pycrypto (probably doing some 'linking' work) and lzo. The other two depend on pip to install.
Probably you need this easy-install.pth file in python's site-packages dir:
import sys; sys.__plen = len(sys.path)
./pycrypto-2.6.1-py2.6-linux-x86_64.egg
./pyudt4-0.6.0-py2.6-linux-x86_64.egg
import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)
I provide pre-compiled package [pyudt_tun-centos6-x86_64.tar.gz](https://github.com/zihuaye/3xsd/blob/master/pyudt_tun-centos6-x86_64.tar.gz) and [pyudt_tun_lzo-centos7-x86_64.tar.gz](https://github.com/zihuaye/3xsd/blob/master/pyudt_tun_lzo-centos7-x86_64.tar.gz) to simplify
the installation procedure of pyudt4 & python-pytun.
Be aware that pyudt4 has some bugs; you'd better download the source code of its epoll-fixes branch and
apply the patch I offered. See the changelog.txt v0.0.20 2016.03.07 fixed section for details.
(Already included in [pyudt_tun-centos6-x86_64.tar.gz](https://github.com/zihuaye/3xsd/blob/master/pyudt_tun-centos6-x86_64.tar.gz) and [pyudt_tun_lzo-centos7-x86_64.tar.gz](https://github.com/zihuaye/3xsd/blob/master/pyudt_tun_lzo-centos7-x86_64.tar.gz))
Or, of course, you can let pip do it all for you (not including patching pyudt4):
pip install 3xsd
In a debian, you can use apt-get to install python-pip(pip) or python-setuptools(easy_install),
then to install the packages following.
Python Packages(Modules) version reference:
gevent==0.13.8(1.0.1, 1.1)
greenlet==0.4.2
pysendfile==2.0.1
setproctitle==1.0.1
psutil==0.6.1
dpkt==1.6(1.8.6)
python-pytun==2.2.1
pyudt4==0.6.0(epoll-fixes branch)
pycrypto==2.6.1
python-lzo==1.8
System libs version reference:
libevent-1.4.13-4(not actually used, just needed for gevent to function)
udt-4.11-6
lzo-2.03-3.1
To install a module of specific version(like gevent 0.13.8), you can:
pip install gevent==0.13.8
This will install the latest version of gevent(pypy will need it):
pip install git+git://github.com/surfly/gevent.git#egg=gevent
| 3xsd | /3xsd-0.0.26.tar.gz/3xsd-0.0.26/README.md | README.md |
# _3xsd module
#
# 3xsd is a native epoll server serving TCP/UDP connections, a high performance static web server, a failover dns server,
# a http-based distributed file server, a load-balance proxy-cache server, and a 'warp drive' server.
#
# The xHandler to handle web requests, and the xDNSHandler to handle DNS query.
# The xZHandler to handle proxy(load-balance/cache)requests, and the xDFSHandler to handle DFS requests.
# The xWHandler to handle tunneling data.
#
# Author: Zihua Ye (zihua.ye@qq.com, zihua.ye@gmail.com)
#
# Copyright (c) 2014-2015, licensed under GPLv2.
#
# All 3 party modules copyright go to their authors(see their licenses).
#
__version__ = "0.0.26"
import os, sys, io, time, calendar, random, multiprocessing, threading
import shutil, mmap, sendfile, zlib, gzip, lzo, copy, setproctitle
import _socket as socket
import select, errno, gevent, dpkt, ConfigParser, hashlib, struct, shelve
import pytun, udt4, subprocess, fcntl, geoip2.database
from distutils.version import StrictVersion
from datetime import datetime
from collections import deque
from gevent.server import StreamServer
from gevent.coros import Semaphore
from udt4 import pyudt as udt
from Crypto.Cipher import AES
from Crypto.Cipher import Blowfish
from Crypto.Util import Counter
from Crypto import Random
#---- module-level runtime configuration; overwritten by the 3xsd launcher at startup ----
Port = 8000		#Listening port number
Backlog = 1000		#Listening backlog
Conns = None		#gevent.pool type, connection limit
Workers = 0		#workers to fork, 0 for no worker
Homedir = None		#3xsd working home(document root)
Handler = None		#3xsd handler class (not an instance), chosen per serving mode
Server = None		#3xsd server instance, init at startup
X_shelf = 0		#persistent storage of xcache(3zsd&3fsd)
Z_mode = 0		#0 - RR(default), 1 - IP Hash, 2 - URL Hash, for 3zsd
_Name = '3xsd'		#program name, changes at startup
#keep references to the original socket module and sendfile function,
#because both may be monkey-patched later (gevent / gevent_sendfile)
o_socket = socket
o_sendfile = sendfile.sendfile
def Handle_Gevent_Stream(sock, addr):
	#gevent StreamServer connection callback: build one Handler per accepted
	#connection. Positional args (no keywords) keep per-call overhead minimal.
	Handler(sock, addr, Server, False, True)
class _Z_StreamServer(StreamServer):
	"""gevent StreamServer subclass for the gevent-driven serving modes.

	Only papers over attribute/method renames between gevent 0.13.x and
	>= 1.0; actual request handling lives in the Handler classes.
	"""
	workers = 0		#number of forked workers, 0 = single process
	_worker_id = 0		#this worker's id, 0 in the master
	xcache = {}		#response cache shared by handlers
	x_reqs = {}		#partially-read requests, keyed by fd
	max_accept = 500
	server_mode = ''
	zlb_mode = False	#True when fronting as load-balancer
	def __init__(self, server_address, RequestHandlerClass, backlog=1000, spawn=None):
		StreamServer.__init__(self, server_address, RequestHandlerClass, backlog=backlog, spawn=spawn)
		if StrictVersion(gevent.__version__) >= StrictVersion('1.0'):
			#gevent >= 1.0 renamed the stop event; alias it for uniform access
			self._stopped_event = self._stop_event
	def pre_start(self):
		#gevent >= 1.0 renamed pre_start() to init_socket()
		if StrictVersion(gevent.__version__) >= StrictVersion('1.0'):
			StreamServer.init_socket(self)
		else:
			StreamServer.pre_start(self)
	def master_works(self):
		#obsolete
		if hasattr(socket, "SO_REUSEPORT"):
			self.socket.close()
class _Z_EpollServer(StreamServer):
	"""Native-epoll server core used by all non-gevent serving modes
	(3wsd web, 3zsd proxy/lb, 3fsd dfs, 3nsd dns, 3wdd warp drive).
	Most state lives in per-fd dicts below; handlers mutate them directly.
	"""
	workers = 0		#number of forked workers, 0 = single process
	_worker_id = 0		#this worker's id, 0 in the master
	max_accept = 500
	reuse_port = True	#use SO_REUSEPORT (one listening socket per worker)
	server_mode = b''
	zlb_mode = False
	_fds = []		#scratch list of (fd, action) work items per poll round
	epoll = None
	#for 3wsd - web server
	conns = {}		#fd -> client socket
	addrs = {}		#fd -> client address
	xcache = {}		#response cache
	xcache_stat = {}
	x_reqs = {}		#fd -> partially-read request state
	resume = {}		#fd -> large-transfer resume state
	gzip_shelf = None	#persistent gzip cache (shelve)
	gzip_shelf_lock = None
	_gzs = {}
	#for 3zsd - z server
	cb_conns = {}		#client fd <-> backend fd pairing
	k_conns = {}
	zconns = {}		#backend fd -> backend socket
	zidles = {}		#backend host -> list of idle backend fds
	zconns_stat = {}
	zaddrs = {}
	zhosts = {}
	zcache = {}
	zcache_stat = {}
	z_reqs = {}
	z_reqs_stat = {}
	z_reqs_cnt = {}
	z_resp_header = {}
	z_path = {}
	c_path = {}
	xcache_shelf = None	#persistent xcache (shelve)
	xcache_shelf_lock = None
	#for 3wdd - warp drive
	zsess = {} #session <-> (tun, udt socket)
	ztuns = {} #tunnels & fd
	s_tuns = {} #sessions connected with tuns
	s_udts = {} #sessions connected with udts
	upolls = [] #the udt socket & tun epolls
	udt_send_buf = {}
	udt_thread_limit = 0
	udt_conns_cnt = {}	#worker id -> shared connection counter
	udt_conns_cnt_lock = None
	udt_conn_port = None	#shared "next worker port" value for redirects
	udt_conn_port_lock = None
	def __init__(self, server_address, RequestHandlerClass, backlog=1000, spawn=None,
				tcp=True, udt=False, recv_buf_size=16384, send_buf_size=65536):
		"""Pick socket buffer sizes per transport (tcp / udt / udp) and set up
		shared multiprocessing counters for the udt (3wdd) mode."""
		if tcp:
			self.recv_buf_size = recv_buf_size
			self.send_buf_size = send_buf_size
		elif udt:
			#this buffer size is about 100Mbps bandwidth between CN&US(Bandwidth*RTT/8)
			self.recv_buf_size = 2760000
			self.send_buf_size = 2760000
			#udt not work with reuse_port option
			self.reuse_port = False
			#self.udt_thread_limit = multiprocessing.cpu_count()
			self.udt_thread_limit = 1 #set thread_limit to 1 for the GIL
			self.udt_conns_cnt_lock = multiprocessing.Lock()
			#one shared counter per worker (index 0 = master)
			for i in xrange(Workers + 1):
				self.udt_conns_cnt[i] = multiprocessing.Value('i', 0, lock=self.udt_conns_cnt_lock)
			self.udt_conn_port_lock = multiprocessing.Lock()
			self.udt_conn_port = multiprocessing.Value('i', Port, lock=self.udt_conn_port_lock)
		else:
			#plain udp (dns) mode
			self.recv_buf_size = 65536
			self.send_buf_size = 65536
		self.tcp = tcp
		self.udt = udt
		StreamServer.__init__(self, server_address, RequestHandlerClass, backlog=backlog, spawn=spawn)
		#a single handler instance is reused for all connections in epoll mode
		self.handler = RequestHandlerClass(None, None, self)
		if hasattr(socket, "SO_REUSEPORT"):
			print("Good, this kernel has SO_REUSEPORT support")
def master_works(self):
if hasattr(socket, "SO_REUSEPORT") and self.reuse_port and not self.udt:
#close master process's listening socket, because it never serving requests
self.socket.close()
	def pre_start(self):
		#create & bind the listening socket before fork; tcp flag picks the family
		self.init_socket(tcp=self.tcp)
	def set_socket_buf(self):
		#apply the transport-dependent buffer sizes chosen in __init__()
		self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.recv_buf_size)
		self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.send_buf_size)
	def init_socket(self, tcp=True):
		"""Create, configure and bind the listening socket.

		tcp=True  -> TCP stream socket (3wsd/3zsd/3fsd modes)
		tcp=False -> UDP datagram socket (3nsd dns) or UDT socket (3wdd),
		             depending on self.udt.
		"""
		if tcp:
			if self.server_mode == 'z_lbs' or self.server_mode == 'x_dfs':
				self.zlb_mode = True
			self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
		else:
			if not self.udt:
				#i_dns mode
				socket.socket = gevent.socket.socket
				self.socket = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
		if not self.udt:
			if self.reuse_addr == 1:
				self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
			if hasattr(socket, "SO_REUSEPORT") and self.reuse_port:
				#good, this kernel has SO_REUSEPORT support
				self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
			self.set_socket_buf()
			if tcp:
				#don't wake the server until request data actually arrives
				self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_DEFER_ACCEPT, 1)
			self.socket.bind(self.address)
			self.socket.setblocking(0)
			self.socket_fileno = self.socket.fileno()
			if tcp:
				self.socket.listen(self.backlog)
			else:
				#dns mode: the handler sends replies on the listening socket itself
				self.handler.sock = self.socket
		else:
			if self._worker_id == 0 and self.workers > 0:
				#delay socket init for udt worker
				self.socket = None
				return
			udt4.startup()
			if self.handler.wdd_mode == 'server' or self.handler.wdd_mode == 'hybird':
				self.socket = udt.UdtSocket()
				#if self.reuse_addr == 1:
				#	self.socket.setsockopt(udt4.UDT_REUSEADDR, True) #default on
				#self.socket.setsockopt(udt4.UDT_MSS, 9000) #default 1500
				self.socket.setsockopt(udt4.UDT_RCVBUF, self.recv_buf_size) #default 10MB
				self.socket.setsockopt(udt4.UDT_SNDBUF, self.send_buf_size) #default 10MB
				if self.workers > 0:
					#udt has no SO_REUSEPORT: each worker listens on base port + id - 1
					_ip, _port = self.address
					_port = _port + self._worker_id - 1
					self.address = (_ip, _port)
				self.socket.bind(self.address)
				self.socket.listen(self.backlog)
			else:
				#pure client mode: no listening socket at all
				self.socket = None
def cleanupall(self):
if self.tcp:
self.epoll.unregister(self.socket.fileno())
self.epoll.close()
self.conns.clear()
self.addrs.clear()
self.xcache.clear()
self.x_reqs.clear()
	def cleanup(self, fd):
		"""Forget one client connection: unregister from epoll, close the
		socket and drop all per-fd state. The bare excepts are deliberate -
		the fd may already be gone. NOTE: the three pops share one try
		block, so if fd is missing from conns the later pops are skipped."""
		try:
			self.epoll.unregister(fd)
			self.conns[fd].close()
		except:
			pass
		try:
			self.conns.pop(fd)
			self.addrs.pop(fd)
			self.x_reqs.pop(fd)
		except:
			pass
	def cleanz(self, fd):
		"""Forget one backend (z-server) connection and unlink it from its
		paired client fd; all per-fd proxy state is dropped. Bare except is
		deliberate best-effort cleanup."""
		try:
			self.epoll.unregister(fd)
		except IOError as e:
			pass
		try:
			#clean c-z pair in cb_conns
			if self.cb_conns.get(fd, None):
				self.cb_conns[self.cb_conns[fd][1]] = None
				self.cb_conns[fd] = None
			self.keep_connection = 0
			#self.zconns[fd].close() #will be closed by clean()
			self.cb_conns.pop(fd, None)
			self.zconns.pop(fd, None)
			self.zconns_stat.pop(fd, None)
			self.zaddrs.pop(fd, None)
			self.z_reqs.pop(fd, None)
			self.z_reqs_cnt.pop(fd, None)
			self.z_reqs_stat.pop(fd, None)
			self.zhosts.pop(fd, None)
			self.zcache.pop(fd, None)
			self.zcache_stat.pop(fd, None)
			self.z_resp_header.pop(fd, None)
			self.z_path.pop(fd, None)
		except:
			pass
def clean_zidles(self, fd):
for _host, _idle_list in self.zidles.iteritems():
if fd in _idle_list:
self.zidles[_host].remove(fd)
	def cleanc(self, fd):
		"""Forget one client-side (front) connection in proxy mode and
		unlink its backend pairing. Bare except is deliberate."""
		try:
			self.epoll.unregister(fd)
		except IOError as e:
			pass
		try:
			if self.cb_conns.get(fd, None):
				self.cb_conns[self.cb_conns[fd][1]] = None
				self.cb_conns[fd] = None
			self.keep_connection = 0
			self.c_path.pop(fd, None)
			self.k_conns.pop(fd, None)
		except:
			pass
def o_stack(self, o=None):
if o == None:
return
elif o == "resume":
print self.resume
elif o == "zconns":
print self.zconns
elif o == "cb_conns":
print self.cb_conns
elif o == "xcache":
print self.xcache
elif o == "zcache":
print self.zcache
elif o == "zcache_stat":
print self.zcache_stat
elif o == "z_path":
print self.z_path
elif o == "z_resp_header":
print self.z_resp_header
	def o_udts(self):
		#debug helper: dump all 3wdd (warp drive) state tables at once
		print "-------------------------------------"
		print "conns", self.conns
		print "zsess", self.zsess
		print "ztuns", self.ztuns
		print "s_tuns", self.s_tuns
		print "s_udts", self.s_udts
		print "upolls", self.upolls
		print "udt_send_buf", self.udt_send_buf
	def o_mem(self):
		#memory-debug hook, disabled by default (needs the meliae 'scanner');
		#kept as a no-op so callers don't have to change
		#if os.path.exists('/tmp/3wdd_dumpit'):
		#	scanner.dump_all_objects('/tmp/3wdd_dump.txt')
		pass
	def check_3ws(self):
		"""Maintenance loop for 3wsd: bound the in-memory gzip cache and
		periodically sync the gzip shelf to disk. Runs forever; started via
		threading.Timer in serve_3ws(). Errors are swallowed on purpose -
		maintenance must never kill the server."""
		while 1:
			try:
				if self.handler.gzip_on and self.gzip_shelf and self.gzip_shelf.cache:
					_over = len(self.gzip_shelf.cache) - 1000
					_delx = int(_over/8)
					if _over > 1000:
						#way over budget: drop the whole in-memory cache
						self.gzip_shelf.cache.clear()
					elif _over > 0 and _delx > 9:
						#slightly over budget: evict about an eighth of the entries
						while _delx > 0:
							self.gzip_shelf.cache.popitem()
							_delx -= 1
					if hasattr(self.gzip_shelf.dict, 'sync'):
						#self.gzip_shelf.dict is an anydbm object, mostly gdbm or bsddb
						with self.gzip_shelf_lock:
							self.gzip_shelf.dict.sync()
				#print self.xcache
			except:
				pass
			time.sleep(15)
def get_tcp_stat(self, sock):
_fmt = "B"*7+"I"*21
_x = struct.unpack(_fmt, sock.getsockopt(socket.IPPROTO_TCP, socket.TCP_INFO, 92))
return _x[0]
	def check_lbs(self):
		"""Maintenance loop for 3zsd/3fsd: bound the in-memory xcache shelf,
		sync it to disk, and reap dead backend/client connections. Runs
		forever; started via threading.Timer in serve_lbs(). Errors are
		swallowed on purpose."""
		while 1:
			try:
				#maintain mem caches & shelfies
				if self.handler.z_xcache_shelf and self.xcache_shelf and self.xcache_shelf.cache:
					_over = len(self.xcache_shelf.cache) - self.handler.z_cache_size
					_delx = int(_over/8)
					if _over > self.handler.z_cache_size:
						#way over budget: drop the whole in-memory cache
						self.xcache_shelf.cache.clear()
					elif _over > 0 and _delx > 9:
						#slightly over budget: evict about an eighth of the entries
						while _delx > 0:
							self.xcache_shelf.cache.popitem()
							_delx -= 1
					if hasattr(self.xcache_shelf.dict, 'sync'):
						#self.xcache_shelf.dict is an anydbm object, mostly gdbm or bsddb
						with self.xcache_shelf_lock:
							self.xcache_shelf.dict.sync()
				#maintain backend conns
				#print "------------------------------------------"
				#print "conns:", self.conns
				#print "zconns:", self.zconns
				#print "cb_conns:", self.cb_conns
				#print "zidles:", self.zidles
				_keys = self.zconns.keys()
				if _keys:
					for _f in _keys:
						if self.zconns[_f].fileno() == -1 or self.get_tcp_stat(self.zconns[_f]) != 1:
							#connection not in ESTABLISHED stat, being closed
							self.clean_zidles(_f)
							self.cleanz(_f)
				_keys = self.conns.keys()
				if _keys:
					for _f in _keys:
						if self.conns[_f].fileno() == -1:
							#client connection being closed
							self.conns.pop(_f)
							self.cb_conns.pop(_f)
			except:
				pass
			time.sleep(15)
	def handle_event(self, events):
		"""Translate epoll events into (fd, action) work items and hand the
		whole batch to the handler in one call.
		Action codes: 0 = client read, 1 = client write,
		              2 = backend read, 3 = backend write."""
		for f, ev in events:
			if f == self.socket_fileno:
				#new connection..
				try:
					#multi workers to accept same connection, only one can get it
					conn, addr = self.socket.accept()
				except:
					continue
				c = conn.fileno()
				#conn.setblocking(0)
				#self.epoll.register(c, select.EPOLLIN | select.EPOLLET)
				self.epoll.register(c, select.EPOLLIN)
				self.conns[c] = conn
				self.addrs[c] = addr
			elif ev & select.EPOLLIN:
				#read event..
				if f not in self.zconns:
					self._fds.append((f, 0))
				else:
					self._fds.append((f, 2))
			elif ev & select.EPOLLOUT:
				#write event..
				if f not in self.zconns:
					self._fds.append((f, 1))
				else:
					self._fds.append((f, 3))
			elif ev & select.EPOLLHUP:
				#connection closed..
				self.cleanup(f)
		if len(self._fds) > 0:
			#we have works to do, call handler
			self.handler(self._fds)
			del self._fds[:]
def if_reinit_socket(self):
if hasattr(socket, "SO_REUSEPORT") and self.reuse_port and self.workers > 0:
self.init_socket(tcp=self.tcp)
elif self.udt and self.workers > 0:
self.init_socket(tcp=self.tcp)
	def serve_forever(self):
		"""Generic epoll event loop (3wsd basic mode)."""
		#see if reinit socket is neccessary
		self.if_reinit_socket()
		#single master gets an epoll, multi workers get multi epolls
		self.epoll = select.epoll()
		#register fd and events to poll
		self.epoll.register(self.socket.fileno(), select.EPOLLIN)
		try:
			while 1:
				self.handle_event(self.epoll.poll())
		finally:
			self.cleanupall()
			self.socket.close()
	def serve_dns(self):
		"""Event loop for 3nsd dns mode: epoll + gevent cooperative yield,
		with a separate greenlet probing backend ip health."""
		self.if_reinit_socket()
		self.epoll = select.epoll()
		self.epoll.register(self.socket.fileno(), select.EPOLLIN | select.EPOLLET)
		try:
			gevent.spawn(self.handler.probe_ips)	#a separate greenlet to perform ip stat checking
			while 1:
				gevent.sleep(1e-20)	#a smallest float, works both at v0.13.8 and v1.0.x
				_events = self.epoll.poll(10)	#long-timeout helping cpu% lower
				if len(_events) > 0:
					self.handler(_events)
		finally:
			self.cleanupall()
			self.socket.close()
	def serve_lbs(self):
		"""Event loop for 3zsd/3fsd proxy modes; check_lbs() maintenance
		runs in a timer-started background thread."""
		self.if_reinit_socket()
		self.epoll = select.epoll()
		self.epoll.register(self.socket.fileno(), select.EPOLLIN)
		try:
			#check_lbs loops forever once started, so a one-shot Timer suffices
			t = threading.Timer(15, self.check_lbs)
			t.start()
			while 1:
				self.handle_event(self.epoll.poll())
		finally:
			self.cleanupall()
			self.socket.close()
	def serve_wds(self): #www
		"""Main loop for 3wdd warp-drive mode: dial client tunnels (spread
		over workers round-robin), start the check_3wd maintenance thread,
		and - in server/hybird mode - accept incoming udt connections."""
		try:
			#pre-allocate one epoll slot per udt worker thread
			s = 0
			while s < self.udt_thread_limit:
				self.upolls.append(None)
				s += 1
			if self.handler.wdd_mode == 'client' or self.handler.wdd_mode == 'hybird':
				if 1:
					_idx = -1
					for _session in self.handler.wdd_dial:
						_idx += 1
						#sessions are distributed over workers by index
						if self.workers > 1 and (_idx % self.workers) + 1 != self._worker_id:
							continue
						if _session in self.handler.client_session:
							t = threading.Thread(target=self.handler.connect_udt_server, args=(_session,))
							t.daemon = True
							t.start()
			t = threading.Thread(target=self.check_3wd, args=())
			t.daemon = True
			t.start()
			if self.handler.wdd_mode == 'server' or self.handler.wdd_mode == 'hybird':
				self.if_reinit_socket()
				while 1:
					#accept incoming udt connection
					_conn, _addr = self.socket.accept()
					#launch setting up udt_tunnel
					t = threading.Thread(target=self.handler.setup_udt_connection, args=(_conn,_addr,))
					t.start()
			if self.handler.wdd_mode == 'client':
				#pure client: nothing to accept, just keep the process alive
				while 1:
					time.sleep(1000)
		except:
			raise
		finally:
			if self.socket:
				self.socket.close()
def serve_3ws(self):
self.if_reinit_socket()
self.epoll = select.epoll()
self.epoll.register(self.socket.fileno(), select.EPOLLIN)
try:
t = threading.Timer(15, self.check_3ws)
t.start()
while 1:
self.handle_event(self.epoll.poll())
finally:
self.cleanupall()
self.socket.close()
	def handle_event_udt_tun(self, index):
		"""Event loop for one udt epoll slot (non-blocking 3wdd io_mode):
		wait for readable udt sockets / tun fds and dispatch to the handler.
		On any error the slot is cleared so check_3wd() can rebuild it."""
		try:
			while 1:
				self.handler.handle_udt_tun_events(self.upolls[index].wait(True, True, -1, True))
		except:
			if self.upolls[index]:
				self.upolls[index] = None
			raise
	def forward_tun_udt(self, _tun, _usock, _encrypt_mode, _compress, _session): #uuu
		"""Blocking forwarder thread: read packets from the tun device,
		optionally compress and encrypt them, prefix a 2-byte length and
		send over the udt socket.
		NOTE(review): _compress is passed through eval() to pick the zlib/lzo
		module - it must only ever come from trusted local config."""
		#only compress packets that leave headroom below the tun MTU
		_zip = lambda s : eval(_compress).compress(s) if _compress and len(s) < _tun.mtu - 100 else s
		#length-prefix framing: 2-byte big-endian size + payload
		_repack = lambda s : ''.join([struct.pack('!H', len(s)), s])
		_forward_it=lambda s : _usock.send(_repack(self.handler.encrypt_package(_zip(s), _encrypt_mode, _session))) if _encrypt_mode else _usock.send(_repack(_zip(s)))
		try:
			while 1:
				r = [_tun]; w = []; x = []; _b = None
				#6s select timeout doubles as a liveness check for the tun fd
				r, w, x = select.select(r, w, x, 6.0)
				if r:
					_forward_it(_tun.read(_tun.mtu))
				else:
					if _tun.fileno() == -1:
						#tunnel down
						print "Thread forward_tun_udt of tunnel:", _session, "exit.."
						break
		except:
			print "Thread forward_tun_udt of tunnel", _session, "exit.."
			raise
	def forward_udt_tun(self, _tun, _usock, _encrypt_mode, _compress, _session): #ttt
		"""Blocking forwarder thread: receive length-prefixed packets from
		the udt socket, decrypt/decompress them, and write into the tun
		device. Compression is detected by magic bytes (zlib 0x789c,
		lzo 0xf00000) so uncompressed packets pass through untouched."""
		_magic = {'zlib':(''.join([chr(0x78), chr(0x9c)]), 2), 'lzo':(''.join([chr(0xf0), chr(0x0), chr(0x0)]), 3)}
		_unzip = lambda s : eval(_compress).decompress(s) if _compress and _magic[_compress][0] in s[:_magic[_compress][1]] else s
		_forward_it = lambda s : _tun.write(_unzip(self.handler.decrypt_package(s, _encrypt_mode, _session))) if _encrypt_mode else _tun.write(_unzip(s))
		try:
			while 1:
				#first recv(2) reads the length prefix, second reads the payload
				_forward_it(_usock.recv(struct.unpack('!H', _usock.recv(2))[0]))
		except IOError as e:
			if e.errno == errno.EINVAL:
				#illegal data, maybe tunnel peer shutdown suddenly
				_usock.close()
			print "Thread forward_udt_tun of tunnel", _session, "exit.."
			raise
		except:
			print "Thread forward_udt_tun of tunnel", _session, "exit.."
			raise
def forward_udt_relay(self, _usock, _session):
_repack = lambda s : ''.join([struct.pack('!H', len(s)), s])
try:
_from, _to = self.handler.udt_relay[_session]
_relay_session = None
_first = True
while not _relay_session:
if _session == _from:
self.handler.udt_relay_thread_stat[_from] = True
_relay_session = self.zsess.get(_to, None)
else:
self.handler.udt_relay_thread_stat[_to] = True
_relay_session = self.zsess.get(_from, None)
if _first:
_first = False
else:
time.sleep(5)
else:
_to_usock = _relay_session[1]
while 1:
_to_usock.send(_repack(_usock.recv(struct.unpack('!H', _usock.recv(2))[0])))
except:
if _session == _from:
self.handler.udt_relay_thread_stat[_from] = False
else:
self.handler.udt_relay_thread_stat[_to] = False
print "Thread forward_udt_relay of tunnel", _session, "exit.."
raise
	def check_3wd(self): #333
		"""3wdd maintenance loop, one iteration every 20s:
		- client/hybird: redial dropped client tunnels, record per-tunnel RTT
		- server/hybird: reap dead server-side tunnels, record RTT
		- rebalance new udt connections across workers
		- relaunch dead relay threads
		- routing_metric: rewrite kernel route metrics from tunnel RTTs
		Runs forever in a daemon thread started by serve_wds()."""
		try:
			while 1:
				time.sleep(20)
				_tun, _usock, _addr = [None, None, None]
				if (self.handler.wdd_mode == 'client' or self.handler.wdd_mode == 'hybird'):
					_idx = -1
					for _session in self.handler.wdd_dial:
						_idx += 1
						#only handle the sessions assigned to this worker
						if self.workers > 1 and (_idx % self.workers) + 1 != self._worker_id:
							continue
						if _session in self.handler.client_session:
							_redial = False
							_tun, _usock, _addr = self.zsess.get(_session, (None, None, None))
							if _usock:
								#{INIT = 1, OPENED, LISTENING, CONNECTING, CONNECTED, BROKEN, CLOSING, CLOSED, NONEXIST}
								if _usock.getsockstate() > 5: #connection gone
									self.handler.destroy_tunnel(_session)
									_redial = True
								else:
									self.handler.tun_rtt[_session]= _usock.perfmon().msRTT
							else:
								#must connect failed before
								_redial = True
							if _redial:
								t = threading.Thread(target=self.handler.connect_udt_server, args=(_session,))
								t.daemon = True
								t.start()
				if self.handler.wdd_mode == 'server' or self.handler.wdd_mode == 'hybird':
					for _session in self.handler.connected.keys():
						if _session not in self.handler.wdd_dial or _session not in self.handler.client_session:
							#server connection
							_tun, _usock, _addr = self.zsess.get(_session, (None, None, None))
							if _usock:
								if _usock.getsockstate() > 5: #connection gone
									self.handler.destroy_tunnel(_session)
								else:
									self.handler.tun_rtt[_session]= _usock.perfmon().msRTT
									if os.path.exists('/tmp/usock_stat'):
										udt4.dump_perfmon(_usock.perfmon())
				if self.workers > 1:
					#steer the next incoming connection to the least-loaded worker
					self.wdd_idle_worker(9000)
				for _session in self.handler.udt_relay:
					if _session in self.handler.udt_relay_thread_stat and not self.handler.udt_relay_thread_stat[_session]:
						#relaunch the udt relay thread, due to one side may be downed before
						_tun, _usock, _addr = self.zsess.get(_session, (None, None, None))
						if _usock:
							print "Re-launching relay tunnel", _session
							if self.handler.io_mode == self.handler.IO_NONBLOCK:
								#non-blocking mode: register with the per-thread udt epoll slot
								_n = _usock.UDTSOCKET.UDTSOCKET % self.udt_thread_limit
								if self.upolls[_n] is None:
									self.upolls[_n] = udt.Epoll()
									self.upolls[_n].add_usock(_usock, udt4.UDT_EPOLL_IN)
									t = threading.Thread(target=self.handle_event_udt_tun, args=(_n,))
									t.daemon = True
									t.start()
								else:
									self.upolls[_n].add_usock(_usock, udt4.UDT_EPOLL_IN)
								self.handler.udt_relay_thread_stat[_session] = True
							else:
								#blocking mode: dedicated relay thread
								t = threading.Thread(target=self.forward_udt_relay,args=(_usock,_session,))
								t.daemon = True
								t.start()
				if self.handler.routing_metric:
					with open(os.devnull, 'w') as devnull:
						for _route in self.handler.route_metric:
							if len(self.handler.route_metric[_route]) > 1:
								#multiple tunnels can carry this route: pick the one
								#with the lowest (fixed or measured) metric
								_target_session = None
								_target_session_rtt = -1
								_fixed_metric = 0 #0 for dynamic, >0 for fixed
								for _session in self.handler.route_metric[_route]:
									if _route in self.handler.route_metric_fixed:
										if _session in self.handler.route_metric_fixed[_route]:
											_fixed_metric = self.handler.route_metric_fixed[_route][_session]
										else:
											_fixed_metric = 0
									if _session in self.handler.tun_rtt:
										_rtt_old=self.handler.route_metric[_route][_session]
										#metric = RTT in tenths of ms, minimum 1
										_rtt = self.handler.route_metric[_route][_session] = int(self.handler.tun_rtt[_session] * 10)
										if _rtt == 0:
											_rtt = self.handler.route_metric[_route][_session] = 1
										if _target_session_rtt == -1:
											if _fixed_metric > 0:
												_target_session_rtt = _fixed_metric
											else:
												_target_session_rtt = _rtt
											_target_session = _session
										else:
											if _fixed_metric > 0:
												if _target_session_rtt > _fixed_metric:
													_target_session_rtt = _fixed_metric
													_target_session = _session
											elif _target_session_rtt > _rtt:
												_target_session_rtt = _rtt
												_target_session = _session
										subprocess.call(['ip', 'route', 'del', _route, 'metric', str(_rtt_old), 'dev', ''.join([_session, '.', str(self._worker_id)])], stderr=devnull)
										subprocess.call(['ip', 'route', 'add', _route, 'metric', str(_rtt), 'dev', ''.join([_session, '.', str(self._worker_id)])], stderr=devnull)
								if _target_session:
									#change the default outgoing path(dev) for a route
									subprocess.call(['ip', 'route', 'replace', _route, 'metric', str('0'), 'dev', ''.join([_target_session, '.', str(self._worker_id)])], stderr=devnull)
									_rtch_script = self.handler.rtch_script.get(_target_session, None)
									if _rtch_script:
										subprocess.call([_rtch_script, ''.join([_target_session, '.', str(self._worker_id)])], stderr=devnull)
								else:
									#only one path, no need to change
									continue
				del _tun
				del _usock
				del _addr
				self.o_mem()
		except:
			raise
def wdd_idle_worker(self, port): #iii
conns = -1
worker_id = -1
#assume that more than 1 worker
for _worker_id, _conns in self.udt_conns_cnt.items():
if _worker_id == 0: continue
if conns == -1:
conns = _conns.value
worker_id = _worker_id
else:
if conns > _conns.value:
#locate the least connection worker
conns = _conns.value
worker_id = _worker_id
if self.udt_conns_cnt[port - Port + 1].value > conns:
#orig worker has more conns than the least one, redirect to new worker
#print "to new server", conns, worker_id
self.udt_conn_port.value = worker_id + Port -1
else:
#no need to redirect
#print "keep server", conns, worker_id
self.udt_conn_port.value = port
return self.udt_conn_port.value
class _xHandler:
	"""HTTP request handler for 3wsd web-server mode; also the base class
	for the proxy/dfs handlers. Holds protocol constants, parse/handle
	result codes and the 3wsd configuration defaults."""
	http_version_11 = "HTTP/1.1"
	http_version_10 = "HTTP/1.0"
	HTTP11 = 1
	HTTP10 = 0
	HTTP_OK = 200
	HTTP_NOT_MODIFIED = 304
	HTTP_BAD_REQUEST = 400
	HTTP_FORBITDDEN = 403
	HTTP_NOT_FOUND = 404
	HTTP_SERVER_ERROR = 500
	HTTP_SERVER_RESET = 502
	HTTP_SERVER_BUSY = 503
	HTTP_SERVER_TIMEOUT = 504
	#x_parse() result codes
	PARSE_OK = 0
	PARSE_ERROR = -1
	PARSE_AGAIN = 1
	PARSE_MORE = 2
	PARSE_MORE2 = 3
	#end-of-headers markers (bare-LF and CRLF variants)
	EOL1 = b'\n\n'
	EOL2 = b'\n\r\n'
	#overall request handling result codes
	xR_OK = 0
	xR_PARSE_AGAIN = 1
	xR_ERR_PARSE = -1
	xR_ERR_HANDLE = -2
	xR_ERR_403 = -3
	xR_ERR_404 = -4
	xR_ERR_5xx = -5
	xResult = 0
	weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
	monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
		     'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
	index_files = ["index.html", "index.htm"]
	gzip_types = []
	gzip_types_default = ["html", "htm", "js", "css", "txt", "xml"]
	mimetype = {'html': 'text/html', 'htm': 'text/html', 'txt': 'text/plain',
		'css': 'text/css', 'xml': 'text/xml', 'js': 'application/x-javascript',
		'png': 'image/png', 'jpg': 'image/jpeg', 'gif': 'image/gif', 'bin': 'application/octet-stream'}
	web_config = None		#ConfigParser instance, filled by init_config()
	web_config_parsed = False	#guard so 3xsd.conf is parsed only once
	_r = b'' #request headers
	xcache_ttl = 5	#normally, 5-10 seconds internal cache time of items
	xcache_size = 1000000	#1 million items, about 1/3GB(333MB) mem used
	x_shelf_size = 1000000	#1 million items in disk, about 30GB disk size with average item size 30KB
	gzip_on = False
	gzip_size = 1000	#default >1KB file size can be gzipped
	gzip_max_size = 10000000	#default <=10MB file size can be gzipped
	#size-suffix and time-suffix multipliers used by init_config()
	multip = {'k':1000, 'K':1000, 'm':1000000, 'M':1000000, 'g':1000000000, 'G':1000000000}
	writers = []		#client IPs allowed to PUT/DELETE
	multis = {'s':1, 'm':60, 'h':3600, 'd':86400, 'w':604800, 'y':31536000}
	expire_types = {}	#file extension -> Expires TTL in seconds
	def __init__(self, conn, client_address, server, native_epoll=True,
			gevent_stream=False, recv_buf_size=16384, send_buf_size=65536, pipelining=False):
		"""Set up per-handler state. In gevent mode (gevent_stream=True) the
		whole request is handled synchronously right here; in native epoll
		mode the instance is reused and driven through __call__()."""
		self.server = server
		self._native_epoll = native_epoll
		self._gevent_stream = gevent_stream
		if native_epoll:
			#epoll mode: buffer sizes were already decided by the server
			self.recv_buf_size = self.server.recv_buf_size
			self.send_buf_size = self.server.send_buf_size
		else:
			self.recv_buf_size = recv_buf_size
			self.send_buf_size = send_buf_size
		self.server_version = ''.join([_Name, '/', __version__])
		self.server_pipelining = pipelining
		self.in_headers = {}
		self.out_headers = {}
		self.homedir = Homedir
		#order matters: init_config() may override defaults init_handler() uses
		self.init_config()
		self.init_handler(conn, client_address)
		if self._gevent_stream:
			#gevent path: patch in the greenlet-friendly sendfile
			#(gevent_sendfile is defined elsewhere in this module)
			sendfile.sendfile = gevent_sendfile
			self.handle_request()
			self.clean()
def init_config(self):
if not self.web_config_parsed:
try:
self.web_config = ConfigParser.ConfigParser()
if not self.web_config.read('3xsd.conf'):
self.web_config.read('/etc/3xsd.conf')
for name, value in self.web_config.items('3wsd'):
if name == 'root':
if value:
self.homedir = value
elif name == 'index':
self.index_files = []
for item in value.split(','):
if item:
self.index_files.append(item)
if not self.index_files:
self.index_files = ["index.html", "index.htm"]
elif name == 'mime_types':
for item in value.split(','):
if item:
k, v = item.split(':', 1)
if k and v:
self.mimetype[k] = v
elif name == 'gzip':
if value.lower() == "on":
self.gzip_on = True
if not self.server.gzip_shelf:
self.server.gzip_shelf = shelve.open('shelf.gzip', writeback=True)
if not self.server.gzip_shelf_lock:
self.server.gzip_shelf_lock = multiprocessing.Lock()
elif name == 'gzip_size':
if value[-1] in ['k','m','g','K','M','G']:
_multip = self.multip[value[-1]]
self.gzip_size = int(value[:-1])*_multip
else:
self.gzip_size = int(value)
elif name == 'gzip_max_size':
if value[-1] in ['k','m','g','K','M','G']:
_multip = self.multip[value[-1]]
self.gzip_max_size = int(value[:-1])*_multip
else:
self.gzip_max_size = int(value)
elif name == 'gzip_types':
self.gzip_types = copy.copy(self.gzip_types_default)
for item in value.split(','):
if item:
if item not in self.gzip_types:
if item[0] == '-':
self.gzip_types.remove(item[1:])
else:
self.gzip_types.append(item)
elif name == 'writers':
self.writers = []
if value:
a = value.split(',')
for item in a:
if item.find('-') < 0:
self.writers.append(item)
else:
_ip = item.split('.')
_last = _ip[3].split('-')
for i in range(int(_last[0]), int(_last[1])+1):
ip = '.'.join([_ip[0], _ip[1], _ip[2], str(i)])
self.writers.append(ip)
elif name == 'xcache_ttl':
self.xcache_ttl = int(value)
elif name == 'server_pipelining':
if value.lower() == "on":
self.server_pipelining = True
elif name == 'expire_types':
self.expire_types = {}
for item in value.split(','):
if item:
k, v = item.split(':', 1)
if k and v:
if v[-1] in ['s','m','h','d','w','y']:
_multis = self.multis[v[-1]]
self.expire_types[k] = int(v[:-1])*_multis
else:
self.expire_types[k] = int(v)
except:
raise
web_config_parsed = True
def init_handler(self, conn, client_address, rw_mode=0):
self.addr = client_address
self.sock = conn
if self.sock:
self.sock_fileno = conn.fileno()
self.out_body_file = self.out_body_mmap = self._c = self.accept_encoding = None
self.out_body_size = self.out_body_file_lmt = self.cmd_get = self.cmd_head = self.cmd_put = self.cmd_delete = self.if_modified_since = self.keep_connection = self.xResult = 0
self.has_resp_body = self.xcache_hit = False
self.canbe_gzipped = self.gzip_transfer = self.gzip_chunked = False
self.gzip_finished = self.next_request = True
#self.vhost_mode = False
self.transfer_completed = 1
self.command = self.path = self.resp_line = self.resp_msg = self.out_head_s = self._r = self._rb = self.hostname = self.xcache_key = b''
self.c_http_ver = self.s_http_ver = self.r_http_ver = 1
self.resp_code = self.HTTP_OK
self.resume_transfer = rw_mode
	def __call__(self, fds):
		"""Native-epoll entry point: process a batch of (fd, rw_mode) work
		items in one call. rw_mode 0 = readable (parse & answer request),
		rw_mode 1 = writable (resume a large transfer or pipelining)."""
		#should be called by native epoll server, can handle multi requests at one handler call, like: do 10 read events at a time with 10 connections
		for f, rw_mode in fds: #ccc
			if rw_mode == 0:
				self.init_handler(self.server.conns[f], self.server.addrs[f], rw_mode)
				parse_stat = self.x_parse()
				if parse_stat == self.PARSE_OK or parse_stat == self.PARSE_MORE2:
					if self.cmd_get == 1 or self.cmd_head == 1:
						self.x_GET()
						self.x_response()
					elif self.cmd_put == 1 or self.cmd_delete == 1:
						self.x_PUT()
						self.x_response()
					else:
						self.xResult = self.xR_ERR_HANDLE
				elif self.server_pipelining and parse_stat == self.PARSE_MORE:
					#pipelined request: answer it, then watch for write readiness
					if self.cmd_get == 1 or self.cmd_head == 1:
						self.x_GET()
						self.x_response()
					elif self.cmd_put == 1 or self.cmd_delete == 1:
						self.x_PUT()
						self.x_response()
					else:
						self.xResult = self.xR_ERR_HANDLE
					self.server.epoll.modify(f, select.EPOLLIN|select.EPOLLOUT)
				elif parse_stat == self.PARSE_AGAIN:
					#headers incomplete: wait for more data on this fd
					self.xResult = self.xR_PARSE_AGAIN
					continue
				else:
					self.xResult = self.xR_ERR_PARSE
			elif rw_mode == 1:
				#continue sending a large file or pipeling
				if f in self.server.resume:
					#large file transfering
					self.init_handler(self.server.conns[f], self.server.addrs[f], rw_mode)
					self.x_response()
				elif self.server_pipelining and f in self.server.x_reqs:
					if self.server.x_reqs[f][1] == 1:
						#add pipelining request
						fds.append((f, 0))
					else:
						#maybe large PUT request recving
						try:
							self.server.epoll.modify(f, select.EPOLLIN)
						except:
							pass
					self.keep_connection = 1
				elif self.server_pipelining and f not in self.server.x_reqs:
					self.transfer_completed = 0 #not do clean()
					try:
						self.server.epoll.modify(f, select.EPOLLIN)
					except:
						pass
				else:
					self.xResult = self.xR_ERR_PARSE
			else:
				self.xResult = self.xR_ERR_PARSE
			self.clean()
def check_connection(self, c_http_ver, check_ims=True, gen_xcache_key=True):
if c_http_ver == "HTTP/1.1":
self.c_http_ver = 1
if self.s_http_ver == self.HTTP11:
self.r_http_ver = 1
else:
self.r_http_ver = 0
else:
self.c_http_ver = 0
self.r_http_ver = 0
if self.in_headers.get("Connection", "null").lower() == "keep-alive":
self.keep_connection = 1
#self.r_http_ver = 1
else:
self.keep_connection = 0
if self.server.zlb_mode:
if gen_xcache_key:
self.hostname = self.in_headers.get("Host", "127.0.0.1").split(':',1)[0]
self.xcache_key = ''.join([self.hostname, self.path])
else:
self.xcache_key = self.path
if check_ims:
if self.in_headers.get("If-Modified-Since"):
self.if_modified_since = 1
else:
self.if_modified_since = 0
def date_time_string(self, ts=None):
if ts is None: ts = time.time()
year, month, day, hh, mm, ss, wd, y, z = time.gmtime(ts)
s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
self.weekdayname[wd],
day, self.monthname[month], year,
hh, mm, ss)
return s
	def set_out_header(self, key, value):
		#queue a response header to be emitted later by x_response()
		self.out_headers[key] = value
def set_resp_code(self, code):
if self.r_http_ver == self.HTTP11:
prefix = "HTTP/1.1"
else:
prefix = "HTTP/1.0"
self.has_resp_body = False
self.resp_code = code
if code == 200:
self.resp_msg = "OK"
self.has_resp_body = True
elif code == 201:
self.resp_msg = "Created"
elif code == 204:
self.resp_msg = "No Content"
elif code == 301:
self.resp_msg = "Move Permanent"
elif code == 302:
self.resp_msg = "Move Temporary"
elif code == 304:
self.resp_msg = "Not Modified"
elif code == 404:
self.resp_msg = "Not Found"
elif code == 403:
self.resp_msg = "Forbidden"
elif code == 500:
self.resp_msg = "Server Error"
elif code == 502:
self.resp_msg = "Server Reset"
elif code == 503:
self.resp_msg = "Service Unavailable"
elif code == 504:
self.resp_msg = "Server Timeout"
self.resp_line = ''.join([prefix, ' ', str(code), ' ', self.resp_msg])
def handle_request(self):
#called when running at gevent mode
while self.next_request:
if self.x_parse() == self.PARSE_OK:
if self.cmd_get == 1 or self.cmd_head == 1:
self.x_GET()
else:
return
self.x_response()
else:
self.xResult = self.xR_ERR_PARSE
self.next_request = False
if self.keep_connection == 0:
self.next_request = False
	def x_parse(self):
		"""Read and parse one HTTP request from the socket.

		Returns PARSE_OK, PARSE_ERROR, PARSE_AGAIN, PARSE_MORE or
		PARSE_MORE2.  Fills self.command / self.path / self.in_headers and,
		for PUT, the request body in self._rb.  Incomplete requests and
		extra pipelined data are parked in self.server.x_reqs, keyed by
		socket fileno, as one of:
		  [data, 0]            headers not fully received yet
		  [data, 1]            more pipelined request(s) still buffered
		  [data, -cl, eolpos]  PUT body incomplete, cl bytes expected
		"""
		#get the request headers, xxx
		_doing_pipelining = _last_pipelining = _header_parsed = False
		_eol_pos = -1
		_cl = 0
		_fn = self.sock_fileno
		if _fn not in self.server.x_reqs:
			#fresh state for this fd, nothing buffered
			r = self._r
			_xreqs_empty = _first_pipelining = True
		else:
			#resume from data buffered by a previous PARSE_AGAIN/PARSE_MORE
			r = self._r = self.server.x_reqs[_fn][0]
			_xreqs_empty = _first_pipelining = False
			if self.EOL2 in r or self.EOL1 in r:
				if self.server.x_reqs[_fn][1] < 0:
					#request body not finished recv, PUT method
					_header_parsed = True
					_first_pipelining = True
					_cl = 0 - self.server.x_reqs[_fn][1]
					_eol_pos = self.server.x_reqs[_fn][2]
				elif self.server.x_reqs[_fn][1] == 0:
					#not pipelining requests, must done before
					self.server.x_reqs.pop(_fn)
					r = self._r = b''
				else:
					#self.server.x_reqs[_fn][1] == 1
					_doing_pipelining = True
		while 1:
			try:
				if not _doing_pipelining:
					b = self.sock.recv(self.recv_buf_size)
					if b:
						#NOTE: str concat of recv chunks -- Python 2 semantics
						r = self._r = ''.join([r, b])
					else:
						#peer closed connection?
						return self.PARSE_ERROR
				if not _header_parsed:
					_eol_pos = r.find(self.EOL2)
				if _eol_pos > -1 and not _header_parsed:
					#headers mostly end with EOL2 "\n\r\n"
					if not self.server_pipelining:
						if not _xreqs_empty:
							#a big-headers request is all recieved
							self.server.x_reqs.pop(_fn, None)
					else:
						#for http pipelining
						if r.count(self.EOL2) > 1 or _eol_pos < len(r) - len(self.EOL2):
							#more data after this request's headers: park it
							c = r.split(self.EOL2, 1)
							r = c[0]
							self.server.x_reqs[_fn] = [c[1], 1]
							_doing_pipelining = True
						else:
							if not _xreqs_empty:
								#doing the last pipelining, clear x_reqs
								self.server.x_reqs.pop(_fn, None)
								_last_pipelining = True
					break
				elif _eol_pos > -1 and _header_parsed:
					#recving request body
					self._rb = r[_eol_pos+len(self.EOL2):]
					if _cl > len(self._rb):
						#not finished recv request body
						self.server.x_reqs[_fn] = [r, 0 - _cl, _eol_pos]
						return self.PARSE_AGAIN
					elif _cl < len(self._rb):
						#full request body recv, there are other data, maybe pipelining requests
						self.server.x_reqs[_fn] = [self._rb[_cl:], 1]
						_doing_pipelining = True
						break
					else:
						#whole body recv done
						self.server.x_reqs.pop(_fn , None)
						#vars should been re-setup, though header parsed before
						break
				else:
					#not finished all headers, save recv data
					self.server.x_reqs[_fn] = [r, 0]
					return self.PARSE_AGAIN
				#self.sock.setblocking(0)
			except socket.error as e:
				if e.errno == errno.EAGAIN:
					#no more request data, see if the whole request headers should be recieved
					if self.EOL2 in r or self.EOL1 in r:
						break
					else:
						#keeping connection, no request has been sent..
						#self.sock.setblocking(1)
						return self.PARSE_AGAIN
				else:
					#peer closed connection?
					return self.PARSE_ERROR
		#a = r.split("\r\n", 1)
		a = r[:_eol_pos].splitlines()
		#if not a[0]:
		if len(a) < 2:
			#illeagal request headers
			return self.PARSE_ERROR
		try:
			#"GET / HTTP/1.1"
			self.command, self.path, _c_http_ver = a[0].split()
		except:
			#illeagal command/path line
			return self.PARSE_ERROR
		if self.command == 'GET':
			self.cmd_get = 1
		elif self.command == 'HEAD':
			self.cmd_head = 1
		elif self.command == 'PUT':
			self.cmd_put = 1
		elif self.command == 'DELETE':
			self.cmd_delete = 1
		else:
			return self.PARSE_ERROR
		"""
		#all headers go to dict
		if self.cmd_put == 0:
			self.in_headers = dict((k, v) for k, v in (item.split(": ") for item in a[1].strip().split("\r\n")))
		else:
			self.in_headers = dict((k, v) for k, v in (item.split(": ") for item in a[1].split(self.EOL2, 1)[0].split("\r\n")))
		"""
		#"Key: value" header lines go into the in_headers dict
		for _line in a[1:]:
			_pos = _line.find(": ")
			if _pos > 0:
				self.in_headers[_line[:_pos]] = _line[_pos+2:]
		self.check_connection(_c_http_ver)
		if self.cmd_put == 1 and not _header_parsed:
			if _eol_pos < len(r) - len(self.EOL2):
				self._rb = r[_eol_pos+len(self.EOL2):]	#request body
			else:
				self._rb = b''
			_cl = int(self.in_headers.get("Content-Length", "0"))
			if _cl == 0:
				return self.PARSE_ERROR
			elif _cl > len(self._rb):
				#not finished recv request body
				self.server.x_reqs[_fn] = [r, 0 - _cl, _eol_pos]
				return self.PARSE_AGAIN
			elif _cl < len(self._rb):
				#full request body recv, there are other data, maybe pipelining requests
				self.server.x_reqs[_fn] = [self._rb[_cl:], 1]
				_doing_pipelining = True
			else:
				#full request body recv
				self.server.x_reqs.pop(_fn, None)
		if _fn not in self.server.x_reqs:
			#no more requests to process, last pipelining or non-pipelining
			return self.PARSE_OK
		else:
			if self.server.x_reqs[_fn][1] == 1:
				#doing pipelining, not last
				if _first_pipelining:
					#first piplining
					return self.PARSE_MORE
				else:
					#not first piplining
					return self.PARSE_MORE2
def x_GET(self):
if self.if_modified_since == 0 and self.xcache_key in self.server.xcache:
self._c = self.server.xcache.get(self.xcache_key)
ttl = self._c[0]
if ttl >= time.time():
#cache hit
self.out_head_s, self.out_body_file, self.out_body_size, self.out_body_file_lmt, self.out_body_mmap, self.canbe_gzipped = self._c[1:]
self.has_resp_body = True
if self.r_http_ver == self.HTTP11:
self.resp_line = 'HTTP/1.1 200 OK'
else:
self.resp_line = 'HTTP/1.0 200 OK'
if self.canbe_gzipped and self.c_http_ver == self.HTTP11 and self.r_http_ver == self.HTTP11:
for x in self.in_headers.get("Accept-Encoding", "null").replace(' ','').split(','):
if x == "gzip":
self.gzip_transfer = True
break
if self.gzip_transfer:
if self.xcache_key in self.server.gzip_shelf:
if self.server.gzip_shelf[self.xcache_key][4]==self.out_body_file_lmt:
self.out_head_s=self.server.gzip_shelf[self.xcache_key][1]
self.out_body_file=self.server.gzip_shelf[self.xcache_key][2]
self.out_body_size=self.server.gzip_shelf[self.xcache_key][3]
self.gzip_chunked=self.server.gzip_shelf[self.xcache_key][7]
self.xcache_hit = True
return
else:
self.xcache_hit = False
else:
self.xcache_hit = False
else:
self.xcache_hit = True
return
else:
self.xcache_hit = True
return
else:
#cache item expired
if isinstance(self._c[2], file) and not self._c[2].closed: #close the file opened, if not closed
self._c[2].close()
if self._c[5]: #close the mmap maped, if exists
self._c[5].close()
self._c = None
self.server.xcache.pop(self.xcache_key)
self.xcache_hit = False
#cache miss or if_modified_since request
"""
if self.vhost_mode:
path = ''.join([self.homedir, '/', self.hostname, self.path])
else:
path = ''.join([self.homedir, self.path])
"""
path = ''.join([self.homedir, self.path])
if os.path.isdir(path):
if not path.endswith('/'):
self.set_resp_code(301)
self.set_out_header("Location", ''.join([path, "/"]))
return
for index in self.index_files:
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
try:
f = open(path, 'rb')
self.out_body_file = f
except IOError as e:
if e.errno == errno.EISDIR:
self.set_resp_code(403)
else:
self.set_resp_code(404)
return
try:
fs = os.fstat(f.fileno())
#Last Modified time
self.out_body_file_lmt = fs.st_mtime
lmt = self.date_time_string(fs.st_mtime)
lms = self.in_headers.get("If-Modified-Since")
if lmt == lms:
self.set_resp_code(304)
return
else:
self.set_out_header("Last-Modified", lmt)
self.out_body_size = fs[6]
self.set_out_header("Content-Length", str(fs[6]))
except:
self.set_resp_code(404)
f.close()
return
try:
a = path.rsplit('.')
content_type = self.mimetype.get(a[1])
if content_type:
self.set_out_header("Content-Type", content_type)
if self.gzip_on and a[1] in self.gzip_types and self.out_body_size > self.gzip_size and self.out_body_size <= self.gzip_max_size:
self.canbe_gzipped = True
else:
self.set_out_header("Content-Type", "application/octet-stream")
if a[1] in self.expire_types:
self.set_out_header("Cache-Control", ''.join(["max-age=", str(self.expire_types[a[1]])]))
self.set_out_header("Expires", self.date_time_string(time.time() + self.expire_types[a[1]]))
except:
self.set_out_header("Content-Type", "application/octet-stream")
self.set_resp_code(200)
	def x_PUT(self):
		"""Handle PUT (store self._rb at the mapped path) and DELETE.

		Only peers listed in self.writers may write; others get
		xR_ERR_HANDLE with no response code set.  Sets 201/204 on success,
		403 on failure.  PUT/DELETE of a directory path is rejected except
		DELETE of an empty directory (os.rmdir).
		"""
		try:
			_peer_ip, _port = self.sock.getpeername()
		except:
			#peer already gone; empty ip will fail the writers check below
			_peer_ip = b''
		if _peer_ip not in self.writers:
			self.xResult = self.xR_ERR_HANDLE
			return
		path = ''.join([self.homedir, self.path])
		if not os.path.isdir(path):
			if self.cmd_delete == 1:
				if os.path.exists(path):
					try:
						os.unlink(path)
						self.set_resp_code(204)
					except:
						self.set_resp_code(403)
				else:
					#deleting a non-existent file is treated as success
					self.set_resp_code(204)
			elif self.cmd_put == 1:
				try:
					#create intermediate directories as needed
					_dir = path.rsplit('/', 1)[0]
					if not os.path.exists(_dir):
						os.makedirs(_dir, 0755)
					f = open(path, 'wb')
					f.write(self._rb)
					f.close()
					self.set_resp_code(201)
				except IOError as e:
					self.set_resp_code(403)
		else:
			if self.cmd_delete == 1:
				try:
					#only succeeds for an empty directory
					os.rmdir(path)
					self.set_resp_code(204)
				except:
					self.set_resp_code(403)
			else:
				#PUT onto an existing directory is forbidden
				self.set_resp_code(403)
def send_out_all_headers(self, extra=''):
#if self.keep_connection == 1 and self.r_http_ver == self.HTTP11:
if self.keep_connection == 1:
self.sock.send(''.join([self.resp_line, "\n", self.out_head_s, "Connection: keep-alive\n\n", extra]))
#writev(self.sock_fileno, [self.resp_line, "\n", self.out_head_s, "Connection: keep-alive\n\n", extra])
else:
self.sock.send(''.join([self.resp_line, "\n", self.out_head_s, "Connection: close\n\n", extra]))
#writev(self.sock_fileno, [self.resp_line, "\n", self.out_head_s, "Connection: close\n\n", extra])
def send_out_all_headers2(self, extra=None):
if extra:
#if self.keep_connection == 1 and self.r_http_ver == self.HTTP11:
if self.keep_connection == 1:
self.sock.send(''.join([self.resp_line, "\n", self.out_head_s, "Connection: keep-alive\n\n", extra]))
else:
self.sock.send(''.join([self.resp_line, "\n", self.out_head_s, "Connection: close\n\n", extra]))
else:
#if self.keep_connection == 1 and self.r_http_ver == self.HTTP11:
if self.keep_connection == 1:
self.sock.send(''.join([self.resp_line, "\n", self.out_head_s, "Connection: keep-alive\n\n"]))
else:
self.sock.send(''.join([self.resp_line, "\n", self.out_head_s, "Connection: close\n\n"]))
	def x_response(self):
		"""Send the response for the current request.

		Phases:
		  1. resume bookkeeping -- resume_transfer==1 reloads transfer state
		     for a large/gzip-chunked file from self.server.resume;
		  2. on a cache miss, build out_head_s, mmap small files, optionally
		     gzip (whole or first chunk) and populate xcache / gzip_shelf;
		  3. send headers+body (mmap slice, in-memory string, or sendfile),
		     re-queueing unfinished transfers in self.server.resume;
		  4. for chunked gzip, compress the next chunk into gzip_shelf.
		TCP_CORK is used to coalesce header+body into one packet.
		"""
		if self.resume_transfer == 0:
			sent = _sent = 0
		elif self.resume_transfer == 1:
			#resuming a previous partial transfer of this fd
			self.xcache_hit = self.has_resp_body = True
			self.command = 'GET'
			self.cmd_get = 1
			_sent = 0
			self.transfer_completed = 0
			_rs = self.server.resume.get(self.sock_fileno)
			if _rs:
				self.out_body_file, self.out_body_size, sent, self.keep_connection, self.gzip_transfer, self.xcache_key = _rs
				if self.gzip_transfer:
					#original (uncompressed) source is needed for further gzip chunks
					_org_file = self.server.xcache[self.xcache_key][2]
					_org_size = self.server.xcache[self.xcache_key][3]
					self.out_head_s = self.server.gzip_shelf[self.xcache_key][1]
					self.out_body_file = self.server.gzip_shelf[self.xcache_key][2]
					self.out_body_size = self.server.gzip_shelf[self.xcache_key][3]
					_file_lmt = self.server.gzip_shelf[self.xcache_key][4]
					_gzip_pos = self.server.gzip_shelf[self.xcache_key][5]
					self.gzip_finished = self.server.gzip_shelf[self.xcache_key][6]
					self.gzip_chunked = self.server.gzip_shelf[self.xcache_key][7]
			else:
				#no such resume, must be first trans
				self.server.epoll.modify(self.sock_fileno, select.EPOLLIN)
				self.resume_transfer = sent = 0
				self.out_head_s = self.server.xcache[self.xcache_key][1]
				self.out_body_file = self.server.xcache[self.xcache_key][2]
				self.out_body_size = self.server.xcache[self.xcache_key][3]
		#At this point, begin transfer response, first to roll out headers
		if not self.xcache_hit:
			_t = time.time()
			if len(self.out_headers) > 0:
				self.out_head_s = ''.join(["Server: ", self.server_version, "\nDate: ", self.date_time_string(_t), '\n', '\n'.join(['%s: %s' % (k, v) for k, v in self.out_headers.items()]), '\n'])
			else:
				self.out_head_s = ''.join(["Server: ", self.server_version, "\nDate: ", self.date_time_string(_t), '\n'])
			if self.resp_code == self.HTTP_OK and self.out_body_size > 0:
				#Only 200 and body_size > 0 response will be cached, [ttl, out_head_s, f, fsize, f_lmt, mmap], and file smaller than 1KB will be mmaped
				if self.out_body_size < 1000 and not self.canbe_gzipped:
					self.out_body_mmap = mmap.mmap(self.out_body_file.fileno(), 0, prot=mmap.PROT_READ)
				else:
					if self.canbe_gzipped and self.c_http_ver == self.HTTP11 and self.r_http_ver == self.HTTP11:
						for x in self.in_headers.get("Accept-Encoding", "null").replace(' ','').split(','):
							if x == "gzip":
								self.gzip_transfer = True
								break
						if self.gzip_transfer:
							#generate gzip cache item
							try:
								#gzip it
								_gzf = zlib.compressobj(6,
											zlib.DEFLATED,
											zlib.MAX_WBITS | 16,
											zlib.DEF_MEM_LEVEL,
											0)
								self.out_body_file.seek(0)
								if self.out_body_size > self.send_buf_size/2:
									#big file: compress only the first chunk now, stream the rest
									self.gzip_chunked = True
									_ss = _gzf.compress(self.out_body_file.read(self.send_buf_size/2))
									_ss = ''.join([_ss, _gzf.flush(zlib.Z_SYNC_FLUSH)])
								else:
									_ss = _gzf.compress(self.out_body_file.read(self.out_body_size))
									_ss = ''.join([_ss, _gzf.flush(zlib.Z_FINISH)])
								_out_headers = copy.copy(self.out_headers)
								_out_headers["Content-Encoding"] = "gzip"
								if self.gzip_chunked:
									_out_headers["Transfer-Encoding"] = "chunked"
									try:
										del _out_headers["Content-Length"]
									except:
										pass
								else:
									_out_headers["Content-Length"] = len(_ss)
								_out_head_s = ''.join([self.resp_line, "\nServer: ", self.server_version, "\nDate: ", self.date_time_string(_t), '\n', '\n'.join(['%s: %s' % (k, v) for k, v in _out_headers.items()]), '\n'])
								#moved to self.server.check_3ws()
								#keep the mem cache of gzip_shelf limitted
								#while len(self.server.gzip_shelf.cache) > 1000:
								#	self.server.gzip_shelf.cache.popitem()
								#keep the disk cache of gzip_shelf limitted
								if len(self.server.gzip_shelf) > self.x_shelf_size:
									with self.server.gzip_shelf_lock:
										self.server.gzip_shelf.popitem()
								if self.gzip_chunked:
									#[file size original, headers, content, body_size, file modified time, current gzip position, finished, chunked]
									_sss = ''.join([hex(len(_ss))[2:], '\r\n', _ss, '\r\n'])
									with self.server.gzip_shelf_lock:
										self.server.gzip_shelf[self.xcache_key] = [self.out_body_size, _out_head_s, _sss, len(_sss), self.out_body_file_lmt, self.send_buf_size/2, False, self.gzip_chunked]
									self.server._gzs[self.xcache_key] = _gzf
								else:
									with self.server.gzip_shelf_lock:
										self.server.gzip_shelf[self.xcache_key] = [self.out_body_size, _out_head_s, _ss, len(_ss), self.out_body_file_lmt, self.out_body_size, True, self.gzip_chunked]
								#moved to self.server.check_3ws()
								#if hasattr(self.server.gzip_shelf.dict, 'sync'):
								#	with self.server.gzip_shelf_lock:
								#		self.server.gzip_shelf.dict.sync()
							except:
								pass #zzz
				if len(self.server.xcache) > self.xcache_size:
					self.server.xcache.popitem()
				#put xcache item, every item take about 8+300+8+8+8+8+1=340 bytes
				#3 items per 1KB mem, 3k items per 1MB mem, 3M items per 1GB mem
				self.server.xcache[self.xcache_key] = [self.xcache_ttl + _t, self.out_head_s, self.out_body_file, self.out_body_size, self.out_body_file_lmt, self.out_body_mmap, self.canbe_gzipped]
				if self.gzip_transfer:
					#switch the outgoing body to the freshly built gzip variant
					_org_file = self.server.xcache[self.xcache_key][2]
					_org_size = self.server.xcache[self.xcache_key][3]
					self.out_head_s = self.server.gzip_shelf[self.xcache_key][1]
					self.out_body_file = self.server.gzip_shelf[self.xcache_key][2]
					self.out_body_size = self.server.gzip_shelf[self.xcache_key][3]
					_file_lmt = self.server.gzip_shelf[self.xcache_key][4]
					_gzip_pos = self.server.gzip_shelf[self.xcache_key][5]
					self.gzip_finished = self.server.gzip_shelf[self.xcache_key][6]
					self.gzip_chunked = self.server.gzip_shelf[self.xcache_key][7]
			elif self.resp_code >= self.HTTP_BAD_REQUEST:
				self.out_head_s = ''.join([self.out_head_s, "Content-Length: ", str(len(self.resp_msg) + 4), '\n'])
		#send headers & body
		if self.has_resp_body and self.out_body_file and self.cmd_get == 1:
			if self.out_body_mmap:
				if self.server_pipelining:
					if self.sock_fileno in self.server.x_reqs:
						self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
					self.send_out_all_headers(extra=self.out_body_mmap[:])
					if self.sock_fileno not in self.server.x_reqs:
						self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)
				else:
					self.send_out_all_headers(extra=self.out_body_mmap[:])
			elif isinstance(self.out_body_file, str) and self.out_body_size < 1000 and self.gzip_finished:
				#small gzipped body held in memory: one write does it all
				self.send_out_all_headers(extra=self.out_body_file)
			else:
				#Try send as much as data once in a TCP packet
				#Because 2(1 header + 1 body) packets turn down performance up to 50% than 1(header + body) packet
				if self.resume_transfer == 0:
					self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 1)
					self.send_out_all_headers()
				if self.out_body_size - sent >= self.send_buf_size:
					if self._native_epoll:
						_send_buf = self.send_buf_size
					else:
						_send_buf = self.out_body_size
				else:
					_send_buf = self.out_body_size - sent
				try:
					if isinstance(self.out_body_file, str):
						_sent = self.sock.send(self.out_body_file[sent:_send_buf+sent])
					else:
						_sent = sendfile.sendfile(self.sock_fileno, self.out_body_file.fileno(),
								sent, _send_buf)
					sent += _sent
					if self.resume_transfer == 0:
						#after transfer snd_buf data, requeue event, let other event to be handled
						if sent < self.out_body_size or not self.gzip_finished:
							self.server.resume[self.sock_fileno] = [self.out_body_file,
									self.out_body_size, sent, self.keep_connection,
									self.gzip_transfer, self.xcache_key]
							self.server.epoll.modify(self.sock_fileno, select.EPOLLOUT)
							self.transfer_completed = 0
					else:
						if self.out_body_size == sent and self.gzip_finished:
							self.server.resume.pop(self.sock_fileno)
							self.server.epoll.modify(self.sock_fileno, select.EPOLLIN)
							self.transfer_completed = 1
						else:
							self.server.resume[self.sock_fileno] = [self.out_body_file,
									self.out_body_size, sent, self.keep_connection,
									self.gzip_transfer, self.xcache_key]
				except OSError as e: #rrr
					if e.errno == errno.EAGAIN:
						#send buffer full?just wait to resume transfer
						#and gevent mode can't reach here, beacause gevent_sendfile intercepted the exception
						self.server.resume[self.sock_fileno] = [self.out_body_file,
								self.out_body_size, sent, self.keep_connection,
								self.gzip_transfer, self.xcache_key]
					elif e.errno == errno.EPIPE:
						#peer closed connection
						self.transfer_completed = 1
						self.server.resume.pop(self.sock_fileno)
						self.server.cleanup(self.sock_fileno);
					else:
						raise
				if not self.gzip_finished:
					#continue gen gzip chunked encoding file data, zzz
					_gzf = self.server._gzs.get(self.xcache_key)
					if not _gzf:
						#this wrong, may cause error, just in case
						_gzf = zlib.compressobj(6,
									zlib.DEFLATED,
									zlib.MAX_WBITS | 16,
									zlib.DEF_MEM_LEVEL,
									0)
					if _org_size > _gzip_pos + self.send_buf_size/2:
						_z_buf_size = self.send_buf_size/2
						_flush_mode = zlib.Z_SYNC_FLUSH
					else:
						#last chunk of the source file
						_z_buf_size = _org_size - _gzip_pos
						self.gzip_finished = True
						_flush_mode = zlib.Z_FINISH
					_org_file.seek(_gzip_pos)
					_ss = _gzf.compress(_org_file.read(_z_buf_size))
					_ss = ''.join([_ss, _gzf.flush(_flush_mode)])
					_sss = ''.join([self.out_body_file, hex(len(_ss))[2:], '\r\n', _ss, '\r\n'])
					if self.gzip_finished:
						#terminating zero-length chunk of the chunked encoding
						_sss = ''.join([_sss, '0\r\n\r\n'])
						self.server._gzs.pop(self.xcache_key)
					with self.server.gzip_shelf_lock:
						self.server.gzip_shelf[self.xcache_key] = [_org_size, self.out_head_s, _sss, len(_sss), _file_lmt, _gzip_pos + _z_buf_size, self.gzip_finished, self.gzip_chunked]
					#moved to self.server.check_3ws()
					#if hasattr(self.server.gzip_shelf.dict, 'sync'):
					#	self.server.gzip_shelf.dict.sync()
				#Now, transfer complete, resume nature behavior of TCP/IP stack, as turned before
				if self.keep_connection == 1 and self.resume_transfer == 0:
					#no need to set TCP_CORK when keep_connection=0
					#it will be cleared when socket closing and data will be flushed
					self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_CORK, 0)
		elif self.resp_code == self.HTTP_NOT_MODIFIED:
			self.send_out_all_headers()
		elif self.resp_code == self.HTTP_OK and self.cmd_head == 1:
			self.send_out_all_headers()
		elif self.resp_code >= self.HTTP_BAD_REQUEST or self.resp_code == 201 or self.resp_code == 204:
			self.send_out_all_headers(extra = "%d %s" % (self.resp_code, self.resp_msg))
def clean(self):
if self.transfer_completed == 1:
self.in_headers.clear()
self.out_headers.clear()
if self.keep_connection == 0:
self.sock.close()
def gevent_sendfile(out_fd, in_fd, offset, count):
	"""Send *count* bytes from *in_fd* to *out_fd* starting at *offset*,
	cooperatively yielding to gevent whenever the send buffer is full.
	Returns the number of bytes sent.  (Adapted from gevent's examples.)"""
	total = 0
	while total < count:
		try:
			total += o_sendfile(out_fd, in_fd, offset + total, count - total)
		except OSError as ex:
			if ex.args[0] == errno.EAGAIN:
				#kernel buffer full: wait until the fd is writable again
				gevent.socket.wait_write(out_fd)
			else:
				raise
	return total
class _xDNSHandler:
	"""Event handler for the 3nsd DNS server: parses A queries with dpkt,
	answers from the rra/ttl config (with geo and left/right latency-range
	resolving) and caches generated answers in server.xcache."""
	#parse/handle result codes
	PARSE_OK = 0
	PARSE_ERROR = -1
	xR_OK = 0
	xR_ERR_PARSE = -1
	xR_ERR_HANDLE = -2
	xResult = 0
	#per-query state (rebound per packet by init_dns_handler)
	sock = None
	data = None
	addr = None
	#NOTE(review): mutable class-level dicts; init_nsd_config() rebinds them
	#per instance, so instances don't actually share state -- confirm.
	rra = {}	#name -> list of answer ips
	rrn = {}	#name -> round-robin shift counter
	ttl = {}	#name -> record ttl
	stat = {}	#ip -> [alive, ttl, last-check-time]
	geo = {}	#name -> {country/continent code -> config name}
	_rra = None
	_ttl = None
	q = None	#parsed dpkt.dns.DNS query
	query_name = None
	query_name_geo = None
	answer = None
	xcache_ttl = 10
	probe_interval = 10 #icmp probe interval in seconds, see if the ip alive
	#left/right latency-range resolving state
	LR_LEFT = 0
	LR_RIGHT = 1
	lr_peers = {}	#peer ip -> [ttl, left-rtt, right-rtt]
	lr_ttl = 3600
	lr_left = ''
	lr_right = ''
	lr_range = ''
	lr_range_suffix = ''
	lr_prefix = 'lr'
	lr_resolve = False
	lr_left_measuring = False
	lr_right_measuring = False
	geoip_db = None	#geoip2.database.Reader when configured
	def __init__(self, conn, client_address, server, config_section='3nsd'):
		"""Bind to the owning server and load the DNS config section."""
		self.server = server
		#if server.workers > 1:
		#	self._wlock = multiprocessing.Lock()
		#else:
		#	self._wlock = Semaphore()
		self.init_nsd_config(config_section=config_section)
	def init_nsd_config(self, config_section='3nsd'):
		"""(Re)load DNS serving config from 3xsd.conf (cwd, then /etc).

		Known option names configure left/right range resolving and geoip;
		every other "name = ttl,ip[,ip...]" line becomes an A record set:
		ttl[name], rra[name] (ip list) and rrn[name] (round-robin counter).
		A name written as "name@geo1/geo2" also maps those country/continent
		codes to the record in self.geo.
		"""
		#for reload config
		self.ttl = {}
		self.rra = {}
		self.rrn = {}
		self.geo = {}
		self.lr_peers = {}
		self.lr_ttl = 3600
		self.lr_left = ''
		self.lr_right = ''
		self.lr_range = ''
		self.lr_range_suffix = ''
		self.lr_prefix = 'lr'
		self.lr_resolve = False
		self.lr_left_measuring = False
		self.lr_right_measuring = False
		self.geoip_db = None
		self.config = ConfigParser.ConfigParser()
		if not self.config.read('3xsd.conf'):
			self.config.read('/etc/3xsd.conf')
		for name, value in self.config.items(config_section):
			if name == "left":
				if value:
					self.lr_left = value.lower().strip()
			elif name == "right":
				if value:
					self.lr_right = value.lower().strip()
			elif name == "range":
				if value:
					self.lr_range = value.lower().strip()
			elif name == "range_suffix":
				if value:
					self.lr_range_suffix = value.lower().strip()
			elif name == "lr_ttl":
				if value:
					self.lr_ttl = int(value)
			elif name == "lr_prefix":
				if value:
					self.lr_prefix = value.lower().strip()
			elif name == "geoip_db":
				if value:
					self.geoip_db = geoip2.database.Reader(value)
			else:
				v = value.split(',', 1)
				if len(v) > 1:
					# [ttl, ip, ...]
					if '@' in name:
						#geo-qualified record: name@cc1/cc2...
						_name, _geo = name.lower().split('@')
						if _name not in self.geo:
							self.geo[_name] = {}
						for _cc in _geo.split('/'):
							if _cc:
								self.geo[_name][_cc] = name
					self.ttl[name] = int(v[0])
					self.rra[name] = self.ip_list(name, v[1], config_section)
					self.rrn[name] = 0
	def ip_list(self, name, ipstr, config_section):
		"""Expand a comma-separated ip spec into a list and seed self.stat.

		NOTE(review): a config_section other than '3nsd'/'3zsd'/'3fsd'
		leaves _port/_last unbound for ranged entries (NameError); and the
		empty-result branch clears the whole self.stat dict, not just this
		name's entries -- confirm both are intended.
		"""
		#ip,ip,ip... can be the following format: #lll
		#10.23.4.11 - single ip, 10.23.4.101-200 - multi ip
		a = ipstr.split(',')
		iplist = []
		t = time.time()
		for item in a:
			if item.find('-') < 0:
				iplist.append(item)
				self.stat[item] = [True, self.ttl[name], t] #[stat, ttl, last-check-time]
			else:
				#ranged entry: expand the last octet
				_ip = item.split('.')
				if config_section == '3nsd':
					_port = None
					_last = _ip[3].split('-')
				elif config_section == '3zsd' or config_section == '3fsd':
					#10.23.4.101-200:8000
					__l = _ip[3].split(':')
					_port = __l[1]
					_last = __l[0].split('-')
				for i in range(int(_last[0]), int(_last[1])+1):
					ip = '.'.join([_ip[0], _ip[1], _ip[2], str(i)])
					if _port:
						ip = ''.join([ip, ':', _port])
					iplist.append(ip)
					self.stat[ip] = [True, self.ttl[name], t]
		if len(iplist) > 0:
			return iplist
		else:
			self.stat.clear()
			return None
def init_dns_handler(self):
self.data = None
self.addr = None
self._rra = None
self._ttl = None
self.q = None
self.query_name = None
self.query_name_geo = None
self.answer = None
	def __call__(self, events):
		"""Drain the UDP socket: parse and answer every queued DNS query
		until recvfrom raises EAGAIN, then reset per-query state."""
		#found that one event can contains multi dns query packages, so use while loop instead of for
		while 1:
			try:
				self.data, self.addr = self.sock.recvfrom(1024)
				if self.x_parse_query() == self.PARSE_OK:
					if not self.lr_resolve:
						self.x_gen_answer()
					else:
						#left/right latency-range resolving path
						self.x_lr_resolve()
					self.x_send_out()
				else:
					self.xResult = self.xR_ERR_PARSE
			except socket.error as e:
				if e.errno == errno.EAGAIN:
					#socket drained: clear state and wait for the next event
					self.init_dns_handler()
					break
				else:
					raise
def shift(self, alist, n):
if len(alist) == 1:
return alist
else:
_n = n % len(alist)
return alist[_n:] + alist[:_n]
	def x_check_range_resolve(self):
		"""Decide whether this query is part of left/right range resolving.

		Sets lr_resolve (and lr_left_measuring/lr_right_measuring for the
		measurement CNAMEs) and registers the peer in lr_peers.  The
		'suffix in tail-slice' tests act as endswith() checks, since the
		slice is exactly the suffix's length.
		"""
		#check if it's a left-right range resolving name
		self.lr_resolve = self.lr_left_measuring = self.lr_right_measuring = False
		if self.lr_range_suffix in self.query_name[0-len(self.lr_range_suffix):] and self._rra and self._rra[0] == '0.0.0.0':
			self.lr_resolve = True
		if self.lr_left in self.query_name[0-len(self.lr_left):] and self.lr_prefix in self.query_name[:len(self.lr_prefix)]:
			self.lr_resolve = True
			self.lr_left_measuring = True
		if self.lr_right in self.query_name[0-len(self.lr_right):] and self.lr_prefix in self.query_name[:len(self.lr_prefix)]:
			self.lr_resolve = True
			self.lr_right_measuring = True
		if self.lr_resolve:
			_peer, _ = self.addr
			if _peer not in self.lr_peers:
				self.lr_peers[_peer] = [int(time.time()) + self.lr_ttl, 0, 0] #[ttl, left-rtt,right-rtt], also as [ttl, a,b]
		return self.lr_resolve
	def x_check_peer_geo(self): #cpcp
		"""Look up the querying peer in the geoip database and, when its
		continent or country code is configured for query_name, switch
		query_name_geo to the geo-specific record name.  Country iso_code
		takes precedence over continent code (it is checked last)."""
		if self.geoip_db:
			try:
				_rs = self.geoip_db.country(self.addr[0])
				_cc = None
				#the country code(_cc), first match continent code, then country's iso_code
				if hasattr(_rs.continent, "code"):
					_cc = _rs.continent.code.lower()
					if _cc in self.geo[self.query_name]:
						self.query_name_geo = self.geo[self.query_name][_cc]
				if hasattr(_rs.country, "iso_code"):
					_cc = _rs.country.iso_code.lower()
					if _cc in self.geo[self.query_name]:
						self.query_name_geo = self.geo[self.query_name][_cc]
				#city has not iso_code, so what's next?
				#if hasattr(_rs.city, "iso_code"):
				#	print "peer city code:", _rs.city.iso_code
				#elif hasattr(_rs.city, "name"):
				#	print "peer city name:", _rs.city.name
				print "peer:", self.addr[0], "geo:", self.query_name_geo, "cc:", _cc
			except:
				#geoip lookup failure is non-fatal; fall back to plain name
				pass
	def x_parse_query(self): #pqpq
		"""Decode the raw UDP payload as a DNS query with dpkt.

		Only IN-class A queries are accepted.  Resolves geo mapping for the
		name, loads the matching rra/ttl config, and returns PARSE_OK when
		either range-resolving applies or a record set exists; PARSE_ERROR
		otherwise.
		"""
		self.q = dpkt.dns.DNS(self.data)
		#we accept just A type query
		if self.q.qd[0].cls != dpkt.dns.DNS_IN or self.q.qd[0].type != dpkt.dns.DNS_A:
			return self.PARSE_ERROR
		self.query_name = self.query_name_geo = self.q.qd[0].name
		if self.query_name in self.geo:
			#may rewrite query_name_geo to a geo-specific record
			self.x_check_peer_geo()
			self._rra = self.rra.get(self.query_name_geo)
			self._ttl = self.ttl.get(self.query_name_geo)
		else:
			self._rra = self.rra.get(self.query_name)
			self._ttl = self.ttl.get(self.query_name)
		if self.x_check_range_resolve():
			#It's a left-right range resolve
			return self.PARSE_OK
		elif self._rra is not None and self._ttl is not None:
			#ok, rr & ttl config found
			return self.PARSE_OK
		else:
			#not my serving domain name
			return self.PARSE_ERROR
	def x_lr_resolve(self): #lrlr
		"""Drive the left/right latency measurement state machine.

		lr_peers caches [ttl, a, b] per peer, where a/b are the measured
		round-trip times toward the "left" and "right" ranges in msecs.
		Measurement CNAMEs encode [peer, ts0, ts1, ts2] so follow-up
		queries carry the timing back; once both a and b are known the
		final answer is a CNAME into the faster range (x_lr_range).
		"""
		_peer = self.addr[0]
		_ttl, a, b = self.lr_peers[_peer]
		_t = time.time()	#_t: current time
		#print "---------------------"
		#print _peer, self.lr_peers[_peer]
		if _t <= _ttl:
			#cache of a,b not expired
			if a > 0 and b > 0:
				self.x_lr_range(a, b, ch=True)	#ch = cache hit
				return
		else:
			#cache of a,b expired
			_ttl = int(_t) + self.lr_ttl
			a = b = 0
			self.lr_peers[_peer] = [_ttl, a, b]
		if self.lr_left_measuring:
			#doing left measure
			_speer0, _sts0, _sts1, _sts2 = self.query_name.split('.')[0].split('-')[1:]
			_ts0, _ts1, _ts2 = (int(_sts0), int(_sts1), int(_sts2))
			#original peer ip was packed into the name as a 32-bit int
			_peer0 = socket.inet_ntoa(struct.pack('!I', int(_speer0)))
			if _peer0 not in self.lr_peers:
				self.lr_peers[_peer0] = [int(_t) + self.lr_ttl, 0, 0]
			if _ts2 > 0:
				b = self.lr_peers[_peer][2] = self.lr_peers[_peer0][2] = _ts2
			if a == 0:
				if _ts1 == 0:
					self.x_lr_cname(self.LR_LEFT, _ts0, int((_t - _ts0) * 1000), _ts2)
					return
				else:
					a = self.lr_peers[_peer][1] = self.lr_peers[_peer0][1] = int((_t - _ts0) * 1000) - _ts1 if _ts1>300000 else _ts1
			if b == 0:
				self.x_lr_cname(self.LR_RIGHT, _ts0, a, 0)
			elif a > 0 and b > 0:
				if a < _ts1:
					#for debug purpose
					self.x_lr_cname(self.LR_LEFT, _ts0, a, b)
				else:
					self.x_lr_range(a, b)
		elif self.lr_right_measuring:
			#doing right measure
			_speer0, _sts0, _sts1, _sts2 = self.query_name.split('.')[0].split('-')[1:]
			_ts0, _ts1, _ts2 = (int(_sts0), int(_sts1), int(_sts2))
			_peer0 = socket.inet_ntoa(struct.pack('!I', int(_speer0)))
			if _peer0 not in self.lr_peers:
				self.lr_peers[_peer0] = [int(_t) + self.lr_ttl, 0, 0]
			if _ts1 > 0:
				a = self.lr_peers[_peer][1] = self.lr_peers[_peer0][1] = _ts1
			if b == 0:
				if _ts2 == 0:
					self.x_lr_cname(self.LR_RIGHT, _ts0, _ts1, int((_t - _ts0) * 1000))
					return
				else:
					b = self.lr_peers[_peer][2] = self.lr_peers[_peer0][2] = int((_t - _ts0) * 1000) - _ts2 if _ts2>300000 else _ts2
			if a == 0:
				self.x_lr_cname(self.LR_LEFT, _ts0, 0, b)
			elif a > 0 and b > 0:
				if b < _ts2:
					#for debug purpose
					self.x_lr_cname(self.LR_RIGHT, _ts0, a, b)
				else:
					self.x_lr_range(a, b)
		else:
			#doing initial query
			#_ts0: base time stamp, in secs
			_ts0 = int(_t - 300)
			#_ts: offset time stamp from base time, in msecs
			_ts = int((_t - _ts0) * 1000)
			if self.lr_range == 'left':
				#left
				if a == 0 and b == 0:
					if _ts0 % 2:
						self.x_lr_cname(self.LR_LEFT, _ts0, _ts)
					else:
						self.x_lr_cname(self.LR_RIGHT, _ts0, 0, 0)
				elif a == 0: #b > 0
					self.x_lr_cname(self.LR_LEFT, _ts0, _ts, b)
				elif b == 0: #a > 0
					self.x_lr_cname(self.LR_RIGHT, _ts0, a, 0)
				else: #a > 0, b > 0
					self.x_lr_range(a, b, ch=True)
			else:
				#right
				if a == 0 and b == 0:
					if _ts0 % 2:
						self.x_lr_cname(self.LR_RIGHT, _ts0, 0, _ts)
					else:
						self.x_lr_cname(self.LR_LEFT, _ts0, 0, 0)
				elif b == 0: #a > 0
					self.x_lr_cname(self.LR_RIGHT, _ts0, a, _ts)
				elif a == 0: #b > 0
					self.x_lr_cname(self.LR_LEFT, _ts0, 0, b)
				else: #a > 0, b > 0
					self.x_lr_range(a, b, ch=True)
	def x_lr_range(self, a, b, ch=False): #lrlr
		"""Answer with a CNAME into the faster range.

		a/b are the measured left/right round-trip times; during a
		measurement the name is flipped to the other range when it lost,
		otherwise the configured lr_range side is substituted for the
		range suffix.  ch=True means the rtts came from the peer cache,
		so the CNAME may carry that record's configured ttl.
		"""
		if self.lr_left_measuring:
			#strip the measurement label, keep the rest of the name
			_cname = self.query_name[self.query_name.find('.')+1:]
			if a > b:
				_cname = _cname.replace(self.lr_left, self.lr_right)
		elif self.lr_right_measuring:
			_cname = self.query_name[self.query_name.find('.')+1:]
			if a < b:
				_cname = _cname.replace(self.lr_right, self.lr_left)
		else:
			if self.lr_range == 'left':
				_cname = self.query_name.replace(self.lr_range_suffix, self.lr_left)
			elif self.lr_range == 'right':
				_cname = self.query_name.replace(self.lr_range_suffix, self.lr_right)
		#gen cname answer
		self.q.op = dpkt.dns.DNS_RA
		self.q.rcode = dpkt.dns.DNS_RCODE_NOERR
		self.q.qr = dpkt.dns.DNS_R
		arr = dpkt.dns.DNS.RR()
		arr.cls = dpkt.dns.DNS_IN
		arr.type = dpkt.dns.DNS_CNAME
		arr.name = self.query_name
		arr.cname = _cname
		arr.ttl = 0 if not ch else self.ttl.get(_cname, 0)
		self.q.an.append(arr)
		#I haven't understand what the Authority Record is going on..
		if self.q.ar: del self.q.ar[:]
		self.answer = str(self.q)
	def x_lr_cname(self, _range, ts0, ts1, ts2=0): #lrlr
		"""Answer with a measurement CNAME toward *_range* (LR_LEFT/LR_RIGHT).

		The generated label packs [lr_prefix, peer-ip-as-int, ts0, ts1, ts2]
		so the timing survives the next lookup of that CNAME.
		"""
		#query_name: ga.i.3xsd.net
		#cname: ts0-ts1-ts2.ga.l.3xsd.net
		#ts0: base time, in secs
		#ts1: a measure time start point if ts2 = 0, or rtt of a if ts2 > 0, in msecs from base time
		#ts2: b measure time start point if ts3 = 0, or rtt of b if ts3 > 0, in msecs from base time
		if self.lr_right_measuring or self.lr_left_measuring:
			#already measuring: drop the old measurement label first
			_query_name = self.query_name[self.query_name.find('.')+1:]
		else:
			_query_name = self.query_name
		if _range == self.LR_LEFT:
			if self.lr_right_measuring:
				_query_name = _query_name.replace(self.lr_right, self.lr_left)
			else:
				_query_name = _query_name.replace(self.lr_range_suffix, self.lr_left)
		elif _range == self.LR_RIGHT:
			if self.lr_left_measuring:
				_query_name = _query_name.replace(self.lr_left, self.lr_right)
			else:
				_query_name = _query_name.replace(self.lr_range_suffix, self.lr_right)
		#[prefix, peer_ip, ts0, ts1, ts2]
		_cname = ''.join([self.lr_prefix, '-', str(struct.unpack('!I', socket.inet_aton(self.addr[0]))[0]), '-', str(ts0), '-', str(ts1), '-', str(ts2), '.', _query_name])
		#gen cname answer
		self.q.op = dpkt.dns.DNS_RA
		self.q.rcode = dpkt.dns.DNS_RCODE_NOERR
		self.q.qr = dpkt.dns.DNS_R
		arr = dpkt.dns.DNS.RR()
		arr.cls = dpkt.dns.DNS_IN
		arr.type = dpkt.dns.DNS_CNAME
		arr.name = self.query_name
		arr.cname = _cname
		arr.ttl = 0
		self.q.an.append(arr)
		#I haven't understand what the Authority Record is going on..
		if self.q.ar: del self.q.ar[:]
		self.answer = str(self.q)
	def x_gen_answer(self): #gaga
		"""Generate an A-record DNS answer for the current query and cache it.

		Serves from self.server.xcache when possible (patching the DNS id to
		match the current queryer); otherwise builds RRs for all alive ips,
		failing over to a '_'-prefixed backup pool when every ip is down.
		"""
		if self.query_name_geo in self.server.xcache:
			_c = self.server.xcache.get(self.query_name_geo)
			if _c[0] > time.time():
				#cache item not expired, load it and rewrite the id field of answer to match queryer's
				if self.q.id < 255:
					self.answer = ''.join(['\x00', chr(self.q.id), _c[1][2:]])
				else:
					self.answer = ''.join([chr(self.q.id/256), chr(self.q.id % 256), _c[1][2:]])
				return
			else:
				#expired, clear it
				self.server.xcache.pop(self.query_name_geo)
		#cache not hit, go on handling: first to turn query into answer
		self.q.op = dpkt.dns.DNS_RA
		self.q.rcode = dpkt.dns.DNS_RCODE_NOERR
		self.q.qr = dpkt.dns.DNS_R
		_alive = 0
		#if not a geo resolving, self.query_name_geo is just equal to self.query_name, set by x_parse_query()
		_query_name = self.query_name_geo
		#for round robbin, shift ip list every time
		self._rra = self.shift(self.rra.get(_query_name), self.rrn.get(_query_name))
		self.rrn[_query_name] = (self.rrn[_query_name] + 1) % len(self.rra.get(_query_name))
		#gen rr records for A resolve
		while _alive == 0:
			for _ip_s in self._rra:
				#append rr record with ip not down
				__stat = self.stat.get(_ip_s)
				_stat = __stat[0]
				if not _stat:
					continue
				else:
					_alive += 1
				arr = dpkt.dns.DNS.RR()
				arr.cls = dpkt.dns.DNS_IN
				arr.type = dpkt.dns.DNS_A
				arr.name = self.query_name
				arr.ip = socket.inet_aton(_ip_s)
				arr.ttl = self._ttl
				self.q.an.append(arr)
			if _alive == 0:
				#all ip go down, failover to backup config
				_query_name = ''.join(['_', _query_name])
				if self.rra.get(_query_name) is None:
					break #backup go down too, just break and return empty answer
				self._rra = self.shift(self.rra.get(_query_name), self.rrn.get(_query_name))
				self.rrn[_query_name] += 1
				self._ttl = self.ttl.get(_query_name)
		#clear any Authority Records (original author note: their role here was unclear)
		if self.q.ar: del self.q.ar[:]
		self.answer = str(self.q)
		#cache it, when expired at one ttl
		self.server.xcache[self.query_name_geo] = [self.xcache_ttl + time.time(), self.answer]
def x_send_out(self):
#self._wlock.acquire()
try:
#send out answer, seems sending without mutex lock is ok
self.sock.sendto(self.answer, self.addr)
except:
raise
#finally:
#self._wlock.release()
	def probe_ips(self):
		"""Health-check loop: every probe_interval seconds, icmp_ping all known
		backend ips whose last-check + ttl window has elapsed, concurrently via gevent."""
		gs = []
		while 1:
			gevent.sleep(self.probe_interval)
			if len(self.stat) > 0:
				if len(gs) > 0:
					del gs[:]
				for ip in self.stat.keys():
					if ip == '0.0.0.0': continue
					if time.time() > self.stat[ip][2] + self.stat[ip][1]: #last-check + ttl
						gs.append(gevent.spawn(self.icmp_ping, ip)) #do works concurrently
				gevent.joinall(gs)
			#print "lr_peers:", self.lr_peers
			#print "-------------------"
			#print "self.rra: ", self.rra
			#print "self.geo: ", self.geo
	def icmp_ping(self, ip):
		"""ICMP-ping `ip` (up to 2 echo requests, 1s timeout each).

		Updates self.stat[ip][0] (alive flag) and [2] (last-check time);
		returns True when an echo reply from `ip` was received.
		"""
		#be sure to be a gevent.socket, for concurrent reason
		sock = gevent.socket.socket(gevent.socket.AF_INET, gevent.socket.SOCK_RAW, 1) #raw socket requiries root privilege
		if StrictVersion(gevent.__version__) >= StrictVersion('1.0'):
			sock.connect((ip, None))
		else:
			sock.connect((ip, 1))
		sock.settimeout(1)
		rcvd = 0
		for i in xrange(2): #send icmp ping tests
			icmp = dpkt.icmp.ICMP(type=8, data=dpkt.icmp.ICMP.Echo(id=random.randint(0, 0xffff),
								seq=i, data='3nsd probe'))
			try:
				sock.send(str(icmp))
				r = sock.recv(1024)
				rip = dpkt.ip.IP(r)
				if gevent.socket.inet_ntoa(rip.src) == ip:
					#reply really came from the probed host; type 0 == echo reply
					ricmp = dpkt.icmp.ICMP(str(rip.data))
					if ricmp.type == 0:
						rcvd += 1
			except gevent.socket.timeout:
				pass
			if rcvd > 0:
				break
		sock.close()
		self.stat[ip][2] = time.time()
		if rcvd == 0: #ip down
			self.stat[ip][0] = False
			return False
		else: #ip alive
			self.stat[ip][0] = True
			return True
class _xZHandler(_xHandler, _xDNSHandler):
	#reverse-proxy / load-balancer handler: _xHandler provides the HTTP side,
	#_xDNSHandler supplies backend pools (rra), health stats and round-robin state
	Z_RR = 0 #round robin
	Z_IP = 1 #ip hash
	Z_URL = 2 #url hash
	None2 = [None, None] #default pair unpacked when a conn-pair lookup misses
	_f = None #socket fileno
	z_cache_size = 1000 #limit the xcache size in mem, about 30MB size with average file size 30KB
	z_idle_ttl = 20 #backend connections have a idle timeout of 20 seconds
	z_xcache_shelf = False #persistent storage of xcache
	z_shelf_size = 1000000 #1 million files on-disk cache, about 30GB disk size with average file size 30KB
	transparent_proxy = False #set True when a '*' wildcard backend pool is configured
	tp_host = {} #hostname -> {'ip', 'expire-time'} resolve cache for transparent proxy mode
	def __init__(self, conn, client_address, server, native_epoll=True,
					gevent_stream=False, recv_buf_size=16384, send_buf_size=65536, z_mode=0):
		"""Init the proxy handler; z_mode selects balancing policy (Z_RR/Z_IP/Z_URL), -1 disables it.

		NOTE(review): Z_mode and X_shelf appear to be module-level config globals
		defined elsewhere in this file - confirm before refactoring.
		"""
		_xHandler.__init__(self, conn, client_address, server, native_epoll, gevent_stream, recv_buf_size, send_buf_size)
		self.server_pipelining = False
		if Z_mode > z_mode:
			#global config overrides the constructor default
			z_mode = Z_mode
		if z_mode >= 0:
			_xDNSHandler.__init__(self, conn, client_address, server, config_section='3zsd')
			if '*' in self.rra.keys():
				self.transparent_proxy = True
			self.z_mode = z_mode
		else:
			self.z_mode = 0
		if X_shelf == 1:
			#enable persistent on-disk xcache, guarded by a cross-process lock
			self.z_xcache_shelf = True
			self.server.xcache_shelf = shelve.open('shelf.xcache', writeback=True)
			self.server.xcache_shelf_lock = multiprocessing.Lock()
def init_zsd_config(self, config_section='3zsd'):
_xHandler.init_config(self)
_xDNSHandler.init_nsd_config(self, config_section=config_section)
if '*' in self.rra.keys():
self.transparent_proxy = True
	def init_handler(self, conn, client_address, rw_mode=0):
		"""Reset per-event state for one epoll event.

		rw_mode: 0 client->server, 1 server->client, 2 backend->server, 3 server->backend.
		For resumed transfers (rw_mode > 0) the per-connection request context is
		restored from the server-level tables.
		"""
		_xHandler.init_handler(self, conn, client_address, rw_mode)
		self.z_hostname = self.z_backends = self.z_host_addr = self.z_host_sock = None
		self.z_header_length = self.z_finished = self.z_body_size = self.chuncked_encoding = self.transfer_completed = 0
		self.response_headers_parsed = False
		if conn:
			if self._f in self.server.xcache_stat:
				#a previous pass recorded a cache hit for this client fd
				self.xcache_hit = True
				self.xcache_key = self.server.xcache_stat[self._f]
			if rw_mode > 0:
				#resuming: restore http version, path, hostname and raw request
				_n = conn.fileno()
				if _n in self.server.k_conns:
					self.r_http_ver, self.keep_connection = self.server.k_conns[_n]
				self.path = self.server.c_path.get(_n)
				self.z_hostname = self.server.zhosts.get(_n)
				self._r = self.server.z_reqs.get(_n)
				if rw_mode > 1:
					#backend-side event: recover the backend socket/address pair
					self.z_host_sock = self.server.zconns.get(self._f)
					self.z_host_addr = self.server.zaddrs.get(self._f)
		else:
			return
	def z_check_xcache(self, _f):
		"""Look up the current request in the in-memory xcache (and optional on-disk shelf).

		Tries Accept-Encoding-qualified keys ("gzip:<key>") first, then the plain
		key. On a fresh hit, loads the cached head/body into the out_* fields and
		sets self.xcache_hit; expired entries are evicted. Requests carrying
		If-Modified-Since or Cache-Control: no-cache bypass the cache.
		"""
		if self.if_modified_since == 0 and self.in_headers.get("Cache-Control", "null").find("no-cache") < 0:
			_key_found = _in_shelf = False
			self.accept_encoding = self.in_headers.get("Accept-Encoding")
			if self.accept_encoding:
				for x in self.accept_encoding.replace(' ','').split(','):
					_key = ''.join([x, ':', self.xcache_key])
					if _key in self.server.xcache:
						_key_found = True
						self.xcache_key = _key
						break
					elif self.z_xcache_shelf and _key in self.server.xcache_shelf:
						_key_found = True
						_in_shelf = True
						self.xcache_key = _key
						break
			if not _key_found:
				if self.xcache_key in self.server.xcache:
					_key_found = True
				elif self.z_xcache_shelf and self.xcache_key in self.server.xcache_shelf:
					_key_found = True
					_in_shelf = True
			if _key_found:
				if not _in_shelf:
					self._c = self.server.xcache.get(self.xcache_key)
				else:
					#promote the shelved entry back into the in-memory cache
					self._c = self.server.xcache[self.xcache_key] = self.server.xcache_shelf.get(self.xcache_key)
				ttl = self._c[0]
				if ttl >= time.time():
					#cache hit
					self.out_head_s, self.out_body_file, self.out_body_size, self.out_body_file_lmt, self.out_body_mmap = self._c[1:]
					self.has_resp_body = True
					self.xcache_hit = True
					self.server.xcache_stat[_f] = self.xcache_key
					if self.r_http_ver == self.HTTP11:
						self.resp_line = 'HTTP/1.1 200 OK'
					else:
						self.resp_line = 'HTTP/1.0 200 OK'
					return
				else:
					#cache item expired
					self._c = None
					if not _in_shelf:
						self.server.xcache.pop(self.xcache_key)
					else:
						try:
							del self.server.xcache_shelf[self.xcache_key]
						except:
							#may be problem in concurrent mode
							pass
					if _f in self.server.xcache_stat:
						self.server.xcache_stat.pop(_f)
					self.xcache_hit =False
		else:
			self.xcache_hit =False
	def __call__(self, fds):
		"""Epoll event entry point: dispatch each (fileno, rw_mode) pair.

		rw_mode: 0 request from client, 1 response resume to client,
		2 data from backend, 3 request resume to backend. _do_clean controls
		whether the per-request state is torn down at the end of the pass.
		"""
		for f, rw_mode in fds:
			self._f = f
			_do_clean = True
			if rw_mode == 0: #ccc
				#from client, resume_transfer = 0
				#print "0 c->s" #client to server
				self.init_handler(self.server.conns[f], self.server.addrs[f], rw_mode)
				parse_stat = self.x_parse()
				if parse_stat == self.PARSE_OK:
					self.server.k_conns[f] = [self.r_http_ver, self.keep_connection]
					self.server.c_path[f] = self.path
					if self.cmd_get == 1 or self.cmd_head == 1:
						self.z_check_xcache(self._f)
						if self.xcache_hit:
							self.x_response()
						else:
							self.z_GET_init()
							if self.xResult < 0:
								#something wrong
								if self.xResult == self.xR_ERR_5xx:
									#backend error
									self.x_response()
								else:
									_do_clean = False
					elif self.cmd_put == 1 or self.cmd_delete == 1:
						#z_PUT_init only exists on subclasses that support writes
						if hasattr(self, "z_PUT_init"):
							self.z_PUT_init()
							if self.xResult < 0:
								if self.xResult == self.xR_ERR_5xx:
									self.x_response()
								else:
									_do_clean = False
						else:
							self.xResult = self.xR_ERR_HANDLE
					else:
						self.xResult = self.xR_ERR_HANDLE
				elif parse_stat == self.PARSE_AGAIN:
					self.xResult = self.xR_PARSE_AGAIN
					continue
				else:
					self.xResult = self.xR_ERR_PARSE
					#client may closed connection, clean cb_conns
					self.server.cleanc(self._f)
			elif rw_mode == 1:
				#to client, resume_transfer = 1
				#print "1 s->c" #server to client
				self.init_handler(self.server.conns[f], self.server.addrs[f], rw_mode)
				if self.xcache_hit:
					self.x_response()
					_cb = self.server.cb_conns.get(f)
					if _cb:
						_z_sock, _f = _cb
					else:
						_z_sock, _f = self.None2
					if _z_sock:
						#print "xcache_hit clean cb_conns pair:", f, _f
						self.server.z_reqs_cnt[_f] -= 1
						if self.server.z_reqs_cnt[_f] == 0:
							#release z_conn & c_conn pair in cb_conns
							self.server.cb_conns[_f] = None
							self.server.cb_conns[f] = None
							#add idle list
							if str(self.server.zaddrs[_f]) in self.server.zidles:
								if _f not in self.server.zidles[str(self.server.zaddrs[_f])]:
									self.server.zidles[str(self.server.zaddrs[_f])].appendleft(_f)
							else:
								self.server.zidles[str(self.server.zaddrs[_f])] = deque([_f])
							self.server.zconns_stat[_f] = [0, time.time()] #conn idle
							#clean zcache
							self.server.zcache.pop(_f, None)
							self.server.zcache_stat.pop(_f, None)
				else:
					_do_clean = self.z_transfer_client(self._f)
			elif rw_mode == 2:
				#from backend, resume_transfer = 2
				#print "2 b->s" #backend to server
				self.init_handler(self.server.zconns[f], self.server.zaddrs[f], rw_mode)
				parse_stat = self.z_transfer_backend(self._f)
				if parse_stat == self.PARSE_ERROR:
					self.server.cleanz(self._f)
				elif parse_stat == self.PARSE_AGAIN:
					#backend yielded no data: retry via a fresh backend connection
					self.z_hostname = self.server.zhosts[self._f]
					self.path = self.server.z_path[self._f]
					self._r = self.server.z_reqs[self._f]
					#print "z_conns", self.server.zconns
					#print "cb_conns", self.server.cb_conns
					#print "idle_zconn", self.server.zidles
					#print f
					_do_clean = False
					_cb = self.server.cb_conns.get(self._f)
					if _cb:
						_client_sock, _client_sock_fileno = _cb
					else:
						_client_sock, _client_sock_fileno = self.None2
					if not _client_sock:
						self.server.epoll.unregister(f)
						self.server.cleanz(self._f)
					else:
						self.server.cleanz(self._f)
						self.z_connect_backend(rw=2, client_sock=_client_sock)
						self.z_send_request_init()
			elif rw_mode == 3:
				#to backend, resume_transfer = 3
				#print "3 s->b" #server to backend
				self.init_handler(self.server.zconns[f], self.server.zaddrs[f], rw_mode)
				_do_clean = self.z_GET_backend()
			else:
				self.xResult = self.xR_ERR_PARSE
			if _do_clean:
				self.transfer_completed = 1
				self.clean()
def z_parse_address(self, addr):
try:
host, port = addr.split(':', 1)
port = int(port)
except:
#return None
return addr, 80
return host, port
def z_resolve_request_host(self, _hostname):
#self.tp_host[_hostname] = {'ip', 'expire-time'}
#transparent hosts's hostname-ip resolve results
_t = time.time()
_ttl = self.ttl.get('*')
if _hostname in self.tp_host:
if _t > self.tp_host[_hostname]['expire-time']:
#renew ip
self.tp_host[_hostname]['ip'] = socket.gethostbyname(_hostname)
self.tp_host[_hostname]['expire-time'] = _ttl + _t
else:
#new resolve
self.tp_host[_hostname]= {'ip':socket.gethostbyname(_hostname), 'expire-time':_ttl+_t}
return ''.join([self.tp_host[_hostname]['ip'], ':80'])
	def z_pick_a_backend(self):
		"""Choose a backend "ip:port" string for self.z_hostname.

		Policy depends on self.z_mode: Z_RR round-robin, Z_IP hash of the
		client's /24 network, Z_URL hash of the request path. Hash modes skip
		backends marked down in self.stat, retrying random slots until the
		down-mark's ttl expires or all candidates were tried.
		"""
		if self.transparent_proxy and self.z_hostname not in self.rra.keys():
			z_hostname = '*'
			if self.rra['*'][0] == '0.0.0.0:0':
				#a special backend 0.0.0.0:0 is defined,
				#means that zsd should resolve the request hostname to ip and direct to it
				return self.z_resolve_request_host(self.z_hostname)
		else:
			z_hostname = self.z_hostname
		if self.z_mode == self.Z_RR:
			self.z_backends = self.shift(self.rra.get(z_hostname), self.rrn.get(z_hostname))
			self.rrn[z_hostname] = (self.rrn[z_hostname] + 1) % len(self.rra.get(z_hostname))
			return self.z_backends[0]
		elif self.z_mode == self.Z_IP:
			try:
				_ip_peer, _port_peer = self.sock.getpeername()
				_ip_forward = self.in_headers.get('X-Forwarded-For')
				if _ip_forward:
					#get the real peer ip, for what via cache servers
					_ip_peer = _ip_forward.split(',', 1)[0].strip()
			except:
				_ip_peer = b''
			_ips = self.rra.get(z_hostname)
			_ips_n = len(_ips)
			if _ip_peer:
				#ip hash, with c block num
				_ip = _ip_peer.split('.')
				_idx = __idx = (int(_ip[0])*65536 + int(_ip[1])*256 + int(_ip[2]))%_ips_n
				_try = 1
				while self.stat[_ips[_idx]][0] == False:
					#server marked down
					_t = time.time()
					if _t - self.stat[_ips[_idx]][2] > self.stat[_ips[_idx]][1]:
						#expires ttl, will be rechecked
						self.stat[_ips[_idx]][0] = True
						self.stat[_ips[_idx]][2] = _t
						break
					_idx = random.randint(0, _ips_n - 1)
					if _idx == __idx:
						_idx = (_idx + 1) % _ips_n
					_try += 1
					if _try >= _ips_n:
						break
				return _ips[_idx]
			else:
				return _ips[random.randint(0, _ips_n - 1)]
		elif self.z_mode == self.Z_URL:
			#url hash, with path md5's first 6 hex digist
			#NOTE(review): only hex chars 0, 2 and 4 are used (single-char slices), not 6 - confirm intent
			md5 = hashlib.md5()
			md5.update(self.path)
			_path_md5 = md5.hexdigest()
			_ips = self.rra.get(z_hostname)
			_ips_n = len(_ips)
			_idx = __idx = (int(_path_md5[0:1], base=16)*65536 + int(_path_md5[2:3], base=16)*256 + int(_path_md5[4:5], base=16))%_ips_n
			_try = 1
			while self.stat[_ips[_idx]][0] == False:
				#server marked down
				_t = time.time()
				if _t - self.stat[_ips[_idx]][2] > self.stat[_ips[_idx]][1]:
					#expires ttl, will be rechecked
					self.stat[_ips[_idx]][0] = True
					self.stat[_ips[_idx]][2] = _t
					break
				_idx = random.randint(0, _ips_n - 1)
				if _idx == __idx:
					_idx = (_idx + 1) % _ips_n
				_try += 1
				if _try >= _ips_n:
					break
			return _ips[_idx]
def z_GET_backend(self):
#resume send (large)request to backend
self._r = self.server.z_reqs[self._f]
return self.z_send_request_resume(self._r, self._f)
	def z_connect_backend(self, rw=0, client_sock=None, addr=None, update_cb_conns=True):
		#print "connecting to backend" #bbb
		"""Attach this request to a backend connection.

		Reuses a pooled idle connection to the picked backend when one is still
		fresh, otherwise opens a new socket (keep-alive tuned). On connect
		failure the backend is marked down and a 5xx resp code is set. Finally
		registers the client<->backend fd pair in server.cb_conns and switches
		self._f to the backend fileno.
		"""
		if addr:
			self.z_host_addr = addr
		else:
			self.z_host_addr = self.z_parse_address(self.z_pick_a_backend())
		self.z_host_sock = None
		while str(self.z_host_addr) in self.server.zidles and len(self.server.zidles[str(self.z_host_addr)]) > 0:
			#look for idle connection
			self.z_host_sock = self.server.zconns.get(self.server.zidles[str(self.z_host_addr)].pop(), None)
			_zsfn = self.z_host_sock.fileno()
			if _zsfn == -1 or time.time() - self.server.zconns_stat[_zsfn][1] > self.z_idle_ttl:
				self.z_host_sock = None
			else:
				break
		if not self.z_host_sock:
			#the idle conn may be closed, make a new connection
			#self.z_host_sock = gevent.socket.create_connection(self.z_host_addr)
			self.z_host_sock = socket.socket()
			self.z_host_sock.settimeout(5)
			try:
				self.z_host_sock.connect(self.z_host_addr)
			except socket.error as e: #ppp
				#mark backend down and translate the connect errno into a 5xx code
				_addr_s = ''.join([self.z_host_addr[0],':',str(self.z_host_addr[1])])
				self.stat[_addr_s][0] = False
				self.stat[_addr_s][2] = time.time()
				if e.errno == errno.ECONNRESET:
					self.set_resp_code(502)
				elif e.errno == errno.ETIMEDOUT:
					self.set_resp_code(504)
				elif e.errno == errno.ECONNREFUSED:
					self.set_resp_code(503)
				else:
					self.set_resp_code(500)
				self.xResult = self.xR_ERR_5xx
				return
			self.z_host_sock.setblocking(0)
			self.z_host_sock.setsockopt(socket.SOL_SOCKET,socket.SO_KEEPALIVE,1)
			self.z_host_sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPIDLE, 20)
			self.z_host_sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPCNT, 4)
			self.z_host_sock.setsockopt(socket.SOL_TCP, socket.TCP_KEEPINTVL, 5)
		#make client-backend socket pair, store them into cb_conns
		#identify backend socket self.z_host_sock with client socket self._f
		if update_cb_conns:
			if rw == 0:
				_c_sock_fileno = self._f
			elif rw == 2:
				_c_sock_fileno = client_sock.fileno()
			self.server.cb_conns[_c_sock_fileno] = [self.z_host_sock, self.z_host_sock.fileno()]
		self._f = self.z_host_sock.fileno()
		#now self._f is backend socket, identidy it with client socket
		if update_cb_conns:
			self.server.zconns_stat[self._f] = [1, -1] #using backend connection
			if rw == 0:
				self.server.cb_conns[self._f] = [self.sock, self.sock.fileno()]
			elif rw == 2:
				self.server.cb_conns[self._f] = [client_sock, client_sock.fileno()]
		#print "z_connect_backend create cb_conns pair:", _c_sock_fileno, self._f
		#print "cb_conns:", self.server.cb_conns
		#print "get zconn:", self._f
		#print "self.sock:", self.sock
		self.server.zconns[self._f] = self.z_host_sock
		self.server.zaddrs[self._f] = self.z_host_addr
		self.server.zhosts[self._f] = self.z_hostname
	def z_send_request_init(self, no_recv=False):
		#init send request, _r for request headers, _f for socket fileno to backend
		"""First push of the request bytes to the backend.

		Sends up to half the send buffer; on a partial send, records progress in
		z_reqs_stat and polls the backend fd for writability. With no_recv=True
		(fire-and-forget) a fully-sent request closes the backend conn at once.
		"""
		self.server.z_reqs[self._f] = self._r
		#self.server.z_resp_header[self._f] = None
		self.server.z_path[self._f] = self.path
		#if no_recv:
		#	self.z_host_sock.shutdown(socket.SHUT_RD)
		try:
			if len(self._r) > self.send_buf_size/2:
				_buf_size = self.send_buf_size/2
				_once = False
			else:
				_buf_size = len(self._r)
				_once = True
			sent = self.z_host_sock.send(self._r[:_buf_size])
			if sent < _buf_size:
				_once = False
			if not _once:
				#partial send: remember offset and watch for EPOLLOUT too
				self.server.z_reqs_stat[self._f] = [sent, no_recv]
				try:
					self.server.epoll.register(self._f, select.EPOLLIN | select.EPOLLOUT)
				except IOError as e:
					#fd already registered - just change the event mask
					self.server.epoll.modify(self._f, select.EPOLLIN | select.EPOLLOUT)
				except:
					raise
			else:
				if self._f in self.server.z_reqs_stat:
					self.server.z_reqs_stat.pop(self._f)
				try:
					self.server.epoll.register(self._f, select.EPOLLIN)
				except IOError as e:
					self.server.epoll.modify(self._f, select.EPOLLIN)
				except:
					raise
		except socket.error as e:
			return self.PARSE_ERROR
		except:
			raise
		if no_recv:
			if _once:
				#whole request already out and no response expected: drop the conn
				try:
					self.server.epoll.unregister(self._f)
				except:
					raise
				self.z_host_sock.close()
				self.server.zconns.pop(self._f)
				self.server.zaddrs.pop(self._f)
				self.server.zhosts.pop(self._f)
		else:
			if self._f in self.server.z_reqs_cnt:
				self.server.z_reqs_cnt[self._f] += 1
			else:
				self.server.z_reqs_cnt[self._f] = 1
	def z_send_request_resume(self, _r, _f):
		#resume request sending
		"""Continue a partially-sent backend request from the offset recorded in
		z_reqs_stat; on completion either switch back to read-polling or, for
		no_recv requests, tear the backend connection down. Always returns False
		(the transfer is not a client-side completion).
		"""
		if _f in self.server.z_reqs_stat:
			begin, no_recv = self.server.z_reqs_stat[_f]
			if len(_r[begin:]) > self.send_buf_size/2:
				_buf_size = self.send_buf_size/2
			else:
				_buf_size = len(_r[begin:])
			sent = self.z_host_sock.send(_r[begin:begin+_buf_size])
			if begin + sent < len(_r):
				self.server.z_reqs_stat[_f] = [begin + sent, no_recv]
			else:
				#all sent
				self.server.z_reqs_stat.pop(_f)
				if not no_recv:
					self.server.epoll.modify(_f, select.EPOLLIN)
				else:
					try:
						self.server.epoll.unregister(self._f)
					except:
						pass
					self.z_host_sock.close()
					self.server.zconns.pop(self._f)
					self.server.zaddrs.pop(self._f)
					self.server.zhosts.pop(self._f)
		return False
	def z_GET_init(self):
		#init connection to backend, send request, ggg
		"""Handle a fresh GET/HEAD: resolve the Host header, reuse the paired
		backend connection when it already targets the same hostname, otherwise
		connect a new backend, then send the request."""
		_f = None
		try:
			self.z_hostname, _port = self.z_parse_address(self.in_headers.get("Host").lower())
			if self.z_hostname not in self.rra.keys() and not self.transparent_proxy:
				#not my serving hostname
				self.xResult = self.xR_ERR_HANDLE
				return
			_cb = self.server.cb_conns.get(self._f)
			if _cb:
				self.z_host_sock, _f = _cb
			else:
				self.z_host_sock, _f = self.None2
			if _f and self.server.zhosts.get(_f, None) == self.z_hostname:
				#existing backend pair serves the same hostname: reuse it
				#print "z_GET_init remake cb_conns pair:", self._f, _f
				self._f = _f
				self.server.cb_conns[_f] = [self.sock, self.sock.fileno()]
			else:
				#print "z_GET_init new conn:", self._f, _f
				self.z_connect_backend()
				_f = self._f
			if self.xResult == self.xR_ERR_5xx:
				#backend connect failed; resp code already set
				return
			self.z_send_request_init()
		except:
			self.xResult = self.xR_ERR_HANDLE
	def z_transfer_client(self, __f):
		#to client 222
		"""Stream buffered backend response blocks (server.zcache) to the client.

		Returns True only when the whole response has been sent; on completion
		the backend conn is parked on the idle list, 200-responses are promoted
		to xcache, and the per-conn zcache entries are dropped.
		"""
		_cb = self.server.cb_conns.get(__f)
		if _cb:
			_c, _f = _cb
		else:
			_c, _f = self.None2
		if _f not in self.server.zcache_stat:
			return False
		blockno, begin = self.server.zcache_stat[_f][:2]
		if blockno == len(self.server.zcache[_f]):
			#all buffered blocks have been pushed out
			if self.server.zcache_stat[_f][4] == 1:
				#finished all sent
				self.server.epoll.modify(self.sock, select.EPOLLIN)
				if self.server.zcache_stat[_f][7] == self.HTTP_OK:
					if self.server.zcache_stat[_f][10] == 1 or self.server.zcache_stat[_f][5] > 0:
						#only 200 and chuncked or body_size > 0 response item moved to xcache
						#here may be wrong, should use self.cmd_get instead of self.server.zcache_stat[_f][5]
						self.zcache_to_xcache(_f)
				self.server.z_reqs_cnt[_f] -= 1
				if self.server.z_reqs_cnt[_f] == 0:
					#release z_conn & c_conn pair in cb_conns
					#print "z_transfer_client clean cb_conns pair:", __f, _f
					self.server.cb_conns[__f] = None
					self.server.cb_conns[_f] = None
				_backend_sock = self.server.zconns.get(_f)
				if _backend_sock:
					if _backend_sock.fileno() == -1:
						#backend closed connection
						self.server.zconns.pop(_f)
						self.server.zconns_stat.pop(_f)
					else:
						#add idle list
						if str(self.server.zaddrs[_f]) in self.server.zidles:
							if _f not in self.server.zidles[str(self.server.zaddrs[_f])]:
								#add to idle list
								self.server.zidles[str(self.server.zaddrs[_f])].appendleft(_f)
						else:
							self.server.zidles[str(self.server.zaddrs[_f])] = deque([_f])
						self.server.zconns_stat[_f] = [0, time.time()] #conn idle
				#clean zcache
				self.server.zcache.pop(_f)
				self.server.zcache_stat.pop(_f)
				#self.server.z_path.pop(_f)
				#finished
				return True
			else:
				#no more data yet or finished with no Content-Length? that's a problem.
				_backend_sock = self.server.zconns.get(_f)
				if _backend_sock:
					if _backend_sock.fileno() == -1:
						#backend hung up: treat what we have as the whole body
						self.server.zcache_stat[_f][5] = self.server.zcache_stat[_f][2] - self.server.zcache_stat[_f][3]
						self.server.zcache_stat[_f][4] = 1
				return False
		if len(self.server.zcache[_f][blockno][begin:]) > self.send_buf_size:
			sent = self.sock.send(self.server.zcache[_f][blockno][begin:begin + self.send_buf_size])
			should_sent = self.send_buf_size
			self.server.zcache_stat[_f][1] += sent
		else:
			sent = self.sock.send(self.server.zcache[_f][blockno][begin:])
			should_sent = len(self.server.zcache[_f][blockno][begin:])
			if sent < should_sent:
				self.server.zcache_stat[_f][1] += sent
			else:
				#block fully sent: advance to the next block
				self.server.zcache_stat[_f][0] += 1
				self.server.zcache_stat[_f][1] = 0
		#print "sent block:", blockno, sent, len(self.server.zcache[_f][blockno]), self.send_buf_size
		#print "zcache_stat:", self.server.zcache_stat[_f]
		return False
	def zcache_to_xcache(self, _f):
		"""Promote a completed backend response from the per-conn zcache into the
		shared in-memory xcache (and the on-disk shelf when enabled).

		The ttl comes from Cache-Control max-age, else the Expires header, else
		the default xcache_ttl; private/no-cache responses are never cached.
		"""
		#remember that only 200 and body_size > 0 response item will be moved to xcache
		_cc = self.server.z_resp_header[_f].get('Cache-Control')
		_exp = self.server.z_resp_header[_f].get('Expires')
		_ttl = 0
		_t = time.time() #now
		if _cc:
			if "private" in _cc or "no-cache" in _cc:
				_ttl = -1
			elif "max-age=" in _cc:
				#scan the digits following max-age=
				_age_s = ''
				_index = _cc.find('max-age=') + len('max-age=')
				while _cc[_index] in ['1','2','3','4','5','6','7','8','9','0']:
					_age_s = ''.join([_age_s, _cc[_index]])
					_index += 1
					if _index > len(_cc) - 1:
						break
				_ttl = _t + int(_age_s)
			else:
				if _exp:
					#Expires: Tue, 20 Oct 2015 04:27:25 GMT
					_ttl = calendar.timegm(time.strptime(_exp, '%a, %d %b %Y %H:%M:%S GMT'))
				else:
					_ttl = self.xcache_ttl + _t
		else:
			if _exp:
				_ttl = calendar.timegm(time.strptime(_exp, '%a, %d %b %Y %H:%M:%S GMT'))
			else:
				_ttl = self.xcache_ttl + _t
		if _ttl > _t:
			if len(self.server.xcache) > self.z_cache_size:
				#simple eviction: drop an arbitrary item once the cache is full
				self.server.xcache.popitem()
			try:
				self.server.z_resp_header[_f].pop("Connection")
			except:
				pass
			self.out_head_s = ''.join(['\n'.join(['%s: %s' % (k, v) for k, v in self.server.z_resp_header[_f].items()]), '\n'])
			_resp = ''.join(self.server.zcache[_f])
			self.out_body_file = _resp[self.server.zcache_stat[_f][3]:]
			self.out_body_size = len(self.out_body_file)
			_xcache_key = b''
			_content_encoding = self.server.z_resp_header[_f].get('Content-Encoding')
			if _content_encoding:
				#key format: "<encoding>:<hostname><path>"
				_xcache_key = ''.join([_content_encoding, ':', self.server.zcache_stat[_f][9],self.server.zcache_stat[_f][8]])
			else:
				_xcache_key = ''.join([self.server.zcache_stat[_f][9],self.server.zcache_stat[_f][8]])
			self.server.xcache[_xcache_key] = [_ttl, self.out_head_s, self.out_body_file, self.out_body_size, self.out_body_file_lmt, self.out_body_mmap]
			if self.z_xcache_shelf:
				try:
					if len(self.server.xcache_shelf) > self.z_shelf_size:
						if hasattr(self.server.xcache_shelf.dict, 'first'):
							#dbhash format
							k, v = self.server.xcache_shelf.dict.first()
							with self.server.xcache_shelf_lock:
								del self.server.xcache_shelf[k]
						if hasattr(self.server.xcache_shelf.dict, 'firstkey'):
							#gdbm format
							with self.server.xcache_shelf_lock:
								del self.server.xcache_shelf[self.server.xcache_shelf.dict.firstkey()]
				except:
					pass
				try:
					#moved to self.server.check_lbs
					#while len(self.server.xcache_shelf.cache) > self.z_cache_size:
					#	self.server.xcache_shelf.cache.popitem()
					with self.server.xcache_shelf_lock:
						self.server.xcache_shelf[_xcache_key] = self.server.xcache[_xcache_key]
					#moved to self.server.check_lbs
					#if hasattr(self.server.xcache_shelf.dict, 'sync'):
					#	#self.server.xcache.dict is an anydbm object, mostly gdbm
					#	self.server.xcache_shelf.dict.sync()
				except:
					#may be problem in concurrent mode
					pass
	def z_transfer_backend(self, _f):
		#from backend
		"""Read one buffer of backend response data into server.zcache[_f].

		Parses the response headers once enough bytes arrived, flips the paired
		client fd to EPOLLIN|EPOLLOUT so the response can be relayed, and tracks
		completion via Content-Length or the chunked terminator. Returns
		PARSE_OK when finished, PARSE_MORE/PARSE_AGAIN/PARSE_ERROR otherwise.
		"""
		try:
			_b = self.z_host_sock.recv(self.recv_buf_size)
			if not _b:
				#peer closed connection?
				#print "no content recieved, trying reconnect"
				return self.PARSE_AGAIN
		except socket.error as e:
			if e.errno == errno.EAGAIN:
				#no more request data, see if the whole request headers should be recieved
				return self.PARSE_AGAIN
			else:
				#peer closed connection?
				#return self.PARSE_AGAIN
				return self.PARSE_ERROR
		#self.server.zcache_stat[_f] sample:
		#[2, 0, 25230, 217, 0, 25013, 1, 200, '/py/vms_rrd/vms-ping_day.png', 'vm0']
		if _f in self.server.zcache and self.server.zcache_stat[_f][4] == 0 and self.z_finished == 0:
			#continue recv
			"""
			self.server.zcache_stat[_f][2] total_size_recv
			self.server.zcache_stat[_f][3] header_length
			self.server.zcache_stat[_f][4] finished
			self.server.zcache_stat[_f][5] body_size
			self.server.zcache_stat[_f][6] keep_connection
			self.server.zcache_stat[_f][7] http_status_code
			self.server.zcache_stat[_f][8] path
			self.server.zcache_stat[_f][9] hostname
			self.server.zcache_stat[_f][10] chuncked encoding
			"""
			self.server.zcache[_f].append(_b)
			self.server.zcache_stat[_f][2] += len(_b)
			if not self.response_headers_parsed:
				if self.EOL2 in _b or self.EOL1 in _b:
					#rebuild the response headers and check them
					self.parse_backend_response_headers(''.join(self.server.zcache[_f]), _f)
					self.server.zcache_stat[_f][3] = self.z_header_length
					self.server.zcache_stat[_f][6] = self.keep_connection
					self.server.zcache_stat[_f][7] = self.resp_code
					if self.response_headers_parsed:
						_cb = self.server.cb_conns.get(_f)
						if _cb:
							_c_sock, _c_sock_no = _cb
						else:
							_c_sock, _c_sock_no = self.None2
						if _c_sock:
							if _c_sock_no in self.server.xcache_stat:
								#clear xcache_stat to avoid cache hit before
								self.server.xcache_stat.pop(_c_sock_no, None)
							self.server.epoll.modify(_c_sock, select.EPOLLIN | select.EPOLLOUT)
			if self.server.zcache_stat[_f][5] > 0:
				if self.server.zcache_stat[_f][2] == self.server.zcache_stat[_f][5] + self.server.zcache_stat[_f][3]:
					#finished content-length
					self.server.zcache_stat[_f][4] = 1
					self.z_finished = 1
			elif self.server.zcache_stat[_f][5] == 0 and "0\r\n\r\n" in _b:
				#finished chunked encoding
				self.server.zcache_stat[_f][4] = 1
				self.z_finished = 1
		elif self.z_finished == 0:
			#first recv
			_path = self.server.z_path[_f]
			_z_hostname = self.server.zhosts[_f]
			self.server.zcache[_f] = [_b]
			#zcache_stat format: [block num, size sent, total_size_recv, header_length, finished, body_size, keep_connection,resp_code,path,hostname,chuncked_encoding]
			if self.EOL2 in _b or self.EOL1 in _b:
				self.parse_backend_response_headers(_b, _f)
				self.server.zcache_stat[_f] = [0, 0, len(_b), self.z_header_length, self.z_finished, self.z_body_size, self.keep_connection, self.resp_code, _path, _z_hostname, self.chuncked_encoding]
			else:
				#headers incomplete: mark the unknown fields with sentinels
				self.server.zcache_stat[_f] = [0, 0, len(_b), -1, 0, -1, -1, 0, _path, _z_hostname, self.chuncked_encoding]
			if self.response_headers_parsed:
				_cb = self.server.cb_conns.get(_f)
				if _cb:
					_c_sock, _c_sock_no = _cb
				else:
					_c_sock, _c_sock_no = self.None2
				if _c_sock:
					if _c_sock_no in self.server.xcache_stat:
						#clear xcache_stat to avoid cache hit before
						self.server.xcache_stat.pop(_c_sock_no, None)
					self.server.epoll.modify(_c_sock, select.EPOLLIN | select.EPOLLOUT)
				#else:
					#at this point, the last request of client should completed and cb_conns cleaned
					#may be safe to ignore it, but if this request is different from the last? ....
					#print "cb_conns:", self.server.cb_conns
					#print "f:", _f, "zcache_stat:", self.server.zcache_stat, "z_reqs_cnt:", self.server.z_reqs_cnt
		if self.z_finished == 1:
			self.server.epoll.unregister(_f)
			return self.PARSE_OK
		else:
			return self.PARSE_MORE
	def parse_backend_response_headers(self, _b, _f):
		"""Parse the backend's response headers out of buffer _b.

		Sets resp_code, body size / finished / chunked flags, records the header
		dict in server.z_resp_header[_f], then rewrites the HTTP version token
		and the Connection header in the buffered block so they match what the
		client side negotiated (adjusting z_header_length by the +-5 byte
		difference between "keep-alive" and "close").
		"""
		#cut headers out
		b = _b.split(self.EOL2, 1)
		sp = len(self.EOL2)
		if not b[0]:
			b = _b.split(self.EOL1, 1)
			sp = len(self.EOL1)
			if not b[0]:
				#illeagal response headers
				return self.PARSE_ERROR
		self.z_header_length = len(b[0]) + sp
		a = b[0].strip().split("\r\n", 1)
		if not a[0]:
			return self.PARSE_ERROR
		#"HTTP/1.1 200 OK"
		_c_http_ver, _resp_code_str, self.resp_msg = a[0].split(None, 2)
		self.resp_code = int(_resp_code_str)
		if self.resp_code == self.HTTP_OK:
			self.has_resp_body = True
		self.server.z_resp_header[_f] = dict((k, v) for k, v in (item.split(": ") for item in a[1].split("\r\n")))
		self.in_headers = dict((k, v) for k, v in (item.split(": ") for item in a[1].split("\r\n")))
		try:
			self.z_finished = 0
			cl = self.in_headers.get("Content-Length")
			if cl:
				self.z_body_size = int(cl)
				if len(b[1]) == self.z_body_size:
					self.z_finished = 1
			else:
				c1 = self.in_headers.get("Transfer-Encoding")
				if c1:
					if c1.lower()== "chunked":
						self.chuncked_encoding = 1
						self.z_body_size = 0
						if "0\r\n\r\n" in b[1]:
							#terminating zero-size chunk already present
							self.z_finished = 1
					else:
						self.z_body_size = -1
				else:
					if self.z_host_sock.fileno() == -1:
						#backend closed connection, transfer finished
						self.z_body_size = len(_b) - self.z_header_length
						self.z_finished = 1
					elif self.resp_code > self.HTTP_OK:
						self.z_body_size = 0
						self.z_finished = 1
					else:
						self.z_body_size = 0
		except:
			#malformed length/encoding headers: fall back to "unknown size"
			self.z_body_size = -1
			self.z_finished = 0
		self.check_connection(_c_http_ver, check_ims=False, gen_xcache_key=False)
		self.response_headers_parsed = True
		self.server.k_conns[_f] = [self.r_http_ver, self.keep_connection]
		#mangle the http status line and "Connection:" header to fit client side
		#_b = self.server.zcache[_f][0] #headers in block 0
		#print "_f:", _f, "cb_conns:", self.server.cb_conns, "k_conns", self.server.k_conns
		try:
			__rc_http_ver, _c_keep_connection = self.server.k_conns[self.server.cb_conns[_f][1]]
			if __rc_http_ver == 1:
				_rc_http_ver = "HTTP/1.1"
			else:
				_rc_http_ver = "HTTP/1.0"
		except:
			_rc_http_ver = "HTTP/1.0"
			_c_keep_connection = 0
		if _c_http_ver != _rc_http_ver:
			if "HTTP/1.1" in self.server.zcache[_f][0]:
				self.server.zcache[_f][0] = self.server.zcache[_f][0].replace("HTTP/1.1", "HTTP/1.0", 1)
			elif "HTTP/1.0" in self.server.zcache[_f][0]:
				self.server.zcache[_f][0] = self.server.zcache[_f][0].replace("HTTP/1.0", "HTTP/1.1", 1)
		if _c_keep_connection != self.keep_connection:
			if "Connection: keep-alive" in self.server.zcache[_f][0]:
				self.server.zcache[_f][0] = self.server.zcache[_f][0].replace("Connection: keep-alive", "Connection: close", 1)
				self.z_header_length -= 5
			elif "Connection: close" in self.server.zcache[_f][0]:
				self.server.zcache[_f][0] = self.server.zcache[_f][0].replace("Connection: close", "Connection: keep-alive", 1)
				self.z_header_length += 5
def out_conns_stats(self):
print "--------------------------------------------------------------------------"
print "zconns:", self.server.zconns
print "zconns_stat:", self.server.zconns_stat
print "zidles:", self.server.zidles
print "cb_conns:", self.server.cb_conns
print "--------------------------------------------------------------------------"
	def check_zconns(self):
		"""Reaper loop (run under gevent): every 10s, close backend connections
		that have been idle longer than z_idle_ttl and purge them from the
		idle lists and connection tables."""
		while 1:
			gevent.sleep(10)
			for f in self.server.zconns.keys():
				if f in self.server.zconns_stat:
					_t = self.server.zconns_stat[f][1]
					if time.time() - _t > self.z_idle_ttl:
						#idle time out, clean it
						if self.server.zidles:
							for _host in self.server.zidles.keys():
								if self.server.zidles[_host]:
									try:
										self.server.zidles[_host].remove(f)
									except:
										pass
						_sock = self.server.zconns[f]
						self.server.cleanz(f)
						_sock.close()
class _xDFSHandler(_xZHandler):
	#distributed-file-system handler layered on the proxy handler
	DFS_PROXY_MODE = 0
	DFS_DIRECT_MODE = 1
	d_mode = 0 #DFS_PROXY_MODE or DFS_DIRECT_MODE
	dfs_config = None #ConfigParser instance for the '3fsd' section
	dfs_stage = 0
	dfs_redundancy = 1 #presumably the number of replicas kept per file - confirm
	dfs_region = 4096
	dfs_prefix = '_3fs'
	dfs_prefix_s = '/_3fs_'
	dfs_pool = {} #domain -> {stage -> sorted "ip:port" list}
	dfs_pool_count = {} #domain -> {stage -> pool size}
	dfs_writer = [] #ip list granted write permission
	file_stage = 0
	file_path = b''
	file_md5 = b''
	peer_ip_s = b''
def __init__(self, conn, client_address, server, native_epoll=True,
gevent_stream=False, recv_buf_size=16384, send_buf_size=65536, d_mode=0):
_xZHandler.__init__(self, conn, client_address, server, native_epoll, gevent_stream,
recv_buf_size, send_buf_size, -1)
self.init_dfs_config()
self.d_mode = d_mode
def init_dfs_config(self):
try:
#for reload config
self.dfs_pool = {}
self.dfs_pool_count = {}
self.dfs_writer = []
self.ttl = {}
self.rra = {}
self.rrn = {}
self.dfs_config = ConfigParser.ConfigParser()
if not self.dfs_config.read('3xsd.conf'):
self.dfs_config.read('/etc/3xsd.conf')
for name, value in self.dfs_config.items('3fsd'):
if name == 'stage':
self.dfs_stage = int(value)
elif name == 'redundancy':
self.dfs_redundancy = int(value)
elif name == 'region':
self.dfs_region = int(value)
self.dfs_region_mask = 1
while self.dfs_region/(16**self.dfs_region_mask) > 1:
self.dfs_region_mask += 1
elif name == 'prefix':
self.dfs_prefix = value
self.dfs_prefix_s = ''.join(['/', self.dfs_prefix, '_'])
elif name == 'write_permit':
self.ttl['3fs_writer'] = 0
self.dfs_writer = self.ip_list('3fs_writer', value, '3nsd')
else:
#must be a pool config of a domain_name
#3xsd.net = 0,10.37.10.1-2:80,10.38.10.2:80;1,10.41.0.1-2:8000
self.ttl[name] = self.rrn[name] = 0
self.rra[name] = []
self.dfs_pool[name] = {}
self.dfs_pool_count[name] = {}
for item in value.split(';'):
if item:
_stage_s, _ip_s = item.split(',', 1)
if _stage_s and _ip_s:
#dfs_pool['3xsd.net'][0] = ['10.37.10.1:80', '10.37.10.2:80', '10.38.10.2:80']
#dfs_pool['3xsd.net'][1] = ['10.41.0.1:8000', '10.41.0.2:8000']
if self.dfs_pool[name].get(int(_stage_s)):
self.dfs_pool[name][int(_stage_s)] += self.ip_list(name, _ip_s, '3fsd')
else:
self.dfs_pool[name][int(_stage_s)] = self.ip_list(name, _ip_s, '3fsd')
for i in self.dfs_pool[name]:
#to gen a fixed sorted server list, important for locating algorithm
self.dfs_pool[name][i].sort()
self.dfs_pool_count[name][i] = len(self.dfs_pool[name][i])
#rra pool for RoundRobin
self.rra[name] += self.dfs_pool[name][i]
if self.rra.get(name):
#make list distinct and sorted
self.rra[name] = list(set(self.rra[name]))
self.rra[name].sort()
#print self.rra[name]
except:
raise
#print "stage:", self.dfs_stage, ", redundancy:", self.dfs_redundancy, ", region:", self.dfs_region, ", prefix:", self.dfs_prefix, ", pool:", self.dfs_pool
def init_handler(self, conn, client_address, rw_mode=0):
_xZHandler.init_handler(self, conn, client_address, rw_mode)
#do 3fsd specific initiation
self.file_stage = 0
self.file_path = self.file_md5 = b''
def z_pick_a_backend(self, return_all=False):
#self.z_hostname self.path self._r should be setup
if self.dfs_prefix_s == self.path[:len(self.dfs_prefix_s)]:
#it's a dfs access
return self.dfs_locate_backend(self.z_hostname, return_all)
else:
return _xZHandler.z_pick_a_backend(self)
def dfs_locate_backend(self, hostname, return_all=False):
#3fs locating algorithm
#url example: http://hostname/_3fs_0/path/to/file, 0 for stage
#/path/to/file will be used to calculate out a standard md5 hex_string of 32 letters with lower case
#/path/to/file -> b4a91649090a2784056565363583d067
_fstage_s, _fpath = self.path[len(self.dfs_prefix_s):].split('/', 1)
self.file_stage = int(_fstage_s)
self.file_path = ''.join(['/', _fpath])
md5 = hashlib.md5()
md5.update(self.file_path)
self.file_md5 = md5.hexdigest()
i = 0
_ret = []
while self.dfs_redundancy - i > 0:
_point = int(self.file_md5[self.dfs_region_mask*i:self.dfs_region_mask*(i+1)], base=16)
_serno = __serno = int(_point / float(self.dfs_region / self.dfs_pool_count[hostname][self.dfs_stage]))
while self.dfs_pool[hostname][self.dfs_stage][_serno] in _ret:
#make sure the redundancy copys not in same server
_serno = ( _serno + 1 ) % self.dfs_pool_count[hostname][self.dfs_stage]
if _serno == __serno:
break
_ret.append(self.dfs_pool[hostname][self.dfs_stage][_serno])
i += 1
if return_all:
return _ret
else:
return _ret[random.randint(0, self.dfs_redundancy - 1)]
def z_GET_init(self):
#init connection to backend, send request, ggg
_f = None
try:
self.z_hostname, _port = self.z_parse_address(self.in_headers.get("Host").lower())
if self.z_hostname not in self.rra.keys():
#not my serving hostname
self.xResult = self.xR_ERR_HANDLE
return
_cb = self.server.cb_conns.get(self._f)
if _cb:
self.z_host_sock, _f = _cb
else:
self.z_host_sock, _f = self.None2
if _f and self.server.zhosts.get(_f, None) == self.z_hostname and self.dfs_prefix_s != self.path[:len(self.dfs_prefix_s)]:
#print "z_GET_init remake cb_conns pair:", self._f, _f
self._f = _f
self.server.cb_conns[_f] = [self.sock, self.sock.fileno()]
else:
#print "z_GET_init new conn:", self._f, _f
self.z_connect_backend()
_f = self._f
if self.xResult == self.xR_ERR_5xx:
return
self.z_send_request_init()
except:
self.xResult = self.xR_ERR_HANDLE
def z_PUT_init(self):
try:
self.z_hostname, _port = self.z_parse_address(self.in_headers.get("Host").lower())
if self.z_hostname not in self.rra.keys():
#not my serving hostname
self.xResult = self.xR_ERR_HANDLE
return
if self.dfs_prefix_s == self.path[:len(self.dfs_prefix_s)]:
#only 3fs access allow PUT/DELETE action in z_lbs & x_dfs mode
try:
self.peer_ip_s, _port_s = self.sock.getpeername()
except:
self.peer_ip_s = b''
if self.peer_ip_s not in self.dfs_writer:
self.xResult = self.xR_ERR_HANDLE
return
_backends = self.z_pick_a_backend(return_all=True)
_b_index = 0
for _b in _backends:
self.z_host_addr = self.z_parse_address(_b)
self.z_host_sock = None
if _b_index == 0:
self.z_connect_backend(addr=self.z_host_addr)
self.z_send_request_init()
else:
self.z_connect_backend(addr=self.z_host_addr, update_cb_conns=False)
self.z_send_request_init(no_recv=True)
_b_index += 1
else:
self.xResult = self.xR_ERR_HANDLE
except:
self.xResult = self.xR_ERR_HANDLE
raise
class _xWHandler:
	"""3wdd (warp-drive) handler: manages encrypted/compressed UDT tunnel
	sessions between hosts, backed by Linux tun devices, with optional
	relaying between two sessions and route installation on ifup."""

	server = None

	wdd_mode = 'server'
	wdd_dial = []			# session names this node actively dials out to
	encrypt = False			# global encryption switch
	encrypt_mode = None		# global cipher mode (AES.MODE_* or Blowfish.MODE_*+100)
	sess_encrypt_mode = {}		# per-session override of encrypt_mode
	aes = {}			# per-session cached AES-ECB cipher objects
	compress_tunnel = {}		# session -> 'zlib' | 'lzo'
	session = {}			# configured session names
	client_session = {}		# sessions we dial as a client
	connected = {}			# session -> True while its tunnel is up
	tun_local_ip = {}
	tun_peer_ip = {}
	tun_mtu = {}
	tun_txqueue = {}
	token = {}			# plaintext connect tokens from config
	e_token = {}			# md5(session#token); doubles as the cipher key
	peer_ip = {}
	peer_port = {}
	tun_route = {}			# session -> routes to install on ifup
	tun_rtt = {}
	route_metric = {}		# route -> {session: live metric, -1 == dead}
	route_metric_fixed = {}		# route -> {session: metric fixed by config}
	routing_metric = False
	ifup_script = {}
	ifdown_script = {}
	rtch_script = {}
	udt_relay = {}			# session -> (from, to) relay pair
	udt_relay_thread_stat = {}	# session -> relay/forward thread running flag

	IO_BLOCK = 0			# 2 forwarding threads per tunnel
	IO_NONBLOCK = 1			# shared epoll threads
	io_mode = 0
def __init__(self, conn, client_address, server, recv_buf_size=2760000, send_buf_size=2760000):
self.server = server
self.recv_buf_size = recv_buf_size
self.send_buf_size = send_buf_size
self.init_wdd_config()
self.init_handler()
	def init_wdd_config(self):
		"""Reset all session state and (re)parse the [3wdd] section of
		3xsd.conf / /etc/3xsd.conf.

		Recognized keys: mode, dial, encrypt, io_mode, relay, routing_metric;
		any other key is a tunnel session definition of the form
		local_ip:peer_ip:mtu:txqueue:token[:cipher[,compress]][:peer_ip:port][:routes].
		"""
		#for reload config
		self.wdd_mode = 'server'
		self.wdd_dial = []
		self.encrypt = False
		self.encrypt_mode = None
		self.sess_encrypt_mode = {}
		self.aes = {}
		self.session = {}
		self.client_session = {}
		self.connected = {}
		self.tun_local_ip = {}
		self.tun_peer_ip = {}
		self.tun_mtu = {}
		self.tun_txqueue = {}
		self.token = {}
		self.e_token = {}
		self.peer_ip = {}
		self.peer_port = {}
		self.tun_route = {}
		self.tun_rtt = {}
		self.route_metric = {}
		self.route_metric_fixed = {}
		self.udt_relay = {}
		self.udt_relay_thread_stat = {}
		self.compress_tunnel = {}
		self.io_mode = 0
		self.routing_metric = False
		self.ifup_script = {}
		self.ifdown_script = {}
		self.rtch_script = {}
		self.config = ConfigParser.ConfigParser()
		if not self.config.read('3xsd.conf'):
			self.config.read('/etc/3xsd.conf')
		#
		#example: an udt tunnel session called: peer1
		#local ip: 10.19.27.1 peer ip: 10.19.27.2
		#mtu: 1500 txqueue: 1000 connect token(password): ddw3~)
		#
		#peer1 = 10.19.27.1:10.19.27.2:1500:1000:ddw3~)
		for name, value in self.config.items('3wdd'):
			if name == 'mode':
				self.wdd_mode = value.lower()
			elif name == 'dial':
				self.wdd_dial = value.split(',')
			elif name == 'encrypt':
				#global cipher; Blowfish modes are offset by +100 to keep them
				#distinct from the AES mode constants
				self.encrypt = True
				_value = value.lower()
				if _value == 'on' or _value == 'aes-128-ecb':
					self.encrypt_mode = AES.MODE_ECB
				elif _value == 'aes-128-cbc':
					self.encrypt_mode = AES.MODE_CBC
				elif _value == 'aes-128-cfb':
					self.encrypt_mode = AES.MODE_CFB
				elif _value == 'aes-128-ctr':
					self.encrypt_mode = AES.MODE_CTR
				elif _value == 'blowfish-cbc':
					self.encrypt_mode = Blowfish.MODE_CBC + 100 #diff from aes
				elif _value == 'blowfish-cfb':
					self.encrypt_mode = Blowfish.MODE_CFB + 100
				elif _value == 'blowfish-ctr':
					self.encrypt_mode = Blowfish.MODE_CTR + 100
				else:
					self.encrypt = False
			elif name == 'io_mode':
				_value = value.lower()
				if _value == 'block' or _value == 'default':
					self.io_mode = self.IO_BLOCK
				elif _value == 'non_block':
					self.io_mode = self.IO_NONBLOCK
				else:
					self.io_mode = self.IO_BLOCK
			elif name == 'relay':
				#relay = a:b,c:d ... pairs of sessions forwarded into each other
				for _from_to in value.split(','):
					_from, _to = _from_to.split(':')
					if _from and _to:
						self.udt_relay[_from] = (_from, _to)
						self.udt_relay[_to] = (_from, _to)
						self.udt_relay_thread_stat[_from] = False
						self.udt_relay_thread_stat[_to] = False
			elif name == 'routing_metric':
				_value = value.lower()
				if _value == 'on':
					self.routing_metric = True
			else:
				#a session definition; at least 5 colon-separated fields
				v = value.split(':')
				if len(v) >= 5:
					self.session[name] = True
					self.tun_local_ip[name] = v[0]
					self.tun_peer_ip[name] = v[1]
					self.tun_mtu[name] = int(v[2]) if v[2] else 0
					self.tun_txqueue[name] = int(v[3]) if v[3] else 0
					self.token[name] = v[4]
					self.e_token[name] = self.encrypt_token(name, v[4])
					if self.encrypt:
						if self.encrypt_mode == AES.MODE_CBC or self.encrypt_mode == AES.MODE_CFB:
							#aes-128-cbc, aes-128-cfb
							pass
						else:
							#aes-128-ecb as default
							if name not in self.aes:
								self.aes[name] = AES.new(self.e_token[name], AES.MODE_ECB)
					if len(v) > 5:
						#optional field 6: per-session cipher[,compression]
						if v[5]:
							_em_zip = v[5].lower().split(',')
							_em = _em_zip[0]
							if _em == 'aes-128-cbc':
								self.sess_encrypt_mode[name] = AES.MODE_CBC
							elif _em == 'aes-128-cfb':
								self.sess_encrypt_mode[name] = AES.MODE_CFB
							elif _em == 'aes-128-ctr':
								self.sess_encrypt_mode[name] = AES.MODE_CTR
							elif _em == 'on' or _em == 'aes-128-ecb':
								self.sess_encrypt_mode[name] = AES.MODE_ECB
								if name not in self.aes:
									self.aes[name] = AES.new(self.e_token[name], AES.MODE_ECB)
							elif _em == 'blowfish-cbc':
								self.sess_encrypt_mode[name] = Blowfish.MODE_CBC + 100
							elif _em == 'blowfish-cfb':
								self.sess_encrypt_mode[name] = Blowfish.MODE_CFB + 100
							elif _em == 'blowfish-ctr':
								self.sess_encrypt_mode[name] = Blowfish.MODE_CTR + 100
							if len(_em_zip) > 1:
								if _em_zip[1] == 'zlib' or _em_zip[1] == 'compress':
									self.compress_tunnel[name] = 'zlib'
								elif _em_zip[1] == 'lzo':
									self.compress_tunnel[name] = 'lzo'
					if len(v) > 7:
						#optional fields 7-8: server ip + port we dial as a client
						if v[6]:
							self.peer_ip[name] = v[6]
							self.client_session[name] = True
						if v[7]:
							self.peer_port[name] = int(v[7])
						else:
							self.peer_port[name] = 9000
					if len(v) > 8 or len(v) == 7:
						#last field: comma-separated routes plus optional
						#ifup=/ifdown=/rtch= hook scripts
						self.tun_route[name] = []
						for route in v[len(v) - 1].split(','):
							if route:
								if "ifup=" in route:
									#not a 0.0.0.0/0 route, must be a ifup/ifdown script
									self.ifup_script[name] = route[5:]
									continue
								if "ifdown=" in route:
									self.ifdown_script[name] = route[7:]
									continue
								if "rtch=" in route:
									self.rtch_script[name] = route[5:]
									continue
								if route.count('/') == 2:
									#net/mask/metric form: metric pinned by config
									_net, _mask, _s_metric = route.split('/')
									route = ''.join([_net, '/', _mask])
									_metric = int(_s_metric)
									if route in self.route_metric_fixed:
										self.route_metric_fixed[route][name] = _metric
									else:
										self.route_metric_fixed[route] = {name: _metric}
								self.tun_route[name].append(route)
								if route in self.route_metric:
									self.route_metric[route][name] = len(self.route_metric[route]) + 1
								else:
									self.route_metric[route] = {name: 1}
def encrypt_token(self, session, token):
md5 = hashlib.md5()
md5.update(''.join([session, '#', token]))
return md5.hexdigest()
def init_handler(self):
pass
	def connect_udt_server(self, target, new_port=None): #ctud
		"""Dial the UDT server of session `target` and run the join handshake.

		Wire format: [len][session:peer_ip:port:allow_redirect], then the
		server echoes our ip, we answer with the 4-factor token, and the
		server replies 0 (accepted -> bring the tunnel up here) or 1
		(redirect -> reconnect to the advertised port in a new thread).
		Any connect/IO failure silently abandons the attempt.
		"""
		sock = udt.UdtSocket()
		sock.setsockopt(udt4.UDT_RCVBUF, self.server.recv_buf_size) #default 10MB
		sock.setsockopt(udt4.UDT_SNDBUF, self.server.send_buf_size) #default 10MB
		if new_port:
			_port = new_port
			_allow_redirect = 0
		else:
			_port = self.peer_port[target]
			if _port < 0:
				#negative configured port: connect to abs(port) and refuse
				#any redirect from the server
				_port = abs(_port)
				_allow_redirect = 0
			else:
				_allow_redirect = 1
		_peer_ip = socket.gethostbyname(self.peer_ip[target])
		try:
			#print "connecting udt server", self.peer_ip[target], str(_port)
			sock.connect((_peer_ip, _port))
		except:
			sock.close()
			return
		_c_str = ''.join([target, ':', _peer_ip, ':', str(_port), ':', str(_allow_redirect)])
		sock.send(struct.pack('!i', len(_c_str)))
		sock.send(_c_str)
		try:
			_len = struct.unpack('!i', sock.recv(4))[0]
			_my_ip = sock.recv(_len)
		except:
			sock.close()
			return
		#the _s_token used to verify the two sides has 4 factors: session_name, passwd(token), server_ip, client_ip
		#this should be able to prevent from middleman attack & fake connect attempt
		_s_token = self.encrypt_token(self.e_token[target], ''.join([_peer_ip, '#', _my_ip]))
		sock.send(struct.pack('!i', len(_s_token)))
		sock.send(_s_token)
		try:
			_result = -1
			_result = struct.unpack('!i', sock.recv(4))[0]
		except:
			sock.close()
			return
		if _result == 0:
			#accepted: bring the tunnel up on this socket
			self.setup_tunnel(target, sock, (_peer_ip, _port))
			self.connected[target] = True
			self.server.udt_conns_cnt[self.server._worker_id].value += 1
		elif _result == 1:
			#redirected to an idle worker's port; reconnect there
			_len = struct.unpack('!i', sock.recv(4))[0]
			_new_port_str = sock.recv(_len)
			sock.close()
			t = threading.Thread(target=self.connect_udt_server, args=(target,int(_new_port_str)))
			t.daemon = True
			t.start()
	def setup_tunnel(self, session_name, conn, addr): #stst
		"""Bring a tunnel up on an established UDT connection.

		Unless the session is a pure relay, create the tun device
		"<session>.<worker_id>", install its routes (honoring per-route
		metrics) and run the ifup hook; then register the session and start
		the forwarding machinery - epoll registration (IO_NONBLOCK) or
		dedicated forwarding threads (IO_BLOCK). On any failure everything
		is unwound best-effort and the exception re-raised.
		"""
		try:
			if not conn or not addr or not session_name: return
			_do_relay = True if session_name in self.udt_relay else False
			_tun = None
			if not _do_relay:
				_tun_name = ''.join([session_name, '.', str(self.server._worker_id)])
				_tun = pytun.TunTapDevice(name=_tun_name, flags=pytun.IFF_TUN|pytun.IFF_NO_PI)
				_tun.addr = self.tun_local_ip[session_name]
				_tun.dstaddr = self.tun_peer_ip[session_name]
				_tun.netmask = '255.255.255.255'
				_tun.mtu = self.tun_mtu[session_name]
				#drop a possibly stale host route to the peer before up()
				with open(os.devnull, 'w') as devnull:
					subprocess.call(['ip', 'route', 'del', ''.join([_tun.dstaddr, '/', _tun.netmask])], stderr=devnull)
				self.server.ztuns[_tun.fileno()] = _tun
				self.server.s_tuns[_tun.fileno()] = session_name
				_tun.up()
				with open(os.devnull, 'w') as devnull:
					subprocess.call(['ip', 'link', 'set', _tun_name, 'txqueuelen', str(self.tun_txqueue[session_name])], stderr=devnull)
				if session_name in self.tun_route:
					if self.tun_route[session_name]:
						with open(os.devnull, 'w') as devnull:
							for route in self.tun_route[session_name]: #rtrt
								if route in self.route_metric:
									if self.route_metric[route][session_name] == -1:
										#route was marked dead by destroy_tunnel;
										#revive it with a parking metric (327670+j)
										if len(self.route_metric[route]) > 1:
											j = 0
											for k in self.route_metric[route]:
												if k == session_name:
													break
												else:
													j += 1
											self.route_metric[route][session_name] = 327670 + j
											subprocess.call(['ip', 'route', 'add', route, 'metric', str(self.route_metric[route][session_name]), 'dev', _tun_name], stderr=devnull)
										else:
											self.route_metric[route][session_name] = 1
											subprocess.call(['ip', 'route', 'add', route, 'metric', '1', 'dev', _tun_name], stderr=devnull)
									else:
										subprocess.call(['ip', 'route', 'add', route, 'metric', str(self.route_metric[route][session_name]), 'dev', _tun_name], stderr=devnull)
								else:
									#no metric bookkeeping: plain delete+add
									subprocess.call(['ip', 'route', 'del', route], stderr=devnull)
									subprocess.call(['ip', 'route', 'add', route, 'dev', _tun_name], stderr=devnull)
				_ifup_script = self.ifup_script.get(session_name, None)
				if _ifup_script:
					with open(os.devnull, 'w') as devnull:
						subprocess.call([_ifup_script, _tun_name], stderr=devnull)
			self.server.s_udts[conn.UDTSOCKET.UDTSOCKET] = session_name
			self.server.zsess[session_name] = (_tun, conn , addr)
			self.tun_rtt[session_name] = -1
			if self.encrypt or session_name in self.sess_encrypt_mode:
				if session_name in self.sess_encrypt_mode:
					_encrypt_mode = self.sess_encrypt_mode[session_name]
				else:
					_encrypt_mode = self.encrypt_mode
			else:
				_encrypt_mode = None
			if session_name in self.compress_tunnel:
				_compress = self.compress_tunnel[session_name]
			else:
				_compress = None
			if self.io_mode == self.IO_NONBLOCK:
				#io_mode == IO_NONBLOCK, single thread epoll to handle udt&tun events
				if _tun:
					flag = fcntl.fcntl(_tun.fileno(), fcntl.F_GETFL)
					fcntl.fcntl(_tun.fileno(), fcntl.F_SETFL, flag | os.O_NONBLOCK)
				conn.setblocking(False)
				#sockets are sharded across udt_thread_limit epoll threads
				_n = conn.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit
				if self.server.upolls[_n] is None:
					#first socket in this shard: create the epoll + its thread
					self.server.upolls[_n] = udt.Epoll()
					self.server.upolls[_n].add_usock(conn, udt4.UDT_EPOLL_IN)
					self.udt_relay_thread_stat[session_name] = True
					if not _do_relay:
						self.server.upolls[_n].add_ssock(_tun, udt4.UDT_EPOLL_IN)
					t = threading.Thread(target=self.server.handle_event_udt_tun, args=(_n,))
					t.daemon = True
					t.start()
				else:
					self.server.upolls[_n].add_usock(conn, udt4.UDT_EPOLL_IN)
					self.udt_relay_thread_stat[session_name] = True
					if not _do_relay:
						self.server.upolls[_n].add_ssock(_tun, udt4.UDT_EPOLL_IN)
			else:
				#io_mode == IO_BLOCK (default), 2 threads bi-direction forwarding packages
				if not _do_relay:
					t = threading.Thread(target=self.server.forward_tun_udt,args=(_tun,conn,_encrypt_mode,_compress,session_name,))
					t.daemon = True
					t.start()
					t = threading.Thread(target=self.server.forward_udt_tun,args=(_tun,conn,_encrypt_mode,_compress,session_name,))
					t.daemon = True
					t.start()
				else:
					t = threading.Thread(target=self.server.forward_udt_relay,args=(conn,session_name,))
					t.daemon = True
					t.start()
			if _do_relay:
				print "UDT relay tunnel", session_name, "launched, no tun device, io_mode:", self.io_mode
			else:
				print "UDT tunnel", session_name, "launched, local", _tun.addr, "peer", _tun.dstaddr, "mtu", _tun.mtu, "encryption:", _encrypt_mode, "compress:", _compress, "io_mode:", self.io_mode
		except:
			#unwind: best-effort deregistration and close of whatever exists
			if conn:
				try:
					if self.io_mode == self.IO_NONBLOCK and self.server.upolls[conn.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit]:
						self.server.upolls[conn.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit].remove_usock(conn)
				except:
					pass
				try:
					conn.close()
					del conn
				except:
					pass
			#NOTE(review): if `del conn` succeeded above, the expressions below
			#reference a now-unbound name; the inner try swallows the resulting
			#NameError, so the tun may never be removed from its epoll - TODO confirm
			if _tun:
				try:
					if self.io_mode == self.IO_NONBLOCK and self.server.upolls[conn.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit]:
						self.server.upolls[conn.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit].remove_ssock(_tun)
				except:
					pass
				try:
					_tun.down()
					_tun.close()
					del _tun
				except:
					pass
			raise
	def destroy_tunnel(self, session_name): #ddd
		"""Tear a tunnel down: unregister it from all server tables, close
		the tun device and the UDT connection, mark its routes dead (-1)
		and run the ifdown hook script if configured."""
		_tun, _conn, _addr = self.server.zsess.pop(session_name, (None, None, None))
		self.connected.pop(session_name, None)
		self.tun_rtt.pop(session_name, None)
		if session_name in self.tun_route:
			for _route in self.tun_route[session_name]:
				#-1 marks the route dead until the session reconnects
				self.route_metric[_route][session_name] = -1
		if _tun:
			print "Destroying", ''.join([session_name, '.', str(self.server._worker_id)]), _tun, _conn, _addr
		else:
			print "Destroying", ''.join([session_name, '.', str(self.server._worker_id)]), _conn, _addr
		if _conn and _addr:
			if _tun:
				self.server.ztuns.pop(_tun.fileno(), None)
				self.server.s_tuns.pop(_tun.fileno(), None)
			self.server.s_udts.pop(_conn.UDTSOCKET.UDTSOCKET, None)
			self.server.udt_conns_cnt[self.server._worker_id].value -= 1
			if session_name in self.server.udt_send_buf:
				self.server.udt_send_buf.pop(session_name)
			if self.io_mode == self.IO_NONBLOCK:
				try:
					_n = _conn.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit
					if self.server.upolls[_n]:
						self.server.upolls[_n].remove_usock(_conn)
						if _tun:
							self.server.upolls[_n].remove_ssock(_tun)
				except:
					pass
			try:
				#revoke mem, does it work?
				if _tun:
					_tun.down()
					_tun.close()
					del _tun
				_conn.close()
				del _conn
				del _addr
			except:
				pass
		if session_name in self.udt_relay_thread_stat:
			self.udt_relay_thread_stat[session_name] = False
		_ifdown_script = self.ifdown_script.get(session_name, None)
		if _ifdown_script:
			with open(os.devnull, 'w') as devnull:
				subprocess.call([_ifdown_script, ''.join([session_name, '.', str(self.server._worker_id)])], stderr=devnull)
	def setup_udt_connection(self, conn, addr): #stud
		"""Server side of the join handshake for an incoming UDT connection.

		Reads "[len]session:my_ip:my_port:allow_redirect", echoes the peer's
		ip, verifies the 4-factor token, then either accepts (reply 0: any
		previous tunnel for the session is destroyed and a new one set up)
		or redirects the client to the currently idle worker port (reply 1).
		Failures close the connection and re-raise.
		"""
		try:
			if not conn or not addr:
				return
			_len = 0
			_len = struct.unpack('!i', conn.recv(4))[0]
			if _len > 0:
				_c_str = conn.recv(_len)
				_session_name, _my_ip, _my_port_str, _allow_redirect_str = _c_str.split(':',3)
				_peer_ip, _peer_port = addr
				if _session_name not in self.session:
					#no such session config
					conn.close()
				else:
					conn.send(struct.pack('!i', len(_peer_ip)))
					conn.send(_peer_ip)
					_len = struct.unpack('!i', conn.recv(4))[0]
					if _len > 0:
						_s_token = conn.recv(_len)
						if _s_token == self.encrypt_token(self.e_token[_session_name], ''.join([_my_ip, '#', _peer_ip])):
							#pass, check idle worker
							if _allow_redirect_str == '1':
								#this value changed at every connection time
								#_idle_port = self.server.wdd_idle_worker(int(_my_port_str))
								#this value fixed for about 20 secs
								_idle_port = self.server.udt_conn_port.value
							else:
								_idle_port = int(_my_port_str)
							if _idle_port == int(_my_port_str):
								#tell client, setup the tunnel, put conn in epoll
								conn.send(struct.pack('!i', 0))
								if _session_name in self.connected:
									#only one tunnel per session
									self.destroy_tunnel(_session_name)
								self.setup_tunnel(_session_name, conn, addr)
								self.connected[_session_name] = True
								self.server.udt_conns_cnt[self.server._worker_id].value += 1
							else:
								#send redirect msg
								conn.send(struct.pack('!i', 1))
								conn.send(struct.pack('!i', len(str(_idle_port))))
								conn.send(str(_idle_port))
								conn.close()
						else:
							#sorry
							conn.close()
		except:
			if conn:
				try:
					conn.close()
					del conn
					del addr
				except:
					pass
			raise
def decrypt_package(self, _buf, _encrypt_mode, _session):
unpad = lambda s : s[0:-ord(s[-1])]
if _encrypt_mode == Blowfish.MODE_CBC + 100:
_blf = Blowfish.new(self.e_token[_session], _encrypt_mode - 100, _buf[:Blowfish.block_size])
return unpad(_blf.decrypt(_buf[Blowfish.block_size:]))
elif _encrypt_mode == Blowfish.MODE_CFB + 100:
_blf = Blowfish.new(self.e_token[_session], _encrypt_mode - 100, _buf[:Blowfish.block_size])
return _blf.decrypt(_buf[Blowfish.block_size:])
elif _encrypt_mode == Blowfish.MODE_CTR + 100:
_blf = Blowfish.new(self.e_token[_session], _encrypt_mode - 100, counter=Counter.new(64))
return _blf.decrypt(_buf)
elif _encrypt_mode == AES.MODE_CBC:
_aes = AES.new(self.e_token[_session], _encrypt_mode, _buf[:AES.block_size])
return unpad(_aes.decrypt(_buf[AES.block_size:]))
elif _encrypt_mode == AES.MODE_CFB:
_aes = AES.new(self.e_token[_session], _encrypt_mode, _buf[:AES.block_size])
return _aes.decrypt(_buf[AES.block_size:])
elif _encrypt_mode == AES.MODE_CTR:
_aes = AES.new(self.e_token[_session], _encrypt_mode, counter=Counter.new(128))
return _aes.decrypt(_buf)
else:
#AES.MODE_ECB
return unpad(self.aes[_session].decrypt(_buf))
	def encrypt_package(self, _buf, _encrypt_mode, _session):
		"""Encrypt one tunnel payload; inverse of decrypt_package().

		CBC modes pad the plaintext (pad value == pad length) and prepend a
		random IV; CFB prepends an IV but needs no padding; CTR uses a fresh
		counter and no IV. Modes offset by +100 are Blowfish, plain values
		are AES; the fallback is AES-128-ECB via the cached per-session
		cipher.
		"""
		if _encrypt_mode == Blowfish.MODE_CBC + 100:
			BS = Blowfish.block_size
			pad = lambda s: ''.join([s, (BS - len(s) % BS) * chr(BS - len(s) % BS)])
			_iv = Random.new().read(BS)
			_blf = Blowfish.new(self.e_token[_session], _encrypt_mode - 100, _iv)
			return ''.join([_iv, _blf.encrypt(pad(_buf))])
		elif _encrypt_mode == Blowfish.MODE_CFB + 100: #CFB OFB CTR: padding is not required
			_iv = Random.new().read(Blowfish.block_size)
			_blf = Blowfish.new(self.e_token[_session], _encrypt_mode - 100, _iv)
			return ''.join([_iv, _blf.encrypt(_buf)])
		elif _encrypt_mode == Blowfish.MODE_CTR + 100:
			_blf = Blowfish.new(self.e_token[_session], _encrypt_mode - 100, counter=Counter.new(64))
			return _blf.encrypt(_buf)
		elif _encrypt_mode == AES.MODE_CBC:
			BS = AES.block_size
			pad = lambda s: ''.join([s, (BS - len(s) % BS) * chr(BS - len(s) % BS)])
			_iv = Random.new().read(BS)
			_aes = AES.new(self.e_token[_session], _encrypt_mode, _iv)
			return ''.join([_iv, _aes.encrypt(pad(_buf))])
		elif _encrypt_mode == AES.MODE_CFB: #CFB OFB CTR: padding is not required
			_iv = Random.new().read(AES.block_size)
			_aes = AES.new(self.e_token[_session], _encrypt_mode, _iv)
			return ''.join([_iv, _aes.encrypt(_buf)])
		elif _encrypt_mode == AES.MODE_CTR:
			_aes = AES.new(self.e_token[_session], _encrypt_mode, counter=Counter.new(128))
			return _aes.encrypt(_buf)
		else:
			#AES.MODE_ECB
			BS = AES.block_size
			pad = lambda s: ''.join([s, (BS - len(s) % BS) * chr(BS - len(s) % BS)])
			return self.aes[_session].encrypt(pad(_buf))
	def handle_udt_tun_events(self, sets): #ooo
		"""Dispatch one round of epoll results (IO_NONBLOCK mode).

		sets[0]: readable UDT sockets - decrypt/decompress and write to the
		tun device, or (relay sessions) re-frame and forward to the peer
		session's socket. sets[1]: writable UDT sockets - drain the pending
		send buffer. sets[2]: readable tun devices - read one packet,
		compress/encrypt, frame with a 2-byte length and send to the peer.
		"""
		for u in sets[0]:
			_un = u.UDTSOCKET.UDTSOCKET
			if _un in self.server.s_udts:
				_session = self.server.s_udts[_un]
			else:
				continue
			_encrypt_mode = self.sess_encrypt_mode[_session] if _session in self.sess_encrypt_mode else self.encrypt_mode
			_compress = self.compress_tunnel[_session] if _session in self.compress_tunnel else None
			#magic bytes identifying zlib/lzo streams, so only actually
			#compressed payloads get decompressed
			_magic = {'zlib':(''.join([chr(0x78), chr(0x9c)]), 2), 'lzo':(''.join([chr(0xf0), chr(0x0), chr(0x0)]), 3)}
			#eval(_compress) resolves the module named 'zlib'/'lzo'
			#(value comes from our own config, not from the wire)
			_unzip = lambda s : eval(_compress).decompress(s) if _compress and _magic[_compress][0] in s[:_magic[_compress][1]] else s
			_forward2_tun = lambda s : self.server.zsess[_session][0].write(_unzip(self.decrypt_package(s, _encrypt_mode, _session))) if _encrypt_mode else self.server.zsess[_session][0].write(_unzip(s))
			_repack = lambda s : ''.join([struct.pack('!H', len(s)), s])
			try:
				#for i in xrange(10):
				if _session not in self.udt_relay:
					#2-byte big-endian length prefix framing
					_forward2_tun(u.recv(struct.unpack('!H', u.recv(2))[0]))
				else:
					_from, _to = self.udt_relay[_session]
					if _session == _from:
						_to_s = _to
					else:
						_to_s = _from
					_, _to_usock, _ = self.server.zsess.get(_to_s, (None, None, None))
					if _to_usock:
						#print "relaying tunnel", _session, "to", self.udt_relay[_session]
						_buf = u.recv(struct.unpack('!H', u.recv(2))[0])
						_to_usock.send(_repack(_buf))
					else:
						#relay two sides not full connected yet
						continue
			except udt4.UDTException as e:
				if e[0] == udt4.EASYNCRCV:
					#recv buffer empty, no more data to read
					#print "recv", i, "packages from udt and write in", _t2 - _t1, "secs"
					continue
				elif e[0] == udt4.EASYNCSND:
					#send buffer full, just for relaying case
					if _to_s in self.server.udt_send_buf:
						self.server.udt_send_buf[_to_s].append(_buf)
					else:
						self.server.udt_send_buf[_to_s] = deque([_buf])
					#NOTE(review): _conn is never bound in this loop; this line
					#would raise NameError if reached (probably meant _to_usock,
					#as in the sets[2] loop below) - TODO confirm
					_ux = _conn.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit
					self.server.upolls[_ux].remove_usock(u)
					self.server.upolls[_ux].add_usock(u, udt4.UDT_EPOLL_IN|udt4.UDT_EPOLL_OUT)
				if u.getsockstate() > 5:
					#state > 5: broken/closed/nonexistent - stop polling it
					self.server.upolls[_un % self.server.udt_thread_limit].remove_usock(u)
					self.udt_relay_thread_stat[_session] = False
			except IOError as e:
				if e.errno == errno.EINVAL:
					#illegal data, maybe tunnel peer shutdown suddenly
					continue
		for u in sets[1]:
			_un = u.UDTSOCKET.UDTSOCKET
			if _un in self.server.s_udts:
				_session = self.server.s_udts[_un]
			else:
				continue
			if _session in self.server.udt_send_buf:
				try:
					u.send(self.server.udt_send_buf[_session][0])
					self.server.udt_send_buf[_session].popleft()
					if len(self.server.udt_send_buf[_session]) == 0:
						self.server.udt_send_buf.pop(_session, None)
				except:
					if u.getsockstate() > 5:
						self.server.udt_send_buf.pop(_session, None)
						self.server.upolls[_un % self.server.udt_thread_limit].remove_usock(u)
			else:
				#nothing left to send: stop watching this socket for writability
				_ux = u.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit
				self.server.upolls[_ux].remove_usock(u)
				self.server.upolls[_ux].add_usock(u, udt4.UDT_EPOLL_IN)
		for _tun in sets[2]:
			if _tun.fileno() == -1: continue
			_session = self.server.s_tuns[_tun.fileno()]
			_, _conn = self.server.zsess[_session][:2]
			_encrypt_mode = self.sess_encrypt_mode[_session] if _session in self.sess_encrypt_mode else self.encrypt_mode
			_compress = self.compress_tunnel[_session] if _session in self.compress_tunnel else None
			#only compress packets comfortably below the mtu
			_zip = lambda s : eval(_compress).compress(s) if _compress and len(s) < _tun.mtu - 100 else s
			_encrypt=lambda s : self.encrypt_package(_zip(s), _encrypt_mode, _session) if _encrypt_mode else _zip(s)
			_repack = lambda s : ''.join([struct.pack('!H', len(s)), s])
			try:
				#for i in xrange(10):
				_buf = _repack(_encrypt(_tun.read(_tun.mtu)))
				_conn.send(_buf)
			except IOError as e:
				#no more tun data to read
				#print "read", i+1, "packages from tun, and sent in", _t2 - _t1, "secs"
				continue
			except udt4.UDTException as e:
				if e[0] == udt4.EASYNCSND:
					#send buffer full: queue the frame and also watch for EPOLL_OUT
					if _session in self.server.udt_send_buf:
						self.server.udt_send_buf[_session].append(_buf)
					else:
						self.server.udt_send_buf[_session] = deque([_buf])
					_ux = _conn.UDTSOCKET.UDTSOCKET % self.server.udt_thread_limit
					self.server.upolls[_ux].remove_usock(_conn)
					self.server.upolls[_ux].add_usock(_conn, udt4.UDT_EPOLL_IN|udt4.UDT_EPOLL_OUT)
| 3xsd | /3xsd-0.0.26.tar.gz/3xsd-0.0.26/_3xsd.py | _3xsd.py |
from setuptools import setup

# Packaging metadata for the 3xsd distribution: a single module (_3xsd) plus
# the "3xsd" launcher script. Runtime deps cover async IO (gevent), process
# utilities, packet parsing, sendfile, tun devices, UDT, crypto, lzo and GeoIP.
setup(
    name='3xsd',
    version='0.0.26',
    description='3xsd is a native epoll server serving TCP/UDP connections, a high performance static web server, a failover dns server, a http-based distributed file server, a load-balance proxy-cache server, and a warp-drive server. Written in python, take the full power of multi-cores.',
    url='https://github.com/zihuaye/3xsd',
    author='Zihua Ye',
    author_email='zihua.ye@gmail.com zihua.ye@qq.com',
    license='GPLv2',
    classifiers=[
        'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
        'Topic :: Internet :: Name Service (DNS)',
        'Topic :: Internet :: Proxy Servers',
        'Topic :: System :: Filesystems',
        'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
    ],
    py_modules=['_3xsd'],
    scripts=['3xsd'],
    install_requires=['gevent', 'setproctitle', 'psutil', 'dpkt', 'pysendfile', 'python-pytun', 'pyudt4', 'pycrypto', 'python-lzo', 'geoip2']
)
| 3xsd | /3xsd-0.0.26.tar.gz/3xsd-0.0.26/setup.py | setup.py |
3fs is designed to solve the problem of storing massive numbers of files across a cluster of servers, accessed via the plain HTTP protocol, easy to expand, and with no centralized architecture.
On the internet, most files are stored on web servers and can be accessed with the basic HTTP methods GET/HEAD/PUT/DELETE. A cluster of servers with WebDAV support, plus a set of "routing" servers (3fsd), together make up a 3fs.
user ----- 3fsd ------ web server
|--------- .....
user ----- 3fsd ------ web server
|--------- web server
3fsd ------ .....
|--------- web server
The architecture is simple: 3fsd acts as a "router" or "proxy" — it locates the uri, forwards it to the corresponding storage server, returns the response to the user and caches it. GET/HEAD requests are forwarded unconditionally; PUT/DELETE are forwarded only after being authorized (based on client ip).
URI location algorithm
Let's take an example, accessing a 3fs file /path/to/file should use a uri in this form:
http://a.net/_3fs_0/path/to/file
"_3fs" is called the 3fs prefix, uri with it inside, identifying a access to a 3fs file.
"_0", number 0 called stage number, range 0 to max of interger.
A set of servers that runs unchanged for some time is called a stage — a relatively fixed state: server count, configs, etc.
When you expand your cluster — say, by adding servers to increase storage capacity — the stage number increases by one, and the added servers belong to that new stage. A stage increment must never be rolled back.
When a file is added to the cluster, the current stage number is used to build its uri. In the example above, /path/to/file was added at stage 0.
The 3fs config also has an item called "region": how many servers can one stage hold? region=4096 means you can add/expand at most 4096 servers at a time. It affects the locating algorithm and must stay fixed for the lifetime of the 3fs, so choose it carefully.
Ok, here is how the location algorithm does:
/path/to/file will be calculated to a md5: b4a91649090a2784056565363583d067
assumed that region=256(FF), stage=0, and we have 10 servers at stage 0.
region mask FF0000000000000000000000000000, we got "b4", the first 2 hex num in md5.
0xb4=180, 180/(256/10) = 7.03125, rounded to 7, server 7(0-9), will have it.
Redundancy=2, we have 2 copy of data storaged. How to locate all of them?
As above, the second copy's location is at "a9", the second 2 hex num in md5, 6.601, rounded at server 6. If it's same with first one, just has the server num plus one, server 8 then.
In theory, with region=256, we can have redundancy=16, with region=4096, redundancy=10.
As you can see, at a stage, fixed number of servers, 3fsd can locate the file exactly, algorithm execution time O(1).
When expanding the cluster, old files with old stage num in uri, can be located normally, also the new files with new stage num.
When file is changed, it keeps the stage num original. When deleted and adding again, use the new stage num.
Of course, a server can belong to several stages: server A can belong to stages 0, 1 and 2, if its capacity is large enough.
The pool=... config item is the list of servers at each stage. It must stay fixed at the stage level — you cannot add a server to an existing stage arbitrarily; adding servers must increase the stage number. The order you list servers in does not matter: 3fsd sorts them within each stage.
| 3xsd | /3xsd-0.0.26.tar.gz/3xsd-0.0.26/README.3fs | README.3fs |
# 3yzh
A three-circle calculator, It contains Circle Circles and Cylinders.
It is in Chinese.If you konw how to speak and write or mean these,you can use
Now,you can Download and input python 3y and then it will running
Please comply with local laws and regulations,
User only has the right to use
The final interpretation belongs to the author
For mainland China only.
You can see or read the log in log.py
This means that you have read and agreed to all the above regulations.
Welcome! Download and enjoy.
from __init__ import *
pai2='π'
def part_yh():
    """Interactive circular-ring (annulus) calculator.

    Repeatedly reads the outer radius ``r1``, the inner radius ``r2`` and a
    pi-precision choice from stdin, then prints the ring area plus the
    circumference/area of both circles.  ``break`` hands control back to the
    mode-selection loop in ``__main__``; menu choices outside 1-5 exit the
    process via ``tc()``.
    """
    # Shared complaint for non-positive radii / non-positive ring area.
    bad_size_msg = '可能是你输入的数太小了\n1.内圆半径不允许大于等于外圆半径\n2.重新选择模式使用\n *注意外圆半径和内圆半径的顺序,上面是外圆半径'
    while True:
        r1 = input('请输入外圆半径:')  # outer radius
        try:
            r1 = eval(r1)
        # NOTE(review): on a failed eval r1 stays a string, so the comparison
        # below raises TypeError on Python 3 -- TODO confirm intended flow.
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('输入符号时注意是英文的,输入正确数字')
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        if r1 <= 0.00000000:
            print(bad_size_msg)
            print('2秒后切换模式')
            dd(2)
            break
        r2 = input('请输入内圆半径:')  # inner radius
        try:
            r2 = eval(r2)
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('输入符号时注意是英文的,输入正确数字')
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        if r2 <= 0.000000000:
            print(bad_size_msg)
            print('2秒后切换模式')
            dd(2)
            break
        print('【圆环】')
        aboutpi()
        xxx = input('请输入(1,2,3,4,5)中的一个数字:')
        print(' ')
        try:
            xxx = int(xxx)
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('请输入正确的整数')
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        print(' ')
        if xxx > 5 or xxx <= 0:
            end1 = sj.now() - start
            print('即将\033[10;31m退出\033[0m,', '本次使用时间:', end1, '\n程序将在5秒后关闭,谢谢使用')
            dd(5)
            tc('谢谢使用')
        elif xxx == 5:
            print('-' * 40)
            print('0.1s后切换模式')
            dd(0.1)
            break
        elif xxx == 1:  # pi = 3.14
            Sr1 = r1 * r1 * 3.14  # outer circle area
            Sr2 = r2 * r2 * 3.14  # inner circle area
            S = Sr1 - Sr2         # ring area
            C1 = 6.28 * r1        # outer circumference
            C2 = 6.28 * r2        # inner circumference
            if S > 0:
                dw()
                print('=====计算结果=====')
                print('圆环面积=', '{:.6f}'.format(S))
                print('外圆周长=', '{:.6f}'.format(C1))
                print('内圆周长=', '{:.6f}'.format(C2))
                print('外圆面积=', '{:.7f}'.format(Sr1))
                print('内圆面积=', '{:.7f}'.format(Sr2))
            else:
                print(bad_size_msg)
                print('1秒后切换模式')
                dd(1)
                break
        elif xxx == 2:  # pi = math.pi
            Sr1 = r1 * r1 * pai1
            Sr2 = r2 * r2 * pai1
            S = Sr1 - Sr2
            C1 = 2 * pai1 * r1
            C2 = 2 * pai1 * r2
            if S > 0:
                dw()
                print('=====计算结果=====')
                print('圆环面积=', '{:.6f}'.format(S))
                print('外圆周长=', '{:.6f}'.format(C1))
                print('内圆周长=', '{:.6f}'.format(C2))
                print('外圆面积=', '{:.7f}'.format(Sr1))
                print('内圆面积=', '{:.7f}'.format(Sr2))
            else:
                print(bad_size_msg)
                print('1秒后切换模式')
                dd(1)
                break
        elif xxx == 3:  # keep pi symbolic; printed values are coefficients of π
            Sr1 = r1 * r1
            Sr2 = r2 * r2
            # BUGFIX: was ``S=Sr1-Sr2,6`` which built a tuple, making the
            # ``S > 0`` test below raise TypeError on Python 3.
            S = Sr1 - Sr2
            C1 = 2 * r1
            C2 = 2 * r2
            if S > 0:
                dw()
                print('=====计算结果=====')
                print('圆环面积=', '{:.6f}'.format(S), pai2)
                print('外圆周长=', '{:.6f}'.format(C1), pai2)
                print('内圆周长=', '{:.6f}'.format(C2), pai2)
                print('外圆面积=', '{:.7f}'.format(Sr1), pai2)
                print('内圆面积=', '{:.7f}'.format(Sr2), pai2)
            else:
                print(bad_size_msg)
                print('1秒后切换模式')
                dd(1)
                break
        elif xxx == 4:  # user-defined pi in [3, 3.2)
            defpi = input('请输入要自定义的π(大于等于3且小于3.2)->')
            try:
                defpi = eval(defpi)
            except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
                print('请输入正确的数字')
            except ZeroDivisionError:
                print('除数不能为0,emmm,2年级小孩都知道')
            if defpi < 3 or defpi > 3.2:
                print('0.3秒后切换模式')
                dd(0.3)
                break
            if defpi >= 3 and defpi < 3.2:
                print('最后结果精确到小数点后8位')
                # BUGFIX: the originals ended in ``,8`` (creating tuples, so
                # ``S > 0`` raised TypeError) and the outer circumference was
                # missing its ``* r1`` factor.
                Sr1 = r1 * r1 * defpi
                Sr2 = r2 * r2 * defpi
                S = Sr1 - Sr2
                C1 = 2 * defpi * r1
                C2 = 2 * defpi * r2
                if S > 0:
                    dw()
                    print('=====计算结果=====')
                    print('圆环面积=', '{:.8f}'.format(S))
                    print('外圆周长=', '{:.8f}'.format(C1))
                    print('内圆周长=', '{:.8f}'.format(C2))
                    print('外圆面积=', '{:.8f}'.format(Sr1))
                    print('内圆面积=', '{:.8f}'.format(Sr2))
                else:
                    print(bad_size_msg)
                    print('1秒后切换模式')
                    dd(1)
                    break
        else:
            end = sj.now() - start
            print('即将\033[10;31m退出\033[0m,', '本次使用时间:', end, '\n程序将在5秒后关闭,谢谢使用')
            dd(5)
            tc('谢谢使用')
from __init__ import *
from y import *
from yz import *
from yh import *
def main():
    """Entry point: print the banner, then loop on the mode-selection menu.

    Choices 1-3 dispatch to the circle / cylinder / ring calculators; any
    other (or un-evaluable) input reports the session duration and exits.
    """
    A()
    print('\033[1;1m 由于更换颜色太费时费力,所以只加入了部分颜色,请谅解')
    print('\033[7;36m开始使用时间:\033[0m','\033[7;32m',start,'\nWelcome Users!','\033[0m')
    # Menu-number -> calculator dispatch table.
    dispatch = {1: part_y, 2: part_yz, 3: part_yh}
    while True:
        print('\033[1;1m')
        print('=====切换/选择模式,请选择=====\n模式1.计算关于圆的计算(输入1执行)\n模式2.计算关于圆柱的计算(输入2执行)\n模式3.计算关于圆环的计算(输入3执行)\n输入其他数字退出')
        choice = input('请选择模式(输入数字):')
        try:
            choice = eval(choice)
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('请输入正确数字')
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        try:
            # TypeError covers unhashable eval results (e.g. a list).
            handler = dispatch[choice]
        except (KeyError, TypeError):
            handler = None
        if handler is not None:
            handler()
        else:
            end = sj.now() - start
            print('即将\033[10;31m退出\033[0m,', '本次使用时间:', end, '\n程序将在5秒后关闭,谢谢使用')
            dd(5)
            tc('谢谢使用')


if __name__ == '__main__':
    main()
from math import pi as pai1
__Author__='神秘的·'
__Project__='三个圆整合'
__version__='2.6.4'
def A():  # author / legal / operator-usage banner
    """Print the startup banner: author and version info, the legal
    disclaimer, and a note on how Python spells arithmetic operators."""
    print('\033[1;5;44m作者:' ,__Author__,'\033[0m')
    print('\033[1;5;44m版本:',__version__ ,'\033[0m')
    print('\033[1;5;44mname:',__Project__,'\033[0m')
    print('\033[100;100;100m若出现bug你可以通过 pypi.org/project/3y/ 找到我的联系方式')
    print('\033[1;6;45m编程语言:python\n编辑器(IDLE):pydroid\033[0m')
    # Multi-argument print: the ANSI-coloured disclaimer fragments are
    # separated by single spaces by print() itself.
    print('\n\033[7;33m警告:','\033[0m','\033[9;7;31m严禁\033[0m','用于',
        '\033[7;7;35m非法用途\033[0m','若','\033[7;7;31m触犯','\033[7;7;31m法律\033[0m ',
        '\033[7;44m作者\033[0m','\033[7;7;31m概不负责\033[0m','\033[4;36m最终解释权\033[0m','\033[7;44m归作者\033[0m','\033[1;44m所有。\033[0m')
    print('\033[7;5;31m仅供娱乐!!!!!!\033[0m')
    print('\033[1;44m支持\033[0m','输入运算符(选项除外),但是python中的运算符与手写有一些','\033[2;44m区别\033[0m\n','\033[4;32m×要写成* ÷要写成/ +要写成+ -要写成-\033[0m\n','(英文字符;加减号是有区别的)**')
pai2='π' #下面要用到,提前放上来
def dw():
    """Remind the user that unit conversion/consistency is their own job."""
    print('请自行换算单位并保持单位一致')
def aboutpi():
    """Print the pi-precision menu (choices 1-4) plus the mode-switch (5)
    and exit (anything else) options."""
    print('''
选择π的大小
1.输入1,π为3.14
2.输入2,π为''',pai1,
'''
3.输入3,保留π(π不换成数字)
4.输入4,π为自定义大小 ,但是不能小于3 ,大于等于3.2
 **→_→如果选择4,请先输入半径后输入你定义π的数值
其他选项:
5.输入5,切换模式
6.输入不是1~5中的数,直接退出''')
#圆柱部分
from __init__ import *
pai2='π' #下面要用到,提前放上来
def part_yz():
    """Interactive cylinder calculator.

    Repeatedly reads the base radius ``r`` and the height ``H`` plus a
    pi-precision choice, then prints the base-circle measures, the lateral
    and total surface areas and the volume.  ``break`` hands control back to
    the mode-selection loop in ``__main__``.
    """
    # Shared complaint for degenerate (underflowed / non-positive) values.
    too_small_msg = '请重新输入半径和高,因为其中一个小于0或者数太小了,就像0.0001这样的,所以停止运行了'
    while True:
        r = input('请输入半径:')  # base radius
        try:
            r = eval(r)
        # BUGFIX: this hint used to be printed unconditionally after the try
        # block (with a blank print('') here), so every run -- valid input or
        # not -- showed the error text; it now appears only on bad input.
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('请使用正确符号或正确数字')
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        H = input('请输入高:')  # height
        try:
            H = eval(H)
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('请使用正确符号或正确数字')
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        print('【圆柱】')
        aboutpi()
        xxx = input('请输入(1,2,3,4,5)中的一个数字:')
        print(' ')
        try:
            xxx = int(xxx)
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('请输入有效数字')
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        if r <= 0 or H <= 0:
            print('虽然输入成功,但是为什么弹出选择模式,自己想想为什么')
            print('0.1秒后切换模式')
            dd(0.1)
            break
        print(' ')
        if xxx > 5 or xxx <= 0:
            end1 = sj.now() - start
            print('即将\033[10;31m退出\033[0m,', '本次使用时间:', end1, '\n程序将在5秒后关闭,谢谢使用')
            dd(5)
            tc('谢谢使用')
        elif xxx == 5:
            print('-' * 40)
            print('切换模式')
            print('0.1秒后切换模式')
            dd(0.1)
            break
        elif xxx == 1:  # pi = 3.14
            dw()
            sU = r * r * 3.14  # one base circle area
            sD = sU * 2        # both base circles
            d = 2 * r          # diameter
            C = d * 3.14       # base circumference
            Sc = C * H         # lateral surface area
            S = sD + Sc        # total surface area
            V = sU * H         # volume
            # BUGFIX: was ``r<=0 and H<=0 or (S or C or Sc or sD or sU or d)<=0``;
            # the ``or`` chain only ever tested the first truthy value, and the
            # ``and`` contradicted the "one of them" error text.  min() now
            # genuinely checks every derived quantity.
            if r <= 0 or H <= 0 or min(S, C, Sc, sD, sU, d) <= 0:
                print(too_small_msg)
                print('请选择模式,再次尝试运行')
                dd(0.5)
                break
            elif r > 0 and H > 0:
                dw()
                print('======计算结果======')
                print('当半径=', r, '直径=', d, '高=', H, '时')
                print('\n一个圆的周长=', '{:.7f}'.format(C))
                print('一个圆的面积=', '{:.7f}'.format(sU))
                print('两个圆的面积=', '{:.7f}'.format(sD))
                print('圆柱的侧面积=', '{:.7f}'.format(Sc))
                print('圆柱的体积=', '{:.7f}'.format(V))
                print('圆柱的表面积=', '{:.7f}'.format(S))
            else:
                print('重新输入半径和高,无需关闭')
                print('如果下面没有弹出请输入半径和请输入高,请关闭后重新打开')
        elif xxx == 2:  # pi = math.pi
            sU = r * r * pai1
            sD = sU * 2
            d = 2 * r
            C = d * pai1
            Sc = C * H
            S = sD + Sc
            V = sU * H
            if r <= 0 or H <= 0 or min(S, C, Sc, sD, sU, d) <= 0:
                print(too_small_msg)
                print('请重新打开,再次尝试运行')
                dd(0.5)
                break
            elif r > 0 and H > 0:
                dw()
                print('=====计算结果=====')
                print('当半径=', r, '直径=', d, '高=', H, '时')
                print('\n一个圆的周长=', '{:.7f}'.format(C))
                print('一个圆的面积=', '{:.7f}'.format(sU))
                print('两个圆的面积=', '{:.7f}'.format(sD))
                print('圆柱的侧面积=', '{:.7f}'.format(Sc))
                print('圆柱的体积=', '{:.7f}'.format(V))
                print('圆柱的表面积=', '{:.7f}'.format(S))
            else:
                print('重新输入半径和高,无需关闭')
                print('如果下面没有弹出请输入半径和请输入高,请重新打开')
        elif xxx == 3:  # keep pi symbolic; printed values are coefficients of π
            sU = r * r
            sD = sU * 2
            d = 2 * r
            C = d
            Sc = C * H
            S = sD + Sc
            V = sU * H
            if r <= 0 or H <= 0 or min(S, C, Sc, sD, sU, d) <= 0:
                print(too_small_msg)
                print('请重新打开,再次尝试()运行')
                dd(0.5)
                break
            elif r > 0 and H > 0:
                dw()
                print('=====计算结果=====')
                print('当半径=', r, '直径=', d, '高=', H, '时')
                print('\n一个圆的周长=', '{:.7f}'.format(C), pai2)
                print('一个圆的面积=', '{:.7f}'.format(sU), pai2)
                print('两个圆的面积=', '{:.7f}'.format(sD), pai2)
                print('圆柱的侧面积=', '{:.7f}'.format(Sc), pai2)
                print('圆柱的体积=', '{:.7f}'.format(V), pai2)
                print('圆柱的表面积=', '{:.7f}'.format(S), pai2)
            else:
                print('重新输入半径和高')
        elif xxx == 4:  # user-defined pi in [3, 3.2)
            defpi = input('(请输入你要自定义的π,但是不要小于3或大于等于3.2):')
            try:
                defpi = eval(defpi)
            except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
                print('请输入指定范围的数字')
            except ZeroDivisionError:
                print('除数不能为0,emmm,2年级小孩都知道')
            if defpi < 3 or defpi > 3.2:
                print('0.3秒后切换模式')
                dd(0.3)
                break
            if defpi >= 3 and defpi < 3.2:
                sU = r * r * defpi
                sD = sU * 2
                d = 2 * r
                C = d * defpi
                Sc = C * H
                S = sD + Sc
                V = sU * H  # volume
                if r <= 0 or H <= 0 or min(S, C, Sc, sD, sU, d) <= 0:
                    print(too_small_msg)
                    print('请重新打开,再次尝试运行')
                    dd(0.5)
                    break
                elif r > 0 and H > 0:
                    dw()
                    print('=====计算结果=====')
                    print('当半径=', r, '直径=', d, '高=', H, '时')
                    print('\n一个圆的周长=', '{:.8f}'.format(C))
                    print('一个圆的面积=', '{:.8f}'.format(sU))
                    print('两个圆的面积=', '{:.8f}'.format(sD))
                    print('圆柱的侧面积=', '{:.8f}'.format(Sc))
                    print('圆柱的体积=', '{:.8f}'.format(V))
                    print('圆柱的表面积=', '{:.8f}'.format(S))
                else:
                    print('重新输入半径和高,无需关闭')
                    print('如果下面没有弹出请输入半径和请输入高,请重新打开(运行)')
        else:
            end1 = sj.now() - start
            print('即将\033[10;31m退出\033[0m,', '本次使用时间:', end1, '\n程序将在5秒后关闭,谢谢使用')
            dd(5)
            tc('谢谢使用')
<p># 3yzh</p>
<p>A three-circle calculator, It contains Circle Circles and Cylinders.
It is in Chinese.If you konw how to speak and write or mean these,you can use
Now,you can Download and input python -m main and then it will running</p>
<p>Please comply with local laws and regulations,
User only has the right to use
The final interpretation belongs to the author
For mainland China only.
You can see or read the log in log.py
This means that you have read and agreed to all the above regulations.
Welcome Download and use!</p>
| 3y | /3y-2.6.4.tar.gz/3y-2.6.4/README.html | README.html |
Documentation
-------------
GNU make is fully documented in the GNU Make manual, which is contained
in this distribution as the file make.texinfo. You can also find
on-line and preformatted (PostScript and DVI) versions at the FSF's web
site. There is information there about
* MADE IN CHINA,From a Chinese student
* The Peoject in Chinese
The project is about Circle , Circular ring and Cylinder
This project is in Chinese version and is for mainland China only
my qq emali address:3046479366@qq.com
When you are using this project,You will agree to the following.
1.Follow your local laws and don't break the law. If you break the law, the author will not take any responsibility
2.When you use the project,please reasonable use .If you find bugs when you're using the project,you can send a emali to 3046479366@qq.com
3.The final interpretation belongs to the author .
4.Don't steal the source code, if you find someone stealing my source code, you can tell me by email, I will give you some compensation | 3y | /3y-2.6.4.tar.gz/3y-2.6.4/README.txt | README.txt |
# 3yzh
A three-circle calculator, It contains Circle Circles and Cylinders.
It is in Chinese.If you konw how to speak and write or mean these,you can use
Now,you can Download and input python 3y and then it will running
Please comply with local laws and regulations,
User only has the right to use
The final interpretation belongs to the author
For mainland China only.
You can see or read the log in log.py
This means that you have read and agreed to all the above regulations.
Welcome Download and use!
* MADE IN CHINA,From a Chinese student
* The project is in Chinese
The project is about the circle, the circular ring and the cylinder.
This project is the Chinese version and is intended for mainland China only.
My QQ email address: 3046479366@qq.com
When you are using this project,You will agree to the following.
1.Follow your local laws and don't break the law. If you break the law, the author will not take any responsibility
2.When you use the project,please reasonable use .If you find bugs when you're using the project,you can send a emali to 3046479366@qq.com
3.The final interpretation belongs to the author .
4.Don't steal the source code, if you find someone stealing my source code, you can tell me by email, I will give you some compensation
| 3y | /3y-2.6.4.tar.gz/3y-2.6.4/README.md | README.md |
# 3yzh
A three-circle calculator, It contains Circle Circles and Cylinders.
It is in Chinese.If you konw how to speak and write or mean these,you can use
Now,you can Download and input python 3y and then it will running
Please comply with local laws and regulations,
User only has the right to use
The final interpretation belongs to the author
For mainland China only.
You can see or read the log in log.py
This means that you have read and agreed to all the above regulations.
Welcome Download and use!
* MADE IN CHINA,From a Chinese student
* The Peoject in Chinese
The project is about Circle , Circular ring and Cylinder
This project is in Chinese version and is for mainland China only
my qq emali address:3046479366@qq.com
When you are using this project,You will agree to the following.
1.Follow your local laws and don't break the law. If you break the law, the author will not take any responsibility
2.When you use the project,please reasonable use .If you find bugs when you're using the project,you can send a emali to 3046479366@qq.com
3.The final interpretation belongs to the author .
4.Don't steal the source code, if you find someone stealing my source code, you can tell me by email, I will give you some compensation
| 3y | /3y-2.6.4.tar.gz/3y-2.6.4/README | README |
# 3y
# __init__.py
from beginnings import *
from colorama import init
from datetime import datetime as sj
from time import sleep as dd
from math import pi as pai1
from sys import exit as tc
from pdsystem import * #The moudle of Maker:神秘的· ps:需要pip下载,在终端输入pip install pdsystem
pdios()  # pdsystem setup call -- presumably prepares the console; TODO confirm against pdsystem docs
start=sj.now()  # session start timestamp; the calculators print (now - start) on exit
| 3y | /3y-2.6.4.tar.gz/3y-2.6.4/__init__.py | __init__.py |
from setuptools import setup, find_packages
__author__ = '神秘的·'
__date__ = '2020/7/17'
import codecs,os
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py.

    Used below to load README.rst as the package long description (PyPI
    renders reST natively).  The file is opened in a ``with`` block so the
    handle is closed deterministically instead of being leaked.
    """
    path = os.path.join(os.path.dirname(__file__), fname)
    with codecs.open(path) as handle:
        return handle.read()
setup(
    name='3y',  # distribution name
    py_modules=['__main__','beginnings','y','yh','yz','log','__init__'],
    version='2.6.4',
    description='三圆计算器,cmd或命令行 python 3y 命令开始运行',  # short description (Chinese)
    # An earlier hand-written long_description string was superseded by
    # reading README.rst below.
    long_description=read('README.rst'),
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Intended Audience :: Developers',
        'Operating System :: OS Independent',
    ],
    keywords='3y',  # search keywords
    author='神秘的·',  # author
    author_email='3046479366@qq.com',  # contact e-mail
    url='',  # project home page (none published)
    license='MIT',  # license
    packages=find_packages(),  # auto-discovered package list
    install_requires=['pdsystem','beginnings'],
    include_package_data=True,
    zip_safe=True,
    )
#log.py
#2021.7.16
# 即将更新log:1把每个判断放到input下面2print ('{} '.format(j))3.过多的无效代码行
# 优化部分显示
#增加自述文件
#############
#ver2.5.4.1
#修复一个小bug
#ver2.6.1
'''修复一个print,2.6是为了方便打包改的
优化beginning文件,但无改动'''
| 3y | /3y-2.6.4.tar.gz/3y-2.6.4/log.py | log.py |
#圆
from __init__ import *
# 即将更新log:1把每个判断放到input下面2print ('{} '.format(j))3.过多的无效代码行
pai2='π'
def part_y():
    """Interactive circle calculator.

    Repeatedly reads the radius and a pi-precision choice from stdin, then
    prints the circumference and the area.  ``break`` hands control back to
    the mode-selection loop in ``__main__``; out-of-range menu choices exit
    via ``tc()``.
    """
    while True:
        r = input('请输入圆的半径:')
        try:
            r = eval(r)
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('请输入有效数字')
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        if r <= 0:
            print('\n2个问题,要不你输入的数太小(0.0001 0.001…),python一算结果就是0\n要不然就是r<0,你见过r小于0的吗?\n1请重新输入选择模式使用')
            print('0.3秒后切换模式')
            dd(0.3)
            break
        print('【圆】')
        aboutpi()
        xxx = input('请输入(1,2,3,4,5)中的一个数字:')
        print(' ')
        try:
            xxx = int(xxx)
        except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
            print('请输入指定范围的整数')
            print('退出…1s后切换模式')
            dd(1)
            break
        except ZeroDivisionError:
            print('除数不能为0,emmm,2年级小孩都知道')
        if xxx > 5 or xxx <= 0:
            end1 = sj.now() - start
            print('即将\033[10;31m退出\033[0m,', '本次使用时间:', end1, '\n程序将在3秒后关闭,谢谢使用')
            dd(3)
            tc('谢谢使用')
        elif xxx == 5:
            print('-' * 40)
            print('0.3秒后切换模式')
            dd(0.3)
            break
        elif xxx == 1:  # pi = 3.14
            if r <= 0:
                print('\n2个问题,要不你输入的数太小(0.0001 0.001…),python一算结果就是0\n要不然就是r<0,你见过r小于0的吗?\n1请重新输入选择模式使用')
                print('0.3秒后切换模式')
                dd(0.3)
                break
            d = 2 * r      # diameter
            ra = r ** 2
            s = 3.14 * ra  # area
            c = 3.14 * d   # circumference
            dw()
            print('======计算结果======')
            print('当半径=', r, '直径=', d, '时')
            print('周长=', '{:.6f}'.format(c))
            print('面积=', '{:.6f}'.format(s))
        elif xxx == 2:  # pi = math.pi
            if r <= 0:
                print('\n2个问题,要不你输入的数太小(0.0001 0.001…),python一算结果就是0\n要不然就是r<0,你见过r小于0的吗?\n请重新输入选择模式使用')
                print('0.3秒后切换模式')
                dd(0.3)
                break
            d = 2 * r
            ra = r ** 2
            s = pai1 * ra
            c = pai1 * d
            dw()
            print('======计算结果======')
            print('当半径=', r, '直径=', d, '时')
            print('周长=', '{:.8f}'.format(c))
            print('面积=', '{:.8f}'.format(s))
        elif xxx == 3:  # keep pi symbolic; printed values are coefficients of π
            if r <= 0:
                print('2个问题,要不你输入的数太小(0.0001 0.001…),python一算结果就是0\n要不然就是r<0,你见过r小于0的吗?\n请重新选择模式后运行')
                print('0.3秒后切换模式')
                dd(0.3)
                break
            d = 2 * r
            ra = r ** 2
            s = ra     # area, in units of π
            c = r * 2  # circumference, in units of π
            dw()
            print('======计算结果======')
            print('当半径=', r, '直径=', d, '时')
            print('周长=', '{:.8f}'.format(c), pai2)
            print('面积=', '{:.8f}'.format(s), pai2)
        elif xxx == 4:  # user-defined pi in [3, 3.2)
            defpi = input('(请输入你要自定义的π,但是不要小于3或大于等于3.2):')
            try:
                defpi = eval(defpi)
            # BUGFIX: this handler only caught (ValueError, TypeError, IOError),
            # so inputs like ``abc`` (NameError) or ``1+`` (SyntaxError) crashed;
            # it now matches the handler set used everywhere else in this package.
            except (IOError, ValueError, TypeError, SyntaxError, EOFError, NameError):
                print('请输入指定范围的数字')
            except ZeroDivisionError:
                print('除数不能为0,emmm,2年级小孩都知道')
            if defpi < 3 or defpi > 3.2:
                end = sj.now() - start
                print('本次使用时间:', end)
                print('拜拜了您嘞,自己想想为什么,别生气哈,想明白后再用,5秒钟后关闭')
                dd(5)
                tc('谢谢使用')
            if defpi >= 3 and defpi < 3.2:
                if r <= 0:
                    print('2个问题,要不你输入的数太小(0.0001 0.001…),python一算结果就是0\n要不然就是r<0,你见过r小于0的吗?\n请重新选择该模式使用')
                    print('0.3秒后切换模式')
                    dd(0.3)
                    break
                d = 2 * r
                ra = r ** 2
                s = defpi * ra
                c = defpi * d
                dw()
                print('======计算结果======')
                print('当半径=', r, '直径=', d, '时')
                print('周长=', '{:.8f}'.format(c))
                print('面积=', '{:.8f}'.format(s))
        else:
            end1 = sj.now() - start
            print('即将\033[10;31m退出\033[0m,', '本次使用时间:', end1, '\n程序将在5秒后关闭,谢谢使用')
            dd(5)
            tc('谢谢使用')
# It's just a name snipe. | 4 | /4-8.8.8.8.tar.gz/4-8.8.8.8/README.md | README.md |
from setuptools import setup, find_packages
# Trove classifiers describing the supported environment and license.
classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Education',
    'Operating System :: Microsoft :: Windows :: Windows 10',
    'License :: OSI Approved :: MIT License',
    'Programming Language :: Python :: 3'
]

setup(
    name='4',
    version='8.8.8.8',
    description="It's just a name snipe.",
    # NOTE(review): the file handle below is never closed and no encoding is
    # given; harmless for a one-shot build, but a with-block would be safer.
    long_description=open('README.md').read(),
    long_description_content_type='text/markdown',
    author='kggn',
    license='MIT',
    classifiers=classifiers,
    packages=find_packages(),
)
| 4 | /4-8.8.8.8.tar.gz/4-8.8.8.8/setup.py | setup.py |
pass | 4 | /4-8.8.8.8.tar.gz/4-8.8.8.8/8888/__init__.py | __init__.py |
import setuptools
# Read the long description for PyPI from the adjacent README.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="404-optimistic-pkg-404 Not Found",  # Replace with your own username
    version="0.0.1",
    author="404 Not Found",
    author_email="siaka1316@gmail.com",
    description="We must live optimistically.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pypa/sampleproject",  # NOTE(review): sample URL left over from the template
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
    ],
    python_requires='>=3.11',
)
| 404-optimistic-pkg-404-Not-Found | /404-optimistic-pkg-404%20Not%20Found-0.0.1.tar.gz/404-optimistic-pkg-404 Not Found-0.0.1/setup.py | setup.py |
========
Overview
========
.. start-badges
.. list-table::
:stub-columns: 1
* - docs
- |docs|
* - tests
- | |travis| |requires|
|
* - package
- |version| |downloads| |wheel| |supported-versions| |supported-implementations|
.. |docs| image:: https://readthedocs.org/projects/40wt-common-tasks/badge/?style=flat
:target: https://readthedocs.org/projects/40wt-common-tasks
:alt: Documentation Status
.. |travis| image:: https://travis-ci.org/svetlyak40wt/40wt-common-tasks.svg?branch=master
:alt: Travis-CI Build Status
:target: https://travis-ci.org/svetlyak40wt/40wt-common-tasks
.. |requires| image:: https://requires.io/github/svetlyak40wt/40wt-common-tasks/requirements.svg?branch=master
:alt: Requirements Status
:target: https://requires.io/github/svetlyak40wt/40wt-common-tasks/requirements/?branch=master
.. |version| image:: https://img.shields.io/pypi/v/40wt-common-tasks.svg?style=flat
:alt: PyPI Package latest release
:target: https://pypi.python.org/pypi/40wt-common-tasks
.. |downloads| image:: https://img.shields.io/pypi/dm/40wt-common-tasks.svg?style=flat
:alt: PyPI Package monthly downloads
:target: https://pypi.python.org/pypi/40wt-common-tasks
.. |wheel| image:: https://img.shields.io/pypi/wheel/40wt-common-tasks.svg?style=flat
:alt: PyPI Wheel
:target: https://pypi.python.org/pypi/40wt-common-tasks
.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/40wt-common-tasks.svg?style=flat
:alt: Supported versions
:target: https://pypi.python.org/pypi/40wt-common-tasks
.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/40wt-common-tasks.svg?style=flat
:alt: Supported implementations
:target: https://pypi.python.org/pypi/40wt-common-tasks
.. end-badges
A collection of tasks for python invoke, to build and maintain python projects.
* Free software: BSD license
Installation
============
::
    pip install 40wt-common-tasks
Documentation
=============
https://40wt-common-tasks.readthedocs.org/
Development
===========
To run the all tests run::
tox
Note, to combine the coverage data from all the tox environments run:
.. list-table::
:widths: 10 90
:stub-columns: 1
- - Windows
- ::
set PYTEST_ADDOPTS=--cov-append
tox
- - Other
- ::
PYTEST_ADDOPTS=--cov-append tox
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/README.rst | README.rst |
============
Contributing
============
Contributions are welcome, and they are greatly appreciated! Every
little bit helps, and credit will always be given.
Bug reports
===========
When `reporting a bug <https://github.com/svetlyak40wt/40wt-common-tasks/issues>`_ please include:
* Your operating system name and version.
* Any details about your local setup that might be helpful in troubleshooting.
* Detailed steps to reproduce the bug.
Documentation improvements
==========================
40wt-common-tasks could always use more documentation, whether as part of the
official 40wt-common-tasks docs, in docstrings, or even on the web in blog posts,
articles, and such.
Feature requests and feedback
=============================
The best way to send feedback is to file an issue at https://github.com/svetlyak40wt/40wt-common-tasks/issues.
If you are proposing a feature:
* Explain in detail how it would work.
* Keep the scope as narrow as possible, to make it easier to implement.
* Remember that this is a volunteer-driven project, and that code contributions are welcome :)
Development
===========
To set up `40wt-common-tasks` for local development:
1. Fork `40wt-common-tasks <https://github.com/svetlyak40wt/40wt-common-tasks>`_
(look for the "Fork" button).
2. Clone your fork locally::
git clone git@github.com:your_name_here/40wt-common-tasks.git
3. Create a branch for local development::
git checkout -b name-of-your-bugfix-or-feature
Now you can make your changes locally.
4. When you're done making changes, run all the checks, doc builder and spell checker with `tox <http://tox.readthedocs.org/en/latest/install.html>`_ one command::
tox
5. Commit your changes and push your branch to GitHub::
git add .
git commit -m "Your detailed description of your changes."
git push origin name-of-your-bugfix-or-feature
6. Submit a pull request through the GitHub website.
Pull Request Guidelines
-----------------------
If you need some code review or feedback while you're developing the code just make the pull request.
For merging, you should:
1. Include passing tests (run ``tox``) [1]_.
2. Update documentation when there's new API, functionality etc.
3. Add a note to ``CHANGELOG.rst`` about the changes.
4. Add yourself to ``AUTHORS.rst``.
.. [1] If you don't have all the necessary python versions available locally you can rely on Travis - it will
`run the tests <https://travis-ci.org/svetlyak40wt/40wt-common-tasks/pull_requests>`_ for each change you add in the pull request.
It will be slower though ...
Tips
----
To run a subset of tests::
tox -e envname -- py.test -k test_myfeature
To run all the test environments in *parallel* (you need to ``pip install detox``)::
detox
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/CONTRIBUTING.rst | CONTRIBUTING.rst |
Changelog
=========
0.2.0 (2016-07-29)
------------------
* Tasks were fixed to work with ``invoke >= 0.13.0``.
* New task ``check_if_dirty`` was added. Make your tasks depend on it,
and execution will be interrupted if some git changes aren't commited and pushed.
0.1.0 (2016-02-10)
------------------
* First release on PyPI.
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/CHANGELOG.rst | CHANGELOG.rst |
Authors
=======
* Alexander Artemenko - http://allmychanges.com/p/python/40wt-common-tasks
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/AUTHORS.rst | AUTHORS.rst |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    """Read a file relative to this setup.py and return its text.

    Positional *names* are joined onto this file's directory; the text
    encoding may be overridden via the ``encoding`` keyword (UTF-8 by
    default).  The file is opened in a ``with`` block so the handle is
    closed deterministically instead of being leaked.
    """
    path = join(dirname(__file__), *names)
    with io.open(path, encoding=kwargs.get('encoding', 'utf8')) as handle:
        return handle.read()
setup(
    name='40wt-common-tasks',
    version='0.2.0',
    license='BSD',
    description='A collection of tasks for python invoke, to build and maintain python projects.',
    # Long description = README with the badge block stripped, plus the
    # CHANGELOG with Sphinx roles like :func:`x` downgraded to ``x`` literals.
    long_description='%s\n%s' % (
        re.compile('^.. start-badges.*^.. end-badges', re.M | re.S).sub('', read('README.rst')),
        re.sub(':[a-z]+:`~?(.*?)`', r'``\1``', read('CHANGELOG.rst'))
    ),
    author='Alexander Artemenko',
    author_email='svetlyak.40wt@gmail.com',
    url='https://github.com/svetlyak40wt/40wt-common-tasks',
    packages=find_packages('src'),
    package_dir={'': 'src'},
    py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        # uncomment if you test on these interpreters:
        # 'Programming Language :: Python :: Implementation :: IronPython',
        # 'Programming Language :: Python :: Implementation :: Jython',
        # 'Programming Language :: Python :: Implementation :: Stackless',
        'Topic :: Utilities',
    ],
    keywords=[
        # eg: 'keyword1', 'keyword2', 'keyword3',
    ],
    install_requires=[
        # in 0.13.0 invoke requires all tasks accept Context as first parameter
        'invoke>=0.13.0',
    ],
    extras_require={
        # eg:
        # 'rst': ['docutils>=0.11'],
        # ':python_version=="2.6"': ['argparse'],
    },
)
# coding: utf-8
import re
import os
import sys
from invoke import task, run
__version__ = '0.2.0'
@task
def update_requirements(
        ctx,
        path='.',
        pattern=r'^(requirements[^/]*\.in|requirements/.*\.in)$',
        upgrade=True):
    """Compile every requirements ``.in`` file under *path* with pip-compile.

    Files are matched by *pattern* against their path relative to *path*;
    pass ``upgrade=False`` to pin without upgrading.
    """
    matcher = re.compile(pattern)
    matching = [
        rel
        for root, _dirs, files in os.walk(path)
        for rel in (os.path.relpath(os.path.join(root, fn), path) for fn in files)
        if matcher.match(rel) is not None
    ]
    for target in matching:
        parts = ['pip-compile']
        if upgrade:
            parts.append('--upgrade')
        parts.append(target)
        run(' '.join(parts))
def get_current_version():
    """Return the newest version number from CHANGELOG.md.

    The latest entry is assumed to be on the very first line, formatted
    like ``## 0.1.2 (2016-02-13)`` (the leading ``##`` is optional).
    """
    with open('CHANGELOG.md') as changelog:
        heading = changelog.readline()
    tokens = heading.strip('#').split()
    return tokens[0]
def make_dashed_aliases(items):
    """Give invoke tasks dash-separated aliases ('do_x' -> 'do-x').

    Intended to be called at the bottom of a tasks.py as
    ``make_dashed_aliases(locals().values())``; objects without an
    ``aliases`` attribute are ignored.
    """
    for candidate in items:
        if not hasattr(candidate, 'aliases'):
            continue
        dashed = candidate.__name__.replace('_', '-')
        if dashed == candidate.__name__ or dashed in candidate.aliases:
            continue
        candidate.aliases += (dashed,)
def is_dirty_workdir():
    """Return True when the repository has uncommitted or unpushed work.

    Checks ``git status --porcelain`` first, then ``git log @{upstream}..``.
    For either command, a non-zero exit status (no repo / no upstream) or any
    output at all counts as "dirty".
    """
    for command in ('git status --porcelain', 'git log @{upstream}..'):
        outcome = run(command, hide=True, warn=True)
        if outcome.return_code != 0 or outcome.stdout:
            return True
    return False
@task
def check_if_dirty(ctx):
    """Abort with exit code 1 when the git workdir has uncommitted or
    unpushed work.

    Use as a pre-task so release/build tasks refuse to run from a dirty tree.
    """
    if is_dirty_workdir():
        # BUGFIX: this was a Python-2 ``print`` statement, a SyntaxError on
        # Python 3 -- which this package's classifiers claim to support.
        print('Please, commit/ignore all files and push to upstream.')
        sys.exit(1)
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/src/common_tasks/__init__.py | __init__.py |
.. include:: ../CONTRIBUTING.rst
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/contributing.rst | contributing.rst |
.. include:: ../AUTHORS.rst
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/authors.rst | authors.rst |
=====
Usage
=====
To use 40wt-common-tasks in a project, add something like that in your ``tasks.py`` file:
.. code:: python
from common_tasks import (
check_is_dirty,
make_dashed_aliases,
update_requirements,
)
@task(check_is_dirty)
def build_release(ctx):
do_something_clever()
make_dashed_aliases(locals().values())
After that, you'll be able to run::
invoke build-release
And it will fail if there is some not commited or not pushed changes in the work directory.
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/usage.rst | usage.rst |
========
Contents
========
.. toctree::
:maxdepth: 2
readme
installation
usage
reference/index
contributing
authors
changelog
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/index.rst | index.rst |
============
Installation
============
At the command line::
    pip install 40wt-common-tasks
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/installation.rst | installation.rst |
.. include:: ../CHANGELOG.rst
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/changelog.rst | changelog.rst |
.. include:: ../README.rst
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/readme.rst | readme.rst |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
# Sphinx extensions used to build the documentation.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.coverage',
    'sphinx.ext.doctest',
    'sphinx.ext.extlinks',
    'sphinx.ext.ifconfig',
    'sphinx.ext.napoleon',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]
# Spell-checking is opt-in via the SPELLCHECK environment variable.
if os.getenv('SPELLCHECK'):
    extensions += 'sphinxcontrib.spelling',
    spelling_show_suggestions = True
    spelling_lang = 'en_US'

source_suffix = '.rst'
master_doc = 'index'
project = u'40wt-common-tasks'
year = u'2016'
author = u'Alexander Artemenko'
copyright = '{0}, {1}'.format(year, author)
version = release = u'0.2.0'

pygments_style = 'trac'
templates_path = ['.']
# Shorthand roles: :issue:`12` / :pr:`34` link to the GitHub tracker.
extlinks = {
    'issue': ('https://github.com/svetlyak40wt/40wt-common-tasks/issues/%s', '#'),
    'pr': ('https://github.com/svetlyak40wt/40wt-common-tasks/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = True
html_sidebars = {
    '**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)

# Napoleon (Google/NumPy-style docstring) settings.
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
Reference
=========
.. toctree::
:glob:
common_tasks*
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/reference/index.rst | index.rst |
common_tasks
============
.. testsetup::
from common_tasks import *
.. automodule:: common_tasks
:members:
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/docs/reference/common_tasks.rst | common_tasks.rst |
import common_tasks
def test_main():
    """Smoke-check that the ``common_tasks`` package is importable."""
    # A successfully imported module object is never None (and always truthy).
    assert common_tasks is not None
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/tests/test_common_tasks.py | test_common_tasks.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import os
import sys
from os.path import exists
from os.path import join
from os.path import dirname
from os.path import abspath
if __name__ == "__main__":
    # Bootstrap a dedicated virtualenv under .tox/bootstrap and use it to
    # regenerate the project's CI files from the jinja2 templates in
    # ci/templates, parameterized by the tox environment list.
    import subprocess

    base_path = dirname(dirname(abspath(__file__)))
    print("Project path: {0}".format(base_path))
    env_path = join(base_path, ".tox", "bootstrap")
    # virtualenv places console scripts under "Scripts" on Windows, "bin" elsewhere.
    if sys.platform == "win32":
        bin_path = join(env_path, "Scripts")
    else:
        bin_path = join(env_path, "bin")
    if not exists(env_path):
        print("Making bootstrap env in: {0} ...".format(env_path))
        try:
            subprocess.check_call(["virtualenv", env_path])
        except Exception:
            # Fall back to module invocation when `virtualenv` is not on PATH.
            subprocess.check_call([sys.executable, "-m", "virtualenv", env_path])
        print("Installing `jinja2` into bootstrap environment ...")
        subprocess.check_call([join(bin_path, "pip"), "install", "jinja2"])
    # Activate the bootstrap env in-process so `import jinja2` below resolves.
    # Use a context manager instead of a bare open() to avoid leaking the handle.
    activate = join(bin_path, "activate_this.py")
    with open(activate, "rb") as activate_file:
        exec(compile(activate_file.read(), activate, "exec"), dict(__file__=activate))
    import jinja2

    jinja = jinja2.Environment(
        loader=jinja2.FileSystemLoader(join(base_path, "ci", "templates")),
        trim_blocks=True,
        lstrip_blocks=True,
        keep_trailing_newline=True
    )
    # universal_newlines=True makes check_output return str on Python 3
    # (it returns bytes by default, and bytes entries would never match the
    # str names in the exclusion list below).
    tox_output = subprocess.check_output(
        ['tox', '--listenvs'], universal_newlines=True
    )
    tox_environments = [line.strip() for line in tox_output.splitlines()]
    # Meta-environments are driven separately; only real test envs go into
    # the templates.
    tox_environments = [
        line for line in tox_environments
        if line not in ['clean', 'report', 'docs', 'check']
    ]
    for name in os.listdir(join("ci", "templates")):
        with open(join(base_path, name), "w") as fh:
            fh.write(jinja.get_template(name).render(tox_environments=tox_environments))
        print("Wrote {}".format(name))
    print("DONE.")
| 40wt-common-tasks | /40wt-common-tasks-0.2.0.tar.gz/40wt-common-tasks-0.2.0/ci/bootstrap.py | bootstrap.py |
from setuptools import setup, find_packages
# Distribution metadata for the ``4123`` package. Descriptions and URLs are
# intentionally left blank by the author; only the name/version/classifiers
# carry information.
setup(
    name="4123",
    version="1",
    author="calword",
    author_email="d@doop.fun",
    description="",
    long_description="",
    long_description_content_type="",
    url="",
    keywords=[""],
    # Trove classifiers advertised on PyPI.
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Microsoft :: Windows",
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Natural Language :: English",
        "Topic :: Software Development",
    ],
    # Packages live directly under the project root.
    package_dir={"": "."},
    packages=find_packages(where="."),
    install_requires=["requests"],
)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.