$NetBSD$
--- site_scons/mongo/platform.py.orig 2018-01-04 23:28:55.000000000 +0000
+++ site_scons/mongo/platform.py
@@ -20,8 +20,12 @@ def get_running_os_name():
running_os = os.sys.platform
if running_os.startswith('linux'):
running_os = 'linux'
+ elif running_os.startswith('dragonfly'):
+ running_os = 'dragonfly'
elif running_os.startswith('freebsd'):
running_os = 'freebsd'
+ elif running_os.startswith('netbsd'):
+ running_os = 'netbsd'
elif running_os.startswith('openbsd'):
running_os = 'openbsd'
elif running_os == 'sunos5':
@@ -41,7 +45,7 @@ def is_os_raw(target_os, os_list_to_chec
okay = False
darwin_os_list = [ 'macOS', 'tvOS', 'tvOS-sim', 'iOS', 'iOS-sim' ]
- posix_os_list = [ 'linux', 'openbsd', 'freebsd', 'solaris' ] + darwin_os_list
+ posix_os_list = [ 'linux', 'openbsd', 'freebsd', 'solaris', 'dragonfly', 'netbsd' ] + darwin_os_list
for p in os_list_to_check:
if p == 'posix' and target_os in posix_os_list:
| {
"repo_name": "nanobox-io/nanobox-pkgsrc-base",
"path": "mongodb36-server/patches/patch-site__scons_mongo_platform.py",
"copies": "2",
"size": "1038",
"license": "mit",
"hash": -5301291490084620000,
"line_mean": 38.9230769231,
"line_max": 105,
"alpha_frac": 0.6011560694,
"autogenerated": false,
"ratio": 2.8360655737704916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9437221643170493,
"avg_score": 0,
"num_lines": 26
} |
$NetBSD$
--- tools/install.py.orig 2016-08-26 15:27:23.000000000 +0000
+++ tools/install.py
@@ -98,6 +98,37 @@ def npm_files(action):
else:
assert(0) # unhandled action type
+def yarn_files(action):
+ target_path = 'lib/node_modules/yarn/'
+
+ # don't install yarn if the target path is a symlink, it probably means
+ # that a dev version of yarn is installed there
+ if os.path.islink(abspath(install_path, target_path)): return
+
+ # yarn has a *lot* of files and it'd be a pain to maintain a fixed list here
+ # so we walk its source directory instead...
+ for dirname, subdirs, basenames in os.walk('deps/yarn', topdown=True):
+ subdirs[:] = filter('test'.__ne__, subdirs) # skip test suites
+ paths = [os.path.join(dirname, basename) for basename in basenames]
+ action(paths, target_path + dirname[9:] + '/')
+
+ # create/remove symlink
+ link_path = abspath(install_path, 'bin/yarn')
+ if action == uninstall:
+ action([link_path], 'bin/yarn')
+ elif action == install:
+ try_symlink('../lib/node_modules/yarn/bin/yarn.js', link_path)
+ else:
+ assert(0) # unhandled action type
+ link_path = abspath(install_path, 'bin/yarnpkg')
+ if action == uninstall:
+ action([link_path], 'bin/yarnpkg')
+ elif action == install:
+ try_symlink('../lib/node_modules/yarn/bin/yarn.js', link_path)
+ else:
+ assert(0) # unhandled action type
+
+
def subdir_files(path, dest, action):
ret = {}
for dirpath, dirnames, filenames in os.walk(path):
@@ -137,9 +168,10 @@ def files(action):
if 'freebsd' in sys.platform or 'openbsd' in sys.platform:
action(['doc/node.1'], 'man/man1/')
else:
- action(['doc/node.1'], 'share/man/man1/')
+ action(['doc/node.1'], 'man/man1/')
if 'true' == variables.get('node_install_npm'): npm_files(action)
+ if 'true' == variables.get('node_install_npm'): yarn_files(action)
headers(action)
| {
"repo_name": "nanobox-io/nanobox-pkgsrc-base",
"path": "nodejs8/patches/patch-tools_install.py",
"copies": "14",
"size": "1899",
"license": "mit",
"hash": -8950076555666715000,
"line_mean": 34.1666666667,
"line_max": 78,
"alpha_frac": 0.6477093207,
"autogenerated": false,
"ratio": 2.894817073170732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
$NetBSD: patch-tools_gyp_pylib_gyp_generator_make.py,v 1.3 2013/12/12 11:52:37 jperkin Exp $
Add support for NetBSD and DragonFly.
Ensure we use the system libtool on OSX.
--- tools/gyp/pylib/gyp/generator/make.py.orig 2013-12-12 05:20:06.000000000 +0000
+++ tools/gyp/pylib/gyp/generator/make.py
@@ -174,7 +174,7 @@ cmd_solink_module = $(LINK.$(TOOLSET)) -
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
-cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
+cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool /usr/bin/libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
@@ -2012,7 +2012,7 @@ def GenerateOutput(target_list, target_d
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
- elif flavor == 'freebsd':
+ elif flavor == 'freebsd' or flavor == 'dragonflybsd' or flavor == 'netbsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
| {
"repo_name": "nanobox-io/nanobox-pkgsrc-base",
"path": "nodejs7/patches/patch-tools_gyp_pylib_gyp_generator_make.py",
"copies": "16",
"size": "1181",
"license": "mit",
"hash": 3609048375879621000,
"line_mean": 46.24,
"line_max": 122,
"alpha_frac": 0.6375952583,
"autogenerated": false,
"ratio": 2.8595641646489103,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# $OpenBSD: blowfish.c,v 1.18 2004/11/02 17:23:26 hshoexer Exp $
#
# Blowfish block cipher for OpenBSD
# Copyright 1997 Niels Provos <provos@physnet.uni-hamburg.de>
# All rights reserved.
#
# Implementation advice by David Mazieres <dm@lcs.mit.edu>.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. All advertising materials mentioning features or use of this software
# must display the following acknowledgement:
# This product includes software developed by Niels Provos.
# 4. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This code is derived from section 14.3 and the given source
# in section V of Applied Cryptography, second edition.
# Blowfish is an unpatented fast block cipher designed by
# Bruce Schneier.
# Function for Feistel Networks
import array
BLF_N = 16 # Number of Subkeys
BLF_MAXKEYLEN = ((BLF_N-2)*4) # 448 bits
def encipher(ctx, xl, xr):
s = ctx.S
p = ctx.P
Xl = xl;
Xr = xr;
Xl ^= p[0];
Xr ^= (((s[((Xl>>24)&0xFF)] + s[0x100 + ((Xl>>16)&0xFF)]) ^ s[0x200 + ((Xl>> 8)&0xFF)]) + s[0x300 + (Xl&0xFF)]) ^ p[1]
Xl ^= (((s[((Xr>>24)&0xFF)] + s[0x100 + ((Xr>>16)&0xFF)]) ^ s[0x200 + ((Xr>> 8)&0xFF)]) + s[0x300 + (Xr&0xFF)]) ^ p[2]
Xr ^= (((s[((Xl>>24)&0xFF)] + s[0x100 + ((Xl>>16)&0xFF)]) ^ s[0x200 + ((Xl>> 8)&0xFF)]) + s[0x300 + (Xl&0xFF)]) ^ p[3]
Xl ^= (((s[((Xr>>24)&0xFF)] + s[0x100 + ((Xr>>16)&0xFF)]) ^ s[0x200 + ((Xr>> 8)&0xFF)]) + s[0x300 + (Xr&0xFF)]) ^ p[4]
Xr ^= (((s[((Xl>>24)&0xFF)] + s[0x100 + ((Xl>>16)&0xFF)]) ^ s[0x200 + ((Xl>> 8)&0xFF)]) + s[0x300 + (Xl&0xFF)]) ^ p[5]
Xl ^= (((s[((Xr>>24)&0xFF)] + s[0x100 + ((Xr>>16)&0xFF)]) ^ s[0x200 + ((Xr>> 8)&0xFF)]) + s[0x300 + (Xr&0xFF)]) ^ p[6]
Xr ^= (((s[((Xl>>24)&0xFF)] + s[0x100 + ((Xl>>16)&0xFF)]) ^ s[0x200 + ((Xl>> 8)&0xFF)]) + s[0x300 + (Xl&0xFF)]) ^ p[7]
Xl ^= (((s[((Xr>>24)&0xFF)] + s[0x100 + ((Xr>>16)&0xFF)]) ^ s[0x200 + ((Xr>> 8)&0xFF)]) + s[0x300 + (Xr&0xFF)]) ^ p[8]
Xr ^= (((s[((Xl>>24)&0xFF)] + s[0x100 + ((Xl>>16)&0xFF)]) ^ s[0x200 + ((Xl>> 8)&0xFF)]) + s[0x300 + (Xl&0xFF)]) ^ p[9]
Xl ^= (((s[((Xr>>24)&0xFF)] + s[0x100 + ((Xr>>16)&0xFF)]) ^ s[0x200 + ((Xr>> 8)&0xFF)]) + s[0x300 + (Xr&0xFF)]) ^ p[10]
Xr ^= (((s[((Xl>>24)&0xFF)] + s[0x100 + ((Xl>>16)&0xFF)]) ^ s[0x200 + ((Xl>> 8)&0xFF)]) + s[0x300 + (Xl&0xFF)]) ^ p[11]
Xl ^= (((s[((Xr>>24)&0xFF)] + s[0x100 + ((Xr>>16)&0xFF)]) ^ s[0x200 + ((Xr>> 8)&0xFF)]) + s[0x300 + (Xr&0xFF)]) ^ p[12]
Xr ^= (((s[((Xl>>24)&0xFF)] + s[0x100 + ((Xl>>16)&0xFF)]) ^ s[0x200 + ((Xl>> 8)&0xFF)]) + s[0x300 + (Xl&0xFF)]) ^ p[13]
Xl ^= (((s[((Xr>>24)&0xFF)] + s[0x100 + ((Xr>>16)&0xFF)]) ^ s[0x200 + ((Xr>> 8)&0xFF)]) + s[0x300 + (Xr&0xFF)]) ^ p[14]
Xr ^= (((s[((Xl>>24)&0xFF)] + s[0x100 + ((Xl>>16)&0xFF)]) ^ s[0x200 + ((Xl>> 8)&0xFF)]) + s[0x300 + (Xl&0xFF)]) ^ p[15]
Xl ^= (((s[((Xr>>24)&0xFF)] + s[0x100 + ((Xr>>16)&0xFF)]) ^ s[0x200 + ((Xr>> 8)&0xFF)]) + s[0x300 + (Xr&0xFF)]) ^ p[16]
return ((Xr ^ p[17]) & 0xFFFFFFFF, Xl & 0xFFFFFFFF)
def stream2word(data, current):
temp = 0x00000000L;
j = current
for _ in xrange(4):
if j >= len(data):
j = 0
temp = ((temp << 8) | data[j]) & 0xFFFFFFFF;
j+=1
return (temp, j);
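# Example: stream2word wraps around the key stream when it runs out of bytes,
# so stream2word([0x01, 0x02, 0x03, 0x04], 0) returns (0x01020304, 4).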
def expand0state(ctx, key):
j = 0;
p = ctx.P
for i in xrange(BLF_N + 2):
# Extract 4 int8 to 1 int32 from keystream
temp, j = stream2word(key, j)
p[i] ^= temp
j = 0
datal = 0x0L
datar = 0x0L
for i in xrange(0, BLF_N + 2, 2):
datal, datar = encipher(ctx, datal, datar);
p[i + 0] = datal
p[i + 1] = datar
s = ctx.S
for i in xrange(0, 4*256, 2):
datal, datar = encipher(ctx, datal, datar);
s[i + 0] = datal
s[i + 1] = datar
def expandstate(ctx, data, key):
j = 0
p = ctx.P
for i in xrange(BLF_N + 2):
# Extract 4 int8 to 1 int32 from keystream
temp, j = stream2word(key, j);
p[i] ^= temp
j = 0
datal = 0x00000000
datar = 0x00000000
for i in xrange(0, BLF_N + 2L, 2):
d, j = stream2word(data, j);
datal ^= d
d, j = stream2word(data, j);
datar ^= d
datal, datar = encipher(ctx, datal, datar);
p[i + 0] = datal
p[i + 1] = datar
s = ctx.S
for i in xrange(0, 4*256, 2):
d,j = stream2word(data, j);
datal ^= d
d,j = stream2word(data, j);
datar ^= d
datal, datar = encipher(ctx, datal, datar);
s[i + 0] = datal
s[i + 1] = datar
def pybc_blf_enc(ctx, data, blocks):
for index in range(0, blocks*2, 2):
data[index], data[index+1] = encipher(ctx, data[index], data[index+1])
def initstate():
# P-box and S-box tables initialized with digits of Pi
class Ctx:
S = array.array('L', [
0xd1310ba6L, 0x98dfb5acL, 0x2ffd72dbL, 0xd01adfb7L,
0xb8e1afedL, 0x6a267e96L, 0xba7c9045L, 0xf12c7f99L,
0x24a19947L, 0xb3916cf7L, 0x0801f2e2L, 0x858efc16L,
0x636920d8L, 0x71574e69L, 0xa458fea3L, 0xf4933d7eL,
0x0d95748fL, 0x728eb658L, 0x718bcd58L, 0x82154aeeL,
0x7b54a41dL, 0xc25a59b5L, 0x9c30d539L, 0x2af26013L,
0xc5d1b023L, 0x286085f0L, 0xca417918L, 0xb8db38efL,
0x8e79dcb0L, 0x603a180eL, 0x6c9e0e8bL, 0xb01e8a3eL,
0xd71577c1L, 0xbd314b27L, 0x78af2fdaL, 0x55605c60L,
0xe65525f3L, 0xaa55ab94L, 0x57489862L, 0x63e81440L,
0x55ca396aL, 0x2aab10b6L, 0xb4cc5c34L, 0x1141e8ceL,
0xa15486afL, 0x7c72e993L, 0xb3ee1411L, 0x636fbc2aL,
0x2ba9c55dL, 0x741831f6L, 0xce5c3e16L, 0x9b87931eL,
0xafd6ba33L, 0x6c24cf5cL, 0x7a325381L, 0x28958677L,
0x3b8f4898L, 0x6b4bb9afL, 0xc4bfe81bL, 0x66282193L,
0x61d809ccL, 0xfb21a991L, 0x487cac60L, 0x5dec8032L,
0xef845d5dL, 0xe98575b1L, 0xdc262302L, 0xeb651b88L,
0x23893e81L, 0xd396acc5L, 0x0f6d6ff3L, 0x83f44239L,
0x2e0b4482L, 0xa4842004L, 0x69c8f04aL, 0x9e1f9b5eL,
0x21c66842L, 0xf6e96c9aL, 0x670c9c61L, 0xabd388f0L,
0x6a51a0d2L, 0xd8542f68L, 0x960fa728L, 0xab5133a3L,
0x6eef0b6cL, 0x137a3be4L, 0xba3bf050L, 0x7efb2a98L,
0xa1f1651dL, 0x39af0176L, 0x66ca593eL, 0x82430e88L,
0x8cee8619L, 0x456f9fb4L, 0x7d84a5c3L, 0x3b8b5ebeL,
0xe06f75d8L, 0x85c12073L, 0x401a449fL, 0x56c16aa6L,
0x4ed3aa62L, 0x363f7706L, 0x1bfedf72L, 0x429b023dL,
0x37d0d724L, 0xd00a1248L, 0xdb0fead3L, 0x49f1c09bL,
0x075372c9L, 0x80991b7bL, 0x25d479d8L, 0xf6e8def7L,
0xe3fe501aL, 0xb6794c3bL, 0x976ce0bdL, 0x04c006baL,
0xc1a94fb6L, 0x409f60c4L, 0x5e5c9ec2L, 0x196a2463L,
0x68fb6fafL, 0x3e6c53b5L, 0x1339b2ebL, 0x3b52ec6fL,
0x6dfc511fL, 0x9b30952cL, 0xcc814544L, 0xaf5ebd09L,
0xbee3d004L, 0xde334afdL, 0x660f2807L, 0x192e4bb3L,
0xc0cba857L, 0x45c8740fL, 0xd20b5f39L, 0xb9d3fbdbL,
0x5579c0bdL, 0x1a60320aL, 0xd6a100c6L, 0x402c7279L,
0x679f25feL, 0xfb1fa3ccL, 0x8ea5e9f8L, 0xdb3222f8L,
0x3c7516dfL, 0xfd616b15L, 0x2f501ec8L, 0xad0552abL,
0x323db5faL, 0xfd238760L, 0x53317b48L, 0x3e00df82L,
0x9e5c57bbL, 0xca6f8ca0L, 0x1a87562eL, 0xdf1769dbL,
0xd542a8f6L, 0x287effc3L, 0xac6732c6L, 0x8c4f5573L,
0x695b27b0L, 0xbbca58c8L, 0xe1ffa35dL, 0xb8f011a0L,
0x10fa3d98L, 0xfd2183b8L, 0x4afcb56cL, 0x2dd1d35bL,
0x9a53e479L, 0xb6f84565L, 0xd28e49bcL, 0x4bfb9790L,
0xe1ddf2daL, 0xa4cb7e33L, 0x62fb1341L, 0xcee4c6e8L,
0xef20cadaL, 0x36774c01L, 0xd07e9efeL, 0x2bf11fb4L,
0x95dbda4dL, 0xae909198L, 0xeaad8e71L, 0x6b93d5a0L,
0xd08ed1d0L, 0xafc725e0L, 0x8e3c5b2fL, 0x8e7594b7L,
0x8ff6e2fbL, 0xf2122b64L, 0x8888b812L, 0x900df01cL,
0x4fad5ea0L, 0x688fc31cL, 0xd1cff191L, 0xb3a8c1adL,
0x2f2f2218L, 0xbe0e1777L, 0xea752dfeL, 0x8b021fa1L,
0xe5a0cc0fL, 0xb56f74e8L, 0x18acf3d6L, 0xce89e299L,
0xb4a84fe0L, 0xfd13e0b7L, 0x7cc43b81L, 0xd2ada8d9L,
0x165fa266L, 0x80957705L, 0x93cc7314L, 0x211a1477L,
0xe6ad2065L, 0x77b5fa86L, 0xc75442f5L, 0xfb9d35cfL,
0xebcdaf0cL, 0x7b3e89a0L, 0xd6411bd3L, 0xae1e7e49L,
0x00250e2dL, 0x2071b35eL, 0x226800bbL, 0x57b8e0afL,
0x2464369bL, 0xf009b91eL, 0x5563911dL, 0x59dfa6aaL,
0x78c14389L, 0xd95a537fL, 0x207d5ba2L, 0x02e5b9c5L,
0x83260376L, 0x6295cfa9L, 0x11c81968L, 0x4e734a41L,
0xb3472dcaL, 0x7b14a94aL, 0x1b510052L, 0x9a532915L,
0xd60f573fL, 0xbc9bc6e4L, 0x2b60a476L, 0x81e67400L,
0x08ba6fb5L, 0x571be91fL, 0xf296ec6bL, 0x2a0dd915L,
0xb6636521L, 0xe7b9f9b6L, 0xff34052eL, 0xc5855664L,
0x53b02d5dL, 0xa99f8fa1L, 0x08ba4799L, 0x6e85076aL,
0x4b7a70e9L, 0xb5b32944L, 0xdb75092eL, 0xc4192623L,
0xad6ea6b0L, 0x49a7df7dL, 0x9cee60b8L, 0x8fedb266L,
0xecaa8c71L, 0x699a17ffL, 0x5664526cL, 0xc2b19ee1L,
0x193602a5L, 0x75094c29L, 0xa0591340L, 0xe4183a3eL,
0x3f54989aL, 0x5b429d65L, 0x6b8fe4d6L, 0x99f73fd6L,
0xa1d29c07L, 0xefe830f5L, 0x4d2d38e6L, 0xf0255dc1L,
0x4cdd2086L, 0x8470eb26L, 0x6382e9c6L, 0x021ecc5eL,
0x09686b3fL, 0x3ebaefc9L, 0x3c971814L, 0x6b6a70a1L,
0x687f3584L, 0x52a0e286L, 0xb79c5305L, 0xaa500737L,
0x3e07841cL, 0x7fdeae5cL, 0x8e7d44ecL, 0x5716f2b8L,
0xb03ada37L, 0xf0500c0dL, 0xf01c1f04L, 0x0200b3ffL,
0xae0cf51aL, 0x3cb574b2L, 0x25837a58L, 0xdc0921bdL,
0xd19113f9L, 0x7ca92ff6L, 0x94324773L, 0x22f54701L,
0x3ae5e581L, 0x37c2dadcL, 0xc8b57634L, 0x9af3dda7L,
0xa9446146L, 0x0fd0030eL, 0xecc8c73eL, 0xa4751e41L,
0xe238cd99L, 0x3bea0e2fL, 0x3280bba1L, 0x183eb331L,
0x4e548b38L, 0x4f6db908L, 0x6f420d03L, 0xf60a04bfL,
0x2cb81290L, 0x24977c79L, 0x5679b072L, 0xbcaf89afL,
0xde9a771fL, 0xd9930810L, 0xb38bae12L, 0xdccf3f2eL,
0x5512721fL, 0x2e6b7124L, 0x501adde6L, 0x9f84cd87L,
0x7a584718L, 0x7408da17L, 0xbc9f9abcL, 0xe94b7d8cL,
0xec7aec3aL, 0xdb851dfaL, 0x63094366L, 0xc464c3d2L,
0xef1c1847L, 0x3215d908L, 0xdd433b37L, 0x24c2ba16L,
0x12a14d43L, 0x2a65c451L, 0x50940002L, 0x133ae4ddL,
0x71dff89eL, 0x10314e55L, 0x81ac77d6L, 0x5f11199bL,
0x043556f1L, 0xd7a3c76bL, 0x3c11183bL, 0x5924a509L,
0xf28fe6edL, 0x97f1fbfaL, 0x9ebabf2cL, 0x1e153c6eL,
0x86e34570L, 0xeae96fb1L, 0x860e5e0aL, 0x5a3e2ab3L,
0x771fe71cL, 0x4e3d06faL, 0x2965dcb9L, 0x99e71d0fL,
0x803e89d6L, 0x5266c825L, 0x2e4cc978L, 0x9c10b36aL,
0xc6150ebaL, 0x94e2ea78L, 0xa5fc3c53L, 0x1e0a2df4L,
0xf2f74ea7L, 0x361d2b3dL, 0x1939260fL, 0x19c27960L,
0x5223a708L, 0xf71312b6L, 0xebadfe6eL, 0xeac31f66L,
0xe3bc4595L, 0xa67bc883L, 0xb17f37d1L, 0x018cff28L,
0xc332ddefL, 0xbe6c5aa5L, 0x65582185L, 0x68ab9802L,
0xeecea50fL, 0xdb2f953bL, 0x2aef7dadL, 0x5b6e2f84L,
0x1521b628L, 0x29076170L, 0xecdd4775L, 0x619f1510L,
0x13cca830L, 0xeb61bd96L, 0x0334fe1eL, 0xaa0363cfL,
0xb5735c90L, 0x4c70a239L, 0xd59e9e0bL, 0xcbaade14L,
0xeecc86bcL, 0x60622ca7L, 0x9cab5cabL, 0xb2f3846eL,
0x648b1eafL, 0x19bdf0caL, 0xa02369b9L, 0x655abb50L,
0x40685a32L, 0x3c2ab4b3L, 0x319ee9d5L, 0xc021b8f7L,
0x9b540b19L, 0x875fa099L, 0x95f7997eL, 0x623d7da8L,
0xf837889aL, 0x97e32d77L, 0x11ed935fL, 0x16681281L,
0x0e358829L, 0xc7e61fd6L, 0x96dedfa1L, 0x7858ba99L,
0x57f584a5L, 0x1b227263L, 0x9b83c3ffL, 0x1ac24696L,
0xcdb30aebL, 0x532e3054L, 0x8fd948e4L, 0x6dbc3128L,
0x58ebf2efL, 0x34c6ffeaL, 0xfe28ed61L, 0xee7c3c73L,
0x5d4a14d9L, 0xe864b7e3L, 0x42105d14L, 0x203e13e0L,
0x45eee2b6L, 0xa3aaabeaL, 0xdb6c4f15L, 0xfacb4fd0L,
0xc742f442L, 0xef6abbb5L, 0x654f3b1dL, 0x41cd2105L,
0xd81e799eL, 0x86854dc7L, 0xe44b476aL, 0x3d816250L,
0xcf62a1f2L, 0x5b8d2646L, 0xfc8883a0L, 0xc1c7b6a3L,
0x7f1524c3L, 0x69cb7492L, 0x47848a0bL, 0x5692b285L,
0x095bbf00L, 0xad19489dL, 0x1462b174L, 0x23820e00L,
0x58428d2aL, 0x0c55f5eaL, 0x1dadf43eL, 0x233f7061L,
0x3372f092L, 0x8d937e41L, 0xd65fecf1L, 0x6c223bdbL,
0x7cde3759L, 0xcbee7460L, 0x4085f2a7L, 0xce77326eL,
0xa6078084L, 0x19f8509eL, 0xe8efd855L, 0x61d99735L,
0xa969a7aaL, 0xc50c06c2L, 0x5a04abfcL, 0x800bcadcL,
0x9e447a2eL, 0xc3453484L, 0xfdd56705L, 0x0e1e9ec9L,
0xdb73dbd3L, 0x105588cdL, 0x675fda79L, 0xe3674340L,
0xc5c43465L, 0x713e38d8L, 0x3d28f89eL, 0xf16dff20L,
0x153e21e7L, 0x8fb03d4aL, 0xe6e39f2bL, 0xdb83adf7L,
0xe93d5a68L, 0x948140f7L, 0xf64c261cL, 0x94692934L,
0x411520f7L, 0x7602d4f7L, 0xbcf46b2eL, 0xd4a20068L,
0xd4082471L, 0x3320f46aL, 0x43b7d4b7L, 0x500061afL,
0x1e39f62eL, 0x97244546L, 0x14214f74L, 0xbf8b8840L,
0x4d95fc1dL, 0x96b591afL, 0x70f4ddd3L, 0x66a02f45L,
0xbfbc09ecL, 0x03bd9785L, 0x7fac6dd0L, 0x31cb8504L,
0x96eb27b3L, 0x55fd3941L, 0xda2547e6L, 0xabca0a9aL,
0x28507825L, 0x530429f4L, 0x0a2c86daL, 0xe9b66dfbL,
0x68dc1462L, 0xd7486900L, 0x680ec0a4L, 0x27a18deeL,
0x4f3ffea2L, 0xe887ad8cL, 0xb58ce006L, 0x7af4d6b6L,
0xaace1e7cL, 0xd3375fecL, 0xce78a399L, 0x406b2a42L,
0x20fe9e35L, 0xd9f385b9L, 0xee39d7abL, 0x3b124e8bL,
0x1dc9faf7L, 0x4b6d1856L, 0x26a36631L, 0xeae397b2L,
0x3a6efa74L, 0xdd5b4332L, 0x6841e7f7L, 0xca7820fbL,
0xfb0af54eL, 0xd8feb397L, 0x454056acL, 0xba489527L,
0x55533a3aL, 0x20838d87L, 0xfe6ba9b7L, 0xd096954bL,
0x55a867bcL, 0xa1159a58L, 0xcca92963L, 0x99e1db33L,
0xa62a4a56L, 0x3f3125f9L, 0x5ef47e1cL, 0x9029317cL,
0xfdf8e802L, 0x04272f70L, 0x80bb155cL, 0x05282ce3L,
0x95c11548L, 0xe4c66d22L, 0x48c1133fL, 0xc70f86dcL,
0x07f9c9eeL, 0x41041f0fL, 0x404779a4L, 0x5d886e17L,
0x325f51ebL, 0xd59bc0d1L, 0xf2bcc18fL, 0x41113564L,
0x257b7834L, 0x602a9c60L, 0xdff8e8a3L, 0x1f636c1bL,
0x0e12b4c2L, 0x02e1329eL, 0xaf664fd1L, 0xcad18115L,
0x6b2395e0L, 0x333e92e1L, 0x3b240b62L, 0xeebeb922L,
0x85b2a20eL, 0xe6ba0d99L, 0xde720c8cL, 0x2da2f728L,
0xd0127845L, 0x95b794fdL, 0x647d0862L, 0xe7ccf5f0L,
0x5449a36fL, 0x877d48faL, 0xc39dfd27L, 0xf33e8d1eL,
0x0a476341L, 0x992eff74L, 0x3a6f6eabL, 0xf4f8fd37L,
0xa812dc60L, 0xa1ebddf8L, 0x991be14cL, 0xdb6e6b0dL,
0xc67b5510L, 0x6d672c37L, 0x2765d43bL, 0xdcd0e804L,
0xf1290dc7L, 0xcc00ffa3L, 0xb5390f92L, 0x690fed0bL,
0x667b9ffbL, 0xcedb7d9cL, 0xa091cf0bL, 0xd9155ea3L,
0xbb132f88L, 0x515bad24L, 0x7b9479bfL, 0x763bd6ebL,
0x37392eb3L, 0xcc115979L, 0x8026e297L, 0xf42e312dL,
0x6842ada7L, 0xc66a2b3bL, 0x12754cccL, 0x782ef11cL,
0x6a124237L, 0xb79251e7L, 0x06a1bbe6L, 0x4bfb6350L,
0x1a6b1018L, 0x11caedfaL, 0x3d25bdd8L, 0xe2e1c3c9L,
0x44421659L, 0x0a121386L, 0xd90cec6eL, 0xd5abea2aL,
0x64af674eL, 0xda86a85fL, 0xbebfe988L, 0x64e4c3feL,
0x9dbc8057L, 0xf0f7c086L, 0x60787bf8L, 0x6003604dL,
0xd1fd8346L, 0xf6381fb0L, 0x7745ae04L, 0xd736fcccL,
0x83426b33L, 0xf01eab71L, 0xb0804187L, 0x3c005e5fL,
0x77a057beL, 0xbde8ae24L, 0x55464299L, 0xbf582e61L,
0x4e58f48fL, 0xf2ddfda2L, 0xf474ef38L, 0x8789bdc2L,
0x5366f9c3L, 0xc8b38e74L, 0xb475f255L, 0x46fcd9b9L,
0x7aeb2661L, 0x8b1ddf84L, 0x846a0e79L, 0x915f95e2L,
0x466e598eL, 0x20b45770L, 0x8cd55591L, 0xc902de4cL,
0xb90bace1L, 0xbb8205d0L, 0x11a86248L, 0x7574a99eL,
0xb77f19b6L, 0xe0a9dc09L, 0x662d09a1L, 0xc4324633L,
0xe85a1f02L, 0x09f0be8cL, 0x4a99a025L, 0x1d6efe10L,
0x1ab93d1dL, 0x0ba5a4dfL, 0xa186f20fL, 0x2868f169L,
0xdcb7da83L, 0x573906feL, 0xa1e2ce9bL, 0x4fcd7f52L,
0x50115e01L, 0xa70683faL, 0xa002b5c4L, 0x0de6d027L,
0x9af88c27L, 0x773f8641L, 0xc3604c06L, 0x61a806b5L,
0xf0177a28L, 0xc0f586e0L, 0x006058aaL, 0x30dc7d62L,
0x11e69ed7L, 0x2338ea63L, 0x53c2dd94L, 0xc2c21634L,
0xbbcbee56L, 0x90bcb6deL, 0xebfc7da1L, 0xce591d76L,
0x6f05e409L, 0x4b7c0188L, 0x39720a3dL, 0x7c927c24L,
0x86e3725fL, 0x724d9db9L, 0x1ac15bb4L, 0xd39eb8fcL,
0xed545578L, 0x08fca5b5L, 0xd83d7cd3L, 0x4dad0fc4L,
0x1e50ef5eL, 0xb161e6f8L, 0xa28514d9L, 0x6c51133cL,
0x6fd5c7e7L, 0x56e14ec4L, 0x362abfceL, 0xddc6c837L,
0xd79a3234L, 0x92638212L, 0x670efa8eL, 0x406000e0L,
0x3a39ce37L, 0xd3faf5cfL, 0xabc27737L, 0x5ac52d1bL,
0x5cb0679eL, 0x4fa33742L, 0xd3822740L, 0x99bc9bbeL,
0xd5118e9dL, 0xbf0f7315L, 0xd62d1c7eL, 0xc700c47bL,
0xb78c1b6bL, 0x21a19045L, 0xb26eb1beL, 0x6a366eb4L,
0x5748ab2fL, 0xbc946e79L, 0xc6a376d2L, 0x6549c2c8L,
0x530ff8eeL, 0x468dde7dL, 0xd5730a1dL, 0x4cd04dc6L,
0x2939bbdbL, 0xa9ba4650L, 0xac9526e8L, 0xbe5ee304L,
0xa1fad5f0L, 0x6a2d519aL, 0x63ef8ce2L, 0x9a86ee22L,
0xc089c2b8L, 0x43242ef6L, 0xa51e03aaL, 0x9cf2d0a4L,
0x83c061baL, 0x9be96a4dL, 0x8fe51550L, 0xba645bd6L,
0x2826a2f9L, 0xa73a3ae1L, 0x4ba99586L, 0xef5562e9L,
0xc72fefd3L, 0xf752f7daL, 0x3f046f69L, 0x77fa0a59L,
0x80e4a915L, 0x87b08601L, 0x9b09e6adL, 0x3b3ee593L,
0xe990fd5aL, 0x9e34d797L, 0x2cf0b7d9L, 0x022b8b51L,
0x96d5ac3aL, 0x017da67dL, 0xd1cf3ed6L, 0x7c7d2d28L,
0x1f9f25cfL, 0xadf2b89bL, 0x5ad6b472L, 0x5a88f54cL,
0xe029ac71L, 0xe019a5e6L, 0x47b0acfdL, 0xed93fa9bL,
0xe8d3c48dL, 0x283b57ccL, 0xf8d56629L, 0x79132e28L,
0x785f0191L, 0xed756055L, 0xf7960e44L, 0xe3d35e8cL,
0x15056dd4L, 0x88f46dbaL, 0x03a16125L, 0x0564f0bdL,
0xc3eb9e15L, 0x3c9057a2L, 0x97271aecL, 0xa93a072aL,
0x1b3f6d9bL, 0x1e6321f5L, 0xf59c66fbL, 0x26dcf319L,
0x7533d928L, 0xb155fdf5L, 0x03563482L, 0x8aba3cbbL,
0x28517711L, 0xc20ad9f8L, 0xabcc5167L, 0xccad925fL,
0x4de81751L, 0x3830dc8eL, 0x379d5862L, 0x9320f991L,
0xea7a90c2L, 0xfb3e7bceL, 0x5121ce64L, 0x774fbe32L,
0xa8b6e37eL, 0xc3293d46L, 0x48de5369L, 0x6413e680L,
0xa2ae0810L, 0xdd6db224L, 0x69852dfdL, 0x09072166L,
0xb39a460aL, 0x6445c0ddL, 0x586cdecfL, 0x1c20c8aeL,
0x5bbef7ddL, 0x1b588d40L, 0xccd2017fL, 0x6bb4e3bbL,
0xdda26a7eL, 0x3a59ff45L, 0x3e350a44L, 0xbcb4cdd5L,
0x72eacea8L, 0xfa6484bbL, 0x8d6612aeL, 0xbf3c6f47L,
0xd29be463L, 0x542f5d9eL, 0xaec2771bL, 0xf64e6370L,
0x740e0d8dL, 0xe75b1357L, 0xf8721671L, 0xaf537d5dL,
0x4040cb08L, 0x4eb4e2ccL, 0x34d2466aL, 0x0115af84L,
0xe1b00428L, 0x95983a1dL, 0x06b89fb4L, 0xce6ea048L,
0x6f3f3b82L, 0x3520ab82L, 0x011a1d4bL, 0x277227f8L,
0x611560b1L, 0xe7933fdcL, 0xbb3a792bL, 0x344525bdL,
0xa08839e1L, 0x51ce794bL, 0x2f32c9b7L, 0xa01fbac9L,
0xe01cc87eL, 0xbcc7d1f6L, 0xcf0111c3L, 0xa1e8aac7L,
0x1a908749L, 0xd44fbd9aL, 0xd0dadecbL, 0xd50ada38L,
0x0339c32aL, 0xc6913667L, 0x8df9317cL, 0xe0b12b4fL,
0xf79e59b7L, 0x43f5bb3aL, 0xf2d519ffL, 0x27d9459cL,
0xbf97222cL, 0x15e6fc2aL, 0x0f91fc71L, 0x9b941525L,
0xfae59361L, 0xceb69cebL, 0xc2a86459L, 0x12baa8d1L,
0xb6c1075eL, 0xe3056a0cL, 0x10d25065L, 0xcb03a442L,
0xe0ec6e0eL, 0x1698db3bL, 0x4c98a0beL, 0x3278e964L,
0x9f1f9532L, 0xe0d392dfL, 0xd3a0342bL, 0x8971f21eL,
0x1b0a7441L, 0x4ba3348cL, 0xc5be7120L, 0xc37632d8L,
0xdf359f8dL, 0x9b992f2eL, 0xe60b6f47L, 0x0fe3f11dL,
0xe54cda54L, 0x1edad891L, 0xce6279cfL, 0xcd3e7e6fL,
0x1618b166L, 0xfd2c1d05L, 0x848fd2c5L, 0xf6fb2299L,
0xf523f357L, 0xa6327623L, 0x93a83531L, 0x56cccd02L,
0xacf08162L, 0x5a75ebb5L, 0x6e163697L, 0x88d273ccL,
0xde966292L, 0x81b949d0L, 0x4c50901bL, 0x71c65614L,
0xe6c6c7bdL, 0x327a140aL, 0x45e1d006L, 0xc3f27b9aL,
0xc9aa53fdL, 0x62a80f00L, 0xbb25bfe2L, 0x35bdd2f6L,
0x71126905L, 0xb2040222L, 0xb6cbcf7cL, 0xcd769c2bL,
0x53113ec0L, 0x1640e3d3L, 0x38abbd60L, 0x2547adf0L,
0xba38209cL, 0xf746ce76L, 0x77afa1c5L, 0x20756060L,
0x85cbfe4eL, 0x8ae88dd8L, 0x7aaaf9b0L, 0x4cf9aa7eL,
0x1948c25cL, 0x02fb8a8cL, 0x01c36ae4L, 0xd6ebe1f9L,
0x90d4f869L, 0xa65cdea0L, 0x3f09252dL, 0xc208e69fL,
0xb74e6132L, 0xce77e25bL, 0x578fdfe3L, 0x3ac372e6L])
P = array.array('L', [
0x243f6a88L, 0x85a308d3L, 0x13198a2eL, 0x03707344L,
0xa4093822L, 0x299f31d0L, 0x082efa98L, 0xec4e6c89L,
0x452821e6L, 0x38d01377L, 0xbe5466cfL, 0x34e90c6cL,
0xc0ac29b7L, 0xc97c50ddL, 0x3f84d5b5L, 0xb5470917L,
0x9216d5d9L, 0x8979fb1bL])
return Ctx()
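# A minimal usage sketch (hypothetical key and plaintext values), assuming the
# Python 2 semantics this module already relies on: build the Pi-initialised
# context, run the plain Blowfish key schedule, then encrypt one 64-bit block
# passed as two 32-bit halves.
if __name__ == '__main__':
    ctx = initstate()
    key = array.array('B', 'my secret key')  # key bytes as an int sequence
    expand0state(ctx, key)                   # key schedule without salt data
    xl, xr = encipher(ctx, 0x01234567L, 0x89abcdefL)
    print '%08x%08x' % (xl, xr)              # hex ciphertext of the block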
| {
"repo_name": "haldun/tornado-gae-auth",
"path": "auth/blowfish.py",
"copies": "3",
"size": "22944",
"license": "mit",
"hash": -5005261537123454000,
"line_mean": 53.3696682464,
"line_max": 123,
"alpha_frac": 0.6522402371,
"autogenerated": false,
"ratio": 1.9854620976116304,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9111350848268249,
"avg_score": 0.005270297288676073,
"num_lines": 422
} |
# $ProjectHeader: sexprmodule 0.2.1 Wed, 05 Apr 2000 23:33:53 -0600 nas $
# originally from: http://arctrix.com/nas/python/
# modified to understand \-escaped quotes instead of "" escaping - pj 20060809
# IPC: taken from http://code.google.com/p/mhi/source/browse/sexpr.py 11 Jul 2011
import string
import StringIO
from StringIO import StringIO
# tokens
[T_EOF, T_ERROR, T_SYMBOL, T_STRING,
T_INTEGER, T_FLOAT, T_OPEN, T_CLOSE] = range(8)
# states
[S_START, S_SYMBOL, S_STRING, S_NUMBER] = range(4)
SexprError = 'SexprError'
def parse(expr):
return SexprParser(StringIO(expr)).parse()
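# Example: parse() maps an s-expression string onto nested Python lists of
# symbols, strings and numbers, e.g.
#   parse('(add 1 (mul 2.5 "x"))')  ->  ['add', 1, ['mul', 2.5, 'x']]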
class SexprParser:
def __init__(self, input):
self.line_no = 1
self.input = input
self.char = None
def getc(self):
if self.char is None:
c = self.input.read(1)
if c == '\n':
self.line_no = self.line_no + 1
return c
else:
t = self.char
self.char = None
return t
def ungetc(self, c):
self.char = c
def convert_number(self, token):
try:
i = string.atoi(token)
return (T_INTEGER, i)
except ValueError:
try:
f = string.atof(token)
return (T_FLOAT, f)
except ValueError:
return (T_ERROR, '%d: invalid number "%s"' % (self.line_no, token))
def get_token(self):
token = []
state = S_START
while 1:
c = self.getc()
if state == S_START:
# EOF
if not c:
return (T_EOF, None)
# whitespace
elif c in ' \t\n':
continue
# comments
elif c == ';':
while c and (c != '\n'):
c = self.getc()
elif c == '(':
return (T_OPEN, None)
elif c == ')':
return (T_CLOSE, None)
elif c == '"':
state = S_STRING
elif c in '-0123456789.':
state = S_NUMBER
token.append(c)
else:
state = S_SYMBOL
token.append(c)
elif state == S_SYMBOL:
if not c:
return (T_SYMBOL, string.join(token, ''))
if c in ' \t\n;()':
self.ungetc(c)
return (T_SYMBOL, string.join(token, ''))
else:
token.append(c)
elif state == S_STRING:
if not c:
return (T_ERROR, '%d: unexpected EOF inside string' % self.line_no)
elif c == '\\':
c = self.getc()
if c == '"':
token.append('"')
else:
self.ungetc(c)
token.append('\\')
elif c == '"':
return (T_STRING, string.join(token, ''))
else:
token.append(c)
elif state == S_NUMBER:
if not c:
return self.convert_number(string.join(token, ''))
if c in ' \t\n;()':
self.ungetc(c)
return self.convert_number(string.join(token, ''))
elif c in '0123456789.eE-':
token.append(c)
else:
return (T_ERROR, '%d: invalid character "%s" while reading integer'
% (self.line_no, c))
def parse(self, t=None):
if not t:
(t, v) = self.get_token()
if t == T_OPEN:
l = []
while 1:
(t, v) = self.get_token()
if t == T_CLOSE:
return l
elif t == T_OPEN:
v = self.parse(t)
if v == None:
raise SexprError, '%d: unexpected EOF' % self.line_no
elif t == T_ERROR:
raise SexprError, v
elif t == T_EOF:
raise SexprError, '%d: EOF while inside list' % self.line_no
l.append(v)
elif t == T_CLOSE:
raise SexprError, '%d: unexpected )' % self.line_no
elif t == T_EOF:
return None
elif t == T_ERROR:
raise SexprError, v
else:
return v
if __name__ == '__main__':
import sys
#import profile
p = SexprParser(sys.stdin)
#profile.run('p.parse()')
while 1:
e = p.parse()
print e
if not e:
break
| {
"repo_name": "michaelpnash/sublime-ensime",
"path": "sexpr_parser.py",
"copies": "3",
"size": "3915",
"license": "mit",
"hash": -8373413557972216000,
"line_mean": 25.4527027027,
"line_max": 82,
"alpha_frac": 0.5095785441,
"autogenerated": false,
"ratio": 3.3808290155440415,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.023713469403583966,
"num_lines": 148
} |
#!$PYTHON$
# -*- coding: utf-8 -*-
from django import template
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.http import HttpResponse
import os
import socket
THISDIR = os.path.abspath(os.path.dirname(__file__))
TEMPLATE_DIR = os.path.join(THISDIR, '$TEMPLATEDIR$')
MEDIA_DIR = os.path.join(THISDIR, '$MEDIADIR$')
STATIC_DIR = os.path.join(THISDIR, '$STATICDIR$')
#==============================================================================
# Views
#==============================================================================
def index(request):
context = template.RequestContext(request, {
'templates': get_templates(),
})
tpl = template.Template("""<html>
<head>
<title>Django Template Server ($VERSION$)</title>
</head>
<body>
<h1>Select a template</h1>
{% for url,name in templates %}
<a href="{{ url }}">{{ name }}</a>{% if not forloop.last %}<br />{% endif %}
{% endfor %}
</body>
</html>""")
return HttpResponse(tpl.render(context))
#==============================================================================
# URL Patterns
#==============================================================================
urlpatterns = patterns('',
url('^$', index),
url('^show/(?P<template>.+)', 'django.views.generic.simple.direct_to_template', name='show'),
url('^media/(?P<path>.+)', 'django.views.static.serve', {'document_root': MEDIA_DIR}),
url('^static/(?P<path>.+)', 'django.views.static.serve', {'document_root': STATIC_DIR}),
)
#==============================================================================
# Helpers
#==============================================================================
def get_templates():
for root, _, files in os.walk(TEMPLATE_DIR):
for filename in files:
template_name = os.path.normpath(os.path.join(os.path.relpath(root, TEMPLATE_DIR), filename))
url = reverse('show', args=(template_name,))
yield url, template_name
#==============================================================================
# Runner
#==============================================================================
def get_open_port():
port = 8000
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('localhost', port))
except socket.error:
port += 1
else:
break
finally:
s.close()
return port
def run(public=True, port=None):
settings.configure(
ROOT_URLCONF='runserver',
DEBUG=True,
TEMPLATE_DEBUG=True,
TEMPLATE_DIRS=[TEMPLATE_DIR],
APPEND_SLASH=False,
STATIC_ROOT=STATIC_DIR,
MEDIA_ROOT=MEDIA_DIR,
STATIC_URL='/static/',
MEDIA_URL='/media/',
)
port = port or get_open_port()
if public:
location = '0.0.0.0:%s' % port
else:
location = '127.0.0.1:%s' % port
call_command('runserver', location)
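# Example (hypothetical values): run(public=False, port=8080) configures Django
# in-process and serves the discovered templates at http://127.0.0.1:8080/.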
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--local', action='store_false', dest='public',
help='Make server local.')
parser.add_argument('port', default=0, type=int, nargs='?')
args = parser.parse_args()
run(args.public, args.port) | {
"repo_name": "ojii/django-template-server",
"path": "templateserver/runserver_template.py",
"copies": "1",
"size": "3442",
"license": "bsd-3-clause",
"hash": 5663601619457517000,
"line_mean": 31.4811320755,
"line_max": 105,
"alpha_frac": 0.4962231261,
"autogenerated": false,
"ratio": 4.249382716049383,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5245605842149383,
"avg_score": null,
"num_lines": null
} |
# $ python py/tipsy.py filename [pmax]
import sys
import numpy as np
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
# import re
import struct
def read_position_tipsy(filename):
# open the file
fin = open(filename, "rb")
size = len(fin.read())
fin.seek(0)
# read header (28 = 8 + 4 * 5, additional padding of 4 bytes is introduced)
little_endian = True
time, nbodies, ndim, nsph, ndark, nstar = struct.unpack("<diiiii", fin.read(28))
if (ndim < 1 or ndim > 3):
little_endian = False
fin.seek(0)
time, nbodies, ndim, nsph, ndark, nstar = struct.unpack(">diiiii", fin.read(28))
# remove padding of 4 bytes
fin.read(4)
# this function reads dark matter particles only
# skip gas particles (12 floats = 48 bytes)
fin.read(48 * nsph)
# arrays for dark matter particles
px = [0] * ndark
py = [0] * ndark
pz = [0] * ndark
# read dark matter particles (9 floats = 36 bytes)
if ndark > 0:
for ii in range(ndark):
if little_endian:
mass, x, y, z, vx, vy, vz, eps, phi = struct.unpack("<fffffffff", fin.read(36))
else:
mass, x, y, z, vx, vy, vz, eps, phi = struct.unpack(">fffffffff", fin.read(36))
px[ii] = x
py[ii] = y
pz[ii] = z
return (px, py, pz)
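# Example (hypothetical path): px, py, pz = read_position_tipsy("dat/model.tipsy")
# returns three equal-length lists holding the dark-matter particle positions;
# gas particles are skipped as noted above.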
def locate_panels(ax, nx, ny, share_xaxis, share_yaxis):
margin = 0.12
if (share_xaxis == False) or (share_yaxis == False):
margin = 0.15
xmin, xmax = margin, 1.0 - margin
ymin, ymax = margin, 1.0 - margin
xbin = (xmax - xmin) / nx
ybin = (ymax - ymin) / ny
xmargin, ymargin = 0, 0
if share_yaxis == False:
xmin = 0.0
xbin = 1.0 / nx
xmargin = xbin * margin
if share_xaxis == False:
ymin = 0.0
ybin = 1.0 / ny
ymargin = ybin * margin
for ii in range(nx):
xl = xmin + ii * xbin + xmargin
for jj in range(ny):
yl = ymin + jj * ybin + ymargin
kk = ii * ny + jj
ax[kk] = fig.add_axes((xl, yl, xbin - 2 * xmargin, ybin - 2 * ymargin))
if share_xaxis == True:
ax[kk].tick_params(labelbottom = "off")
if jj == 0:
ax[kk].tick_params(labelbottom = "on")
if share_yaxis == True:
ax[kk].tick_params(labelleft = "off")
if ii == 0:
ax[kk].tick_params(labelleft = "on")
# obtain input argument(s)
argv = sys.argv
argc = len(argv)
filename = argv[1]
set_range = True
if argc == 3:
set_range = False
pmax = float(argv[2])
# embed fonts
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
# set font size
plt.rcParams['font.size'] = 14
# set number of panels
nxpanel, nypanel = 2, 2
ax = [0] * nxpanel * nypanel
# set figure size and its aspect ratio
Lx = 6
if nxpanel > 2 * nypanel:
Lx *= 2
Ly = (Lx / nxpanel) * nypanel
fig = plt.figure(figsize = (Lx, Ly))
# set location of panels
locate_panels(ax, nxpanel, nypanel, True, True)
# get number of components and particles
input = "doc/" + filename + ".summary.txt"
fin = open(input, "r")
lines = fin.readlines()
fin.close()
idx = lines[1].find("\t")
kind = int(lines[1][0:idx])# number of components
# get partition
num = [0] * kind
head = [0] * kind
head[0] = 0
for ii in range(kind):
idx = lines[2 + ii].find("\n")
num[ii] = int(lines[2 + ii][0:idx])
if ii != 0:
head[ii] = head[ii - 1] + num[ii - 1]
# read particle data
px, py, pz = read_position_tipsy("dat/" + filename + ".tipsy")
# set plot range
if set_range == True:
pmax = max(np.abs(px))
ymax = max(np.abs(py))
zmax = max(np.abs(pz))
if pmax < ymax:
pmax = ymax
if pmax < zmax:
pmax = zmax
# sparse sampling if necessary
skip = 1
nmax = 1048576
if len(px) > nmax:
skip = int(np.ceil(len(px) / nmax))
# set colors of dots
col = [0] * kind
for ii in range(kind):
if ii % 6 == 0:
col[ii] = "black"
if ii % 6 == 1:
col[ii] = "red"
if ii % 6 == 2:
col[ii] = "blue"
if ii % 6 == 3:
col[ii] = "magenta"
if ii % 6 == 4:
col[ii] = "green"
if ii % 6 == 5:
col[ii] = "brown"
# plot particle distribution
for ii in range(nxpanel):
for jj in range(nypanel):
idx = ii * nypanel + jj
if (idx != (nxpanel * nypanel - 1)):
# ii = 0, jj = 0: xy-plot
# ii = 0, jj = 1: xz-plot
# ii = 1, jj = 0: zy-plot
# ii = 1, jj = 1: blank
if ii == 0:
xx = px
if ii == 1:
xx = pz
ax[idx].tick_params(labelleft = False)
if jj == 0:
yy = py
if jj == 1:
yy = pz
ax[idx].tick_params(labelbottom = False)
# plot the data
for kk in range(kind):
ax[idx].plot(xx[head[kk]:(head[kk]+num[kk]):skip], yy[head[kk]:(head[kk]+num[kk]):skip], ",", color = col[kk])
# set plot range
ax[idx].set_xlim([-pmax, pmax])
ax[idx].set_ylim([-pmax, pmax])
# ax[idx].grid()
ax[idx].tick_params(axis = "both", direction = "in", color = "black", bottom = "on", top = "on", left = "on", right = "on")
# set label
if (ii == 0) and (jj == 0):
ax[idx].set_xlabel(r"$x$")
ax[idx].set_ylabel(r"$y$")
if (ii == 0) and (jj == 1):
ax[idx].set_ylabel(r"$z$")
if (ii == 1) and (jj == 0):
ax[idx].set_xlabel(r"$z$")
else:
# remove box at the upper right corner
ax[idx].spines["right"].set_color("none")
ax[idx].spines["top"].set_color("none")
ax[idx].tick_params(labelbottom = False, labelleft = False, bottom = False, left = False, right = False, top = False)
# output the figure
plt.savefig("dot.png", format = "png", dpi = 300, bbox_inches = "tight")
| {
"repo_name": "FDPS/FDPS",
"path": "sample/c++/nbody+sph/magi_data/py/tipsy.py",
"copies": "3",
"size": "6144",
"license": "mit",
"hash": 4768125086444360000,
"line_mean": 26.4285714286,
"line_max": 135,
"alpha_frac": 0.5180664062,
"autogenerated": false,
"ratio": 3.0858864892014064,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5103952895401406,
"avg_score": null,
"num_lines": null
} |
"""$ rio blocks"""
import json
import logging
import os.path
import click
import cligj
import rasterio
from rasterio.rio import options
from rasterio.rio.helpers import write_features
from rasterio.warp import transform_bounds
logger = logging.getLogger('rio')
class _Collection(object):
"""For use with `rasterio.rio.helpers.write_features()`."""
def __init__(self, src, bidx, precision=6, geographic=True):
"""Export raster dataset windows to GeoJSON polygon features.
Parameters
----------
src : RasterReader
An open datasource.
bidx : int
Extract windows from this band.
precision : int, optional
Coordinate precision.
geographic : bool, optional
Reproject geometries to ``EPSG:4326`` if ``True``.
Yields
------
dict
GeoJSON polygon feature.
"""
self._src = src
self._bidx = bidx
self._precision = precision
self._geographic = geographic
def _normalize_bounds(self, bounds):
if self._geographic:
bounds = transform_bounds(self._src.crs, 'EPSG:4326', *bounds)
if self._precision >= 0:
bounds = (round(v, self._precision) for v in bounds)
return bounds
@property
def bbox(self):
return tuple(self._normalize_bounds(self._src.bounds))
def __call__(self):
gen = self._src.block_windows(bidx=self._bidx)
for idx, (block, window) in enumerate(gen):
bounds = self._normalize_bounds(self._src.window_bounds(window))
xmin, ymin, xmax, ymax = bounds
yield {
'type': 'Feature',
'id': '{0}:{1}'.format(os.path.basename(self._src.name), idx),
'properties': {
'block': json.dumps(block),
'window': window.todict(),
},
'geometry': {
'type': 'Polygon',
'coordinates': [[
(xmin, ymin),
(xmin, ymax),
(xmax, ymax),
(xmax, ymin)
]]
}
}
@click.command()
@options.file_in_arg
@options.output_opt
@cligj.precision_opt
@cligj.indent_opt
@cligj.compact_opt
@cligj.projection_projected_opt
@cligj.sequence_opt
@cligj.use_rs_opt
@click.option(
'--bidx', type=click.INT, default=0,
help="Index of the band that is the source of shapes.")
@click.pass_context
def blocks(
ctx, input, output, precision, indent, compact, projection, sequence,
use_rs, bidx):
"""Write dataset blocks as GeoJSON features.
This command writes features describing a raster's internal blocks, which
are used directly for raster I/O. These features can be used to visualize
how a windowed operation would operate using those blocks.
Output features have two JSON encoded properties: block and window. Block
is a two element array like '[0, 0]' describing the window's position
in the input band's window layout. Window is a two element array
containing two more two element arrays like '[[0, 256], [0, 256]]' and
describes the range of pixels the window covers in the input band. Values
are JSON encoded for better interoperability.
Block windows are extracted from the dataset (all bands must have matching
block windows) by default, or from the band specified using the '--bidx'
option:
\b
$ rio blocks --bidx 3 tests/data/RGB.byte.tif
By default a GeoJSON 'FeatureCollection' is written, but the '--sequence'
option produces a GeoJSON feature stream instead.
\b
$ rio blocks tests/data/RGB.byte.tif --sequence
Output features are reprojected to 'WGS84' unless the '--projected' flag is
provided, which causes the output to be kept in the input datasource's
coordinate reference system.
For more information on exactly what blocks and windows represent, see
'src.block_windows()'.
"""
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
stdout = click.open_file(
output, 'w') if output else click.get_text_stream('stdout')
with ctx.obj['env'], rasterio.open(input) as src:
if bidx and bidx not in src.indexes:
raise click.BadParameter("Not a valid band index")
collection = _Collection(
src=src,
bidx=bidx,
precision=precision,
geographic=projection != 'projected')
write_features(
stdout, collection,
sequence=sequence,
geojson_type='feature' if sequence else 'collection',
use_rs=use_rs,
**dump_kwds)
| {
"repo_name": "brendan-ward/rasterio",
"path": "rasterio/rio/blocks.py",
"copies": "1",
"size": "4897",
"license": "bsd-3-clause",
"hash": 5315154993108301000,
"line_mean": 29.2283950617,
"line_max": 79,
"alpha_frac": 0.5964876455,
"autogenerated": false,
"ratio": 4.19263698630137,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 162
} |
"""$ rio calc"""
from collections import OrderedDict
from distutils.version import LooseVersion
import click
from cligj import files_inout_arg
import snuggs
import rasterio
from rasterio.features import sieve
from rasterio.fill import fillnodata
from rasterio.rio import options
from rasterio.rio.helpers import resolve_inout
def get_bands(inputs, d, i=None):
"""Get a rasterio.Band object from calc's inputs"""
path = inputs[d] if d in dict(inputs) else inputs[int(d) - 1][1]
src = rasterio.open(path)
return (rasterio.band(src, i) if i else
[rasterio.band(src, j) for j in src.indexes])
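# Example: in a calc expression, (bands 1) resolves through this helper to a
# list of rasterio.Band objects covering every band of the first input file,
# while (band 1 2) resolves to just band 2 of that file.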
def read_array(ix, subix=None, dtype=None):
"""Change the type of a read array"""
arr = snuggs._ctx.lookup(ix, subix)
if dtype:
arr = arr.astype(dtype)
return arr
@click.command(short_help="Raster data calculator.")
@click.argument('command')
@files_inout_arg
@options.output_opt
@click.option('--name', multiple=True,
help='Specify an input file with a unique short (alphas only) '
'name for use in commands like '
'"a=tests/data/RGB.byte.tif".')
@options.dtype_opt
@options.masked_opt
@options.force_overwrite_opt
@options.creation_options
@click.pass_context
def calc(ctx, command, files, output, name, dtype, masked, force_overwrite,
creation_options):
"""A raster data calculator
Evaluates an expression using input datasets and writes the result
to a new dataset.
Command syntax is lisp-like. An expression consists of an operator
or function name and one or more strings, numbers, or expressions
enclosed in parentheses. Functions include ``read`` (gets a raster
array) and ``asarray`` (makes a 3-D array from 2-D arrays).
\b
* (read i) evaluates to the i-th input dataset (a 3-D array).
* (read i j) evaluates to the j-th band of the i-th dataset (a 2-D
array).
* (take foo j) evaluates to the j-th band of a dataset named foo (see
help on the --name option above).
* Standard numpy array operators (+, -, *, /) are available.
* When the final result is a list of arrays, a multi band output
file is written.
* When the final result is a single array, a single band output
file is written.
Example:
\b
$ rio calc "(+ 2 (* 0.95 (read 1)))" tests/data/RGB.byte.tif \\
> /tmp/out.tif
Produces a 3-band GeoTIFF with all values scaled by 0.95 and
incremented by 2.
\b
$ rio calc "(asarray (+ 125 (read 1)) (read 1) (read 1))" \\
> tests/data/shade.tif /tmp/out.tif
Produces a 3-band RGB GeoTIFF, with red levels incremented by 125,
from the single-band input.
"""
import numpy as np
try:
with ctx.obj['env']:
output, files = resolve_inout(files=files, output=output,
force_overwrite=force_overwrite)
inputs = ([tuple(n.split('=')) for n in name] +
[(None, n) for n in files])
with rasterio.open(inputs[0][1]) as first:
kwargs = first.meta
kwargs.update(**creation_options)
dtype = dtype or first.meta['dtype']
kwargs['dtype'] = dtype
ctxkwds = OrderedDict()
for i, (name, path) in enumerate(inputs):
with rasterio.open(path) as src:
# Using the class method instead of instance
# method. Latter raises
#
# TypeError: astype() got an unexpected keyword
# argument 'copy'
#
# possibly something to do with the instance being
# a masked array.
ctxkwds[name or '_i%d' % (i + 1)] = src.read(masked=masked)
# Extend snuggs.
snuggs.func_map['read'] = read_array
snuggs.func_map['band'] = lambda d, i: get_bands(inputs, d, i)
snuggs.func_map['bands'] = lambda d: get_bands(inputs, d)
snuggs.func_map['fillnodata'] = lambda *args: fillnodata(*args)
snuggs.func_map['sieve'] = lambda *args: sieve(*args)
res = snuggs.eval(command, ctxkwds)
if (isinstance(res, np.ma.core.MaskedArray) and (
tuple(LooseVersion(np.__version__).version) < (1, 9) or
tuple(LooseVersion(np.__version__).version) > (1, 10))):
res = res.filled(kwargs['nodata'])
if len(res.shape) == 3:
results = np.ndarray.astype(res, dtype, copy=False)
else:
results = np.asanyarray(
[np.ndarray.astype(res, dtype, copy=False)])
kwargs['count'] = results.shape[0]
with rasterio.open(output, 'w', **kwargs) as dst:
dst.write(results)
except snuggs.ExpressionError as err:
click.echo("Expression Error:")
click.echo(' %s' % err.text)
click.echo(' ' + ' ' * err.offset + "^")
click.echo(err)
raise click.Abort()
| {
"repo_name": "brendan-ward/rasterio",
"path": "rasterio/rio/calc.py",
"copies": "1",
"size": "5193",
"license": "bsd-3-clause",
"hash": -2495797137673804300,
"line_mean": 34.5684931507,
"line_max": 79,
"alpha_frac": 0.5780858848,
"autogenerated": false,
"ratio": 3.7685050798258346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4846590964625834,
"avg_score": null,
"num_lines": null
} |
"""$ rio merge"""
import click
from cligj import files_inout_arg, format_opt
import rasterio
from rasterio.rio import options
from rasterio.rio.helpers import resolve_inout
@click.command(short_help="Merge a stack of raster datasets.")
@files_inout_arg
@options.output_opt
@format_opt
@options.bounds_opt
@options.resolution_opt
@options.nodata_opt
@options.force_overwrite_opt
@click.option('--precision', type=int, default=7,
help="Number of decimal places of precision in alignment of "
"pixels")
@options.creation_options
@click.pass_context
def merge(ctx, files, output, driver, bounds, res, nodata, force_overwrite,
precision, creation_options):
"""Copy valid pixels from input files to an output file.
All files must have the same number of bands, data type, and
coordinate reference system.
Input files are merged in their listed order using the reverse
painter's algorithm. If the output file exists, its values will be
overwritten by input values.
Geospatial bounds and resolution of a new output file in the
units of the input file coordinate reference system may be provided
and are otherwise taken from the first input file.
Note: --res changed from 2 parameters in 0.25.
\b
--res 0.1 0.1 => --res 0.1 (square)
--res 0.1 0.2 => --res 0.1 --res 0.2 (rectangular)
"""
from rasterio.merge import merge as merge_tool
output, files = resolve_inout(
files=files, output=output, force_overwrite=force_overwrite)
with ctx.obj['env']:
sources = [rasterio.open(f) for f in files]
dest, output_transform = merge_tool(sources, bounds=bounds, res=res,
nodata=nodata, precision=precision)
profile = sources[0].profile
profile['transform'] = output_transform
profile['height'] = dest.shape[1]
profile['width'] = dest.shape[2]
profile['driver'] = driver
profile.update(**creation_options)
with rasterio.open(output, 'w', **profile) as dst:
dst.write(dest)
# uses the colormap in the first input raster.
try:
colormap = sources[0].colormap(1)
dst.write_colormap(1, colormap)
except ValueError:
pass
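# Example (hypothetical filenames): mosaic two scenes onto a 0.5-unit grid,
# keeping the first listed dataset's valid pixels wherever the inputs overlap:
#   $ rio merge scene1.tif scene2.tif mosaic.tif --res 0.5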
| {
"repo_name": "brendan-ward/rasterio",
"path": "rasterio/rio/merge.py",
"copies": "1",
"size": "2349",
"license": "bsd-3-clause",
"hash": 6580345550971606000,
"line_mean": 31.625,
"line_max": 79,
"alpha_frac": 0.6479352916,
"autogenerated": false,
"ratio": 3.915,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.50629352916,
"avg_score": null,
"num_lines": null
} |
"""$ rio rasterize"""
import json
import logging
from math import ceil
import os
from affine import Affine
import click
import cligj
import rasterio
from rasterio.errors import CRSError
from rasterio.coords import disjoint_bounds
from rasterio.rio import options
from rasterio.rio.helpers import resolve_inout
logger = logging.getLogger('rio')
# Common options used below
# Unlike the version in cligj, this one doesn't require values.
files_inout_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
metavar="INPUTS... OUTPUT")
@click.command(short_help='Rasterize features.')
@files_inout_arg
@options.output_opt
@cligj.format_opt
@options.like_file_opt
@options.bounds_opt
@options.dimensions_opt
@options.resolution_opt
@click.option('--src-crs', '--src_crs', 'src_crs', default=None,
help='Source coordinate reference system. Limited to EPSG '
'codes for now. Used as output coordinate system if output '
'does not exist or --like option is not used. '
'Default: EPSG:4326')
@options.all_touched_opt
@click.option('--default-value', '--default_value', 'default_value',
type=float, default=1, help='Default value for rasterized pixels')
@click.option('--fill', type=float, default=0,
help='Fill value for all pixels not overlapping features. Will '
'be evaluated as NoData pixels for output. Default: 0')
@click.option('--property', 'prop', type=str, default=None, help='Property in '
'GeoJSON features to use for rasterized values. Any features '
'that lack this property will be given --default_value instead.')
@options.force_overwrite_opt
@options.creation_options
@click.pass_context
def rasterize(
ctx,
files,
output,
driver,
like,
bounds,
dimensions,
res,
src_crs,
all_touched,
default_value,
fill,
prop,
force_overwrite,
creation_options):
"""Rasterize GeoJSON into a new or existing raster.
If the output raster exists, rio-rasterize will rasterize feature values
into all bands of that raster. The GeoJSON is assumed to be in the same
coordinate reference system as the output unless --src-crs is provided.
--default_value or property values when using --property must be using a
data type valid for the data type of that raster.
If a template raster is provided using the --like option, the affine
transform and data type from that raster will be used to create the output.
Only a single band will be output.
The GeoJSON is assumed to be in the same coordinate reference system unless
--src-crs is provided.
--default_value or property values when using --property must be using a
data type valid for the data type of that raster.
--driver, --bounds, --dimensions, and --res are ignored when output exists
or --like raster is provided
If the output does not exist and --like raster is not provided, the input
GeoJSON will be used to determine the bounds of the output unless
provided using --bounds.
--dimensions or --res are required in this case.
If --res is provided, the bottom and right coordinates of bounds are
ignored.
Note:
The GeoJSON is not projected to match the coordinate reference system
of the output or --like rasters at this time. This functionality may be
added in the future.
"""
from rasterio.crs import CRS
from rasterio.features import rasterize
from rasterio.features import bounds as calculate_bounds
output, files = resolve_inout(
files=files, output=output, force_overwrite=force_overwrite)
bad_param = click.BadParameter('invalid CRS. Must be an EPSG code.',
ctx, param=src_crs, param_hint='--src_crs')
has_src_crs = src_crs is not None
try:
src_crs = CRS.from_string(src_crs) if has_src_crs else CRS.from_string('EPSG:4326')
except CRSError:
raise bad_param
# If values are actually meant to be integers, we need to cast them
# as such or rasterize creates floating point outputs
if default_value == int(default_value):
default_value = int(default_value)
if fill == int(fill):
fill = int(fill)
with ctx.obj['env']:
def feature_value(feature):
if prop and 'properties' in feature:
return feature['properties'].get(prop, default_value)
return default_value
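# Example (hypothetical property name): with --property value, a feature whose
# properties contain {"value": 5} is burned in as 5; features lacking that
# property fall back to --default_value.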
with click.open_file(files.pop(0) if files else '-') as gj_f:
geojson = json.loads(gj_f.read())
if 'features' in geojson:
geometries = []
for f in geojson['features']:
geometries.append((f['geometry'], feature_value(f)))
elif 'geometry' in geojson:
geometries = ((geojson['geometry'], feature_value(geojson)), )
else:
raise click.BadParameter('Invalid GeoJSON', param=input,
param_hint='input')
geojson_bounds = geojson.get('bbox', calculate_bounds(geojson))
if os.path.exists(output):
with rasterio.open(output, 'r+') as out:
if has_src_crs and src_crs != out.crs:
raise click.BadParameter('GeoJSON does not match crs of '
'existing output raster',
param='input', param_hint='input')
if disjoint_bounds(geojson_bounds, out.bounds):
click.echo("GeoJSON outside bounds of existing output "
"raster. Are they in different coordinate "
"reference systems?",
err=True)
meta = out.meta.copy()
result = rasterize(
geometries,
out_shape=(meta['height'], meta['width']),
transform=meta.get('affine', meta['transform']),
all_touched=all_touched,
dtype=meta.get('dtype', None),
default_value=default_value,
fill=fill)
for bidx in range(1, meta['count'] + 1):
data = out.read(bidx, masked=True)
# Burn in any non-fill pixels, and update mask accordingly
ne = result != fill
data[ne] = result[ne]
data.mask[ne] = False
out.write(data, indexes=bidx)
else:
if like is not None:
template_ds = rasterio.open(like)
if has_src_crs and src_crs != template_ds.crs:
raise click.BadParameter('GeoJSON does not match crs of '
'--like raster',
param='input', param_hint='input')
if disjoint_bounds(geojson_bounds, template_ds.bounds):
click.echo("GeoJSON outside bounds of --like raster. "
"Are they in different coordinate reference "
"systems?",
err=True)
kwargs = template_ds.meta.copy()
kwargs['count'] = 1
kwargs['transform'] = template_ds.transform
template_ds.close()
else:
bounds = bounds or geojson_bounds
if src_crs.is_geographic:
if (bounds[0] < -180 or bounds[2] > 180 or
bounds[1] < -80 or bounds[3] > 80):
raise click.BadParameter(
"Bounds are beyond the valid extent for "
"EPSG:4326.",
ctx, param=bounds, param_hint='--bounds')
if dimensions:
width, height = dimensions
res = (
(bounds[2] - bounds[0]) / float(width),
(bounds[3] - bounds[1]) / float(height)
)
else:
if not res:
raise click.BadParameter(
'pixel dimensions are required',
ctx, param=res, param_hint='--res')
elif len(res) == 1:
res = (res[0], res[0])
width = max(int(ceil((bounds[2] - bounds[0]) /
float(res[0]))), 1)
height = max(int(ceil((bounds[3] - bounds[1]) /
float(res[1]))), 1)
kwargs = {
'count': 1,
'crs': src_crs,
'width': width,
'height': height,
'transform': Affine(res[0], 0, bounds[0], 0, -res[1],
bounds[3]),
'driver': driver
}
kwargs.update(**creation_options)
result = rasterize(
geometries,
out_shape=(kwargs['height'], kwargs['width']),
transform=kwargs['transform'],
all_touched=all_touched,
dtype=kwargs.get('dtype', None),
default_value=default_value,
fill=fill)
if 'dtype' not in kwargs:
kwargs['dtype'] = result.dtype
kwargs['nodata'] = fill
with rasterio.open(output, 'w', **kwargs) as out:
out.write(result, indexes=1)
| {
"repo_name": "brendan-ward/rasterio",
"path": "rasterio/rio/rasterize.py",
"copies": "1",
"size": "9807",
"license": "bsd-3-clause",
"hash": 1465439146416905500,
"line_mean": 35.7303370787,
"line_max": 91,
"alpha_frac": 0.539614561,
"autogenerated": false,
"ratio": 4.455701953657428,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5495316514657428,
"avg_score": null,
"num_lines": null
} |
"""$ rio shapes"""
from __future__ import division
import logging
import math
import os
import click
import cligj
from rasterio.rio import options
from rasterio.rio.helpers import coords, write_features
from rasterio.transform import Affine
from rasterio.crs import CRS
logger = logging.getLogger('rio')
# Common options used below
# Unlike the version in cligj, this one doesn't require values.
files_inout_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
metavar="INPUTS... OUTPUT")
all_touched_opt = click.option(
'-a', '--all', '--all_touched', 'all_touched',
is_flag=True,
default=False,
help='Use all pixels touched by features, otherwise (default) use only '
'pixels whose center is within the polygon or that are selected by '
         "Bresenham's line algorithm")
@click.command(short_help="Write shapes extracted from bands or masks.")
@options.file_in_arg
@options.output_opt
@cligj.precision_opt
@cligj.indent_opt
@cligj.compact_opt
@cligj.projection_geographic_opt
@cligj.projection_projected_opt
@cligj.sequence_opt
@cligj.use_rs_opt
@cligj.geojson_type_feature_opt(True)
@cligj.geojson_type_bbox_opt(False)
@click.option('--band/--mask', default=True,
help="Choose to extract from a band (the default) or a mask.")
@click.option('--bidx', 'bandidx', type=int, default=None,
help="Index of the band or mask that is the source of shapes.")
@click.option('--sampling', type=int, default=1,
help="Inverse of the sampling fraction; "
"a value of 10 decimates.")
@click.option('--with-nodata/--without-nodata', default=False,
help="Include or do not include (the default) nodata regions.")
@click.option('--as-mask/--not-as-mask', default=False,
help="Interpret a band as a mask and output only one class of "
"valid data shapes.")
@click.pass_context
def shapes(
ctx, input, output, precision, indent, compact, projection, sequence,
use_rs, geojson_type, band, bandidx, sampling, with_nodata, as_mask):
"""Extracts shapes from one band or mask of a dataset and writes
them out as GeoJSON. Unless otherwise specified, the shapes will be
transformed to WGS 84 coordinates.
The default action of this command is to extract shapes from the
first band of the input dataset. The shapes are polygons bounding
contiguous regions (or features) of the same raster value. This
command performs poorly for int16 or float type datasets.
Bands other than the first can be specified using the `--bidx`
option:
$ rio shapes --bidx 3 tests/data/RGB.byte.tif
The valid data footprint of a dataset's i-th band can be extracted
by using the `--mask` and `--bidx` options:
$ rio shapes --mask --bidx 1 tests/data/RGB.byte.tif
Omitting the `--bidx` option results in a footprint extracted from
the conjunction of all band masks. This is generally smaller than
any individual band's footprint.
A dataset band may be analyzed as though it were a binary mask with
the `--as-mask` option:
$ rio shapes --as-mask --bidx 1 tests/data/RGB.byte.tif
"""
# These import numpy, which we don't want to do unless it's needed.
import numpy as np
import rasterio.features
import rasterio.warp
logger = logging.getLogger('rio')
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
stdout = click.open_file(
output, 'w') if output else click.get_text_stream('stdout')
bidx = 1 if bandidx is None and band else bandidx
# This is the generator for (feature, bbox) pairs.
class Collection(object):
def __init__(self, env):
self._xs = []
self._ys = []
self.env = env
@property
def bbox(self):
return min(self._xs), min(self._ys), max(self._xs), max(self._ys)
def __call__(self):
with rasterio.open(input) as src:
if bidx is not None and bidx > src.count:
raise ValueError('bidx is out of range for raster')
img = None
msk = None
# Adjust transforms.
transform = src.transform
if sampling > 1:
# Determine the target shape (to decimate)
shape = (int(math.ceil(src.height / sampling)),
int(math.ceil(src.width / sampling)))
# Calculate independent sampling factors
x_sampling = src.width / shape[1]
y_sampling = src.height / shape[0]
# Decimation of the raster produces a georeferencing
# shift that we correct with a translation.
transform *= Affine.translation(
src.width % x_sampling, src.height % y_sampling)
# And follow by scaling.
transform *= Affine.scale(x_sampling, y_sampling)
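                    # Worked example (hypothetical numbers): a 100 x 100
                    # raster read with --sampling 4 is decimated to shape
                    # (25, 25); x_sampling and y_sampling are both 4.0 and
                    # the pixel size in `transform` is scaled by 4, so the
                    # extracted shapes keep their original georeferencing.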
# Most of the time, we'll use the valid data mask.
# We skip reading it if we're extracting every possible
# feature (even invalid data features) from a band.
if not band or (band and not as_mask and not with_nodata):
if sampling == 1:
msk = src.read_masks(bidx)
else:
msk_shape = shape
if bidx is None:
msk = np.zeros(
(src.count,) + msk_shape, 'uint8')
else:
msk = np.zeros(msk_shape, 'uint8')
msk = src.read_masks(bidx, msk)
if bidx is None:
msk = np.logical_or.reduce(msk).astype('uint8')
# Possibly overridden below.
img = msk
# Read the band data unless the --mask option is given.
if band:
if sampling == 1:
img = src.read(bidx, masked=False)
else:
img = np.zeros(
shape,
dtype=src.dtypes[src.indexes.index(bidx)])
img = src.read(bidx, img, masked=False)
# If --as-mask option was given, convert the image
# to a binary image. This reduces the number of shape
# categories to 2 and likely reduces the number of
# shapes.
if as_mask:
tmp = np.ones_like(img, 'uint8') * 255
tmp[img == 0] = 0
img = tmp
if not with_nodata:
msk = tmp
# Transform the raster bounds.
bounds = src.bounds
xs = [bounds[0], bounds[2]]
ys = [bounds[1], bounds[3]]
if projection == 'geographic':
xs, ys = rasterio.warp.transform(
src.crs, CRS({'init': 'epsg:4326'}), xs, ys)
if precision >= 0:
xs = [round(v, precision) for v in xs]
ys = [round(v, precision) for v in ys]
self._xs = xs
self._ys = ys
# Prepare keyword arguments for shapes().
kwargs = {'transform': transform}
if not with_nodata:
kwargs['mask'] = msk
src_basename = os.path.basename(src.name)
# Yield GeoJSON features.
for i, (g, val) in enumerate(
rasterio.features.shapes(img, **kwargs)):
if projection == 'geographic':
g = rasterio.warp.transform_geom(
src.crs, 'EPSG:4326', g,
antimeridian_cutting=True, precision=precision)
xs, ys = zip(*coords(g))
yield {
'type': 'Feature',
'id': "{0}:{1}".format(src_basename, i),
'properties': {
'val': val, 'filename': src_basename
},
'bbox': [min(xs), min(ys), max(xs), max(ys)],
'geometry': g
}
if not sequence:
geojson_type = 'collection'
try:
with ctx.obj['env'] as env:
write_features(
stdout, Collection(env), sequence=sequence,
geojson_type=geojson_type, use_rs=use_rs,
**dump_kwds)
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
| {
"repo_name": "brendan-ward/rasterio",
"path": "rasterio/rio/shapes.py",
"copies": "1",
"size": "9016",
"license": "bsd-3-clause",
"hash": -4466985981897910300,
"line_mean": 36.1028806584,
"line_max": 77,
"alpha_frac": 0.5341614907,
"autogenerated": false,
"ratio": 4.232863849765258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 243
} |
"""$ rio stack"""
import collections
import logging
import click
from cligj import files_inout_arg, format_opt
import rasterio
from rasterio.compat import zip_longest
from rasterio.rio import options
from rasterio.rio.helpers import resolve_inout
@click.command(short_help="Stack a number of bands into a multiband dataset.")
@files_inout_arg
@options.output_opt
@format_opt
@options.bidx_mult_opt
@options.rgb_opt
@options.force_overwrite_opt
@options.creation_options
@click.pass_context
def stack(ctx, files, output, driver, bidx, photometric, force_overwrite,
creation_options):
"""Stack a number of bands from one or more input files into a
multiband dataset.
Input datasets must be of a kind: same data type, dimensions, etc. The
output is cloned from the first input.
By default, rio-stack will take all bands from each input and write them
in same order to the output. Optionally, bands for each input may be
specified using a simple syntax:
--bidx N takes the Nth band from the input (first band is 1).
    --bidx M,N,O takes bands M, N, and O.
--bidx M..O takes bands M-O, inclusive.
--bidx ..N takes all bands up to and including N.
--bidx N.. takes all bands from N to the end.
    Examples using the Rasterio testing dataset, each producing a copy of it.
rio stack RGB.byte.tif -o stacked.tif
rio stack RGB.byte.tif --bidx 1,2,3 -o stacked.tif
rio stack RGB.byte.tif --bidx 1..3 -o stacked.tif
rio stack RGB.byte.tif --bidx ..2 RGB.byte.tif --bidx 3.. -o stacked.tif
"""
logger = logging.getLogger('rio')
try:
with ctx.obj['env']:
output, files = resolve_inout(files=files, output=output,
force_overwrite=force_overwrite)
output_count = 0
indexes = []
for path, item in zip_longest(files, bidx, fillvalue=None):
with rasterio.open(path) as src:
src_indexes = src.indexes
if item is None:
indexes.append(src_indexes)
output_count += len(src_indexes)
elif '..' in item:
start, stop = map(
lambda x: int(x) if x else None, item.split('..'))
if start is None:
start = 1
indexes.append(src_indexes[slice(start - 1, stop)])
output_count += len(src_indexes[slice(start - 1, stop)])
else:
parts = list(map(int, item.split(',')))
if len(parts) == 1:
indexes.append(parts[0])
output_count += 1
else:
parts = list(parts)
indexes.append(parts)
output_count += len(parts)
with rasterio.open(files[0]) as first:
kwargs = first.meta
kwargs.update(**creation_options)
kwargs.update(
driver=driver,
count=output_count)
if photometric:
kwargs['photometric'] = photometric
with rasterio.open(output, 'w', **kwargs) as dst:
dst_idx = 1
for path, index in zip(files, indexes):
with rasterio.open(path) as src:
if isinstance(index, int):
data = src.read(index)
dst.write(data, dst_idx)
dst_idx += 1
elif isinstance(index, collections.Iterable):
data = src.read(index)
dst.write(data, range(dst_idx, dst_idx + len(index)))
dst_idx += len(index)
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
| {
"repo_name": "brendan-ward/rasterio",
"path": "rasterio/rio/stack.py",
"copies": "1",
"size": "3992",
"license": "bsd-3-clause",
"hash": -7245156463935429000,
"line_mean": 34.0175438596,
"line_max": 81,
"alpha_frac": 0.5415831663,
"autogenerated": false,
"ratio": 4.180104712041885,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5221687878341885,
"avg_score": null,
"num_lines": null
} |
"""$ rio warp"""
import logging
from math import ceil, floor, log
import warnings
import click
from cligj import files_inout_arg, format_opt
import rasterio
from rasterio.crs import CRS
from rasterio.env import setenv
from rasterio.errors import CRSError
from rasterio.rio import options
from rasterio.rio.helpers import resolve_inout
from rasterio.transform import Affine
from rasterio.warp import (
reproject, Resampling, transform_bounds,
calculate_default_transform as calcdt)
# Improper usage of rio-warp can lead to accidental creation of
# extremely large datasets. We'll put a hard limit on the size of
# datasets and raise a usage error if the limits are exceeded.
MAX_OUTPUT_WIDTH = 100000
MAX_OUTPUT_HEIGHT = 100000
@click.command(short_help='Warp a raster dataset.')
@files_inout_arg
@options.output_opt
@format_opt
@click.option(
'--like',
type=click.Path(exists=True),
help='Raster dataset to use as a template for obtaining affine '
'transform (bounds and resolution), and crs.')
@click.option('--dst-crs', default=None,
help='Target coordinate reference system.')
@options.dimensions_opt
@click.option(
'--src-bounds',
nargs=4, type=float, default=None,
help="Determine output extent from source bounds: left bottom right top "
". Cannot be used with destination --bounds")
@click.option(
'--bounds', '--dst-bounds', nargs=4, type=float, default=None,
help="Determine output extent from destination bounds: left bottom right top")
@options.resolution_opt
@click.option('--resampling', type=click.Choice([r.name for r in Resampling]),
default='nearest', help="Resampling method.",
show_default=True)
@click.option('--src-nodata', default=None, show_default=True,
type=float, help="Manually override source nodata")
@click.option('--dst-nodata', default=None, show_default=True,
type=float, help="Manually override destination nodata")
@click.option('--threads', type=int, default=1,
help='Number of processing threads.')
@click.option('--check-invert-proj/--no-check-invert-proj', default=True,
help='Constrain output to valid coordinate region in dst-crs')
@click.option('--target-aligned-pixels/--no-target-aligned-pixels', default=False,
help='align the output bounds based on the resolution')
@options.force_overwrite_opt
@options.creation_options
@click.pass_context
def warp(ctx, files, output, driver, like, dst_crs, dimensions, src_bounds,
dst_bounds, res, resampling, src_nodata, dst_nodata, threads,
check_invert_proj, force_overwrite, creation_options,
target_aligned_pixels):
"""
Warp a raster dataset.
If a template raster is provided using the --like option, the
coordinate reference system, affine transform, and dimensions of
that raster will be used for the output. In this case --dst-crs,
--bounds, --res, and --dimensions options are not applicable and
an exception will be raised.
\b
$ rio warp input.tif output.tif --like template.tif
The output coordinate reference system may be either a PROJ.4 or
EPSG:nnnn string,
\b
--dst-crs EPSG:4326
--dst-crs '+proj=longlat +ellps=WGS84 +datum=WGS84'
or a JSON text-encoded PROJ.4 object.
\b
--dst-crs '{"proj": "utm", "zone": 18, ...}'
If --dimensions are provided, --res and --bounds are not applicable and an
exception will be raised.
Resolution is calculated based on the relationship between the
raster bounds in the target coordinate system and the dimensions,
and may produce rectangular rather than square pixels.
\b
$ rio warp input.tif output.tif --dimensions 100 200 \\
> --dst-crs EPSG:4326
If --bounds are provided, --res is required if --dst-crs is provided
(defaults to source raster resolution otherwise).
\b
$ rio warp input.tif output.tif \\
> --bounds -78 22 -76 24 --res 0.1 --dst-crs EPSG:4326
"""
output, files = resolve_inout(
files=files, output=output, force_overwrite=force_overwrite)
resampling = Resampling[resampling] # get integer code for method
if not len(res):
# Click sets this as an empty tuple if not provided
res = None
else:
# Expand one value to two if needed
res = (res[0], res[0]) if len(res) == 1 else res
if target_aligned_pixels:
if not res:
raise click.BadParameter(
'--target-aligned-pixels requires a specified resolution')
if src_bounds or dst_bounds:
raise click.BadParameter(
'--target-aligned-pixels cannot be used with '
'--src-bounds or --dst-bounds')
# Check invalid parameter combinations
if like:
invalid_combos = (dimensions, dst_bounds, dst_crs, res)
if any(p for p in invalid_combos if p is not None):
raise click.BadParameter(
"--like cannot be used with any of --dimensions, --bounds, "
"--dst-crs, or --res")
elif dimensions:
invalid_combos = (dst_bounds, res)
if any(p for p in invalid_combos if p is not None):
raise click.BadParameter(
"--dimensions cannot be used with --bounds or --res")
with ctx.obj['env']:
setenv(CHECK_WITH_INVERT_PROJ=check_invert_proj)
with rasterio.open(files[0]) as src:
l, b, r, t = src.bounds
out_kwargs = src.profile.copy()
out_kwargs['driver'] = driver
# Sort out the bounds options.
if src_bounds and dst_bounds:
raise click.BadParameter(
"--src-bounds and destination --bounds may not be "
"specified simultaneously.")
if like:
with rasterio.open(like) as template_ds:
dst_crs = template_ds.crs
dst_transform = template_ds.transform
dst_height = template_ds.height
dst_width = template_ds.width
elif dst_crs is not None:
try:
dst_crs = CRS.from_string(dst_crs)
except ValueError as err:
raise click.BadParameter(
str(err), param='dst_crs', param_hint='dst_crs')
if dimensions:
# Calculate resolution appropriate for dimensions
# in target.
dst_width, dst_height = dimensions
try:
xmin, ymin, xmax, ymax = transform_bounds(
src.crs, dst_crs, *src.bounds)
except CRSError as err:
raise click.BadParameter(
str(err), param='dst_crs', param_hint='dst_crs')
dst_transform = Affine(
(xmax - xmin) / float(dst_width),
0, xmin, 0,
(ymin - ymax) / float(dst_height),
ymax
)
elif src_bounds or dst_bounds:
if not res:
raise click.BadParameter(
"Required when using --bounds.",
param='res', param_hint='res')
if src_bounds:
try:
xmin, ymin, xmax, ymax = transform_bounds(
src.crs, dst_crs, *src_bounds)
except CRSError as err:
raise click.BadParameter(
str(err), param='dst_crs',
param_hint='dst_crs')
else:
xmin, ymin, xmax, ymax = dst_bounds
dst_transform = Affine(res[0], 0, xmin, 0, -res[1], ymax)
dst_width = max(int(ceil((xmax - xmin) / res[0])), 1)
dst_height = max(int(ceil((ymax - ymin) / res[1])), 1)
elif target_aligned_pixels:
try:
xmin, ymin, xmax, ymax = transform_bounds(
src.crs, dst_crs, *src.bounds)
except CRSError as err:
raise click.BadParameter(
str(err), param='dst_crs', param_hint='dst_crs')
xmin = floor(xmin / res[0]) * res[0]
xmax = ceil(xmax / res[0]) * res[0]
ymin = floor(ymin / res[1]) * res[1]
ymax = ceil(ymax / res[1]) * res[1]
dst_transform = Affine(res[0], 0, xmin, 0, -res[1], ymax)
dst_width = max(int(ceil((xmax - xmin) / res[0])), 1)
dst_height = max(int(ceil((ymax - ymin) / res[1])), 1)
else:
try:
if src.transform.is_identity and src.gcps:
src_crs = src.gcps[1]
kwargs = {'gcps': src.gcps[0]}
else:
src_crs = src.crs
kwargs = src.bounds._asdict()
dst_transform, dst_width, dst_height = calcdt(
src_crs, dst_crs, src.width, src.height,
resolution=res, **kwargs)
except CRSError as err:
raise click.BadParameter(
str(err), param='dst_crs', param_hint='dst_crs')
elif dimensions:
# Same projection, different dimensions, calculate resolution.
dst_crs = src.crs
dst_width, dst_height = dimensions
dst_transform = Affine(
(r - l) / float(dst_width),
0, l, 0,
(b - t) / float(dst_height),
t
)
elif src_bounds or dst_bounds:
# Same projection, different dimensions and possibly
# different resolution.
if not res:
res = (src.transform.a, -src.transform.e)
dst_crs = src.crs
xmin, ymin, xmax, ymax = (src_bounds or dst_bounds)
dst_transform = Affine(res[0], 0, xmin, 0, -res[1], ymax)
dst_width = max(int(ceil((xmax - xmin) / res[0])), 1)
dst_height = max(int(ceil((ymax - ymin) / res[1])), 1)
elif res:
# Same projection, different resolution.
dst_crs = src.crs
dst_transform = Affine(res[0], 0, l, 0, -res[1], t)
dst_width = max(int(ceil((r - l) / res[0])), 1)
dst_height = max(int(ceil((t - b) / res[1])), 1)
else:
dst_crs = src.crs
dst_transform = src.transform
dst_width = src.width
dst_height = src.height
# If src_nodata is not None, update the dst metadata NODATA
# value to src_nodata (will be overridden by dst_nodata if it is not None
if src_nodata is not None:
# Update the dst nodata value
out_kwargs.update({
'nodata': src_nodata
})
# Validate a manually set destination NODATA value
# against the input datatype.
if dst_nodata is not None:
if src_nodata is None and src.meta['nodata'] is None:
raise click.BadParameter(
"--src-nodata must be provided because dst-nodata is not None")
else:
# Update the dst nodata value
out_kwargs.update({'nodata': dst_nodata})
# When the bounds option is misused, extreme values of
# destination width and height may result.
if (dst_width < 0 or dst_height < 0 or
dst_width > MAX_OUTPUT_WIDTH or
dst_height > MAX_OUTPUT_HEIGHT):
raise click.BadParameter(
"Invalid output dimensions: {0}.".format(
(dst_width, dst_height)))
out_kwargs.update({
'crs': dst_crs,
'transform': dst_transform,
'width': dst_width,
'height': dst_height
})
# Adjust block size if necessary.
if ('blockxsize' in out_kwargs and
dst_width < out_kwargs['blockxsize']):
del out_kwargs['blockxsize']
if ('blockysize' in out_kwargs and
dst_height < out_kwargs['blockysize']):
del out_kwargs['blockysize']
out_kwargs.update(**creation_options)
with rasterio.open(output, 'w', **out_kwargs) as dst:
reproject(
source=rasterio.band(src, list(range(1, src.count + 1))),
destination=rasterio.band(
dst, list(range(1, src.count + 1))),
src_transform=src.transform,
src_crs=src.crs,
src_nodata=src_nodata,
dst_transform=out_kwargs['transform'],
dst_crs=out_kwargs['crs'],
dst_nodata=dst_nodata,
resampling=resampling,
num_threads=threads)
| {
"repo_name": "brendan-ward/rasterio",
"path": "rasterio/rio/warp.py",
"copies": "1",
"size": "13664",
"license": "bsd-3-clause",
"hash": 2402650466734781400,
"line_mean": 39.5459940653,
"line_max": 87,
"alpha_frac": 0.5270052693,
"autogenerated": false,
"ratio": 4.2982069833280905,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5325212252628091,
"avg_score": null,
"num_lines": null
} |
# $Source: /home/CVSROOT/c2ada/AdaMain.py,v $
# $Revision: 1.1.1.1 $ $Date: 1999/02/02 12:01:51 $
# Write an Ada main procedure to wrap a C main().
import sys
# text: the (generic) text of the Ada main procedure.
#
# ---------------------------------
text = '''
with %(cmain)s;
with Ada.Command_Line;
with %(predef)s;
procedure %(unit)s is
package AC renames Ada.Command_line;
type charp_array is array(%(predef)s.natural_int range <>)
of aliased %(predef)s.charp;
argc : %(predef)s.int := %(predef)s.int(AC.Argument_Count);
argv : charp_array(0..argc);
begin
argv(0) := %(predef)s.new_string(AC.Command_Name);
for i in 1 .. argc loop
argv(i) := %(predef)s.new_string(AC.argument( integer(i) ));
end loop;
%(cmain)s.main(argc, argv(0)'access );
end %(unit)s;
''' # end text
# ---------------------------------
# ada_main: write the main procedure for the output Ada program
# cmain: the Ada name of the package holding the translated
# C main() program
# predef: the "predef" package name
# unit : the name of the Ada unit to be produced
# filename: file to write unit to, if specificed
#
def ada_main(cmain, predef, unit, filename=None):
result = text % locals()
if filename:
f = open(filename,'w')
f.write(result)
f.close()
else:
return result
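# Example (hypothetical unit names): write the wrapper for a translated
# main package called "prog_main", using a predef package "c2ada_predef":
#   ada_main('prog_main', 'c2ada_predef', 'ada_wrapper', 'ada_wrapper.adb')
# Omitting the filename returns the generated source as a string instead:
#   src = ada_main('prog_main', 'c2ada_predef', 'ada_wrapper')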
usage_msg = '''
usage: python AdaMain.py cmain predef unit [filename]
cmain: the name of the Ada package containing the C program main()
predef: name of the predefined package
unit: package name of output compilation unit
filename: path name of output file
if omitted, output to stdout
'''
# main: the command line interface
# the call is AdaMain cmain predef unit [filename]
def main():
argv = sys.argv
if len(argv)==4 or len(argv)==5:
cmain = argv[1]
predef = argv[2]
unit = argv[3]
if len(argv)==5:
filename = argv[4]
else:
filename = None
result = ada_main(cmain, predef, unit, filename)
if result:
print result
else:
print usage_msg
if __name__ == '__main__' : main()
| {
"repo_name": "mikequentel/c2ada",
"path": "AdaMain.py",
"copies": "1",
"size": "2144",
"license": "mit",
"hash": 3459783803995921000,
"line_mean": 23.0898876404,
"line_max": 76,
"alpha_frac": 0.6049440299,
"autogenerated": false,
"ratio": 3.1027496382054993,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4207693668105499,
"avg_score": null,
"num_lines": null
} |
# $Source: /home/CVSROOT/c2ada/C2ada.py,v $
# $Revision: 1.1.1.1 $ $Date: 1999/02/02 12:01:51 $
# Provide the framework for reading in a configuration script.
# The configuration script contains the declaration of project-specific
# information about c2ada's task. It initializes various attributes
# of a single global data structure called "the", containing statements
# like
#
# the.reserved_names = ["x","stdarg"]
# the.source("blah.h").interfaces("blah.c")
#
# or whatever. The general facilities of Python can be used to streamline
# repetitive "declarations":
# for x in ["a.h", "b.h", "c.h"]:
# the.source(x).output_dir = "/dev/null"
#
# The principal routine called from C is "configure_project", which
# takes as argument the name of the configuration script. It executes
# a script in an environment which exposes "the", then returns the
# resulting value. C code then accesses the various attributes
# of this return value.
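#
# A minimal example configuration script (the file names are hypothetical,
# only meant to illustrate the attributes described above):
#
# the.output_dir = './bindings'
# the.reserved_names = ['x', 'stdarg']
# the.source('blah.h').interfaces('blah.c')
# the.source('blah.h').unchecked_conversions_to_spec = True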
###################
import os
import traceback
import string
True = 1
False = 0
# The type of "the" is C2ada_project. Declaring this as a class allows
# us to provide default values, and provides the hooks for catching
# assignments to non-existent attributes.
#
class C2ada_project:
# writable_attrs is for documentation & eventual input checking
#
writable_attrs = { 'reserved_names' : 'list of strings' }
def __init__(self):
self.output_dir = './bindings'
# self.ada_compiler = 'gnat'
# self.comments_preserved = False
# self.strings_to_char_arrays = False
self.source = Instance_dict(C_source)
# self.source_directories = None
self.reserved_names = []
# A C_source object captures properties associated with a C source file
# name.
#
class C_source:
writable_attrs = {'unchecked_conversions_to_spec' : 'boolean',
'partner' : 'string' }
def __init__(self, name):
self.c_name = name
self.is_header= os.path.splitext(name)[1] == '.h'
self.decl = Source_decl_dict()
self.macro = Instance_dict(Macro)
# self.ada_name = None
#self.output_dir = the.output_dir
def interfaces(self, name):
self.partner = name
the.source(name).partner = self.c_name
# An Instance_dict is a dictionary that contains instances of the
# argument class "iclass". It creates an initial instance of this
# class whenever a key does not already have an instance.
#
class Instance_dict:
def __init__(self,iclass):
self.dict = {}
self.iclass = iclass
def __getitem__(self,key):
return self.dict[key]
def __call__(self,key):
try:
return self.dict[key]
except KeyError:
result = self.iclass(key)
self.dict[key] = result
return result
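# For example (illustrative): with d = Instance_dict(C_source), the call
# d('blah.h') creates a C_source on first use, and later calls return the
# same object, so repeated configuration statements all update one instance.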
# These must correspond to the values of {ENUM,STRUCT,UNION}_PREFIX
# in il.h
#
prefix_dict = {'enum':'1', 'struct':'2', 'union':'3'}
def alt_key(key):
if string.find(key,' ')== -1:
return None
words = string.split(key)
if len(words)!=2: return None
try:
return prefix_dict[words[0]] + words[1]
except:
return None
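# e.g. alt_key('struct foo') == '2foo' and alt_key('foo') is None,
# mirroring the STRUCT_PREFIX encoding from il.h.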
class Source_decl_dict(Instance_dict):
def __init__(self):
Instance_dict.__init__(self, Decl)
def __call__(self,key):
try:
return self.dict[key]
except KeyError:
result = self.iclass(key)
self.dict[key] = result
alt = alt_key(key)
if alt:
self.dict[alt] = result
return result
class Macro:
writeable_attrs = { 'replacement' : 'string',
'returns' : 'string',
'signature' : 'string' }
def __init__(self,name):
self.name = name
# reconstitute restores a macro body, undoing the encoding
# that happened in grok_macro (cpp.c).
# This is really a subroutine, not a method that's
# intended to be invoked outside this class.
#
def reconstitute(self,body,formals):
result = body
for i in range(1,len(formals)+1):
mark = chr(1)+chr(i)
result = string.joinfields(string.splitfields(result,mark),
formals[i-1])
return result
returns = None #default attribute
signature = '' #default attribute
# rewrite: turn a #define directive into a declaration
# TBD: currently only handles function-like macros
#
def rewrite(self, name, body, formals, eol_comment):
cbody = self.reconstitute(body,formals)
sig= self.signature
if self.returns:
rtype = self.returns
stmt = "return %s;" % cbody
else:
rtype = 'void'
stmt = "%s;" % cbody
if eol_comment:
comment = '/*%s*/' % eol_comment
else:
comment = ''
fmt = 'inline %(rtype)s %(name)s (%(sig)s) { %(stmt)s } %(comment)s'
return fmt % locals()
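    # Illustrative example: with signature = 'int x' and returns = 'int',
    # a macro "#define TWICE(x) ((x)+(x))" is rewritten roughly as
    #   inline int TWICE (int x) { return ((x)+(x)); }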
class Decl:
writeable_attrs = {'ada_name' : 'string',
'return_type_is_void' : 'boolean',
'private' : 'boolean' }
def __init__(self,name):
self.name = name
# self.ada_name = None
# self.scope = Instance_dict(Decl)
#################
# define "the"; accessed as "the_data" in this module.
#
the_data = C2ada_project()
the = the_data
# "configure" takes as argument the name of a file which contains
# the configuration script for this run.
#
def configure(filename):
globals = {'the':the_data, 'True':1, 'False':0}
if filename:
try:
execfile(filename, globals)
except:
print "error in initialization file", filename
traceback.print_exc()
return None
return the_data
# "source_partner" returns the name, if any, of a source file associated
# with the argument file. "partners" are .h/.c files with a spec/body
# relationship, as specified in the configuration file.
#
# This function must be called after "configure" has been called.
def source_partner(filename):
try:
return the.source[filename].partner
except:
return ""
| {
"repo_name": "mikequentel/c2ada",
"path": "C2ada.py",
"copies": "1",
"size": "5918",
"license": "mit",
"hash": 7779880823728118000,
"line_mean": 25.5381165919,
"line_max": 74,
"alpha_frac": 0.6336600203,
"autogenerated": false,
"ratio": 3.4307246376811595,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.456438465798116,
"avg_score": null,
"num_lines": null
} |
### $SPARK_HOME/bin/spark-submit movielens_analysis.py
### Data visualization of movielens dataset.
from pyspark import SparkContext
import matplotlib.pyplot as plt
import numpy as np
sc = SparkContext("local", "Movielens Analysis")
sc.setLogLevel("ERROR")
PATH = "/Users/c/xueshu/bigdata/machineLearningWithSpark"
## 1. Do some statistics
user_data = sc.textFile("%s/ml-100k/u.user" % PATH)
user_fields = user_data.map(lambda line: line.split('|'))
num_users = user_fields.count()
num_genders = user_fields.map(lambda fields: fields[2]).distinct().count()
num_occupations = user_fields.map(lambda fields: fields[3]).distinct().count()
num_zipcodes = user_fields.map(lambda fields: fields[4]).distinct().count()
print("Users:%d, genders:%d, occupations:%d, ZIP codes:%d"
%(num_users, num_genders, num_occupations, num_zipcodes))
## 2. Draw histogram of age
ages = user_fields.map(lambda fields: int(fields[1])).collect()
fig1 = plt.figure()
plt.hist(ages, bins = 20, edgecolor='black')
plt.title("Age histogram")
plt.xlabel("Age")
plt.ylabel("Number")
## 3. Draw job distribution
occupations = user_fields.map(lambda
fields: (fields[3], 1)).reduceByKey(lambda
x, y: x + y).sortBy(lambda x: x[1]).collect()
fig2 = plt.figure(figsize=(9, 5), dpi=100)
x_axis = [occu[0] for occu in occupations]
y_axis = [occu[1] for occu in occupations]
pos = np.arange(len(x_axis))
width = 1.0
ax = plt.axes()
ax.set_xticks(pos + 0.5)
ax.set_xticklabels(x_axis)
plt.bar(pos, y_axis, width, edgecolor='black')
plt.xticks(rotation=30)
plt.ylabel("Number")
plt.title("Job distribution")
## 4. Draw movie year distribution
movie_data = sc.textFile("%s/ml-100k/u.item" % PATH)
movie_fields = movie_data.map(lambda line: line.split('|'))
num_movies = movie_fields.count()
def get_year(fields):
try:
return int(fields[2][-4:])
except:
return 1900
movie_years = movie_fields.map(get_year)
movie_ages = movie_years.filter(lambda
year: year != 1900).map(lambda year: 1998 - year).countByValue()
# wrap in list() so this also works on Python 3, where dict views are returned
y_axis = list(movie_ages.values())
x_axis = list(movie_ages.keys())
fig3 = plt.figure()
plt.bar(x_axis, y_axis, edgecolor='black')
plt.title("Movie age distribution")
plt.xlabel("Movie age")
plt.ylabel("Number")
## 5. Do statistics on the rating data
rating_data = sc.textFile("%s/ml-100k/u.data" % PATH)
rating_fields = rating_data.map(lambda line: line.split('\t'))
ratings = rating_fields.map(lambda fields: int(fields[2]))
num_ratings = ratings.count()
min_rating = ratings.reduce(lambda x, y: min(x, y))
max_rating = ratings.reduce(lambda x, y: max(x, y))
mean_rating = ratings.reduce(lambda x, y: x + y) / float(num_ratings)
median_rating = np.median(ratings.collect())
ratings_per_user = num_ratings / float(num_users)
ratings_per_movie = num_ratings / float(num_movies)
print("Min rating:%d, max rating:%d, average rating:%.2f, median rating:%d"
%(min_rating, max_rating, mean_rating, median_rating))
print("Average # of rating per user: %.1f" %(ratings_per_user))
print("Average # of rating per movie: %.1f" %(ratings_per_movie))
## 6. Draw movie rating distribution
ratings_count = ratings.countByValue()
x_axis = list(ratings_count.keys())
y_axis = list(ratings_count.values())
fig4 = plt.figure()
plt.bar(x_axis, y_axis, edgecolor='black')
plt.title("Movie ratings distribution")
plt.xlabel("Movie rate")
plt.ylabel("Number")
## 7. User rating number distribution
user_rating_number = rating_fields.map(lambda
fields: (int(fields[0]), 1)).reduceByKey(lambda
x, y: x + y).sortBy(lambda x: x[1], ascending=False)
x_axis = np.arange(num_users)
y_axis = user_rating_number.values().collect()
fig5 = plt.figure(figsize=(9, 5), dpi=100)
plt.bar(x_axis, y_axis)
plt.title("User rating numbers rank")
plt.xlabel("Number of movie rated")
plt.ylabel("User")
plt.show() | {
"repo_name": "luseiee/machineLearningWithSpark",
"path": "chapter03/movielens_analysis.py",
"copies": "1",
"size": "3783",
"license": "mit",
"hash": -4654984164280979000,
"line_mean": 35.3846153846,
"line_max": 78,
"alpha_frac": 0.7031456516,
"autogenerated": false,
"ratio": 2.91,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41131456516000003,
"avg_score": null,
"num_lines": null
} |
### $SPARK_HOME/bin/spark-submit movielens_feature.py
### Feature extraction of movielens dataset.
from pyspark import SparkContext
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import re
from scipy import sparse as sp
from pyspark.mllib.feature import Normalizer
sc = SparkContext("local", "Movielens Analysis")
sc.setLogLevel("ERROR")
PATH = "/Users/c/xueshu/bigdata/machineLearningWithSpark"
## 1. Occupation feature
user_data = sc.textFile("%s/ml-100k/u.user" % PATH)
user_fields = user_data.map(lambda line: line.split('|'))
occupation_data = user_fields.map(lambda user_fields: user_fields[3])
all_occupations = occupation_data.distinct().collect()
all_occupations.sort()
occupation_dict = {}
for i, occu in enumerate(all_occupations):
occupation_dict[occu] = i
user_tom_occupation = 'programmer'
tom_occupation_feature = np.zeros(len(all_occupations))
tom_occupation_feature[occupation_dict[user_tom_occupation]] = 1
print("Binary feature of tom's occupation (programmer) is:")
print(tom_occupation_feature)
## 2. Time stamp => classification feature
rating_data = sc.textFile("%s/ml-100k/u.data" % PATH)
rating_fields = rating_data.map(lambda line: line.split('\t'))
timestamps = rating_fields.map(lambda fields: int(fields[3]))
hour_of_day = timestamps.map(lambda ts: datetime.fromtimestamp(ts).hour)
times_of_day_dict = {}
for hour in range(24):
if hour in range(7, 12):
times_of_day_dict[hour] = "morning"
elif hour in range(12, 14):
times_of_day_dict[hour] = "lunch"
elif hour in range(14, 18):
times_of_day_dict[hour] = "afternoon"
elif hour in range(18, 23):
times_of_day_dict[hour] = "evening"
else:
times_of_day_dict[hour] = "night"
time_of_day = hour_of_day.map(lambda hour: times_of_day_dict[hour])
print
print("Converting timestamps to features.")
print(hour_of_day.take(5))
print(time_of_day.take(5))
## 3. Extract text feature, using bag-of-word method.
def extract_title(raw):
    grps = re.search(r"\((\w+)\)", raw)
if grps:
return raw[:grps.start()].strip()
else:
return raw
movie_data = sc.textFile("%s/ml-100k/u.item" % PATH)
movie_fields = movie_data.map(lambda line: line.split('|'))
raw_titles = movie_fields.map(lambda fields: fields[1])
print
print("Remove year information in '()'")
for raw_title in raw_titles.take(5):
print(extract_title(raw_title))
movie_titles = raw_titles.map(extract_title)
title_terms = movie_titles.map(lambda line: line.split(' '))
print
print("Split words.")
print(title_terms.take(5))
all_terms = title_terms.flatMap(lambda x: x).distinct().collect()
all_terms_dict = {}
for i, term in enumerate(all_terms):
all_terms_dict[term] = i
print
print("Total number of terms: %d" % len(all_terms_dict))
# create sparse vector for each title
def create_vector(terms, term_dict):
num_terms = len(term_dict)
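    # Note: item assignment on a csc_matrix works but triggers a
    # SparseEfficiencyWarning; lil_matrix is the more idiomatic choice
    # for building a sparse vector element by element.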
x = sp.csc_matrix((1, num_terms))
for t in terms:
if t in term_dict:
idx = term_dict[t]
x[0, idx] = 1
return x
all_terms_bcast = sc.broadcast(all_terms_dict)
term_vectors = title_terms.map(lambda
terms: create_vector(terms, all_terms_bcast.value))
print
print("The first five terms of converted sparse matrix of title")
print(term_vectors.take(5))
## 4. Feature normalization
np.random.seed(42)
x = np.random.randn(4)
norm_x = np.linalg.norm(x)
normalized_x = x / norm_x
print
print("x: %s" % x)
print("2-norm of x: %.4f" % norm_x)
print("normalized x: %s" % normalized_x)
normalizer = Normalizer()
vector = sc.parallelize([x])
normalized_x_mllib = normalizer.transform(vector).first().toArray()
print("MLlib normalized x: %s" % normalized_x)
| {
"repo_name": "luseiee/machineLearningWithSpark",
"path": "chapter03/movielens_feature.py",
"copies": "1",
"size": "3692",
"license": "mit",
"hash": -2934139508151443500,
"line_mean": 33.5046728972,
"line_max": 72,
"alpha_frac": 0.6971830986,
"autogenerated": false,
"ratio": 3.036184210526316,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9165828340146873,
"avg_score": 0.013507793795888533,
"num_lines": 107
} |
"""$<ticker symbol> for a quote on a stock price"""
from __future__ import print_function
import logging
import re
from urllib import quote
from bs4 import BeautifulSoup
import requests
logger = logging.getLogger(__name__)
def stockprice(ticker):
url = "https://www.google.com/finance?q={0}"
soup = BeautifulSoup(requests.get(url.format(quote(ticker))).text)
try:
company, ticker = re.findall(u"^(.+?)\xa0\xa0(.+?)\xa0", soup.text, re.M)[0]
except IndexError:
logging.info("Unable to find stock {0}".format(ticker))
return ""
price = soup.select("#price-panel .pr span")[0].text
change, pct = soup.select("#price-panel .nwp span")[0].text.split()
    pct = pct.strip('()')  # str.strip() returns a new string; keep it
emoji = ":chart_with_upwards_trend:" if change.startswith("+") else ":chart_with_downwards_trend:"
return "{0} {1} {2}: {3} {4} {5} {6}".format(emoji, company, ticker, price, change, pct, emoji)
def on_message(msg, server):
text = msg.get("text", "")
matches = re.findall(r"\$[a-zA-Z]\w{0,3}", text)
if not matches:
return
prices = [stockprice(ticker[1:].encode("utf8")) for ticker in matches]
return "\n".join(p for p in prices if p)
| {
"repo_name": "shawnsi/limbo",
"path": "limbo/plugins/stock.py",
"copies": "2",
"size": "1195",
"license": "mit",
"hash": 4095378867482222600,
"line_mean": 31.2972972973,
"line_max": 102,
"alpha_frac": 0.6334728033,
"autogenerated": false,
"ratio": 3.2037533512064345,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9827342355801962,
"avg_score": 0.001976759740894521,
"num_lines": 37
} |
"""$<ticker symbol> for a quote on a stock price"""
from __future__ import print_function
import logging
import re
try:
from urllib import quote
except ImportError:
from urllib.request import quote
from bs4 import BeautifulSoup
import requests
logger = logging.getLogger(__name__)
def stockprice(ticker):
url = "https://www.google.com/finance?q={0}"
soup = BeautifulSoup(requests.get(url.format(quote(ticker))).text, "html5lib")
try:
company, ticker = re.findall(u"^(.+?)\xa0\xa0(.+?)\xa0", soup.text, re.M)[0]
except IndexError:
logging.info("Unable to find stock {0}".format(ticker))
return ""
price = soup.select("#price-panel .pr span")[0].text
change, pct = soup.select("#price-panel .nwp span")[0].text.split()
    pct = pct.strip('()')  # str.strip() returns a new string; keep it
emoji = ":chart_with_upwards_trend:" if change.startswith("+") else ":chart_with_downwards_trend:"
return "{0} {1} {2}: {3} {4} {5} {6}".format(emoji, company, ticker, price, change, pct, emoji)
def on_message(msg, server):
text = msg.get("text", "")
matches = re.findall(r"\$[a-zA-Z]\w{0,3}", text)
if not matches:
return
prices = [stockprice(ticker[1:].encode("utf8")) for ticker in matches]
return "\n".join(p for p in prices if p)
| {
"repo_name": "uilab-github/slask",
"path": "limbo/plugins/stock.py",
"copies": "2",
"size": "1273",
"license": "mit",
"hash": -8679607974672437000,
"line_mean": 30.825,
"line_max": 102,
"alpha_frac": 0.638648861,
"autogenerated": false,
"ratio": 3.247448979591837,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9875449302693815,
"avg_score": 0.0021297075796045404,
"num_lines": 40
} |
"""$<ticker symbol> for a quote on a stock price"""
from __future__ import print_function
import re
import requests
from bs4 import BeautifulSoup
from urllib import quote
import json
def stockprice(ticker):
return "not implemented, go away"
# print ('tickers')
# url = "http://finance.yahoo.com/webservice/v1/symbols/{0}/quote?format=json"
# # print(url.format(quote(ticker)))
# # print (url.format(ticker))
# # soup = BeautifulSoup(requests.get(url.format(quote(ticker))).text)
# j = requests.get(url.format(quote(ticker)))
# # print (soup.text)
# # try:
# # company, ticker = re.findall(u"^(.+?)\xa0\xa0(.+?)\xa0", soup.text, re.M)[0]
# # price = soup.select("#price-panel .pr span")[0].text
# # change, pct = soup.select("#price-panel .nwp span")[0].text.split()
# # pct.strip('()')
# try:
# # print (j)
# data = j.text
# print (data)
# print ('------------------------------')
# resources = data["resources"]
# print (resources)
# company = resources[0]["name"]
# price = resources[0]["price"]
# print ('------------------------------')
# print (resources["resource"])
# # print (company)
# # print (price)
# # print ('the resources')
# # print (resources)
# change = ""
# emoji = ""
# pct = ""
# emoji = ":chart_with_upwards_trend:" if change.startswith("+") else ":chart_with_downwards_trend:"
# return "{0} {1} {2}: {3} {4} {5} {6}".format(emoji, company, ticker, price, change, pct, emoji)
# except Exception as e:
# return ""
def on_message(msg, server):
text = msg.get("text", "")
match = re.findall(r"\$\w{0,4}$", text)
if not match:
return
# prices = [stockprice(ticker[1:]) for ticker in match]
# return "\n".join(p for p in prices if p)
return "not implemented go away"
| {
"repo_name": "BobbyJohansen/BillBot",
"path": "plugins/stock.py",
"copies": "1",
"size": "1964",
"license": "mit",
"hash": -633237891168741000,
"line_mean": 32.8620689655,
"line_max": 108,
"alpha_frac": 0.5366598778,
"autogenerated": false,
"ratio": 3.33446519524618,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43711250730461804,
"avg_score": null,
"num_lines": null
} |
"""$<ticker symbol> for a quote on a stock price"""
from __future__ import print_function
import re
import requests
from bs4 import BeautifulSoup
from urllib import quote
def stockprice(ticker):
url = "https://www.google.com/finance?q={0}"
print(url.format(quote(ticker)))
soup = BeautifulSoup(requests.get(url.format(quote(ticker))).text)
try:
company, ticker = re.findall(u"^(.+?)\xa0\xa0(.+?)\xa0", soup.text, re.M)[0]
price = soup.select("#price-panel .pr span")[0].text
change, pct = soup.select("#price-panel .nwp span")[0].text.split()
        pct = pct.strip('()')  # str.strip() returns a new string; keep it
emoji = ":chart_with_upwards_trend:" if change.startswith("+") else ":chart_with_downwards_trend:"
return "{0} {1} {2}: {3} {4} {5} {6}".format(emoji, company, ticker, price, change, pct, emoji)
except Exception as e:
return ""
def on_message(msg, server):
text = msg.get("text", "")
match = re.findall(r"\$\w{0,4}", text)
if not match: return
prices = [stockprice(ticker[1:]) for ticker in match]
return "\n".join(p for p in prices if p)
| {
"repo_name": "NUKnightLab/slask",
"path": "plugins/_stock.py",
"copies": "3",
"size": "1102",
"license": "mit",
"hash": 5227433619215207000,
"line_mean": 32.3939393939,
"line_max": 106,
"alpha_frac": 0.6206896552,
"autogenerated": false,
"ratio": 3.2128279883381925,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5333517643538193,
"avg_score": null,
"num_lines": null
} |
"""$<ticker symbol> for a quote on a stock price"""
import re
import requests
from bs4 import BeautifulSoup
from urllib import quote
def stockprice(ticker):
url = "https://www.google.com/finance?q={}"
print url.format(quote(ticker))
soup = BeautifulSoup(requests.get(url.format(quote(ticker))).text)
try:
company, ticker = re.findall(u"^(.+?)\xa0\xa0(.+?)\xa0", soup.text, re.M)[0]
price = soup.select("#price-panel .pr span")[0].text
change, pct = soup.select("#price-panel .nwp span")[0].text.split()
        pct = pct.strip('()')  # str.strip() returns a new string; keep it
emoji = ":chart_with_upwards_trend:" if change.startswith("+") else ":chart_with_downwards_trend:"
return "{} {} {}: {} {} {} {}".format(emoji, company, ticker, price, change, pct, emoji)
except Exception as e:
return ""
def on_message(msg, server):
text = msg.get("text", "")
match = re.findall(r"\$\w{0,4}", text)
if not match: return
prices = [stockprice(ticker[1:]) for ticker in match]
return "\n".join(p for p in prices if p)
| {
"repo_name": "kesre/slask",
"path": "plugins/stock.py",
"copies": "4",
"size": "1053",
"license": "mit",
"hash": -8836745091303084000,
"line_mean": 34.1,
"line_max": 106,
"alpha_frac": 0.6144349478,
"autogenerated": false,
"ratio": 3.311320754716981,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5925755702516982,
"avg_score": null,
"num_lines": null
} |
# $ ./triton ./src/testers/qemu-test-x86_64.py ./src/samples/ir_test_suite/qemu-test-x86_64
from __future__ import print_function
from triton import *
import pintool as Pintool
# Get the Triton context over the pintool
Triton = Pintool.getTritonContext()
def sbefore(instruction):
Triton.concretizeAllRegister()
Triton.concretizeAllMemory()
return
def cafter(instruction):
ofIgnored = [
OPCODE.X86.RCL,
OPCODE.X86.RCR,
OPCODE.X86.ROL,
OPCODE.X86.ROR,
OPCODE.X86.SAR,
OPCODE.X86.SHL,
OPCODE.X86.SHLD,
OPCODE.X86.SHR,
OPCODE.X86.SHRD,
]
bad = list()
regs = Triton.getParentRegisters()
for reg in regs:
cvalue = Pintool.getCurrentRegisterValue(reg)
se = Triton.getSymbolicRegister(reg)
if se is None:
continue
expr = se.getAst()
svalue = expr.evaluate()
# Check register
if cvalue != svalue:
if reg.getName() == 'of' and instruction.getType() in ofIgnored:
continue
bad.append({
'reg': reg.getName(),
'svalue': svalue,
'cvalue': cvalue,
'expr': se.getAst()
})
if bad:
dump = '[KO] %#x: %s (%d register error(s))' %(instruction.getAddress(), instruction.getDisassembly(), len(bad))
for w in bad:
dump += '\n Register : %s' %(w['reg'])
dump += '\n Symbolic Value : %016x' %(w['svalue'])
dump += '\n Concrete Value : %016x' %(w['cvalue'])
dump += '\n Expression : %s' %(w['expr'])
print(dump)
with open('./semantics_issues', 'a') as fd:
fd.write(dump+'\n')
if len(instruction.getSymbolicExpressions()) == 0:
dump = '[unsupported] %#x: %s' %(instruction.getAddress(), instruction.getDisassembly())
print(dump)
with open('./semantics_issues', 'a') as fd:
fd.write(dump+'\n')
return
return
if __name__ == '__main__':
Pintool.setupImageWhitelist(['qemu-test-x86_64'])
Pintool.startAnalysisFromSymbol('main')
Pintool.insertCall(cafter, Pintool.INSERT_POINT.AFTER)
Pintool.insertCall(sbefore, Pintool.INSERT_POINT.BEFORE_SYMPROC)
Pintool.runProgram()
| {
"repo_name": "JonathanSalwan/Triton",
"path": "src/testers/pin/qemu-test-x86_64.py",
"copies": "1",
"size": "2370",
"license": "apache-2.0",
"hash": 1174026272070207000,
"line_mean": 25.9318181818,
"line_max": 120,
"alpha_frac": 0.5544303797,
"autogenerated": false,
"ratio": 3.356940509915014,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9391679486467528,
"avg_score": 0.003938280629497117,
"num_lines": 88
} |
# $URL$
# $Rev$
# PyPNG documentation build configuration file, created by
# sphinx-quickstart on Mon Mar 16 13:12:26 2009.
# Then brutally hacked down by drj.
# See http://sphinx.pocoo.org/config.html
import sys, os
# So that local modules get picked up, and picked up first.
sys.path.insert(0, os.path.abspath('../code'))
# So that setup.py can be picked up and used for its conf member
sys.path.insert(0, os.path.abspath('..'))
# Expecting to find ../setup.py
from setup import conf
# General configuration
# ---------------------
extensions = ['sphinx.ext.autodoc']
templates_path = []
source_suffix = '.rst'
master_doc = 'index'
project = u'PyPNG'
copyright = u'2009, ' + conf['author']
release = conf['version']
version = release[:release.rfind('.')]
language='en'
today_fmt = '%Y-%m-%d'
exclude_trees = ['build']
# Options for HTML output
# -----------------------
html_static_path = []
html_last_updated_fmt = '%Y-%m-%dT%H:%M:%S'
htmlhelp_basename = 'PyPNGdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'PyPNG.tex', ur'PyPNG Documentation',
ur'David Jones', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# http://sphinx.pocoo.org/ext/autodoc.html?highlight=__init__
autoclass_content='both'
| {
"repo_name": "mnaberez/pypng",
"path": "man/conf.py",
"copies": "1",
"size": "1621",
"license": "mit",
"hash": 8965899757170787000,
"line_mean": 24.328125,
"line_max": 81,
"alpha_frac": 0.6588525601,
"autogenerated": false,
"ratio": 3.166015625,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9299488361793364,
"avg_score": 0.005075964661327027,
"num_lines": 64
} |
# $URL: http://pypng.googlecode.com/svn/trunk/setup.py $
# $Rev: 186 $
# PyPNG setup.py
# This is the setup.py script used by distutils.
# You can install the png module into your Python distribution with:
# python setup.py install
# You can also do other standard distutils type things, but you can refer
# to the distutils documentation for that.
# This script is also imported as a module by the Sphinx conf.py script
# in the man directory, so that this file forms a single source for
# metadata.
conf = dict(
name='pypng',
version='0.0.9',
description='Pure Python PNG image encoder/decoder',
long_description="""
PyPNG allows PNG image files to be read and written using pure Python.
It's available from Google code:
http://code.google.com/p/pypng/downloads/list
Documentation is kindly hosted at python.org:
http://packages.python.org/pypng/
(and also available in the download tarball).
""",
author='David Jones',
author_email='drj@pobox.com',
url='http://code.google.com/p/pypng/',
package_dir={'':'code'},
py_modules=['png'],
classifiers=[
'Topic :: Multimedia :: Graphics',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python',
'Programming Language :: Python :: 2.3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
],
)
conf['download_url'] = \
'http://pypng.googlecode.com/files/%(name)s-%(version)s.tar.gz' % conf
if __name__ == '__main__':
from distutils.core import setup
setup(**conf)
| {
"repo_name": "evast/XNATImageViewer",
"path": "src/main/scripts/viewer/X/lib/pypng-0.0.9/setup.py",
"copies": "5",
"size": "1577",
"license": "bsd-3-clause",
"hash": 7596583590326448000,
"line_mean": 31.1836734694,
"line_max": 72,
"alpha_frac": 0.675967026,
"autogenerated": false,
"ratio": 3.650462962962963,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007037297677691766,
"num_lines": 49
} |
# $URL: https://pyisapie.svn.sourceforge.net/svnroot/pyisapie/Tags/1.1.0-rc4/PyISAPIe/Python/Examples/Django/Isapi.py $
# $Rev: 162 $ $Date: 2009-05-28 13:37:33 -0700 (Thu, 28 May 2009) $
# (C)2008 Phillip Sitbon <phillip@sitbon.net>
#
"""Global ISAPI request handler.
This serves up the Django site 'mysite' via WSGI. Currently the core
handler pyisapie.py is not up to date.
"""
from django.core.handlers.wsgi import WSGIHandler as DjangoHandler
from Http.WSGI import RunWSGI
from Http import Env
import os
# Change this!
os.environ["DJANGO_SETTINGS_MODULE"] = "WWW.mysite.settings"
# This is how the WSGI module determines what part of the path
# SCRIPT_NAME should consist of. If you configure PyISAPIe as
# a wildcard map on the root of your site, you can leave this
# value as-is.
#
Base = "/"
# This is an example of what paths might need to be handled by
# other parts of IIS that still come here first. This value's
# default of "/media" assumes that you've mapped a virtual
# directory to Django's admin media folder and so expect the
# files to be served by the static file handler.
#
Exclude = ["/media"]
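# If, for example, this instance served a virtual directory named
# "/django_site_1" (a hypothetical name), Base would be set to that value
# and Exclude might grow to something like ["/media", "/static"].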
# The main request handler.
# This object can be re-created for every request if desired.
#
Handler = DjangoHandler()
def Request():
PathInfo = Env.PATH_INFO
# There is no way to test if this ISAPI extension is configured
# as a wildcard handler, so this script will fail if it is not.
# If you'd rather have it as a script map, remove the checks below.
#
# You can also remove it if you set up this instance as a handler
# for a virtual directory and know that Base will always start
# with it. For example, if "/django_site_1" is the virtual directory
# you're running in, and Base is set to the same value, no need
# to ever pass control away from this handler.
# Pass through to the next handler if it's not
# part of our Django app.
#
if not PathInfo.startswith(Base):
return True
# Check for anything we know shouldn't be handled by Django and
# pass it back to IIS, which in most cases sends it to the static
# file handler.
#
for Excl in Exclude:
if PathInfo.startswith(Excl):
return True
return RunWSGI(Handler, Base=Base) | {
"repo_name": "babyliynfg/cross",
"path": "tools/project-creator/Python2.6.6/Lib/Django/Isapi.py",
"copies": "1",
"size": "2281",
"license": "mit",
"hash": -8951016337439244000,
"line_mean": 33.1230769231,
"line_max": 119,
"alpha_frac": 0.7049539676,
"autogenerated": false,
"ratio": 3.482442748091603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4687396715691603,
"avg_score": null,
"num_lines": null
} |
# $URL: https://pyisapie.svn.sourceforge.net/svnroot/pyisapie/Tags/1.1.0-rc4/PyISAPIe/Python/Examples/Django/pyisapie.py $
# $Rev: 91 $ $Date: 2008-01-10 23:53:59 -0800 (Thu, 10 Jan 2008) $
# (C)2008 Phillip Sitbon <phillip@sitbon.net>
#
"""Custom request handler for PyISAPIe.
"""
from django.core.handlers.base import BaseHandler
from django.core import signals
from django.dispatch import dispatcher
from django.utils import datastructures
from django import http
import os
from Http import *
class PyISAPIeRequest(http.HttpRequest):
def __init__(This):
This.method = Env.REQUEST_METHOD.upper()
This.path = Env.URL
# aren't list comprehensions fun?!
This._headers_in = \
dict((N.lower(), V) for N, V in [Item.split(': ',1) for Item in Env.ALL_RAW.split('\r\n') if Item])
def get_full_path(This):
Qs = Env.QUERY_STRING
return '%s%s' % (This.path, Qs and ('?' + Qs) or '')
def _load_post_and_files(This):
"Populates This._post and This._files"
Ctype = Env.HTTP_CONTENT_TYPE or ''
if Ctype.startswith('multipart'):
This._post, This._files = http.parse_file_upload(This._headers_in, This.raw_post_data)
else:
This._post, This._files = http.QueryDict(This.raw_post_data), datastructures.MultiValueDict()
def _get_request(This):
if not hasattr(This, '_request'):
This._request = datastructures.MergeDict(This.POST, This.GET)
return This._request
def _get_get(This):
if not hasattr(This, '_get'):
This._get = http.QueryDict(Env.QUERY_STRING)
return This._get
def _set_get(This, get):
This._get = get
def _get_post(This):
if not hasattr(This, '_post'):
This._load_post_and_files()
return This._post
def _set_post(This, post):
This._post = post
def _get_cookies(This):
if not hasattr(This, '_cookies'):
This._cookies = http.parse_cookie(This._headers_in.get('cookie', ''))
return This._cookies
def _set_cookies(This, cookies):
This._cookies = cookies
def _get_files(This):
if not hasattr(This, '_files'):
This._load_post_and_files()
return This._files
def _get_meta(This):
"Lazy loader that returns This.META dictionary"
if not hasattr(This, '_meta'):
This._meta = {
'AUTH_TYPE': Env.AUTH_TYPE,
'CONTENT_LENGTH': Env.CONTENT_LENGTH,
'CONTENT_TYPE': Env.CONTENT_TYPE,
'GATEWAY_INTERFACE': Env.GATEWAY_INTERFACE,
'PATH_INFO': Env.PATH_INFO,
'PATH_TRANSLATED': Env.PATH_TRANSLATED,
'QUERY_STRING': Env.QUERY_STRING,
'REMOTE_ADDR': Env.REMOTE_ADDR,
'REMOTE_HOST': Env.REMOTE_HOST,
'REMOTE_IDENT': None,
'REMOTE_USER': Env.REMOTE_USER,
'REQUEST_METHOD': Env.REQUEST_METHOD,
'SCRIPT_NAME': Env.SCRIPT_NAME,
'SERVER_NAME': Env.SERVER_NAME,
'SERVER_PORT': Env.SERVER_PORT,
'SERVER_PROTOCOL': Env.SERVER_PROTOCOL,
'SERVER_SOFTWARE': Env.SERVER_SOFTWARE
}
for key, value in This._headers_in.items():
key = 'HTTP_' + key.upper().replace('-', '_')
This._meta[key] = value
return This._meta
def _get_raw_post_data(This):
try:
return This._raw_post_data
except AttributeError:
This._raw_post_data = Read()
return This._raw_post_data
def _get_user(This):
if not hasattr(This, '_user'):
from django.models.auth import users
try:
user_id = This.session[users.SESSION_KEY]
if not user_id:
raise ValueError
This._user = users.get_object(pk=user_id)
except (AttributeError, KeyError, ValueError, users.UserDoesNotExist):
from django.parts.auth import anonymoususers
This._user = anonymoususers.AnonymousUser()
return This._user
def _set_user(This, user):
This._user = user
GET = property(_get_get, _set_get)
POST = property(_get_post, _set_post)
COOKIES = property(_get_cookies, _set_cookies)
FILES = property(_get_files)
META = property(_get_meta)
REQUEST = property(_get_request)
raw_post_data = property(_get_raw_post_data)
user = property(_get_user, _set_user)
class PyISAPIeHandler(BaseHandler):
def __call__(This):
from django.conf import settings
if This._request_middleware is None:
This.load_middleware()
dispatcher.send(signal=signals.request_started)
try:
request = PyISAPIeRequest()
response = This.get_response(request)
# Apply response middleware
for middleware_method in This._response_middleware:
response = middleware_method(request, response)
finally:
dispatcher.send(signal=signals.request_finished)
process_response(response)
def process_response(http_response):
from django.conf import settings
for itm in http_response.items():
Header("%s: %s" % itm)
for c in http_response.cookies.values():
Header('Set-Cookie: ' + c.output(header=''))
Header(Status = http_response.status_code)
for chunk in http_response:
Write(chunk)
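# --- Appended illustrative sketch (not part of the original handler) ---
# The dict expression in PyISAPIeRequest.__init__ turns the raw header blob
# from Env.ALL_RAW into the lower-cased _headers_in mapping.  The sample blob
# below is hypothetical and only shows what that expression produces; the
# capitalised local names follow the file's own style.
if __name__ == '__main__':
    SampleRaw = 'Host: example.com\r\nContent-Type: text/plain\r\nCookie: a=1\r\n'
    HeadersIn = dict((N.lower(), V)
                     for N, V in [Item.split(': ', 1)
                                  for Item in SampleRaw.split('\r\n') if Item])
    assert HeadersIn['host'] == 'example.com'
    assert HeadersIn['content-type'] == 'text/plain'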
| {
"repo_name": "babyliynfg/cross",
"path": "tools/project-creator/Python2.6.6/Lib/Django/pyisapie.py",
"copies": "1",
"size": "5817",
"license": "mit",
"hash": -2800554447685484000,
"line_mean": 33.6871165644,
"line_max": 122,
"alpha_frac": 0.5580195977,
"autogenerated": false,
"ratio": 3.896182183523108,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4954201781223108,
"avg_score": null,
"num_lines": null
} |
"""$URL: svn+ssh://svn.mems-exchange.org/repos/trunk/durus/connection.py $
$Id$
"""
import sys
from schevo.lib import optimize
from cPickle import loads
from heapq import heappush, heappop
from schevo.store.error import ConflictError, ReadConflictError, DurusKeyError
from schevo.store.logger import log
from schevo.store.persistent import ConnectionBase
from schevo.store.persistent_dict import PersistentDict
from schevo.store.serialize import ObjectReader, ObjectWriter
from schevo.store.serialize import split_oids, unpack_record, pack_record
from schevo.store.storage import Storage
from schevo.store.utils import p64
from itertools import islice, chain
from os import getpid
from time import time
from weakref import ref, KeyedRef
ROOT_OID = p64(0)
class Connection(ConnectionBase):
"""
The Connection manages movement of objects in and out of storage.
Instance attributes:
storage: Storage
cache: Cache
reader: ObjectReader
changed: {oid:str : Persistent}
invalid_oids: set([str])
Set of oids of objects known to have obsolete state.
transaction_serial: int
Number of calls to commit() or abort() since this instance was created.
This is used to maintain consistency, and to implement LRU replacement
in the cache.
"""
def __init__(self, storage, cache_size=100000):
"""(storage:Storage, cache_size:int=100000)
Make a connection to `storage`.
Set the target number of non-ghosted persistent objects to keep in
the cache at `cache_size`.
"""
assert isinstance(storage, Storage)
self.storage = storage
self.reader = ObjectReader(self)
self.changed = {}
self.invalid_oids = set()
try:
storage.load(ROOT_OID)
except KeyError:
self.storage.begin()
writer = ObjectWriter(self)
data, refs = writer.get_state(PersistentDict())
writer.close()
self.storage.store(ROOT_OID, pack_record(ROOT_OID, data, refs))
self.storage.end(self._handle_invalidations)
self.transaction_serial += 1
self.new_oid = storage.new_oid # needed by serialize
self.cache = Cache(cache_size)
def get_storage(self):
"""() -> Storage"""
return self.storage
def get_cache_count(self):
"""() -> int
Return the number of Persistent instances currently in the cache.
"""
return self.cache.get_count()
def get_cache_size(self):
"""() -> cache_size:int
Return the target size for the cache.
"""
return self.cache.get_size()
def set_cache_size(self, size):
"""(size:int)
Set the target size for the cache.
"""
self.cache.set_size(size)
def get_transaction_serial(self):
"""() -> int
Return the number of calls to commit() or abort() on this instance.
"""
return self.transaction_serial
def get_root(self):
"""() -> Persistent
Returns the root object.
"""
return self.get(ROOT_OID)
def get_stored_pickle(self, oid):
"""(oid:str) -> str
        Retrieve the pickle from storage.  Will raise ReadConflictError if
        the pickle is invalid.
"""
if oid in self.invalid_oids:
# someone is still trying to read after getting a conflict
raise ReadConflictError([oid])
try:
record = self.storage.load(oid)
except ReadConflictError:
invalid_oids = self.storage.sync()
self._handle_invalidations(invalid_oids, read_oid=oid)
record = self.storage.load(oid)
oid2, data, refdata = unpack_record(record)
assert oid == oid2
return data
def get(self, oid):
"""(oid:str|int|long) -> Persistent | None
Return object for `oid`.
The object may be a ghost.
"""
if type(oid) is not str:
oid = p64(oid)
obj = self.cache.get(oid)
if obj is not None:
return obj
try:
pickle = self.get_stored_pickle(oid)
except KeyError:
return None
klass = loads(pickle)
obj = self.cache.get_instance(oid, klass, self)
return obj
__getitem__ = get
def get_crawler(self, start_oid=ROOT_OID, batch_size=100):
"""(start_oid:str = ROOT_OID, batch_size:int = 100) ->
sequence(Persistent)
Returns a generator for the sequence of objects in a breadth first
traversal of the object graph, starting at the given start_oid.
The objects in the sequence have their state loaded at the same time,
so this can be used to initialize the object cache.
This uses the storage's bulk_load() method to make it faster. The
batch_size argument sets the number of object records loaded on each
call to bulk_load().
"""
def get_object_and_refs(object_record):
oid, data, refdata = unpack_record(object_record)
obj = self.cache.get(oid)
if obj is None:
klass = loads(data)
obj = self.cache.get_instance(oid, klass, self)
state = self.reader.get_state(data, load=True)
obj.__setstate__(state)
obj._p_set_status_saved()
elif obj._p_is_ghost():
state = self.reader.get_state(data, load=True)
obj.__setstate__(state)
obj._p_set_status_saved()
return obj, split_oids(refdata)
queue = [start_oid]
seen = set()
while queue:
batch = queue[:batch_size]
queue = queue[batch_size:]
seen.update(batch)
for record in self.storage.bulk_load(batch):
obj, refs = get_object_and_refs(record)
for ref in refs:
if ref not in seen:
queue.append(ref)
yield obj
def get_cache(self):
return self.cache
def load_state(self, obj):
"""(obj:Persistent)
Load the state for the given ghost object.
"""
assert self.storage is not None, 'connection is closed'
assert obj._p_is_ghost()
oid = obj._p_oid
setstate = obj.__setstate__
try:
pickle = self.get_stored_pickle(oid)
except DurusKeyError:
# We have a ghost but cannot find the state for it. This can
# happen if the object was removed from the storage as a result
# of packing.
raise ReadConflictError([oid])
state = self.reader.get_state(pickle)
setstate(state)
def note_access(self, obj):
assert obj._p_connection is self
assert obj._p_oid is not None
obj._p_serial = self.transaction_serial
self.cache.recent_objects.add(obj)
def note_change(self, obj):
"""(obj:Persistent)
        This is done when any persistent object is changed.  Changed objects
        will be stored when the transaction is committed, or made into ghosts
        if the transaction is aborted.
"""
# assert obj._p_connection is self
self.changed[obj._p_oid] = obj
def shrink_cache(self):
"""
If the number of saved and unsaved objects is more than
twice the target cache size (and the target cache size is positive),
try to ghostify enough of the saved objects to achieve
the target cache size.
"""
self.cache.shrink(self)
def _sync(self):
"""
Process all invalid_oids so that all non-ghost objects are current.
"""
invalid_oids = self.storage.sync()
self.invalid_oids.update(invalid_oids)
for oid in self.invalid_oids:
obj = self.cache.get(oid)
if obj is not None:
obj._p_set_status_ghost()
self.invalid_oids.clear()
def abort(self):
"""
Abort uncommitted changes, sync, and try to shrink the cache.
"""
for oid, obj in self.changed.iteritems():
obj._p_set_status_ghost()
self.changed.clear()
self._sync()
self.shrink_cache()
self.transaction_serial += 1
def commit(self):
"""
If there are any changes, try to store them, and
raise ConflictError if there are any invalid oids saved
or if there are any invalid oids for non-ghost objects.
"""
if not self.changed:
self._sync()
else:
if self.invalid_oids:
# someone is trying to commit after a read or write conflict
raise ConflictError(list(self.invalid_oids))
self.storage.begin()
new_objects = {}
for oid, changed_object in self.changed.iteritems():
writer = ObjectWriter(self)
try:
for obj in writer.gen_new_objects(changed_object):
oid = obj._p_oid
if oid in new_objects:
continue
elif oid not in self.changed:
new_objects[oid] = obj
self.cache[oid] = obj
data, refs = writer.get_state(obj)
self.storage.store(oid, pack_record(oid, data, refs))
obj._p_set_status_saved()
finally:
writer.close()
try:
self.storage.end(self._handle_invalidations)
except ConflictError, exc:
for oid, obj in new_objects.iteritems():
obj._p_oid = None
del self.cache[oid]
obj._p_set_status_unsaved()
obj._p_connection = None
obj._p_ref = None
raise
self.changed.clear()
self.shrink_cache()
self.transaction_serial += 1
def _handle_invalidations(self, oids, read_oid=None):
"""(oids:[str], read_oid:str=None)
Check if any of the oids are for objects that were accessed during
this transaction. If so, raise the appropriate conflict exception.
"""
conflicts = []
for oid in oids:
obj = self.cache.get(oid)
if obj is None:
continue
if not obj._p_is_ghost():
self.invalid_oids.add(oid)
if obj._p_serial == self.transaction_serial:
conflicts.append(oid)
if conflicts:
if read_oid is None:
raise ConflictError(conflicts)
else:
raise ReadConflictError([read_oid])
def pack(self):
"""Clear any uncommited changes and pack the storage."""
self.abort()
self.storage.pack()
class ObjectDictionary (object):
def __init__(self):
self.mapping = {}
self.dead = set()
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
self.dead.add(wr.key)
self.remove = remove
def get(self, key, default=None):
ref = self.mapping.get(key, None)
if ref is not None:
value = ref()
if value is not None and key not in self.dead:
return value
return default
def __setitem__(self, key, value):
self.dead.discard(key)
self.mapping[key] = KeyedRef(value, self.remove, key)
def __delitem__(self, key):
self.dead.add(key)
def __contains__(self, key):
return self.get(key, None) is not None
def __len__(self):
return len(self.mapping) - len(self.dead)
def __iter__(self):
while self.dead:
self.mapping.pop(self.dead.pop(), None)
for key in self.mapping:
if key not in self.dead:
yield key
class Cache(object):
def __init__(self, size):
self.objects = ObjectDictionary()
self.recent_objects = set()
self.set_size(size)
self.finger = 0
def get_size(self):
"""Return the target size of the cache."""
return self.size
def get_count(self):
"""Return the number of objects currently in the cache."""
return len(self.objects)
def set_size(self, size):
if size <= 0:
raise ValueError, 'cache target size must be > 0'
self.size = size
def get_instance(self, oid, klass, connection):
"""
This returns the existing object with the given oid, or else it makes
a new one with the given class and connection.
This method is called when unpickling a reference, which may happen at
a high frequency, so it needs to be fast. For the sake of speed, it
inlines some statements that would normally be executed through calling
other functions.
"""
# if self.get(oid) is not None: return self.get(oid)
objects = self.objects
obj = objects.get(oid)
if obj is None:
# Make a new ghost.
obj = klass.__new__(klass)
obj._p_oid = oid
obj._p_connection = connection
obj._p_status = -1 # obj._p_set_status_ghost()
objects[oid] = obj
return obj
def get(self, oid):
return self.objects.get(oid)
def __setitem__(self, key, obj):
assert key not in self.objects or self.objects[key] is obj
self.objects[key] = obj
def __delitem__(self, key):
obj = self.objects.get(key)
if obj is not None:
self.recent_objects.discard(obj)
assert obj._p_oid is None
del self.objects[key]
def _build_heap(self, transaction_serial):
"""(transaction_serial:int) -> [(serial, oid)]
"""
all = self.objects
heap_size_target = (len(all) - self.size) * 2
start = self.finger % len(all)
heap = []
for oid in islice(chain(all, all), start, start + len(all)):
self.finger += 1
obj = all.get(oid)
if obj is None:
continue # The ref is dead.
if obj._p_serial == transaction_serial:
continue # obj is current. Leave it alone.
heappush(heap, (obj._p_serial, oid))
if len(heap) >= heap_size_target:
break
self.finger = self.finger % len(all)
return heap
def shrink(self, connection):
"""(connection:Connection)
Try to reduce the size of self.objects.
"""
current = len(self.objects)
if current <= self.size:
# No excess.
log(10, '[%s] cache size %s recent %s',
getpid(), current, len(self.recent_objects))
return
start_time = time()
heap = self._build_heap(connection.get_transaction_serial())
num_ghosted = 0
while heap and len(self.objects) > self.size:
serial, oid = heappop(heap)
obj = self.objects.get(oid)
if obj is None:
continue
if obj._p_is_saved():
obj._p_set_status_ghost()
num_ghosted += 1
self.recent_objects.discard(obj)
log(10, '[%s] shrink %fs removed %s ghosted %s size %s recent %s',
getpid(), time() - start_time, current - len(self.objects),
num_ghosted, len(self.objects), len(self.recent_objects))
def touch_every_reference(connection, *words):
"""(connection:Connection, *words:(str))
    Mark as changed every object whose pickled class/state contains any
of the given words. This is useful when you move or rename a class,
so that all references can be updated.
"""
get = connection.get
reader = ObjectReader(connection)
for oid, record in connection.get_storage().gen_oid_record():
record_oid, data, refs = unpack_record(record)
state = reader.get_state_pickle(data)
for word in words:
if word in data or word in state:
get(oid)._p_note_change()
def gen_every_instance(connection, *classes):
"""(connection:Connection, *classes:(class)) -> sequence [Persistent]
Generate all Persistent instances that are instances of any of the
given classes."""
for oid, record in connection.get_storage().gen_oid_record():
record_oid, state, refs = unpack_record(record)
record_class = loads(state)
if issubclass(record_class, classes):
yield connection.get(oid)
optimize.bind_all(sys.modules[__name__]) # Last line of module.
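# Appended usage sketch (not part of the original module).  It assumes the
# in-memory MemoryStorage from schevo.store.storage and the usual Durus-style
# ghosting behaviour, and walks through the root-object / commit / abort cycle
# that Connection manages above.
if __name__ == '__main__':
    from schevo.store.storage import MemoryStorage
    from schevo.store.persistent_dict import PersistentDict
    connection = Connection(MemoryStorage())
    root = connection.get_root()               # the PersistentDict at ROOT_OID
    root['settings'] = PersistentDict(color='blue')
    connection.commit()                        # changed objects are written out
    root['settings']['color'] = 'red'
    connection.abort()                         # uncommitted change -> ghost
    assert root['settings']['color'] == 'blue' # ghost reloads committed state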
| {
"repo_name": "Schevo/schevo",
"path": "schevo/store/connection.py",
"copies": "1",
"size": "16905",
"license": "mit",
"hash": 6348602136280125000,
"line_mean": 33.8556701031,
"line_max": 79,
"alpha_frac": 0.5664596273,
"autogenerated": false,
"ratio": 4.148466257668711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00027600105403216205,
"num_lines": 485
} |
"""$URL: svn+ssh://svn.mems-exchange.org/repos/trunk/durus/error.py $
$Id$
"""
from schevo.lib import optimize
from schevo.store.utils import format_oid
class DurusError(StandardError):
"""Durus error."""
class DurusKeyError(KeyError, DurusError):
"""Key not found in database."""
def __str__(self):
return format_oid(self.args[0])
class InvalidObjectReference(DurusError):
"""
An object contains an invalid reference to another object.
A reference is invalid if it refers to an object managed
by a different database connection.
Instance attributes:
obj: Persistent
is the object for which the reference is invalid.
connection: Connection
the connection that attempted to store it.
obj._p_connection != connection
"""
def __init__(self, obj, connection):
self.obj = obj
self.connection = connection
def __str__(self):
return "Invalid reference to %r with a connection %r." % (
self.obj,
self.obj._p_connection)
class ConflictError(DurusError):
"""
Two transactions tried to modify the same object at once.
This transaction should be resubmitted.
The object passed to the constructor should be an instance of Persistent.
"""
def __init__(self, oids):
self.oids = oids
def __str__(self):
if len(self.oids) > 1:
s = "oids=[%s ...]"
else:
s = "oids=[%s]"
return s % format_oid(self.oids[0])
class ReadConflictError(ConflictError):
"""
Conflict detected when object was loaded.
An attempt was made to read an object that has changed in another
transaction (eg. another process).
"""
class ProtocolError(DurusError):
"""
An error occurred during communication between the storage server
and the client.
"""
import sys
optimize.bind_all(sys.modules[__name__]) # Last line of module.
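# Appended illustrative check (not part of the original module).  ConflictError
# keeps every conflicting oid but only formats the first one, adding "..." when
# more than one oid is involved; p64/format_oid come from schevo.store.utils.
if __name__ == '__main__':
    from schevo.store.utils import p64
    single = ConflictError([p64(7)])
    several = ConflictError([p64(7), p64(8)])
    assert '...' not in str(single)
    assert '...' in str(several)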
| {
"repo_name": "Schevo/schevo",
"path": "schevo/store/error.py",
"copies": "1",
"size": "1951",
"license": "mit",
"hash": 7480329694719760000,
"line_mean": 24.0128205128,
"line_max": 77,
"alpha_frac": 0.6412096361,
"autogenerated": false,
"ratio": 3.9574036511156185,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5098613287215619,
"avg_score": null,
"num_lines": null
} |
"""$URL: svn+ssh://svn.mems-exchange.org/repos/trunk/durus/serialize.py $
$Id$
"""
from schevo.lib import optimize
import struct
from cPickle import Pickler, Unpickler, loads
from cStringIO import StringIO
from schevo.store.error import InvalidObjectReference
from schevo.store.persistent import Persistent
from schevo.store.utils import p32, u32
from zlib import compress, decompress, error as zlib_error
WRITE_COMPRESSED_STATE_PICKLES = True
def pack_record(oid, data, refs):
"""(oid:str, data:str, refs:str) -> record:str
"""
return ''.join([oid, p32(len(data)), data, refs])
def unpack_record(record):
"""(record:str) -> oid:str, data:str, refs:str
The inverse of pack_record().
"""
oid = record[:8]
data_length = u32(record[8:12])
data_end = 12 + data_length
data = record[12:data_end]
refs = record[data_end:]
return oid, data, refs
def split_oids(s):
"""(s:str) -> [str]
s is a packed string of oids. Return a list of oid strings.
"""
if not s:
return []
num, extra = divmod(len(s), 8)
assert extra == 0, s
fmt = '8s' * num
return list(struct.unpack('>' + fmt, s))
def extract_class_name(record):
oid, state, refs = unpack_record(record)
class_name = state.split('\n', 2)[1]
return class_name
class ObjectWriter(object):
"""
Serializes objects for storage in the database.
The client is responsible for calling the close() method to avoid
leaking memory. The ObjectWriter uses a Pickler internally, and
Pickler objects do not participate in garbage collection.
"""
def __init__(self, connection):
self.sio = StringIO()
self.pickler = Pickler(self.sio, 2)
self.pickler.persistent_id = self._persistent_id
self.objects_found = []
self.refs = set() # populated by _persistent_id()
self.connection = connection
def close(self):
# see ObjectWriter.__doc__
# Explicitly break cycle involving pickler
self.pickler.persistent_id = None
self.pickler = None
def _persistent_id(self, obj):
"""
This function is used by the pickler to test whether an object
is persistent. If the obj is persistent, it returns the oid and type,
otherwise it returns None.
"""
if not isinstance(obj, Persistent):
return None
if obj._p_oid is None:
obj._p_oid = self.connection.new_oid()
obj._p_connection = self.connection
self.objects_found.append(obj)
elif obj._p_connection is not self.connection:
raise InvalidObjectReference(obj, self.connection)
self.refs.add(obj._p_oid)
return obj._p_oid, type(obj)
def gen_new_objects(self, obj):
def once(obj):
raise RuntimeError('gen_new_objects() already called.')
self.gen_new_objects = once
yield obj # The modified object is also a "new" object.
for obj in self.objects_found:
yield obj
def get_state(self, obj):
self.sio.seek(0) # recycle StringIO instance
self.sio.truncate()
self.pickler.clear_memo()
self.pickler.dump(type(obj))
self.refs.clear()
position = self.sio.tell()
self.pickler.dump(obj.__getstate__())
uncompressed = self.sio.getvalue()
pickled_type = uncompressed[:position]
pickled_state = uncompressed[position:]
if WRITE_COMPRESSED_STATE_PICKLES:
state = compress(pickled_state)
else:
state = pickled_state
data = pickled_type + state
self.refs.discard(obj._p_oid)
return data, ''.join(self.refs)
class ObjectReader(object):
def __init__(self, connection):
self.connection = connection
def _get_unpickler(self, file):
connection = self.connection
get_instance = connection.get_cache().get_instance
def persistent_load(oid_klass):
oid, klass = oid_klass
return get_instance(oid, klass, connection)
unpickler = Unpickler(file)
unpickler.persistent_load = persistent_load
return unpickler
def get_ghost(self, data):
klass = loads(data)
instance = klass.__new__(klass)
instance._p_set_status_ghost()
return instance
def get_state(self, data, load=True):
s = StringIO()
s.write(data)
s.seek(0)
unpickler = self._get_unpickler(s)
klass = unpickler.load()
position = s.tell()
if data[s.tell()] == 'x':
# This is almost certainly a compressed pickle.
try:
decompressed = decompress(data[position:])
except zlib_error:
pass # let the unpickler try anyway.
else:
s.write(decompressed)
s.seek(position)
if load:
return unpickler.load()
else:
return s.read()
def get_state_pickle(self, data):
return self.get_state(data, load=False)
import sys
optimize.bind_all(sys.modules[__name__]) # Last line of module.
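# Appended record-layout sketch (not part of the original module).  A packed
# record is the 8-byte oid, a 4-byte length of the pickled state, the state
# itself, and finally the concatenated 8-byte oids of referenced objects;
# pack_record() and unpack_record() are exact inverses.
if __name__ == '__main__':
    from schevo.store.utils import p64
    oid = p64(1)
    data = 'stand-in for the pickled class and state'
    refs = p64(2) + p64(3)                     # two referenced oids
    record = pack_record(oid, data, refs)
    assert unpack_record(record) == (oid, data, refs)
    assert split_oids(refs) == [p64(2), p64(3)]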
| {
"repo_name": "Schevo/schevo",
"path": "schevo/store/serialize.py",
"copies": "1",
"size": "5177",
"license": "mit",
"hash": 1066256198992033300,
"line_mean": 30.5670731707,
"line_max": 77,
"alpha_frac": 0.6071083639,
"autogenerated": false,
"ratio": 3.846210995542348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9910384290192504,
"avg_score": 0.008587013849968895,
"num_lines": 164
} |
"""$URL: svn+ssh://svn.mems-exchange.org/repos/trunk/durus/storage.py $
$Id$
"""
import sys
from schevo.lib import optimize
from schevo.store.serialize import (
unpack_record, split_oids, extract_class_name)
from schevo.store.utils import p64
class Storage(object):
"""
This is the interface that Connection requires for Storage.
"""
def __init__(self):
raise RuntimeError("Storage is abstract")
def load(self, oid):
"""Return the record for this oid.
"""
raise NotImplementedError
def begin(self):
"""
Begin a commit.
"""
raise NotImplementedError
def store(self, oid, record):
"""Include this record in the commit underway."""
raise NotImplementedError
def end(self, handle_invalidations=None):
"""Conclude a commit."""
raise NotImplementedError
def sync(self):
"""() -> [oid:str]
Return a list of oids that should be invalidated.
"""
raise NotImplementedError
def gen_oid_record(self):
"""() -> sequence([oid:str, record:str])
"""
raise NotImplementedError
def new_oid(self):
"""() -> oid:str
Return an unused oid. Used by Connection for serializing new persistent
instances.
"""
raise NotImplementedError
def get_packer(self):
"""
Return an incremental packer (a generator).
Used by StorageServer.
"""
raise NotImplementedError
def pack(self):
"""Remove obsolete records from the storage."""
raise NotImplementedError
def get_size(self):
"""() -> int | None
Return the number of objects available, or None if the number is not known.
"""
return None
def bulk_load(self, oids):
"""(oids:sequence(oid:str)) -> sequence(record:str)
"""
for oid in oids:
yield self.load(oid)
def gen_referring_oid_record(storage, referred_oid):
"""(storage:Storage, referred_oid:str) -> sequence([oid:str, record:str])
Generate oid, record pairs for all objects that include a
reference to the `referred_oid`.
"""
for oid, record in storage.gen_oid_record():
if referred_oid in split_oids(unpack_record(record)[2]):
yield oid, record
def gen_oid_class(storage, *classes):
"""(storage:Storage, classes:(str)) ->
sequence([(oid:str, class_name:str)])
Generate a sequence of oid, class_name pairs.
If classes are provided, only output pairs for which the
class_name is in `classes`.
"""
for oid, record in storage.gen_oid_record():
class_name = extract_class_name(record)
if not classes or class_name in classes:
yield oid, class_name
def get_census(storage):
"""(storage:Storage) -> {class_name:str, instance_count:int}"""
result = {}
for oid, class_name in gen_oid_class(storage):
result[class_name] = result.get(class_name, 0) + 1
return result
def get_reference_index(storage):
"""(storage:Storage) -> {oid:str : [referring_oid:str]}
Return a full index giving the referring oids for each oid.
This might be large.
"""
result = {}
for oid, record in storage.gen_oid_record():
for ref in split_oids(unpack_record(record)[2]):
result.setdefault(ref, []).append(oid)
return result
class MemoryStorage (Storage):
"""
A concrete Storage that keeps everything in memory.
This may be useful for testing purposes.
"""
def __init__(self):
self.records = {}
self.transaction = None
self.oid = 0
def new_oid(self):
self.oid += 1
return p64(self.oid)
def load(self, oid):
return self.records[oid]
def begin(self):
self.transaction = {}
def store(self, oid, record):
self.transaction[oid] = record
def end(self, handle_invalidations=None):
self.records.update(self.transaction)
self.transaction = None
def sync(self):
return []
def gen_oid_record(self):
for oid, record in self.records.iteritems():
yield oid, record
optimize.bind_all(sys.modules[__name__]) # Last line of module.
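# Appended illustrative check (not part of the original module) of the Storage
# contract as MemoryStorage implements it: a record only becomes visible to
# load() once the begin()/store()/end() cycle has completed.
if __name__ == '__main__':
    from schevo.store.serialize import pack_record
    storage = MemoryStorage()
    oid = storage.new_oid()
    record = pack_record(oid, 'stand-in pickle', '')
    storage.begin()
    storage.store(oid, record)
    storage.end()
    assert storage.load(oid) == record
    assert list(storage.gen_oid_record()) == [(oid, record)]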
| {
"repo_name": "Schevo/schevo",
"path": "schevo/store/storage.py",
"copies": "1",
"size": "4294",
"license": "mit",
"hash": -7207973311782903000,
"line_mean": 26.3503184713,
"line_max": 83,
"alpha_frac": 0.6059618072,
"autogenerated": false,
"ratio": 4.047125353440151,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5153087160640151,
"avg_score": null,
"num_lines": null
} |
"""$URL: svn+ssh://svn/repos/trunk/durus/persistent_dict.py $
$Id$
"""
import sys
from schevo.lib import optimize
from copy import copy
from schevo.store.persistent import PersistentData
class PersistentDict(PersistentData):
"""
Instance attributes:
data : dict
"""
data_is = dict
__slots__ = []
def __init__(self, *args, **kwargs):
self.data = dict(*args, **kwargs)
self._p_note_change()
def __cmp__(self, dict):
if isinstance(dict, PersistentDict):
return cmp(self.data, dict.data)
else:
return cmp(self.data, dict)
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, item):
self._p_note_change()
self.data[key] = item
def __delitem__(self, key):
self._p_note_change()
del self.data[key]
def clear(self):
self._p_note_change()
self.data.clear()
def copy(self):
if self.__class__ is PersistentDict:
return PersistentDict(self.data)
# Use the copy module to copy self without data, and then use the
# update method to fill the data in the new instance.
changed = self.get_p_changed()
data = self.data
try:
self.data = {} # This is why we saved _p_changed.
c = copy(self)
finally:
self.data = data
self._p_note_change(changed)
c.update(self)
return c
def keys(self):
return self.data.keys()
def items(self):
return self.data.items()
def iteritems(self):
return self.data.iteritems()
def iterkeys(self):
return self.data.iterkeys()
def itervalues(self):
return self.data.itervalues()
def values(self):
return self.data.values()
def has_key(self, key):
return self.data.has_key(key)
def update(self, other):
self._p_note_change()
if isinstance(other, PersistentDict):
self.data.update(other.data)
elif isinstance(other, dict):
self.data.update(other)
else:
            for k, v in other.items():
self[k] = v
def get(self, key, failobj=None):
return self.data.get(key, failobj)
def setdefault(self, key, failobj=None):
if key not in self.data:
self._p_note_change()
self.data[key] = failobj
return failobj
return self.data[key]
def pop(self, key, *args):
self._p_note_change()
return self.data.pop(key, *args)
def popitem(self):
self._p_note_change()
return self.data.popitem()
def __contains__(self, key):
return key in self.data
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
fromkeys = classmethod(fromkeys)
def __iter__(self):
return iter(self.data)
optimize.bind_all(sys.modules[__name__]) # Last line of module.
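# Appended usage sketch (not part of the original module).  PersistentDict
# behaves like a plain dict, but every mutating method first calls
# _p_note_change(), which is how a Connection learns that the object must be
# written at commit time.  The instance below is unsaved, so no storage is
# involved.
if __name__ == '__main__':
    d = PersistentDict(a=1)
    d['b'] = 2
    d.update({'c': 3})
    assert sorted(d.keys()) == ['a', 'b', 'c']
    e = PersistentDict.fromkeys(['x', 'y'], 0)
    assert (e['x'], e['y']) == (0, 0)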
| {
"repo_name": "Schevo/schevo",
"path": "schevo/store/persistent_dict.py",
"copies": "1",
"size": "3106",
"license": "mit",
"hash": 3972912567789004300,
"line_mean": 23.0775193798,
"line_max": 73,
"alpha_frac": 0.5602060528,
"autogenerated": false,
"ratio": 3.742168674698795,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4802374727498795,
"avg_score": null,
"num_lines": null
} |
"""$URL: svn+ssh://svn/repos/trunk/durus/persistent_list.py $
$Id$
"""
import sys
from schevo.lib import optimize
from schevo.store.persistent import PersistentData
class PersistentList(PersistentData):
"""
Instance attributes:
data : list
"""
data_is = list
__slots__ = []
def __init__(self, *args, **kwargs):
self.data = list(*args, **kwargs)
self._p_note_change()
def __cast(self, other):
if isinstance(other, PersistentList): return other.data
else: return other
def __lt__(self, other):
return self.data < self.__cast(other)
def __le__(self, other):
return self.data <= self.__cast(other)
def __eq__(self, other):
return self.data == self.__cast(other)
def __ne__(self, other):
return self.data != self.__cast(other)
def __gt__(self, other):
return self.data > self.__cast(other)
def __ge__(self, other):
return self.data >= self.__cast(other)
def __cmp__(self, other):
return cmp(self.data, self.__cast(other))
def __contains__(self, item):
return item in self.data
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]
def __setitem__(self, i, item):
self._p_note_change()
self.data[i] = item
def __delitem__(self, i):
self._p_note_change()
del self.data[i]
def __getslice__(self, i, j):
i = max(i, 0); j = max(j, 0)
return self.__class__(self.data[i:j])
def __setslice__(self, i, j, other):
self._p_note_change()
i = max(i, 0); j = max(j, 0)
if isinstance(other, PersistentList):
self.data[i:j] = other.data
elif isinstance(other, type(self.data)):
self.data[i:j] = other
else:
self.data[i:j] = list(other)
def __delslice__(self, i, j):
self._p_note_change()
i = max(i, 0); j = max(j, 0)
del self.data[i:j]
def __add__(self, other):
if isinstance(other, PersistentList):
return self.__class__(self.data + other.data)
elif isinstance(other, type(self.data)):
return self.__class__(self.data + other)
else:
return self.__class__(self.data + list(other))
def __radd__(self, other):
if isinstance(other, PersistentList):
return self.__class__(other.data + self.data)
elif isinstance(other, type(self.data)):
return self.__class__(other + self.data)
else:
return self.__class__(list(other) + self.data)
def __iadd__(self, other):
self._p_note_change()
if isinstance(other, PersistentList):
self.data += other.data
else:
self.data += list(other)
return self
def __mul__(self, n):
return self.__class__(self.data * n)
__rmul__ = __mul__
def __imul__(self, n):
self._p_note_change()
self.data *= n
return self
def append(self, item):
self._p_note_change()
self.data.append(item)
def insert(self, i, item):
self._p_note_change()
self.data.insert(i, item)
def pop(self, i=-1):
self._p_note_change()
return self.data.pop(i)
def remove(self, item):
self._p_note_change()
self.data.remove(item)
def count(self, item):
return self.data.count(item)
def index(self, item, *args):
return self.data.index(item, *args)
def reverse(self):
self._p_note_change()
self.data.reverse()
def sort(self, *args):
self._p_note_change()
self.data.sort(*args)
def extend(self, other):
self._p_note_change()
if isinstance(other, PersistentList):
self.data.extend(other.data)
else:
self.data.extend(other)
optimize.bind_all(sys.modules[__name__]) # Last line of module.
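# Appended usage sketch (not part of the original module).  PersistentList
# mirrors the built-in list, but every mutating operation (append, slice
# assignment, +=, sort, ...) calls _p_note_change() first so the owning
# Connection will persist the change.  The instance below is unsaved.
if __name__ == '__main__':
    lst = PersistentList([3, 1, 2])
    lst.append(0)
    lst.sort()
    assert lst == [0, 1, 2, 3]
    lst += [9]
    assert lst[-1] == 9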
| {
"repo_name": "Schevo/schevo",
"path": "schevo/store/persistent_list.py",
"copies": "1",
"size": "4002",
"license": "mit",
"hash": 4569390063576464000,
"line_mean": 24.3291139241,
"line_max": 64,
"alpha_frac": 0.5404797601,
"autogenerated": false,
"ratio": 3.535335689045936,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45758154491459363,
"avg_score": null,
"num_lines": null
} |
#$ZEUUX: src/ss/lib/pinyin.py,v 1.2 2008/11/26 07:06:35 qingran Exp $
# -*- coding: utf-8 -*-
"""
the PinYinDict is generated from xyb's pinyin_zopeext.py, using:
>>> f = open('tt', 'w')
>>> for (k, v) in pinyin_zopeext.pinyinDict.items():
...     f.write( u"'%s':%s, \n" %(k, str(tuple(v.split(' '))) ) )
...
>>> f.close()
Author: Junyong Pan from zopen.cn, panjy at zopen dot cn
"""
PinYinDict = {
'่':('yao',),
'ๆถ':('she',),
'่ฐ':('tan',),
'ไผ':('yi',),
'ไบ':('ji', 'qi'),
'ๆด':('dong',),
'ๅฃ':('guang',),
'็ข':('hu',),
'ๆซ':('que',),
'็ผช':('mou', 'miu', 'miao'),
'ๆฒณ':('he',),
'ไธด':('lin',),
'็ท':('guang',),
'ๅธ':('xi',),
'ๅจผ':('chang',),
'็บฟ':('xian',),
'ๅ':('fan',),
'็':('pai',),
'้':('chu',),
'ๅง':('gu',),
'็ก':('dong',),
'้ณ':('le',),
'ๅข':('lu',),
'็ฅ':('zhi',),
'้ค':('chu',),
'ๅฝช':('biao',),
'้น':('nao',),
'ๅปฟ':('nian',),
'่ฆ':('fu',),
'่พ':('gun',),
'ๅบ':('ying',),
'่ ':('mie',),
'่ธ':('chi',),
'ๅก':('lei',),
'็ค':('po',),
'ๅดฉ':('beng',),
'่ฌ':('fen',),
'ๅถ':('ye',),
'้ธ':('bu',),
'้ชผ':('ge',),
'่ต':('lai',),
'ๆน':('huang',),
'่ท':('li',),
'ๅค':('tun', 'dun'),
'ๅณจ':('e',),
'ๆฌ':('kuo',),
'ๆฃฐ':('chui',),
'่ฑณ':('bin',),
'ๆปด':('di',),
'้กป':('xu',),
'ๅญฝ':('nie',),
'้นฟ':('lu',),
'ๆฒ':('chen',),
'ไธ':('shang',),
'็':('qian', 'jian'),
'ๅ':('hou',),
'็ข':('bei',),
'้ค':('can',),
'ๆ':('bu',),
'่':('meng',),
'้ก':('fang',),
'็ ฆ':('zhai',),
'้ฒฉ':('huan',),
'ๆฏ':('ku',),
'ไผด':('ban',),
'็ท':('dian',),
'้ถ':('wei',),
'็ฆป':('li',),
'็พฟ':('yi',),
'ๆต':('zhen',),
'ๅฃ':('he',),
'็ฝ':('wang',),
'็ฅ':('guan',),
'่ฃฑ':('biao',),
'ๅท':('hao',),
'็ถ':('zao',),
'้น':('lao',),
'ๅงป':('yin',),
'ๅฟฟ':('fen',),
'่':('jie',),
'ๅ':('yan',),
'้':('qian',),
'้ช':('ke',),
'่':('ming',),
'่ค':('lv',),
'ๅก':('chang',),
'็ ':('yi',),
'้ฃ':('luo',),
'็จฝ':('ji', 'qi'),
'ๆ':('fu',),
'่ฑ':('chi',),
'ๅ
':('ke',),
'ๅ':('huai',),
'้น':('hu',),
'่':('pan', 'pang'),
'ๅ ':('ji',),
'ๅค':('pi', 'bi', 'bei'),
'ๅทจ':('ju',),
'้ปช':('can',),
'่ตณ':('jiu',),
'็ฏผ':('dou',),
'้ฝฟ':('chi',),
'่':('yao',),
'่':('tao',),
'ๆพ':('gan',),
'ๆ':('wa',),
'ๅฃ':('xiao',),
'็ฌฆ':('fu',),
'ๆซ':('pi',),
'่ฎ':('yu',),
'็ท':('zhu',),
'้บ':('zhong',),
'้ผพ':('han',),
'ๆ':('suo',),
'ๆ':('she',),
'็':('guan',),
'ๆ':('xia',),
'ไฝ':('ning',),
'ๅ
ข':('jing',),
'ๅญฆ':('xue',),
'็ฟฉ':('pian',),
'้นจ':('liu',),
'่ญ':('xiu', 'chou'),
'่ฏฑ':('you',),
'ไปณ':('pi',),
'่ทต':('jian',),
'็ฝพ':('zeng',),
'ๆ':('ze',),
'่':('lv',),
'ๆ ':('biao',),
'ไบ':('yu',),
'่ถ':('yue',),
'็':('lian',),
'้ฆ':('man',),
'ๅฒ':('cha',),
'่':('hao',),
'่ฐ':('mo',),
'ๅก':('kuang',),
'ๅผฉ':('nu',),
'็บจ':('wan',),
'ๆดต':('xun',),
'ๅถ':('si',),
'ๆ':('dong',),
'้
':('pei',),
'้ญ':('chi',),
'็ง':('miao',),
'่':('chang',),
'ๆฉ':('jue',),
'ๆฑ':('jiang',),
'่ฟ':('lian',),
'็ฃ':('xuan',),
'ๅปจ':('xie',),
'ๆฌ':('qie',),
'็ธ':('xiang',),
'้ฉป':('zhu',),
'ๅฅฝ':('hao',),
'้ฑฟ':('you',),
'ๆ
':('jue',),
'ๆบ':('gai',),
'ๅ':('ga',),
'็ช':('yao',),
'็ฒ':('po',),
'ๆ':('huo',),
'ๅฃ':('yuan',),
'้ฅ':('yao', 'yue'),
'็ฐช':('zan',),
'ๆฏ':('si',),
'ๆพณ':('ao',),
'่ ฒ':('juan',),
'ๅคผ':('kuang',),
'้ธพ':('luan',),
'่ง':('chan',),
'ไป':('zhang',),
'็
':('huang',),
'็ญ':('kuang',),
'ๆ':('shu',),
'ไน':('qi',),
'็ก':('jiao',),
'้ค':('pu',),
'็ปฉ':('ji',),
'่ญ':('yan',),
'ๆฎ':('pu',),
'ไฟณ':('pai',),
'้น':('bao',),
'ๅฏป':('xun',),
'ไพ':('chi',),
'่ฒ':('mo',),
'ๅ':('ce',),
'็':('beng',),
'็ผ':('huan',),
'ๆ':('rui',),
'ๆฆ':('bang',),
'่ด':('fu',),
'็ค':('ba',),
'ๆจฑ':('ying',),
'่ฎฐ':('ji',),
'ๆฐต':('shui',),
'่ถด':('pa',),
'ๅบพ':('yu',),
'่น':('cuo',),
'ๆณ':('bo', 'po'),
'้':('bian',),
'็':('ang',),
'ๅฝ':('dang',),
'ๅ ':('die',),
'็ฃ':('du',),
'้ข':('ying',),
'ๅค':('chi',),
'็ฅง':('tiao',),
'้ซฆ':('mao',),
'่ซ':('yin',),
'่ฝณ':('lu',),
'้
ท':('ku',),
'็ธ':('gan',),
'่':('xi',),
'ๆฆ':('ju',),
'็':('sou',),
'้':('wen',),
'็ฎ':('ji',),
'ๆ':('zhi',),
'ๅฃ':('dai',),
'็ข':('yuan',),
'ๅบง':('zuo',),
'็ฌช':('da',),
'ๆขณ':('shu',),
'้ถ':('si',),
'ๅธผ':('guo',),
'็ชฟ':('long',),
'่':('wei',),
'ๆก':('an',),
'้':('bi',),
'็':('ai',),
'้ฃ':('ju',),
'่':('pin',),
'็ฅ':('er',),
'็งฉ':('zhi',),
'ๅญช':('luan',),
'ๆฝฒ':('shao',),
'่ฏต':('song',),
'ๅณป':('jun',),
'ๆ':('wu',),
'่':('lang',),
'ๅฒ':('qi',),
'่ฐ':('di',),
'ๆบ':('liu',),
'ๅคฉ':('tian',),
'ๆญ':('zha',),
'่ฌ':('xian',),
'ๆดฑ':('er',),
'้ธ':('di',),
'ๅฆพ':('qie',),
'ไน':('tuo',),
'ๆท':('xiao',),
'ๅ':('fu',),
'็':('liao',),
'ๆ':('han',),
'ๆฑ':('xun',),
'่ง':('shang',),
'ๅค':('qin',),
'่ซ':('hu',),
'่ฏ':('liang',),
'ๆณฐ':('tai',),
'้ท':('xian',),
'ๅ
น':('ci', 'zi'),
'็ปผ':('zong',),
'้ฉฟ':('yi',),
'่':('jia', 'qie'),
'็ฒ':('ba',),
'็ช':('tiao',),
'ๆ':('qi',),
'้ก':('bei',),
'็ข':('ban',),
'็ฐฆ':('deng',),
'ๆซ':('zhuo',),
'ๅผผ':('bi',),
'่':('yong',),
'่ฟ':('guo',),
'็ญ':('da',),
'ๅข':('o',),
'ๅฑฆ':('ju',),
'้ฅจ':('zhun', 'tun'),
'ๅฉช':('lan',),
'่ญ':('zhi',),
'็ถ':('yao',),
'็บ':('xi',),
'ๅฏฟ':('shou',),
'่':('ma',),
'่ช':('teng',),
'ๅ':('tong',),
'็ค':('jiang',),
'้บ':('qi',),
'่':('wo',),
'ไธ':('si',),
'ๆพ':('lan',),
'ๅก':('bi',),
'็ ':('li',),
'้ฃ':('zhang',),
'ๆญ':('zhao',),
'่ฌ':('cao',),
'ไบฒ':('qin', 'qing'),
'ๆจต':('qiao',),
'่ฎด':('ou',),
'็น':('yue',),
'้':('xiang', 'jiang'),
'ๅ':('xie',),
'็ป':('rong',),
'่':('bi',),
'ไฟ':('ping',),
'ๅค':('gu',),
'ๅงจ':('yi',),
'ๆฌ':('jian',),
'่ฏ':('yao',),
'ๅนฝ':('you',),
'็ฟผ':('yi',),
'ๆ':('ji',),
'่':('mao',),
'ๆฎ':('xun',),
'ไบ':('xie',),
'ๆ':('yu',),
'ๅฃ':('lie',),
'็ผฆ':('man',),
'ๆฏ':('min',),
'ไธฐ':('feng',),
'่ดฒ':('fei', 'pan', 'bi', 'fen', 'ben', 'lu', 'ban'),
'ๅธ':('quan', 'xuan'),
'ๅฐผ':('ni',),
'ๅ':('suo',),
'้':('jia',),
'ๅฟ':('te',),
'้ซ':('sui',),
'่':('sou',),
'ๆท':('fei',),
'็ฅ':('zao',),
'ๅฝฆ':('yan',),
'ๆฎ':('ju',),
'็
บ':('tui',),
'็ญพ':('qian',),
'ๆฐ':('lu',),
'่พ':('liang',),
'ๅ':('he', 'huo'),
'ๅบ':('lu',),
'ๆ':('she', 'zhe'),
'็ชจ':('yin', 'xun'),
'ๆผฑ':('shu',),
'็น':('cha',),
'้ฒผ':('fen',),
'ๅฎพ':('bin',),
'ๆง':('shuo',),
'้':('nie',),
'ๅ':('pian',),
'้ฝ':('ji',),
'้ฅ':('tao',),
'ๆน':('zhan',),
'่ฏ':('dan',),
'ๅ ':('yin',),
'่ซ':('chong',),
'็ธ':('li',),
'้กฟ':('dun',),
'่':('shu',),
'้':('xin',),
'ๅ':('wen',),
'้ผ':('nai',),
'่':('fu',),
'ไพ':('gong',),
'ๅฃ':('yan',),
'็ข':('zhuo', 'zuo'),
'ๆซ':('feng',),
'ไผฐ':('gu',),
'ๆฎณ':('shu',),
'่ฐฒ':('jue',),
'้ถ':('jie',),
'ๆ':('sha',),
'ๅ':('xiang',),
'็':('jie',),
'้':('liang',),
'ๅป':('qin', 'jin'),
'็ฝ':('guan',),
'ๅฃ':('hao',),
'ๆ':('zhuo',),
'่ต':('ze',),
'้ ':('kao',),
'ๅข':('ne', 'ni'),
'้
ค':('gu',),
'ๆฎ':('ban',),
'ๆฑฒ':('ji',),
'ๅท':('ao',),
'ๅฟป':('xin',),
'ๅงฟ':('zi',),
'่':('di',),
'ๆด':('yin',),
'็':('kan',),
'้':('qiong',),
'ๅพ':('xu',),
'้ฒ':('jie',),
'่':('zhe',),
'ๆ':('jue',),
'็ ':('cheng',),
'ๅฅ':('kong',),
'ๅจฉ':('mian',),
'็ฎจ':('tuo',),
'ๆญ':('da',),
'่พฐ':('chen',),
'็น':('die',),
'ๅบ':('chi',),
'ๅชพ':('gou',),
'ๆ':('lei',),
'็':('xia',),
'้น':('chun',),
'ๅญ':('jue',),
'่':('pei',),
'ๅ ':('bu',),
'้ข':('zhu',),
'ๅค':('feng',),
'ๅฏจ':('zhai',),
'ๆฌ':('xun',),
'ไนฑ':('luan',),
'้ฅฟ':('e',),
'็ฎ':('gu',),
'ๅด':('qi',),
'้':('zhu',),
'้':('kuo',),
'ๅฒฃ':('gou',),
'็ฆ':('jiao',),
'ๆบฏ':('su',),
'่ดฎ':('zhu',),
'ๅฐธ':('shi',),
'็ป':('shi',),
'็ฟ':('min',),
'้พ':('huan',),
'ๆก':('jie',),
'ไฟ
':('qiu',),
'ๆน':('mei',),
'่':('wei',),
'ๆ':('bin',),
'้ป':('nian',),
'ๅ':('cou',),
'ๅ':('cheng',),
'็':('yi',),
'ๆท':('cong',),
'ไฝ':('yi',),
'ๅญข':('bao',),
'็ฟฅ':('zhu',),
'้นค':('he',),
'ๅช':('ping',),
'่ฏญ':('yu',),
'ๆตฎ':('fu',),
'ๅป':('pi',),
'ๅฟ':('you',),
'ๅผฅ':('mi',),
'็บค':('qian', 'xian'),
'็จ':('can',),
'ๆต':('duo',),
'่ด':('you',),
'้ฒธ':('jing',),
'ๅพ':('ku',),
'่ฅ':('qiang',),
'ๆฏ':('gu',),
'่':('jiao',),
'้ญ':('wang',),
'ๅ':('yi',),
'่ง':('jue',),
'ๆ':('sheng',),
'็ง':('mu',),
'ๅจ':('shao',),
'่กซ':('shan',),
'้ช':('wei',),
'่นฏ':('fan',),
'ๆฐ':('duo',),
'่ณ':('wei',),
'ๆด':('geng',),
'้ฉท':('si',),
'็ณธ':('mi',),
'็ผ':('lang',),
'ๆข':('liang',),
'ๆบ
':('jian',),
'็ช':('qiao',),
'ๅ':('hua',),
'ๆ ':('qi',),
'ๆธ':('zhu',),
'่':('zhi',),
'ๆ':('gao',),
'้ขก':('sang',),
'้ฉ':('gou',),
'ๆฆซ':('sun',),
'็ช':('bie',),
'่ฐฎ':('jian', 'zen'),
'้ธบ':('xiu',),
'็ฟ':('ju', 'qu'),
'้พ':('bei',),
'่ท':('yue',),
'ไป
':('jin',),
'่':('gu',),
'ๆ':('xiao',),
'็ญ':('quan',),
'่ต':('zhui',),
'่':('bi',),
'ๅฉข':('bi',),
'็ปฅ':('sui',),
'้จ':('pu',),
'ๅช':('zao',),
'ไฟฏ':('fu',),
'่ฑ':('guang',),
'ๅป':('ji',),
'็พ':('jin',),
'ไพ':('zhi',),
'ๆ':('zhi',),
'่':('si',),
'่':('wen',),
'ๅฎ':('wan',),
'็ผ':('bian', 'pian'),
'้ฒ':('hou',),
'็':('pi',),
'ๅ ก':('pu', 'bao', 'bu'),
'็พค':('qun',),
'ๅฉ':('fen',),
'ๆต':('ni',),
'็ฝ':('shuang',),
'ๆ':('chou',),
'ๅฅ':('fen',),
'้':('huang',),
'็':('fei',),
'้':('shan',),
'ๆ':('shu',),
'ๅฟค':('wu',),
'้ฆ':('li',),
'ๅจ':('hai',),
'้ช':('xue',),
'่ฝฏ':('ruan',),
'ๆด':('chai',),
'ๅฑน':('yi',),
'ๅฝ':('piao',),
'ๆฎ':('mo',),
'่จ':('yan',),
'ๆถ
':('nie',),
'่ฐ':('chan',),
'ๆ':('wang',),
'็':('ji',),
'ๅ':('ru',),
'่พ':('zhe',),
'ๆด':('jiang',),
'่':('xi',),
'ๅง':('bin',),
'ๆชซ':('cha',),
'่ฒ':('niao',),
'ๅธธ':('chang',),
'ๅผ':('zhi',),
'้พ':('mai',),
'้ซ':('kuan',),
'็ก':('luo',),
'้ณ':('guan',),
'ๅ':('fa',),
'็ฏก':('cuan',),
'ๅฅข':('she',),
'ๅฆ':('gua',),
'็ฉ':('ju',),
'้จ':('yun',),
'ๅช':('pa',),
'ๆฝฎ':('chao',),
'่ฑ':('ling',),
'ๅฟ':('ge',),
'็พ':('gao',),
'่':('pu',),
'็จ':('cheng',),
'็ฐ':('lu',),
'ๅ':('huo',),
'้':('xing',),
'ๅ':('ka',),
'ไผ':('huo',),
'ๆบ':('ke',),
'่':('chuan',),
'็ช ':('ke',),
'้ธฃ':('ming',),
'็ฒค':('yue',),
'ๅฉ':('xu', 'wei'),
'ๆคญ':('tuo',),
'ไพฎ':('wu',),
'่ฐ':('ji',),
'ๆต':('po',),
'่ด':('shuo',),
'ๅฎถ':('jia',),
'้บธ':('fu',),
'ไน':('nai',),
'่ต
':('gai',),
'ๆ':('kui',),
'่':('chan',),
'ๅฑ':('bing', 'ping'),
'็ฟ':('ling',),
'่ฏ':('gua',),
'ไป':('fu',),
'ๆ':('kang', 'gang'),
'่ท':('shan',),
'ๅซ ':('li',),
'ๅณค':('jiao', 'qiao'),
'้ช':('shan',),
'้กท':('qing',),
'ๅฝน':('yi',),
'็ปธ':('chou',),
'ๅ
ฝ':('shou',),
'ๆฒ
':('yuan',),
'่':('wu',),
'็ข':('ai',),
'ๅ':('dao',),
'้':('ju',),
'ๆฐ':('chuan',),
'่':('gan',),
'้ชก':('luo',),
'้ฒฅ':('shi',),
'็ช':('qi',),
'ๆถฏ':('ya',),
'่ธฎ':('dian',),
'ๆณ':('zhi',),
'็ป':('bo',),
'ๆ':('cha',),
'็':('zhang',),
'ๅ':('hui', 'yue'),
'็':('pan',),
'ๆ':('chang',),
'ๆนฎ':('yan', 'yin'),
'่ฑ':('ying',),
'็ฉถ':('jiu',),
'้ปน':('zhi',),
'ๅป':('le',),
'้ฝ':('dou', 'du'),
'็พ':('bai', 'bo'),
'่ข':('mei',),
'็ฌ':('sun',),
'ๅ':('o', 'wo'),
'ไธ':('bing',),
'่':('gen',),
'่':('cu',),
'ๅฐฅ':('liao',),
'ไบฎ':('liang',),
'่ฐ':('kan',),
'ไฝ':('dian',),
'ๆฃ':('ling',),
'ๆ':('chai',),
'็ป':('yi',),
'ๆฅ':('jian',),
'ไฟ':('fu',),
'่':('kuo',),
'็
ง':('zhao',),
'้ช':('jia', 'ke', 'ha', 'ge'),
'้ฅท':('xiang',),
'้ฟ':('zhang', 'chang'),
'่ฐ':('yu',),
'็':('yi',),
'็ผข':('yi',),
'ๆฒซ':('mo',),
'็ช':('zhu',),
'ไธฌ':('qiang', 'pan'),
'็บท':('fen',),
'ๆฑ':('ting',),
'ๆฉ':('gan',),
'่':('she',),
'้ณ':('sao',),
'ๅฟ':('ren',),
'ๅ':('ke',),
'้':('yun',),
'ๆ':('tuo',),
'ๅฝข':('xing',),
'็ฏฅ':('li',),
'ๅฆ':('la',),
'้จ':('ao',),
'่ฟญ':('die',),
'ๆฅฎ':('chu',),
'ๆฒ':('gao',),
'ๅปท':('ting',),
'ๅฟ':('wu',),
'่พ':('lu',),
'็ฐ':('gui',),
'ๅ':('fu',),
'้':('qiao',),
'่':('zhou',),
'ๆผญ':('mang',),
'่บฌ':('gong',),
'่ด':('wu',),
'้ผ':('sun',),
'่ต':('lin',),
'ๆฟ':('lian',),
'ๆ':('pei',),
'ๅฑ':('wu',),
'้':('bang', 'pang'),
'่ท':('zhi',),
'่ฏ':('cheng',),
'่ฑซ':('yu',),
'ๆด':('shuan',),
'ๅฅน':('ta',),
'็ผ':('pan',),
'ๆฒ':('qin',),
'ๆ':('xi',),
'้':('rui',),
'ไพ':('dong',),
'ๆฐ':('nai',),
'่':('hao',),
'้ฒก':('li',),
'ๅฆฃ':('bi',),
'้ชฅ':('ji',),
'ๅง':('shang',),
'็ฆ':('qi',),
'ๆถซ':('guan',),
'็ช':('zhao', 'zhua'),
'ๅคธ':('kua',),
'้พ':('yu',),
'ๆ
':('ci',),
'ๅ':('ya',),
'ๆ':('pu',),
'ๅนข':('zhuang', 'chuang'),
'็ซฅ':('tong',),
'ๅฆ':('you',),
'็ฉ':('hui',),
'ๅช':('jian',),
'็ฉบ':('kong',),
'็พ':('zai',),
'่':('qian',),
'ๅพ':('hou',),
'็ฌ':('hu',),
'็':('han',),
'ๆฆ':('ju',),
'ๆ':('guo',),
'ๅจฅ':('e',),
'ๅฉ':('qian',),
'็จ':('yun', 'yu'),
'ๆธญ':('wei',),
'่ฐ':('xun',),
'่ด':('wo',),
'็ น':('ai',),
'่ฑ':('huo',),
'ๆป':('pang',),
'ๅต':('mei',),
'ๆ':('fu',),
'็':('he',),
'ๆ':('lian',),
'ๆ':('sun',),
'่':('bao',),
'้ปข':('qu',),
'ๅฏค':('wu',),
'่ตซ':('he',),
'ๆฐ':('gan',),
'ๆด':('yuan',),
'็งธ':('jie',),
'ๆ':('diao',),
'็':('ming',),
'้':('chan',),
'้':('tu',),
'ๆ':('ta',),
'็ดข':('suo',),
'้ฆฅ':('fu',),
'ๅง':('lie',),
'็ฆ':('zi',),
'ๆขฏ':('ti',),
'ๅผ':('hou',),
'้พ':('zan',),
'ๆก':('guang',),
'ๆ':('hu',),
'ๅท':('wei',),
'ๆฏ':('bi',),
'่':('qia',),
'็ฟก':('fei',),
'ๅฆ':('tan',),
'็ฉ':('heng',),
'ไปซ':('mu',),
'ๆฒ':('qiao',),
'็ฅบ':('qi',),
'้ฝ':('min',),
'็พ':('huan',),
'่':('bo',),
'ๅฒ':('ji',),
'่ฐ':('chan',),
'็บ ':('jiu',),
'ๅคฅ':('huo',),
'ๆฑ':('zhu',),
'่ฐ':('you',),
'่ด':('yao',),
'้ชธ':('hai',),
'้ผ':('mu',),
'่':('cao',),
'ๆ':('ao',),
'็':('yang',),
'ๆฑ':('han',),
'่ง':('gu',),
'ๅฃค':('rang',),
'้ฆ':('wei',),
'ๆฐ':('yue',),
'่ณ':('yan',),
'ๆด':('zhui',),
'้ฟ':('bi',),
'ๆข
':('mei',),
'ๆ':('jue',),
'่':('ci', 'zi'),
'็':('ai',),
'ๅ':('zha',),
'็':('kang',),
'ๆธ':('shen',),
'ๅฎฃ':('xuan',),
'้ขฅ':('ru',),
'ๅง':('xuan',),
'็ฆ':('shou',),
'ๆณ':('lu',),
'้ธถ':('si',),
'็ป':('zhan',),
'ๅผ':('sha',),
'ไป':('ren',),
'่ฏ':('he',),
'็':('lai',),
'ไน':('guai',),
'็ปก':('xiao',),
'็ฉ':('shou',),
'่ฑ':('jia',),
'ๅทท':('xiang', 'hang'),
'้ซน':('xiu',),
'้ฝ':('te',),
'ๅฟ':('zao',),
'็พ':('wu',),
'่ฒ':('diao',),
'็ผ':('hui',),
'็':('guo',),
'้':('fan',),
'ๆ':('xie',),
'้ฌฃ':('lie',),
'ๅฉ':('li',),
'ๆฑ':('yu',),
'ๆต':('chu',),
'ๅบถ':('shu',),
'็จน':('zhen',),
'ๅพ':('he',),
'่น':('pian',),
'่ก
':('xin',),
'่':('wei',),
'ๆ':('jing',),
'ๅฅ':('zou',),
'ๅ':('zhuo',),
'็':('zan',),
'่':('bao',),
'ๅฟ ':('zhong',),
'้ณข':('li',),
'ๅจ':('dao', 'tao'),
'่ฝซ':('ren',),
'ๆทฌ':('cui',),
'ๆฐ':('nai',),
'ๅฝ':('ming',),
'็ผ':('gu',),
'้
ฟ':('niang',),
'้':('tui',),
'็
':('chou',),
'้':('jiu',),
'ๅผ':('bi',),
'่':('she',),
'่ธ':('chuo',),
'็':('men',),
'็':('sheng',),
'ๅบ':('fei',),
'ๆข':('jiao',),
'็ซ':('xuan',),
'ๆผช':('yi',),
'้ฎ':('jian',),
'ๅ ฐ':('yan',),
'็ฒณ':('jing',),
'้ฌฒ':('li', 'ge'),
'่ถ':('luo',),
'่ดพ':('jia', 'gu'),
'้':('ling',),
'ๆ':('shou',),
'่':('cheng',),
'ๆก':('tong',),
'ๆน':('jian',),
'ๅ
':('yan',),
'ๅ':('jian',),
'็ฏ':('gou',),
'ๆฅ':('cha', 'zha'),
'็ญฒ':('shao',),
'่ฏฝ':('fei',),
'ไปฟ':('fang',),
'ๅ':('kui',),
'็':('qiu',),
'้ฆ':('guan',),
'ๅฒ':('ya',),
'้พ':('chuo',),
'ๆ':('hun',),
'ๆฐ':('mang',),
'ไบ':('wu',),
'็ช':('cuan',),
'้ธ':('niao',),
'ๆ ':('you',),
'็ผต':('zuan',),
'้บด':('qu',),
'ๆน':('gai',),
'่ธ':('zheng',),
'ๅ':('e',),
'็':('ke',),
'็ง':('gan',),
'็ฟ':('yi',),
'ๆ':('ni',),
'ๆฏ':('du',),
'ๅซ':('zhang',),
'่งฆ':('chu',),
'่ฟช':('di',),
'็ฌ':('du',),
'็ปด':('wei',),
'่นฟ':('cuan',),
'ๅ':('dao',),
'้':('xiao',),
'ๅ ':('dui',),
'็ฒ':('fen',),
'่':('chi',),
'่ค':('he',),
'้':('gai',),
'้ช':('liu',),
'่ก':('gu',),
'ๆข':('ji',),
'ๆท':('zhi',),
'่ถ':('ye',),
'ๆฆป':('ta',),
'ๆ':('ba',),
'ไป':('shi',),
'้':('man',),
'ๅฑ':('shu', 'zhu'),
'่
':('cou',),
'่ค':('you', 'qiu'),
'ๅซ':('fu',),
'ๅฏ':('an',),
'ๅทณ':('si',),
'ๆถ':('wu', 'e'),
'ๆบ':('zhi',),
'ๆฑพ':('fen',),
'ๅ':('ji',),
'ๅ':('long',),
'้ข':('ling',),
'่':('pai',),
'ไพ':('mou',),
'็ฎ':('kong',),
'ๆถจ':('zhang',),
'ๅชฒ':('pi',),
'ๆจฝ':('zun',),
'่ฎผ':('song',),
'้':('dun',),
'ๅ':('ken',),
'็':('kuang',),
'้
':('ji',),
'็ป':('ban',),
'ๆ':('qing',),
'็':('jue',),
'็ฅ':('qu',),
'็ฝ':('gu',),
'้ณ':('lin',),
'ๆฃ':('san',),
'ๆญง':('qi',),
'็งฐ':('cheng', 'chen'),
'่ท':('he',),
'่ฅป':('pan',),
'่ฝฟ':('jiao',),
'่':('er',),
'ๆถ':('ti',),
'่ฐ':('e',),
'็':('chang',),
'ๅช':('yuan',),
'็':('shen',),
'่พฉ':('bian',),
'ๆดช':('hong',),
'ๅฌ':('zhuo',),
'็บณ':('na',),
'ๅฐด':('gan',),
'่ถ':('cha',),
'่บ':('lin',),
'ๆฒฟ':('yan',),
'ๅ':('san',),
'ๅ
':('xiu',),
'ๅฟ':('dao',),
'่':('xian',),
'ๆฉ':('tuo',),
'ๆฑ':('qi',),
'ๅ':('mai',),
'ๆก':('wo',),
'ๆฅ':('ri',),
'ๆถ':('chui',),
'่น':('xie',),
'็':('zhu',),
'ๅ':('duo',),
'ๆ':('jian',),
'ๆ':('bo',),
'้':('dou',),
'็ข':('chen',),
'ๆ ':('kou',),
'ๆค':('che',),
'้ฌ':('wu',),
'ๅฎ':('kua',),
'็ฌฑ':('gou',),
'้ชฐ':('tou',),
'ๅฆฒ':('da',),
'้ฒด':('gu',),
'ๆคฝ':('chuan',),
'่ขผ':('ge',),
'้
':('mei',),
'็ฏ':('zhuan',),
'ไน':('pang',),
'่ต':('dan', 'tan'),
'้':('ao',),
'ๆกง':('gui', 'hui'),
'่ฏฆ':('xiang',),
'ไปจ':('sa',),
'่ทช':('gui',),
'ๅ
ญ':('lu', 'liu'),
'็ฌ':('jin',),
'ๅญฑ':('can', 'chan'),
'้นณ':('guan',),
'ๆฃผ':('fen',),
'็':('li',),
'้':('chu',),
'ๅจ':('rao',),
'ๅฐ':('zun',),
'ๆ':('han',),
'ๆช':('lei',),
'ๅ':('rong',),
'้':('xi',),
'ๅฎ':('wan',),
'็':('ju',),
'้ข':('e',),
'่ก':('qian',),
'็ซ':('mei',),
'ๆฐช':('ke',),
'็ฏ':('feng',),
'้ฎ':('ruan',),
'ๅดด':('wei', 'wai'),
'ๆท':('chu',),
'่ถ':('bo',),
'ๆถฟ':('zhuo',),
'ๅป':('lian',),
'ๆ
':('huang',),
'่':('zang', 'cang'),
'่ฟ':('ya',),
'ๆต':('xun',),
'ๅ':('pou',),
'ๅน':('fu',),
'่ ':('ji', 'qi'),
'่ค':('rui',),
'ๆปฉ':('tan',),
'่ฝจ':('gui',),
'ๅซ':('jiao',),
'้ญ':('guo',),
'ๅฏ':('ng',),
'ๆถ':('fu',),
'่น':('yong',),
'ๆบ':('zhe',),
'ๆนพ':('wan',),
'้':('xie',),
'ๅ':('shan',),
'้ช':('luo',),
'ๅพ':('hen',),
'ๆผ':('li',),
'่ข':('tan',),
'ๆ ':('lve',),
'่ฃ':('qiang',),
'ๆค':('jin',),
'ๆ ฝ':('zai',),
'็':('lan',),
'ๅฅ':('ji', 'qi'),
'้น':('wu',),
'็ณ':('hu',),
'่':('ning',),
'ไฝ':('ti',),
'ๆฃ':('bang',),
'่ฑ':('shi',),
'ๅ':('shi',),
'็ญ':('shai',),
'ๅง':('jiang',),
'ไฟจ':('yan',),
'้ฏ':('zhuo', 'shu'),
'ๅด':('kong',),
'ๆถ':('su',),
'่ฐ':('xie',),
'ๅฒ':('dao',),
'็ค':('meng',),
'ไบง':('chan',),
'ๅฌ':('zuo', 'chuai'),
'็ฏ':('fan',),
'็ขณ':('tan',),
'ๅจด':('xian',),
'ๆฒป':('zhi',),
'่บ':('chong',),
'ๆ':('shang',),
'่':('su',),
'ๆฑ':('xi',),
'ๅ':('dan',),
'้':('gou',),
'ๅ':('bo',),
'้น':('wu',),
'่กจ':('biao',),
'้ญ':('jiu',),
'็
ฎ':('zhu',),
'ๅฃณ':('ke', 'qiao'),
'็ฝฒ':('shu',),
'ๆบ':('na',),
'่ฟฝ':('zhui',),
'ๅ':('ju',),
'ๆ':('chuai',),
'ๆ':('xi',),
'ๆ ':('shuan',),
'้':('tian',),
'ๆค':('hu',),
'ๆขจ':('li',),
'็ญ':('sui',),
'้ฒฐ':('zou',),
'้':('mei',),
'ๅ':('yan',),
'ๆฟ':('bin',),
'ๆฃ':('zi',),
'้ซ':('xin',),
'ๅญ':('ni',),
'ๅญต':('fu',),
'็':('cong',),
'้':('xiao',),
'ๅฐ':('jiang',),
'็ข':('diao',),
'้':('mang',),
'้บ':('she',),
'่ก':('bang',),
'่ฅ':('jie', 'gai'),
'ไพง':('ce',),
'ๆฐฆ':('hai',),
'็ซ':('yi',),
'ๆจช':('heng',),
'ๅฌ':('wu',),
'็ฏ':('huan',),
'้ฎ':('dai',),
'็ฆณ':('rang',),
'้ธฒ':('qu',),
'ๅฌด':('ying',),
'ๆท':('jia',),
'ไผผ':('si', 'shi'),
'ๆฎฟ':('dian',),
'ๅ':('pin',),
'็':('bi',),
'็ฅ':('qi',),
'ๆ':('wu',),
'่':('he',),
'ๅ':('li',),
'็':('lao', 'luo'),
'็ป':('jue',),
'ๅก':('sai', 'se'),
'ๆฅ':('yong',),
'่ค':('hun',),
'ๅซ':('nie',),
'ๅฏ':('ke',),
'ๅ':('ka',),
'ๅ':('e',),
'็ผ':('ti',),
'้ฒ':('ping',),
'ๅฆ':('ma',),
'้ช':('li',),
'ๅ':('hou',),
'็':('liu',),
'ๅธ':('di',),
'้ฌ':('huan',),
'ๆฆจ':('zha',),
'็ญ':('biao',),
'ๅ
':('yuan',),
'็':('yu',),
'่':('hu',),
'้':('cheng',),
'้ฃ':('fei',),
'้ซ':('deng',),
'็ฌ':('cuo',),
'ๅฑฑ':('shan',),
'ๅฉต':('chan',),
'้':('fa',),
'้':('pang',),
'่':('meng',),
'ๆฆ':('rong',),
'็':('fen',),
'ๅข':('xu',),
'็ฌ':('chi',),
'่ฎฉ':('rang',),
'ๅฌ':('ting',),
'็ฏ':('jiong',),
'็ชณ':('yu',),
'ๆท':('xie',),
'ๆบป':('ta',),
'็':('huang',),
'้ณ':('huang',),
'่':('dan',),
'ไฟ':('yong',),
'ๆก':('jie',),
'็':('gong',),
'ๅ
':('dang',),
'ๅซ':('hu',),
'้ญ':('bi',),
'่น':('zu',),
'ไปป':('ren',),
'่ทฝ':('ji',),
'็ฐ':('cu',),
'้พ':('tiao',),
'้ฆ':('sou',),
'ๆ':('yang',),
'่':('sha', 'suo'),
'ๅ':('shi', 'chi'),
'็ฒ':('tiao',),
'่ฃ':('yi',),
'่ฐง':('mi',),
'็ญ':('beng',),
'้ฌ':('huo',),
'ๅฎ':('si',),
'็ผฑ':('qian',),
'ๆน':('yi',),
'่ธ':('yun',),
'ๆดฝ':('qia',),
'ๅ':('ti',),
'็':('jia',),
'้ญ':('liang',),
'ๆ':('xuan',),
'็':('yuan',),
'ๅ':('kan',),
'ๆฃ':('kou',),
'่ฟฆ':('jia',),
'ๅญ':('zhuan',),
'ๅฝฑ':('ying',),
'็ปฐ':('chuo', 'chao'),
'้ฉณ':('bo',),
'็ฃด':('deng',),
'ๆณผ':('po',),
'่กฟ':('jin',),
'้':('xu',),
'็
':('gui',),
'้':('juan',),
'ๅธ':('fan',),
'้ฌ':('quan',),
'่':('su',),
'็':('han',),
'็จ':('ke',),
'่ฅ':('fei',),
'ๆ ช':('zhu',),
'ๅฌ':('hu',),
'ๅคด':('tou',),
'่จพ':('zi',),
'็':('zhuo', 'zhe', 'zhao'),
'ๅซ':('ji',),
'่':('xi',),
'ๆ':('di',),
'ไป':('lun',),
'่ฏ':('kuang',),
'ๆฅ':('xie',),
'็':('ju',),
'ๅ':('zuo',),
'่ ':('fu',),
'ไนฆ':('shu',),
'้ญ':('ming',),
'ๅฏ':('kai',),
'็ฎ':('jian',),
'ๆถ':('jing',),
'่น':('ping',),
'ๆบ':('kai',),
'็':('xian',),
'ๅฎ':('shou',),
'ๆ':('peng',),
'้ข':('jia',),
'ๆ':('yi',),
'่':('xiong',),
'้':('zhe',),
'ๅ':('ze',),
'ๅฐ':('chang',),
'่ดง':('huo',),
'่ธ':('you',),
'ๆฐฝ':('tun',),
'่ถผ':('jian',),
'ๅ':('qian',),
'ๅต':('ji',),
'็ป':('xi',),
'็ฃ':('lei',),
'ๆ':('lin',),
'่น':('nie',),
'ๅ':('bian',),
'็ฅ':('sui',),
'ๆฃ':('dao',),
'้
ฏ':('zhi',),
'็ฟฐ':('han',),
'ๆธ':('zha',),
'่ฝป':('qing',),
'ๆทผ':('miao',),
'่ฅฟ':('xi',),
'็ฆ':('jin',),
'ๅ':('jing',),
'ๅ':('qing',),
'ๆฎ':('piao',),
'่':('nai',),
'ๆ':('zhen',),
'็ผ':('fu',),
'ๆฌข':('huan',),
'ๆฆ':('kui',),
'ๆช':('wei',),
'็บฏ':('chun',),
'ๅด':('zui',),
'่คถ':('zhe',),
'ๆป':('xing',),
'็ก':('chu',),
'้ณ':('sai',),
'ๅฟ
':('bi',),
'ๅ':('su',),
'ๆ':('kong',),
'่':('bei',),
'ๆ':('ye',),
'ๅฅ':('jiang',),
'ๆฉ':('zao',),
'ๅฃซ':('shi',),
'็ตฎ':('xu',),
'ๅณ':('zha',),
'้ต':('yun',),
'่ทน':('xian',),
'ๆฝบ':('chan',),
'่ฝ':('shu',),
'ๅข':('chi',),
'ๅบ':('zhuang',),
'ๆ ':('dong',),
'ๆ':('cuo',),
'่':('ju',),
'็ฒ':('zhan',),
'้':('guang',),
'ๅ':('za',),
'้':('dui',),
'่ฐฃ':('yao',),
'ไผฅ':('chang',),
'็ฑ':('yan',),
'้ฐ':('tai',),
'็ต':('chi',),
'ๆดน':('huan',),
'ๆฝ':('wan',),
'้':('ge',),
'ไน':('fa',),
'่
':('fu',),
'ๆ':('cha',),
'็ฉ':('sui',),
'่ฏข':('xun',),
'ไปค':('ling',),
'ๆง':('zhi',),
'่ช':('hui',),
'ๅ
ฑ':('gong',),
'็ด':('zhi',),
'ๆณธ':('lu',),
'ๆผ':('pin',),
'่ฟ':('chai',),
'้':('xiu',),
'่':('qi',),
'ๅฎ':('zong',),
'็ ':('zhuan',),
'ๅ':('jun',),
'ๆฆ':('peng',),
'็พฏ':('jie',),
'็ณ':('gan',),
'ๆฎท':('yan', 'yin'),
'่ธบ':('jian',),
'ๅฃ':('bi',),
'็ฅ':('si',),
'็ฝ':('qing',),
'ๅ':('zai',),
'็':('fan',),
'่ง':('xi',),
'ๆญ':('ge',),
'่':('xian',),
'็ณ':('cao',),
'ๆปฅ':('lan',),
'่จ':('jue',),
'้ซญ':('zi',),
'ๅณ':('ai',),
'็ฒ':('sheng',),
'ๆกถ':('tong',),
'้ช':('ma',),
'ๅพ':('jing',),
'ๅ':('jie',),
'ๆค':('liang',),
'ๆผ':('lou',),
'่':('mang',),
'ๅ':('chu',),
'่ดฃ':('ze',),
'ไธฅ':('yan',),
'่ง':('jian',),
'ๅฒ':('jing', 'jin'),
'็ต':('zhai',),
'้ด':('bei',),
'่ถธ':('dun',),
'ไบบ':('ren',),
'ๆฝ':('hu',),
'่ผ':('chun',),
'้น
':('e',),
'ๅ':('sheng',),
'็':('yi',),
'ไฝ':('zhu',),
'ๆ':('ju',),
'้':('diao',),
'ๆตฃ':('huan',),
'ๆง':('peng',),
'้ฅซ':('yu',),
'้ณ':('biao',),
'่
ป':('ni',),
'ๅค':('zhi',),
'ๅ':('cong',),
'้':('yu',),
'ๆฆ':('xie',),
'ไผ':('ji',),
'่':('shi',),
'้พ':('long',),
'ๅ':('li',),
'่ฎก':('ji',),
'ๅฐ':('dao',),
'ๅด':('wu',),
'ไธธ':('wan',),
'ๆป':('chen',),
'่ดบ':('he',),
'่พ':('guo',),
'ๅท
':('dian',),
'้':('xun', 'huan'),
'ๆก':('zhuo',),
'ๆ':('shuai',),
'้น':('hu', 'gu'),
'ๅ':('wu',),
'ๆฏก':('zhan',),
'ๆฉ':('jiu',),
'ๅซซ':('mo',),
'็ฝฎ':('zhi',),
'็
ฒ':('bao',),
'้ต':('min',),
'่ฟน':('ji',),
'ๆพ':('song',),
'ๆฐ':('shi',),
'่พ':('zi',),
'ๆ':('yi',),
'็':('wei',),
'ๆช ':('qing',),
'่ธฃ':('bo',),
'ๆฒค':('ou',),
'่ง':('lou',),
'ๆจ':('ji',),
'ๅพฎ':('wei',),
'็ฑ':('nao',),
'ๅฒ':('chong',),
'็ต':('dian',),
'้ด':('gu',),
'่ผ':('jing',),
'้ญ':('kui',),
'ๅ':('jia',),
'็':('quan',),
'่':('ti', 'yi'),
'ๆ':('lan',),
'็ก':('qiao',),
'็':('niu',),
'็':('se',),
'ๆง':('nv',),
'็ฃฌ':('qing',),
'ๅฑ':('wei',),
'็ฐ':('zheng',),
'ๅต':('bo',),
'็ด':('ling',),
'่กท':('zhong',),
'็ช':('tu',),
'ๅ ':('tang',),
'ๅ':('fen',),
'้':('pei',),
'ๅ':('diao',),
'ไธ':('yu',),
'ๆ':('yi',),
'ๆ':('si',),
'ๅฆ':('jin',),
'้':('kuang',),
'ๅ':('kui',),
'ไพฃ':('lv',),
'่ฉ':('chi',),
'็ฎซ':('xiao',),
'ๆช':('tang',),
'็ณ':('tong',),
'ๆฆท':('que',),
'ไผธ':('shen',),
'ๅซ':('jia',),
'้':('cai',),
'่ฏ':('di',),
'่ท':('jia',),
'ๆ':('juan',),
'่':('xiang',),
'็ป':('gei', 'ji'),
'ๆณฅ':('ni',),
'่ตค':('chi',),
'ๆฉ':('nang',),
'ๅทฏ':('qiu',),
'้ต':('an',),
'่ฝ':('neng',),
'ๆพ':('liang',),
'ๅฎ':('mian',),
'็ผ':('xiang',),
'้ข':('song',),
'้':('zhao',),
'ๅธ':('zhi',),
'้':('kun',),
'ๆถค':('di',),
'ๆจ':('cheng',),
'็ ญ':('bian',),
'็ฑ':('ai',),
'ๅฒ':('ao',),
'็ต':('pi',),
'้':('jing', 'xing'),
'่ก':('yan',),
'็ฅ':('zhi',),
'ๅฟ':('wang',),
'่ฃข':('lian',),
'ๆ
ง':('hui',),
'่ฆ':('ku',),
'้ณ':('jin',),
'ๅต':('he', 'ke'),
'็ด':('chi',),
'่ป':('di',),
'ๅผ':('yi',),
'็ฎ
':('bi',),
'ๅ':('suo',),
'ๆพ':('shu',),
'่ธ':('chou',),
'ๆ':('ban',),
'่':('tian',),
'ๅบ':('dian',),
'้ฆ':('xiang',),
'ๅ':('ning',),
'ๅ':('gong',),
'่ฎฅ':('ji',),
'ๆช':('nuo',),
'้คฎ':('tie',),
'็ณ':('bing',),
'้ฒ':('lou', 'lu'),
'ๆบท':('hun',),
'่ดถ':('kuang',),
'ๆฟ':('min',),
'่พ':('pi',),
'็ฑ':('zhou',),
'ๅฏ
':('yin',),
'้':('gu',),
'ๅ':('liang',),
'ๆ':('reng',),
'ๅญ':('fu',),
'ๆทก':('dan',),
'้ฑ':('wei',),
'ๅณ':('su',),
'ไปท':('jia',),
'ๅ':('li',),
'ไบ':('er',),
'ๅค':('su',),
'็ข':('dian',),
'่ง':('qu',),
'ๆจ':('nin',),
'็ผญ':('liao',),
'ๅฆฎ':('ni',),
'็ฑ':('you',),
'้ฐ':('yu',),
'ๅฒ':('kai',),
'ๆคน':('zhen', 'shen'),
'ๆฝ':('xiu',),
'้ญ
':('mei',),
'้
':('you',),
'็':('shan',),
'ๆง':('cha',),
'ๆ':('qi',),
'็':('ying',),
'็':('mou', 'mu'),
'ๆกฃ':('dang',),
'่ฟข':('tiao',),
'่ช':('dang',),
'ๅฝญ':('peng',),
'้ฉฏ':('xun',),
'ๅต':('luan',),
'็ด':('bi',),
'ๆผ':('man',),
'่ฟ':('nai',),
'ๅธ':('shi',),
'ๅ':('yao',),
'็':('lu',),
'ๅ':('kan',),
'ๆ':('cheng',),
'ๆ':('tou',),
'่':('ying',),
'ๅพ':('de', 'dei'),
'็ฐ':('duan',),
'็จ':('zhi',),
'ๅ':('yuan',),
'ๆฆ':('nuo',),
'่ฉ':('jian',),
'ๆช':('jie',),
'ๅฐ':('shua',),
'็ณ':('man',),
'้ฒ':('fang',),
'่ฐถ':('chen',),
'ๅณ':('mao',),
'่ท':('ba',),
'ไป':('reng',),
'่ฏ':('zhao',),
'็ฃ':('gun',),
'ๅฉ':('hun',),
'ๆณก':('pao',),
'่ต ':('zeng',),
'ๆฉ':('cheng',),
'ๅทซ':('wu',),
'็กฎ':('que',),
'้ฑ':('yi',),
'ๅณ':('deng',),
'ๆฑถ':('wen',),
'่ฃน':('guo',),
'้บ':('ji',),
'ๅฎ':('gui',),
'็':('miao',),
'้':('ga',),
'ๅ':('gang',),
'ๆด':('yang',),
'้ผ':('tao',),
'ๅ ':('yin',),
'้':('ben',),
'ๆถ ':('wei',),
'ๆจ':('han',),
'็ต':('jue',),
'ๆ น':('gen',),
'็ป':('fu',),
'ๅ':('jun',),
'่':('feng',),
'็ฝ':('luo',),
'้ณ':('bie',),
'ๅง':('pin',),
'็':('zhen',),
'ๆฅฃ':('mei',),
'่ฆ':('mo',),
'ๅฑ':('gua',),
'็ฐ':('tan',),
'่ฝท':('hu',),
'่ป':('hong',),
'็พ':('ji',),
'ๅด':('lao',),
'็ฆ':('shan', 'chan'),
'ๅ':('yuan',),
'็':('yu',),
'่ฐ':('chen',),
'่':('zi',),
'ๅฒ':('gang',),
'็ผ':('bian',),
'็':('she',),
'ๆฆ':('meng',),
'่ฉ':('fu',),
'็บซ':('ren',),
'ๅฐฌ':('ga',),
'้ฒ':('qie',),
'ๅ':('cha',),
'็':('ban',),
'่':('ling',),
'ๅฝ':('tuan',),
'็ฏ':('gao',),
'ๅฅ':('xi',),
'ๅ':('bian',),
'ๆฟก':('ru',),
'่กค':('yi',),
'ๆฉ':('kai',),
'่จ':('meng',),
'็ญฎ':('shi',),
'็ฒ':('she',),
'่ฏน':('zou',),
'ๅบ':('pi',),
'็จ':('fu',),
'ๅ':('pi',),
'ๆ ':('lan',),
'็ช':('jiong',),
'็':('huang',),
'้':('su',),
'่ง':('xiao',),
'ๆจ':('peng',),
'้ฐ':('xi',),
'้ด':('bing',),
'ๆฌน':('yi', 'qi'),
'ไน':('zhi',),
'่ต':('ji',),
'ๆ':('liu',),
'่':('wan',),
'ๅซ':('lei',),
'ๅ':('ji',),
'ๆง':('cui',),
'้นซ':('jiu',),
'่ช':('zi',),
'ๅต':('bing',),
'่ป':('meng',),
'่ฟ':('yu',),
'ๆ':('shu',),
'่':('luan',),
'ๅ':('duo',),
'็ ':('yan',),
'้':('dun',),
'ๆฐข':('qing',),
'่ฉ':('qin',),
'ๆช':('guai',),
'ๅฐ':('de', 'di'),
'็ณ':('dai',),
'่พ':('xi',),
'ๅฃ':('yong',),
'ๅ':('mian',),
'่ง':('gou',),
'ๆ':('cai',),
'็ซ':('zhan',),
'้':('jing',),
'ๅ':('ji',),
'ๆปก':('man',),
'ๆฉ':('ze', 'zhai'),
'่จ':('qian', 'xun'),
'ๅฟซ':('kuai',),
'ๅณ':('you',),
'ๆพ':('zhao',),
'ๅพ':('wang',),
'็ฌ':('du',),
'้ฒ':('fang',),
'ๅฆ':('wang',),
'้':('long',),
'ๅ':('lin',),
'ๆจ':('tui',),
'็ต':('fen',),
'้ด':('li',),
'่ฎธ':('xu',),
'ๆฝ':('cha',),
'้น':('bo',),
'็':('pen',),
'็':('yang',),
'่':('lv',),
'ๆ':('qin',),
'็':('jing',),
'้ฃ':('biao',),
'ๅ':('shi',),
'ๆญฃ':('zheng',),
'่ช':('lu',),
'้ฅฏ':('jian',),
'ๅฑ':('xue',),
'่ตท':('qi',),
'่ป':('nan',),
'่ฟ':('tui',),
'ๆ':('gua',),
'้':('la',),
'ๅ':('she',),
'้':('sui',),
'ๅพ':('lai',),
'ๆ':('kang',),
'่':('nai',),
'ๆบ':('ming',),
'้ ':('zao',),
'ๅผช':('jing',),
'ๆฌ':('jian',),
'ๆธด':('ke',),
'็จผ':('jia',),
'็':('zhen',),
'ๅ':('la',),
'้':('na',),
'็ง':('ke',),
'็ฟ':('xi',),
'ๅน':('man',),
'่':('miao',),
'ๆต':('zhuo',),
'ๅก':('fan',),
'้ฃ':('xi', 'xian'),
'ๅทฉ':('gong',),
'ๆซ':('bei',),
'็ฝช':('zui',),
'่กฐ':('shuai',),
'ๆณณ':('yong',),
'่นด':('cu',),
'็ท':('juan',),
'ๅถ':('qi',),
'้ฉผ':('tuo',),
'็ปฟ':('lu', 'lv'),
'ๆ':('xuan',),
'ๆฒ':('shen',),
'ๅ':('jia', 'gu'),
'่ฐณ':('yan',),
'้ธฟ':('hong',),
'่':('zhang',),
'่':('gu',),
'ๆฉ':('qiao',),
'ไฟ':('jun',),
'ๅฏ':('han',),
'้ป':('qian',),
'่':('hui',),
'ๆ':('bo',),
'ไน':('ye',),
'้ก':('tuo',),
'้ฅ':('ye',),
'ๅฉง':('jing',),
'็ณจ':('jiang',),
'ๆญ':('bai',),
'ๆฅฑ':('cou',),
'่ฏฒ':('hui',),
'็น':('wan',),
'้':('cu',),
'ๆธ':('yuan',),
'ๅบ':('di',),
'ๆฆ':('zhen',),
'็ฃ':('you',),
'็ฎง':('qie',),
'่ญ':('ba',),
'่ขฑ':('fu',),
'ๆคฐ':('ye',),
'้น':('bo',),
'็ธ':('dian',),
'ๅฆป':('qi',),
'็ฌผ':('long',),
'ๆณ':('quan',),
'่น':('xi', 'qi'),
'็':('pao',),
'็ป':('rao',),
'ๆ':('wan',),
'่ฟ':('chi',),
'ๆฑ':('gong',),
'็ข':('lao',),
'่ฌ':('mai',),
'ๆฏ':('ke',),
'ๆทณ':('chun',),
'่ฝด':('zhou',),
'ๅถ':('ou',),
'็งป':('yi',),
'่':('lao',),
'่':('heng',),
'ๆ':('gou',),
'ๆถ':('xiao',),
'ๅ':('dan',),
'็พ':('gao',),
'่':('xiao',),
'ๅ ':('guan',),
'็ฅ':('chai', 'cuo'),
'ๅค':('die',),
'้ขฆ':('pin',),
'ๆฎ':('zuo', 'cuo'),
'่ดณ':('shi',),
'ๅน':('chui',),
'็ฒพ':('jing',),
'่':('tun',),
'ๆญ':('xie',),
'ไป':('jin',),
'ๅ':('ai',),
'ๅซ':('ai',),
'่':('xiang',),
'ไฝ':('tong',),
'็ ':('yu',),
'ๆญ':('niu',),
'ๅฏผ':('dao',),
'ไพ':('kua',),
'ๅ':('zai',),
'้':('chuan',),
'้บ':('lu',),
'ๆฒ':('gou',),
'ไผ':('san',),
'็ขง':('bi',),
'้ธจ':('bao',),
'ไบณ':('bo',),
'่ถต':('bo', 'bao'),
'ๆฐด':('shui',),
'็ธ':('que',),
'็ ผ':('tong',),
'ๅฒฟ':('kui',),
'ๆ':('lv',),
'ๆฟ':('sui',),
'ไน':('me',),
'่ต':('she',),
'ๅ':('ruo',),
'็ฏ':('kui',),
'่':('zhen',),
'ๆฝ':('lu',),
'ๅฅ':('hao',),
'้ฃง':('sun',),
'็ท':('wan',),
'้กผ':('xu',),
'่':('zhuo',),
'ๆ':('ji',),
'ๆ':('ying',),
'ๅ':('li',),
'่พ':('ci',),
'็ก':('luo',),
'ๅ ':('jia',),
'้ข':('hai',),
'็ฅ':('sheng',),
'ๅค':('zha',),
'็ผฉ':('su', 'suo'),
'่ซ':('fang',),
'ๅน':('kuang',),
'ๆ':('huang',),
'่':('xu',),
'ๅ':('cheng',),
'ๅง':('si',),
'้ณ':('biao',),
'่':('bian',),
'็ ':('hen',),
'้ฅ':('lu',),
'็ค':('rang',),
'็ปจ':('ti',),
'่ฎ':('shan',),
'็น':('du',),
'้บ':('gui',),
'่':('pang',),
'ๆ':('song',),
'ไบ':('zheng',),
'ๅ':('bang',),
'็ ':('pi',),
'่':('zhi',),
'ๆฎ':('ji',),
'ๆถ':('lian',),
'ไธ':('cheng',),
'้ค':('chui',),
'็ฆง':('xi',),
'่ญ':('na',),
'ๅท':('leng',),
'้น':('cui', 'zhui'),
'็ธ':('mou',),
'็คผ':('li',),
'่':('nie',),
'ๆ':('shan',),
'ๅญ':('zi',),
'็ณ':('gao',),
'่ฏ':('zhu',),
'่ท':('gen',),
'ๅฅ':('ge',),
'ๆฏ':('la',),
'่ตด':('fu',),
'้ฅผ':('bing',),
'่':('yu',),
'ๆ':('xian',),
'่ธ':('liang',),
'ๆพ':('che',),
'้':('xiao',),
'ๅ':('xi',),
'็ฎ':('qing',),
'ๅผ':('gong',),
'ๆ':('tan',),
'ๆ ':('gua',),
'่บ':('xie',),
'้ข':('xing',),
'็ฅ':('hu',),
'ๅพจ':('huang',),
'่ซ':('mang',),
'ๆฎ':('mu',),
'็ถ':('zhuang',),
'ๅน':('liao',),
'้ฌป':('yu',),
'็บ':('wen',),
'็บพ':('shu',),
'่':('wan', 'yu'),
'ๆ':('quan',),
'่ฟ':('mai',),
'้':('li',),
'็':('wei',),
'ๆ':('zhe',),
'็ ':('zhu',),
'็ค':('cuo',),
'้นฉ':('liao',),
'ๆญ':('gong',),
'่ฎ':('man',),
'ๆฉฑ':('chu',),
'ๅธ':('yi',),
'้บ':('pu',),
'้ปพ':('min',),
'่':('su',),
'่บ':('chu',),
'้':('po',),
'้ข':('tui',),
'็ผ':('zhui',),
'ๅฎ':('dang',),
'ๆ':('an',),
'่':('yun',),
'ๆฒ':('pei',),
'็บง':('ji',),
'ๅดฆ':('yan',),
'่ญ':('chi',),
'่ถฑ':('zan',),
'ๆฐฐ':('qing',),
'่ฎต':('ju',),
'ๅท':('rang',),
'ๆ':('pang',),
'่ต':('jin',),
'็':('jue',),
'ๅ':('cheng', 'ceng'),
'้':('zhou',),
'ๅฑ':('ji',),
'ๆ':('ao',),
'ไฟ':('bao',),
'ๆฅ':('leng',),
'ๅก':('weng',),
'็ข':('sui',),
'ๅฅ':('ju',),
'้ปง':('li',),
'่ฌ':('qiu',),
'ๆฏ':('zheng',),
'ๅถ':('ding',),
'็ณป':('xi', 'ji'),
'ๅบ':('jin',),
'็ซฟ':('gan',),
'่':('mao',),
'ๆ':('chao',),
'ไธ':('mo', 'wan'),
'้':('huo',),
'็ฅ':('wei',),
'ๅขจ':('mo',),
'่ฏ':('zhong',),
'็บ':('xi',),
'็ฆพ':('he',),
'ๆ':('shi',),
'่':('bian',),
'้':('ci',),
'้ณ':('yao',),
'ๅฟ':('te',),
'่':('zong',),
'ๆ':('wang',),
'้ก':('xin', 'chan', 'tan'),
'็ ':('hu',),
'้ฅฉ':('xi',),
'็ฃจ':('mo',),
'ๆญ':('hang',),
'้ถ':('kang',),
'ๆ':('ang',),
'่พ':('nian',),
'ๆฐ':('pu',),
'ๆจ':('fan',),
'้':('liu',),
'ๅฒ':('cen',),
'้ฆ':('san',),
'ๆ':('lin',),
'่':('wan',),
'ๆถ':('tao',),
'็พง':('suo',),
'ๆฌ':('ben',),
'้น':('zou',),
'้บฝ':('mo',),
'ๅฎฟ':('su', 'xiu'),
'ๆ':('dan',),
'่ฑ':('dou',),
'ๆฃ':('mian',),
'็':('he',),
'ๅ':('ben',),
'ๅญ':('kong',),
'่ท':('bo',),
'ไป':('tong',),
'่ฏ':('gou',),
'่ตฐ':('zou',),
'ๅถ':('nao',),
'้ธ':('suan',),
'ๅกพ':('shu',),
'่':('ji',),
'่':('jiu',),
'ๆฆ':('lv',),
'้':('hun',),
'็':('ke',),
'ๅ':('pao',),
'็ฎ':('bo',),
'ๆ':('hu',),
'ๆธ':('yu',),
'้ฒฆ':('tiao',),
'็ ฉ':('fei', 'fu'),
'ๅฆจ':('fang',),
'่ซ':('nian',),
'่ฏ':('fu',),
'ๆบฒ':('sou',),
'ๅธฝ':('mao',),
'็ขพ':('nian',),
'ๆฝ':('xiao',),
'่ง':('lan',),
'็':('tian',),
'็ฅ':('fu',),
'ๅป':('ao',),
'้ถ':('yin',),
'ๅธ':('tu',),
'ๆผ':('qi',),
'้':('sui',),
'็':('lie',),
'้ช':('zhui',),
'่ฐ':('mi',),
'ๅข':('suo',),
'ๅผฆ':('xian',),
'ๅคช':('tai',),
'ๆฌ':('ban',),
'่พฑ':('ru',),
'็ธ':('ba',),
'ๆ':('tuo',),
'็':('yi',),
'็ง':('bi',),
'ๅก':('ta',),
'ๆ':('zhang',),
'่ฃ':('sha',),
'่ฌ':('zang',),
'ๆฏ':('guan',),
'่นฐ':('chu',),
'็ท':('ci',),
'ๅถ':('mu',),
'็ปป':('quan',),
'้ฑผ':('yu',),
'ๆ':('qiao',),
'้':('liu',),
'ๅ':('shu',),
'ๅจ':('wei',),
'ๆฎ':('qian',),
'ๆถฒ':('ye',),
'ๅน':('pi',),
'ๅดฝ':('zai',),
'่':('zhu',),
'ๆ':('jiong',),
'่':('zhou',),
'ๆฑ':('hui',),
'้':('ni',),
'็':('hao',),
'ๆ':('ju',),
'่ต':('yan',),
'้ก':('mi',),
'้ฅ':('su',),
'็ค':('kao',),
'ไปฐ':('yang',),
'้ถ':('shao',),
'ๆ':('lou',),
'้':('xie',),
'ๆ ':('long',),
'ๅบ':('wu',),
'ๆ':('dou',),
'ๅธฆ':('dai',),
'ๅ ช':('kan',),
'ๆคด':('duan',),
'ๅท':('pen',),
'็ธ':('mei',),
'้ฒฝ':('die',),
'ๆ':('qing',),
'็ป':('bang',),
'ๅฝ':('ji',),
'็ฃ':('ke',),
'ๅฅ':('ben',),
'ๆ':('hui',),
'่ฟ':('jin',),
'ๆ':('hui',),
'ๅก':('nan',),
'ๅซฉ':('nen',),
'็กช':('wo',),
'่ฝฐ':('hong',),
'ๆฏณ':('cui',),
'ๅถ':('ga',),
'็ฟป':('fan',),
'่':('zhe',),
'่':('du',),
'ๅ ':('yin',),
'้ข':('gang',),
'ๅค':('yuan',),
'้บฆ':('mai',),
'่ฏ':('pu', 'fu'),
'ๆฒฒ':('duo',),
'้ท':('e',),
'ๅน':('sha', 'cha'),
'้ผป':('bi',),
'ๅฐฝ':('jin',),
'่':('shuai',),
'ๆ':('jun',),
'ๆต':('jiao',),
'ไป':('pu',),
'่ฏ':('zha',),
'ๅณ':('dong', 'tong'),
'ไฝ':('fo', 'fu'),
'้ก':('dou',),
'ๅฃ':('you',),
'้ฅ':('yao',),
'ๆญ':('zhi',),
'้ถ':('ling',),
'็น':('te',),
'ๅธ':('si',),
'็ฑฝ':('zi',),
'ๆ':('qian',),
'ๆ':('ruan',),
'้':('diao',),
'ๆช':('bo',),
'่ฐ':('zi',),
'ๆ':('quan',),
'ไผ':('kuai', 'hui'),
'่':('shun',),
'ๅช':('ge',),
'่ฎญ':('xun',),
'่ต':('piao',),
'ๆด':('mao',),
'็ฐธ':('bo',),
'ๅป':('sha',),
'ๆง':('gao',),
'่':('tian',),
'ๅ':('deng',),
'่':('jia',),
'่':('jiao',),
'ๆ':('chang',),
'็ญข':('pa',),
'็ฆ':('xu',),
'ๅฅถ':('nai',),
'ๅพ':('jiu',),
'ๅ ':('beng', 'peng'),
'ๅธ':('wei',),
'ๅ':('he', 'xia'),
'ๆด':('zhu',),
'่พ':('lin',),
'ๆ':('zhua', 'wo'),
'็ผฅ':('piao',),
'็ฉ':('shuai',),
'ๅจ':('zi',),
'่ธฏ':('zhi',),
'ๆถฎ':('shuan',),
'้ป':('luo',),
'้ฟ':('e', 'a'),
'็พ':('ji',),
'ๆ':('shan',),
'้ซ':('bin',),
'้':('kuai',),
'่ต':('fu',),
'ๆ':('bai',),
'ๅฑฃ':('xi',),
'ๅง':('ju',),
'้ฉ':('cuan',),
'่ฒ':('fei',),
'ๆต':('chu',),
'้พ':('lv',),
'ๆ':('chuang',),
'้ฆ':('chan',),
'ๆ':('hao',),
'ไธ':('ye',),
'่':('qian', 'xi'),
'ๅฆ':('juan',),
'้จ':('xian',),
'ไพฏ':('hou',),
'่ฑ':('gong',),
'่ต':('he', 'ke'),
'ๆด':('pu', 'po', 'piao'),
'ๅป':('dong',),
'่':('ling',),
'่ฏ':('shi',),
'ๆ':('luo',),
'ๅฉ':('li',),
'่ญฌ':('pi',),
'ๆฟฏ':('zhuo',),
'่
ฐ':('yao',),
'่ด':('hu',),
'็ฏท':('peng',),
'ๅนบ':('yao',),
'็ฟ':('kuang',),
'ๆฆ':('pin',),
'่ธ':('xue',),
'ๆพ':('cheng', 'deng'),
'ๅผ':('shi',),
'้':('qiu',),
'้':('que',),
'่ข':('xiu',),
'ๆฐ':('xian',),
'้ชข':('cong',),
'้ฆ':('bang',),
'่คซ':('chi',),
'ๆฒฎ':('ju',),
'ๅฐน':('yin',),
'็บบ':('fang',),
'ๅฝ':('gui',),
'ๆต':('jia',),
'่ฟ':('qi',),
'ๅ':('le', 'lei'),
'ๆท':('tao',),
'ๅง':('wo',),
'็จ':('lao',),
'ๅผ':('diao',),
'้พ':('lian',),
'่':('zhao',),
'้ช':('cheng',),
'ๅฆ':('yan',),
'็ผ':('duan',),
'ๆข':('geng',),
'ๆ':('pao',),
'่':('si',),
'็บฃ':('zhou',),
'ๅฆ':('gui',),
'ๅช':('feng',),
'่ฑ':('lai',),
'ๆด':('qian',),
'ๅฟ':('yi',),
'ๆฏ':('hui',),
'ๆท':('xi',),
'่':('jing',),
'่':('rui',),
'็ง':('zhong', 'chong'),
'ๅก':('ta',),
'็':('long',),
'ๅ':('gua',),
'้':('jiu',),
'็':('hen',),
'ๅ':('dai',),
'่':('ke',),
'ๆต':('xun', 'jun'),
'่':('ji',),
'ๆ':('qi',),
'ๅฏก':('gua',),
'็ฅข':('ni', 'mi'),
'ๅทฅ':('gong',),
'้ง':('yun',),
'็ช':('pan', 'fan'),
'่กฌ':('chen',),
'ๆณฏ':('min',),
'่ฐ':('jian',),
'ๆณ':('ye',),
'็ฃท':('lin',),
'็ป':('suan',),
'็ฟ':('bu',),
'ๆช':('tan',),
'ไธ':('qi',),
'่':('fu',),
'้ค':('yan',),
'็ข':('lu', 'liu'),
'ๅฐ':('xiao',),
'้':('a',),
'ไบ':('gen',),
'็ฉ':('xuan',),
'่ฐฏ':('qiao',),
'ๅผน':('dan', 'tan'),
'ๆน':('pai',),
'ๆ':('xi',),
'็ซ ':('zhang',),
'้ฉ':('ge',),
'่ฟฎ':('ze',),
'ๆฑ':('ai',),
'็ฝน':('li',),
'็ฝ':('shan',),
'่พ':('jiao',),
'ๆ':('zhuang', 'gang'),
'ๅ':('gu',),
'ๆ':('zhen',),
'่ขญ':('xi',),
'่ฑ':('hua',),
'ๆด':('po',),
'็ผธ':('gang',),
'ๅป':('qu',),
'ๆณ':('qiu',),
'้น':('bei',),
'้':('qiu',),
'ไป':('xian',),
'ๅซก':('di',),
'ๅณฅ':('zheng',),
'็ฆ':('mao',),
'ๆฏซ':('hao',),
'ๆณ':('liu',),
'่ด':('yun',),
'ๅฉถ':('shen',),
'ๅฑบ':('qi',),
'็ฟ':('wei',),
'่ฐ':('liang',),
'็':('ding',),
'ๆ':('si',),
'ๅฎ ':('chong',),
'้ขข':('hao',),
'็ฉ':('da',),
'้ช':('kang',),
'่ดฏ':('guan',),
'ๆฒ':('bei',),
'้ฟ':('ai',),
'่ฏ':('jue',),
'่ท':('qiang',),
'็ญ':('jin',),
'่น':('cu',),
'้ฉ':('xian',),
'่ฒ':('gai', 'hai'),
'ๅฟธ':('niu',),
'ๅผ':('dang',),
'้พ':('wu',),
'ๆผ':('piao',),
'้ฒ':('fu',),
'้ช':('jun',),
'ๅ':('yin',),
'้':('deng',),
'ๅ':('ce',),
'ๆ':('tuan',),
'็ขฃ':('jie',),
'ๅช':('fei',),
'ๆด':('dai',),
'ๅบท':('kang',),
'็ ธ':('za',),
'ๅป':('xiu',),
'็ผ':('mo',),
'ๅฟ':('shi',),
'ๆฏ':('yi',),
'ๅน':('huang',),
'ๅ':('na',),
'้':('qing',),
'ๅ':('ti',),
'่':('gou',),
'ๅทก':('xun',),
'็ฝข':('ba',),
'ๅฏฅ':('liao',),
'็ฆ':('qi',),
'ๆณซ':('xuan',),
'่นฌ':('deng',),
'ๆณ':('xiang',),
'่ด':('fu',),
'็ปท':('beng',),
'้กธ':('han',),
'ๅญบ':('ru',),
'ๆช':('xi',),
'่':('mo', 'mai'),
'้ผ':('tuo',),
'้':('ti',),
'็':('lu',),
'ๆ':('zhao', 'chao'),
'็ฌฅ':('si',),
'ๅจ':('chu',),
'่ฐซ':('jian',),
'่ณ':('er',),
'้ธท':('zhi',),
'ๅคน':('jia', 'ga'),
'ๆก':('tao',),
'้':('kao',),
'ๅ':('lie',),
'ๆฃ':('ji',),
'ๆ':('yao',),
'็ณ ':('kang',),
'้ฉ':('ming',),
'็จ':('ye',),
'ๆฝญ':('tan',),
'ไปฌ':('men',),
'็ฝ':('kui',),
'่ฆ':('qin', 'tan'),
'ๆ':('wu',),
'ๆ':('hu',),
'่':('mi',),
'็ง':('qiao',),
'ๅฆ':('pi', 'fou'),
'ๅพท':('de',),
'ๅป':('yu',),
'้ฝ':('juan',),
'็ผ':('yan',),
'ๅฟ':('xian',),
'่น':('rou',),
'่':('bao',),
'็ป':('shao',),
'ๅ':('cui',),
'่':('tu',),
'ๅณก':('xia',),
'้ง':('ren',),
'ๆทซ':('yin',),
'่ฝฌ':('zhuan', 'zhuai'),
'ๆฏฏ':('tan',),
'ๅฉบ':('wu',),
'้ผ':('da',),
'ๅพ':('fen',),
'่ฐ':('shei',),
'ๆฎ':('tian',),
'็พ':('qiang',),
'ๆ':('zan',),
'ๅฎค':('shi',),
'้ฆ':('qin',),
'่ดซ':('pin',),
'ไธญ':('zhong',),
'้ผท':('xi',),
'้ป':('duan',),
'ไป':('le',),
'่ฏ':('ping',),
'็ญ':('fa',),
'ไฝ':('tuo',),
'่ก':('ya',),
'ๆ':('gui', 'ju'),
'่ฒ':('nao',),
'็ฝ':('bai',),
'้พ':('yan',),
'่':('chan',),
'้บ':('mi',),
'้ข':('ke',),
'ๅ':('mian',),
'่':('tie',),
'ๆ':('wu',),
'้ธ ':('jiu',),
'็ง':('bi',),
'ไบซ':('xiang',),
'ๅฒท':('min',),
'็ผ':('qiong',),
'่ต':('lu',),
'ๆ':('rou',),
'่':('la',),
'้':('yi',),
'็':('yan',),
'่':('zhu',),
'ๆฅ':('chu',),
'ๆ':('lao',),
'ๅฟก':('chong',),
'ๅงฅ':('lao',),
'้ง':('hua',),
'็ฆ':('mu',),
'ๅฉ':('kou',),
'ๆณ':('quan',),
'้ฉธ':('fu',),
'่':('mo',),
'็':('gui', 'que'),
'่พ':('xia',),
'่':('guan', 'wan'),
'็ผก':('li',),
'ๅบ ':('xiang',),
'็ฉ':('xing',),
'ๅจ':('dong',),
'้ช':('lao',),
'่ณ':('zhu',),
'้ป':('zu',),
'็ฆบ':('yu',),
'ๆ':('yao',),
'่':('li',),
'้ณ':('ao',),
'ๅ':('da',),
'็ป ':('geng',),
'้ฅฅ':('ji',),
'็จ':('rong',),
'่ฏฎ':('qiao',),
'ๆต':('yong',),
'็ญน':('chou',),
'ๅผ':('heng',),
'่':('wei',),
'ๆ':('kun',),
'้พ':('qu',),
'ๆ':('chao',),
'ๅฒ':('qian',),
'้ฆ':('liu',),
'็จ':('shui',),
'ไธ':('shi',),
'้ผ ':('shu',),
'ๅฐข':('you', 'wang'),
'ๅช':('ni',),
'่ฑ':('zha',),
'ๅถท':('yi',),
'็ฌธ':('po',),
'้ฝ':('tan',),
'ๆป':('chu',),
'ๆ':('la',),
'่':('liao',),
'็ณ':('ci',),
'ๅต':('qian',),
'็':('jian',),
'ๅ':('zuo',),
'ๅ':('tu',),
'่ท':('fu',),
'ๆน':('hu',),
'่':('peng',),
'่ฐ':('kui',),
'่ด':('yu',),
'ๅนถ':('bing',),
'ๆฆ':('lan',),
'ๆ':('shou',),
'ๅผ':('yi',),
'ๅค':('xia',),
'้':('lan',),
'็':('rong',),
'้ฒข':('lian',),
'็ ฅ':('di',),
'ๅฆค':('yu',),
'ๅจ':('chu',),
'้ช':('xie', 'ye'),
'่ณ':('jiang',),
'็บถ':('lun', 'guan'),
'ๆญ':('sha',),
'่ง':('gui',),
'ๅ':('da',),
'็ฟ ':('cui',),
'ๅญฃ':('ji',),
'็งค':('cheng',),
'ๅง':('ze',),
'็ฉน':('qiong',),
'ๅฏธ':('cun',),
'ๅ':('chan',),
'ๅ':('long',),
'้ช':('hai',),
'่':('lei',),
'็ข':('qi',),
'็บ':('mi',),
'ๅด':('guo',),
'่ ':('huo',),
'่ฐจ':('jin',),
'ไผช':('wei',),
'็ฌ':('pa',),
'ๅฏ':('huo',),
'็ ฐ':('peng',),
'ๅฒณ':('yue',),
'่น':('tai',),
'่ฎฝ':('feng',),
'ไบฟ':('yi',),
'ๅฑ':('qu',),
'ๆ':('zou',),
'ไน':('qiao',),
'็':('ya',),
'ๅฏ':('qin',),
'้ป':('yi',),
'่ฃ':('yu',),
'ๆข':('huan',),
'ๆฅฆ':('xuan',),
'็ซ':('tang',),
'็ฏ':('ding',),
'ๅฎ':('nie', 'yao'),
'ๆท':('kao',),
'่ธ':('xi',),
'็':('yang',),
'ๅ':('chi',),
'็':('de', 'di'),
'ๅจ':('jiao',),
'็':('dai',),
'็ฎ':('chuang',),
'ๅคฑ':('shi',),
'่ท':('xian',),
'ๅ':('can', 'cen', 'shen'),
'้':('xiong',),
'ๅง':('mu',),
'ๆ':('lian',),
'่':('chang',),
'ๆ':('yan',),
'้':('luo',),
'ๆ ':('hui',),
'่ฅ':('mao',),
'ๆณจ':('zhu',),
'็ญ':('ta',),
'้ฎ':('wen',),
'ๅฃฐ':('sheng',),
'ๅปด':('yin',),
'ๅ':('ru',),
'ไบ':('jing',),
'้':('nuo', 'tian'),
'็พ':('qian', 'qiang'),
'่ดจ':('zhi',),
'ๅซ':('bing',),
'ไธช':('ge',),
'็ฌ':('wei',),
'็ผด':('jiao',),
'่น':('jian',),
'ๆธ':('you',),
'ไพฟ':('bian', 'pian'),
'็':('shuo',),
'ๅ':('wu',),
'็':('zhong', 'ye'),
'้น':('que',),
'ๆ':('pai',),
'ๆป':('teng',),
'ๅ':('kuai',),
'่ฏง':('cha',),
'ๆกฆ':('hua',),
'้ฅฐ':('shi',),
'้':('song',),
'้':('yue',),
'็':('miao',),
'่':('she',),
'ๆ':('zhang',),
'ๅ':('chuan',),
'ๅฎ':('yi',),
'้ฒ':('xiang',),
'ๆก':('yi',),
'ๆฅ':('chun',),
'ๆฐฉ':('ya',),
'ๅญ':('jing',),
'ๆฒพ':('zhan',),
'็ญ':('qiong',),
'ๅป':('lang',),
'่':('jun',),
'ๆ':('xing',),
'่ง':('jin',),
'ๆต':('nong',),
'่ฟ':('fan',),
'ๅ':('nan',),
'็':('dou',),
'ๅฌ':('zhao',),
'้ฎ':('zheng',),
'่ถ':('jiao',),
'ๆน':('mo',),
'้ข':('po',),
'็ฌ':('ba',),
'ๅพ':('yang',),
'่':('yu',),
'็ด':('wen',),
'ๆค':('jiao',),
'ๅ':('hua',),
'็ฒ':('su',),
'่ค':('ying',),
'ๆขง':('wu',),
'ๅซ':('jie',),
'้ญ':('bu',),
'ๅฏ':('luo', 'ka', 'ge', 'lo'),
'ๅบณ':('bi',),
'ๆ ผ':('ge',),
'ๅ':('kai',),
'็':('bing',),
'่':('xie',),
'้':('xi',),
'ๅ':('xun',),
'็':('ben',),
'ๅง':('shu',),
'้ณ':('zun',),
'ๆข':('man',),
'ๆญฆ':('wu',),
'ไฟฉ':('liang', 'lia'),
'ๆตช':('lang',),
'็ฏ':('ou',),
'้ฉฐ':('chi',),
'็ปณ':('sheng',),
'ไฝพ':('yi',),
'็':('xi',),
'้':('guo',),
'ๅ ':('jin',),
'่':('kuai',),
'ๆ':('ti',),
'ๆช':('yan',),
'ไธ':('zhuan',),
'ๆฒ':('mian',),
'็':('yi',),
'ๆก':('dang',),
'ๆฅ':('zuan',),
'่ฎฆ':('jie',),
'ๆผฉ':('xuan',),
'ไบจ':('heng',),
'้ธณ':('yuan',),
'่ท':('da',),
'็':('long',),
'้':('shuo',),
'็ก':('nao',),
'ๅฏ':('mi',),
'ๆ':('shou',),
'่':('bei',),
'ๆน':('pen',),
'้':('jing',),
'็':('pan',),
'่ก':('dang',),
'ๆค':('xing',),
'่ฝฉ':('xuan',),
'ๆปจ':('bin',),
'ๅฌ':('jiao',),
'็ญฑ':('xiao',),
'ๅ':('qiao',),
'้ฆ':('cha',),
'็จ':('lv',),
'่':('yi',),
'ๆ':('ming',),
'่พ':('shu',),
'ๅ':('yue',),
'ๆฃ':('zao',),
'ๆฆง':('fei',),
'้ชฑ':('jie', 'xie'),
'้ฒต':('ni',),
'็':('yun',),
'้':('sui',),
'ๅ':('zhuo',),
'็ป':('zhou',),
'่':('hu',),
'ๆณ':('fa',),
'็':('you',),
'็':('hu',),
'ๆข':('hui',),
'ๆฑช':('wang',),
'้ฌ':('chou',),
'็ฟณ':('yi',),
'ไนพ':('qian',),
'ๅ':('pu',),
'ๅค':('bei',),
'่':('shua',),
'่ฐ':('yu',),
'ๆถ':('cen',),
'็':('sao',),
'้':('bu',),
'็ ':('fa',),
'ๆก':('kan',),
'่ข':('zhi',),
'ๆธฉ':('wen',),
'ไพจ':('qiao',),
'ๅญ':('wo',),
'้ฏ':('ju',),
'็ฎ':('pi',),
'ๅจฑ':('yu',),
'็ขฒ':('di',),
'ๆบ':('cuan',),
'่ดฟ':('hui',),
'็':('suo',),
'ๆ':('luo', 'lv'),
'่':('gu',),
'ๆ':('min',),
'ๅญ':('bei', 'bo'),
'ๆ ':('ya',),
'่นฉ':('bie',),
'็ญ':('mie',),
'ๅฏฐ':('huan',),
'ๅทด':('ba',),
'ๅ':('yong',),
'้':('sui',),
'ๅ':('ting',),
'้บ':('qun', 'jun'),
'่':('jiang',),
'็ฌ':('zhao',),
'่':('du',),
'ๆ':('bi',),
'ๅ':('xi', 'fang'),
'็ช':('ku',),
'ๆฃ':('huan',),
'ๆบง':('li',),
'ๅซ':('zhi',),
'้ญ':('bian',),
'่พฝ':('liao',),
'ๅ':('ya',),
'็ง':('bing',),
'ๆท':('shu',),
'ๆฏ':('bi',),
'ๅฟ':('tian',),
'้ซ':('biao', 'shan'),
'็ฅ':('shen',),
'ๆตฆ':('pu',),
'ๆญช':('wai',),
'้ฌ':('zou',),
'็ฏ':('kuai',),
'ๅฎ':('xi',),
'้ฉด':('lv',),
'ๆท':('he',),
'ๆณป':('xie',),
'่นผ':('pu',),
'้':('suo',),
'็':('cui',),
'็':('ji',),
'่':('chu',),
'ๆ':('kui',),
'ๆฒ':('mu',),
'็':('bei',),
'็ผ':('feng',),
'ๅบ':('fu',),
'ๆฅ':('hui',),
'่ฎช':('shan',),
'ๅญ':('gui',),
'็พฒ':('xi',),
'ๆบ':('shan', 'chan'),
'ไผฝ':('jia', 'ga', 'qie'),
'้':('you',),
'ๅ':('geng',),
'็น':('zhou', 'you', 'yao'),
'ๆก':('huan',),
'ๅ':('pu',),
'็':('hong',),
'็ป':('lao', 'luo'),
'่ฅ':('ying', 'xing'),
'ๅณฐ':('feng',),
'็ญต':('yan',),
'้พ':('chai', 'zi'),
'ๅข':('yong',),
'ๆ':('zen',),
'ๅ':('a',),
'ๅ':('gang',),
'ๅ ':('die',),
'ๆพง':('li',),
'้ฒฑ':('fei',),
'้':('pi', 'po', 'bei'),
'็ฃ':('sang',),
'ๅฅ':('nai',),
'่น':('pan',),
'่ฃ':('chen',),
'็ซ':('xian',),
'ๅนฒ':('gan',),
'ๆทป':('tian',),
'่ฝผ':('shi',),
'็':('xuan',),
'่ฐ':('xue',),
'ๅพ':('chang',),
'้ข':('nie',),
'ๆก':('sang',),
'ๆฅ':('deng',),
'ๆ ฉ':('xu',),
'้ซ':('pei',),
'ๅญ':('peng',),
'็ฎ':('pao', 'bao'),
'ๅฐฑ':('jiu',),
'็บฒ':('gang',),
'่ท':('quan',),
'่ดป':('yi',),
'ไธฝ':('li',),
'ๆ':('nie',),
'่ฏ':('lei',),
'ๅ':('kuai',),
'ๅต':('yu',),
'ๅญ':('meng',),
'่ก':('pu',),
'ๆ ':('wu',),
'ไฝง':('ka',),
'่กฉ':('cha',),
'ๆน':('yan',),
'่ฃพ':('ju',),
'ๅ':('nei',),
'้ฒ':('nian',),
'็ผ':('lan',),
'ๅฎ':('an',),
'่':('rui',),
'ๆ':('kua',),
'ไพ':('you',),
'่ช':('shi',),
'ๆด':('sa',),
'็บ':('dao',),
'็ข':('die',),
'ๆฒง':('cang',),
'ไผฆ':('lun',),
'็ฌ':('wan',),
'ๅชณ':('xi',),
'็ ด':('po',),
'่น':('ying',),
'ไบป':('ren',),
'้':('lou',),
'ๆฟ':('lai',),
'ไน':('le', 'yue'),
'้':('jia',),
'ๅ':('xu',),
'้ป':('dai',),
'ๅท':('chuan',),
'้ฃ':('shi',),
'่ฃ':('ju', 'qu'),
'ๆข':('gan',),
'ๆฝฆ':('liao',),
'็ฏ':('xi',),
'ๅฎ':('zhi',),
'้นฐ':('ying',),
'้':('ji',),
'ๅ':('ren',),
'็ช':('yao',),
'ๆบ':('yuan',),
'้':('mi',),
'ๅฒ':('ba',),
'้ฏ':('lu',),
'็ฎ':('wei',),
'ๅผฑ':('ruo',),
'ๆถ':('jia',),
'้':('qiao', 'que'),
'้':('qie',),
'ๅฟ':('yi',),
'ๆ':('jin',),
'ๅง':('zi',),
'่ฃ':('yi',),
'ๅ':('bei',),
'ๅน':('zhang',),
'็ณ':('mi',),
'่ฅ':('xing',),
'็ฝฑ':('lan',),
'่ฏพ':('ke',),
'ๅ':('fu',),
'่':('sou',),
'่':('gao',),
'ๆ':('rong',),
'ไบ':('yun',),
'็':('liao',),
'ๅ':('yi',),
'ๆฃ':('che',),
'ๆถง':('jian',),
'ๅซ':('dian',),
'้ญ':('tou',),
'็ฌ':('yong',),
'ๅฏ':('feng', 'ping'),
'็ผฐ':('jiang',),
'ๅฎณ':('hai',),
'็คด':('bo',),
'่น':('qin',),
'ๆดผ':('wa',),
'ๅ':('zhi',),
'ๅ':('xiong',),
'้น':('yu',),
'ๆ':('cao',),
'ๆป':('hua',),
'ไฝ':('zuo',),
'ๆฃ':('zong',),
'็':('pi',),
'็ซ':('jiao',),
'้ฌ':('huo',),
'้ฅด':('yi',),
'ๆท':('kuang',),
'่ธ':('bi',),
'ๆงฟ':('jin',),
'้':('ge',),
'็':('mao',),
'้':('hou',),
'็':('xi',),
'ๅด':('chong',),
'่':('ping',),
'ๆ':('bin',),
'่ ':('ru',),
'ๆฆ':('lang',),
'ๅ':('li',),
'้ช':('qian',),
'ๆฅ':('ji',),
'ๅญ':('keng', 'hang'),
'้ฏ':('shan',),
'ๅธฑ':('dao', 'chou'),
'็ฒฒ':('can',),
'ๅ ต':('du',),
'่ท':('qiang',),
'ๅ':('shen',),
'่ฟ':('yun',),
'ๆ ':('ning',),
'็ญ':('tang',),
'ๅฌ':('he',),
'ๆน':('pi',),
'่บ':('an',),
'ๆฑฝ':('qi',),
'ๅฆ':('shuo',),
'ๅพ':('dai',),
'ๅ':('hou',),
'่ข':('dai',),
'่บ':('lin',),
'ๆด':('ji',),
'็ช':('chuang',),
'ๅค':('wai',),
'ๆฒฃ':('feng',),
'่จ':('sa',),
'ๅณ':('ke', 'hai'),
'็ด':('zhang',),
'ๆฐธ':('yong',),
'่ฝ':('mang',),
'็ง':('si',),
'้ญ':('hun',),
'็ฟ':('chi',),
'ๅน':('wo',),
'้':('feng',),
'็':('jing',),
'ๅ':('cheng',),
'ๆฏ':('mu',),
'่ต':('shu',),
'ๆ':('men',),
'่':('huang',),
'็ฅ':('zu',),
'้':('fu',),
'็ฝ':('fa',),
'ๆญข':('zhi',),
'ๆช':('miao',),
'้ฉฌ':('ma',),
'็ปฏ':('fei',),
'้ด':('pi',),
'่ฑธ':('zhi',),
'ๆฟ':('ti',),
'ๅ':('qie',),
'้':('cuo',),
'่':('nao',),
'่':('tui',),
'้ฆ':('shou',),
'ๅฒ':('xian',),
'้พ':('gong',),
'็':('cu',),
'่ฎข':('ding',),
'ไบค':('jiao',),
'่ฆ':('lin',),
'้ธฏ':('yang',),
'็ถฎ':('qing', 'qi'),
'็ฒ':('ling',),
'่ป':('chi',),
'ๅฏ':('ji',),
'้ป':('huang',),
'็':('pian',),
'ๅ':('zhun',),
'้':('shi',),
'่':('hui',),
'ๆฃ ':('tang',),
'ๆปค':('lv',),
'ๅด':('wei',),
'ๅข':('man',),
'็จ':('lang',),
'็':('hou',),
'่พ':('cou',),
'ๆ':('jie',),
'็ฎ':('suan',),
'ๅธ':('zhou',),
'็':('piao',),
'ๅ':('tun',),
'ๆถฃ':('huan',),
'ๆง':('kong',),
'ๅฆซ':('gui',),
'็ฌฌ':('di',),
'้ฑ':('qiu',),
'็ฐ':('yan',),
'ๅณ':('cha', 'zha'),
'่ฝ':('ya',),
'็ฃ':('ci',),
'็ป':('shen',),
'ๆ':('huo',),
'็ก':('xia',),
'ๅณ':('shi', 'zhi'),
'็':('rui',),
'่ทฃ':('xian',),
'ไปฅ':('yi',),
'็งซ':('shu',),
'ๅฑฎ':('che',),
'้ฐ':('xian',),
'ๅฒ':('ge',),
'้ด':('xue',),
'่ผ':('tu',),
'ๆฟ':('shi',),
'็ฆ':('bing',),
'ๅด':('lai',),
'ไผ':('fu',),
'้ข':('ying',),
'็':('jue',),
'้':('chao',),
'ๆ ก':('jiao', 'xiao'),
'ๆธฅ':('wo',),
'่ช':('dou',),
'้ผฏ':('wu',),
'็ฒฎ':('liang',),
'ๆบบ':('ni',),
'่ฟ':('wan',),
'ๅซ':('sao',),
'ๅ':('nang',),
'่ฏ':('zhou',),
'ๅญ':('zi',),
'ๅ':('fen',),
'่กก':('heng',),
'ๆง ':('zhu',),
'่ฉ':('pa',),
'ๆจ':('zhi',),
'ๅฐ':('huang',),
'ๅด':('zhi',),
'ๆฑน':('xiong',),
'ๅฎ':('ning',),
'่':('yin',),
'็ข':('wan',),
'ๅฌ':('bi',),
'่ฐค':('bang',),
'็ ฌ':('la', 'li'),
'็ฐ':('yuan',),
'็ด':('qin',),
'่พน':('bian',),
'ๆธธ':('you',),
'ๆผ':('dui',),
'็ฏ':('huang',),
'ๅฉ':('e',),
'้':('nie',),
'็ญ':('ce',),
'ๅฟ':('mang',),
'ๅ':('ning',),
'้':('yin',),
'่ง':('long',),
'็ณฏ':('nuo',),
'็ช':('zhun',),
'ๅธ':('bu',),
'้':('mei',),
'็':('gui',),
'่':('niao',),
'็ผ':('jin',),
'ๆฌก':('ci',),
'ๆดฅ':('jin',),
'่ช':('xin',),
'ๅคญ':('yao',),
'้ณ':('yang',),
'่ป':('lu',),
'้ซ':('bi',),
'้ณ':('e',),
'ไฟ':('zu',),
'ๆ':('xiao',),
'็ป':('hui',),
'้':('yun',),
'็ฅญ':('ji',),
'ๅฃฌ':('ren',),
'ๅฐ':('xie',),
'่ฏถ':('ei',),
'่ทบ':('duo',),
'่พ':('chan',),
'้พ':('ju', 'zha'),
'็ ':('sha',),
'้':('chun',),
'็':('bao',),
'็':('ye', 'ya'),
'ๆธ':('du',),
'่':('qin',),
'ๆ':('nu',),
'็':('ma',),
'ๅฐ':('shang',),
'็':('yao', 'nve'),
'ๅ':('lei',),
'่ดค':('xian',),
'ๆง':('chong',),
'ๅฎซ':('gong',),
'้ต':('bo',),
'่ฝ':('tai',),
'ๅญ':('shuang',),
'็ณ':('rou',),
'ๆฃ':('gun',),
'่':('guo',),
'็':('lai',),
'ๆกข':('zhen',),
'่ง':('zang',),
'ๆฆ':('qian',),
'้ฅฌ':('chi',),
'็ณ':('dan', 'shi'),
'่ฝธ':('zhen',),
'่ผ':('mian',),
'็ฎ':('jian',),
'ๅผ':('qi',),
'ๅ':('xuan',),
'้':('yan',),
'ๆ':('qia',),
'้ช':('can',),
'ๅพ':('pai',),
'้ฒ':('ji',),
'ๅ':('nong',),
'ๆจก':('mo', 'mu'),
'่ขข':('pan',),
'่ฆ':('lu',),
'ๅต':('chao',),
'ไธน':('dan',),
'ๆพ':('tun',),
'่ง':('di',),
'ๆต':('liu',),
'ไป':('cong',),
'ๆ':('biao', 'shao'),
'ๅฅ':('tao',),
'็ฟ':('qiao',),
'ไฝฃ':('yong',),
'ๆทค':('yu',),
'็กญ':('mang',),
'ๅงฌ':('ji',),
'ๅฐ':('tai',),
'ไฟธ':('feng',),
'ๆฝ':('yun',),
'่พ':('e',),
'้ข':('hang',),
'็ผ':('ke',),
'ๅฎ':('zhai',),
'ๅ':('ran',),
'็':('ni',),
'ไพ':('shi',),
'ๆค':('zhui',),
'่':('huang',),
'ๅด':('ya',),
'้':('fu',),
'็':('jiang',),
'่ฐ ':('dang',),
'ไผข':('ya',),
'ๆง':('ai',),
'่จ':('nou',),
'ๅฒซ':('xiu',),
'็ฐ':('yan',),
'ๆ ธ':('he', 'hu'),
'่ฝ':('zhong',),
'ๆผ':('zhou',),
'ๅฑ':('ju',),
'้':('mo',),
'ๅ':('jie', 'ji'),
'ไน':('wu',),
'้':('cheng', 'dang', 'tang'),
'็ญ':('bi',),
'ๅ':('nian',),
'็':('sha',),
'่ฃฃ':('lian',),
'ๆฝข':('huang',),
'ๆฆ':('dun',),
'้นฌ':('yu',),
'็ซฏ':('duan',),
'้ด':('jian',),
'่ผ':('ge',),
'ๆฟ':('na',),
'็ช':('zhai',),
'่':('mie',),
'่':('qiong',),
'่ช':('e',),
'ๅผญ':('mi',),
'ๅฑ':('chang',),
'ๆพ':('han',),
'ๅ':('ji',),
'่':('ru',),
'่':('tai',),
'ๅ':('qiang',),
'่ตก':('dan', 'shan'),
'ๆณ ':('ling',),
'่ฉ':('nan',),
'ๆจ':('can',),
'ๆฅน':('ying',),
'่ฏบ':('nuo',),
'่พ':('yu',),
'ๅฒ':('sui',),
'็':('li',),
'่ถ':('qu',),
'ไบ':('chu',),
'ๆ ':('li', 'yue'),
'ๅฐ':('jian',),
'้':('shuang',),
'็':('min', 'wen'),
'ไธข':('diu',),
'ๆง':('jian',),
'็ผฌ':('xie',),
'้ฑ':('qian',),
'็ฐ':('tian',),
'ๅณ':('jue',),
'็ด':('hou',),
'็ณ':('san',),
'้น':('li',),
'ๅ':('xian',),
'ๅ':('jian',),
'็':('ba',),
'่งฃ':('jie', 'xie'),
'ๆช':('men',),
'้ฐ':('lian',),
'่ผ':('lou',),
'ๆฟ':('qin',),
'้':('xuan',),
'ๆพ':('si',),
'่':('huan',),
'ๆ':('fei',),
'้ช':('sao',),
'ๆฐก':('dong',),
'ๆฉ':('ni',),
'่ช':('qi',),
'ๅธญ':('xi',),
'้ฌฏ':('chang',),
'ๅฑ':('zi', 'zhi'),
'็ฒ':('jun',),
'่ป':('kou',),
'็':('di',),
'ๅ':('duo',),
'ๆต':('ce',),
'่':('miao',),
'่':('fu',),
'ๅฝ':('hui',),
'็ง':('bi', 'mi'),
'็':('qin', 'jin'),
'ๅ':('bu',),
'่ฑก':('xiang',),
'ๆท ':('pi',),
'่ฉ':('kui',),
'็ต':('qian',),
'ๅพ':('zheng',),
'้':('yi',),
'ๅ':('li',),
'็':('han',),
'ๆ':('cuan', 'zan'),
'็ฒ':('cu',),
'ๅค':('duo',),
'่จ':('ban',),
'็ฐ':('luo',),
'ๅณ':('lao',),
'้ต':('ju',),
'่ฎน':('e',),
'็ฟ':('weng',),
'ๅก':('leng',),
'็':('min',),
'้':('ding',),
'ๆ':('gan',),
'้ณ':('man',),
'็ฅ':('zuo',),
'ๅ':('ge',),
'ไฟก':('xin',),
'็ปซ':('ling',),
'็ณ':('yu',),
'้ด':('lin',),
'ๆณท':('long', 'shuang'),
'ไฝถ':('ji',),
'ๅจ':('wa',),
'ไธ':('xia',),
'ๆฒ':('zhuan', 'dun'),
'ๆ':('hui',),
'็ฌ':('sheng',),
'ไบ ':('tou',),
'้ธซ':('dong',),
'่ช':('chuai',),
'ๅดญ':('zhan',),
'็ฒ':('pi',),
'่ฐท':('gu',),
'ๆพถ':('chan',),
'่ฟ':('geng',),
'ๆพ':('yuan',),
'้':('wu',),
'่':('qu',),
'ๆ':('da',),
'้':('di',),
'็':('gua',),
'ๆป ':('she',),
'่ฉ':('jin',),
'ๆจ':('bo',),
'ๅฐ':('kun',),
'้ฒ':('xian',),
'ๆญน':('dai',),
'ๅข':('shu',),
'่พ':('wang',),
'่':('mei',),
'ๅธ':('tie',),
'็':('shu',),
'ๆง':('fu',),
'่จ':('ci',),
'้ฒญ':('qing', 'zheng'),
'ๅฆฏ':('zhou',),
'้ต':('shao',),
'่ฝ':('en',),
'็ป':('xie',),
'ๅฝ':('gou',),
'็ฃ':('bang', 'pang'),
'ๅฅ':('yan',),
'้':('lu', 'liu'),
'ๅ':('wu',),
'ๆ':('ti',),
'ๅ':('nong',),
'ไปก':('ge',),
'่ฏฃ':('yi',),
'ๆฆ':('hui',),
'ๆช':('ke',),
'็งฏ':('ji',),
'ๅฒ':('ci',),
'้ด':('tu',),
'ๆทท':('hun',),
'ๆฟ':('yi',),
'็ฆ':('lu',),
'ๅ':('chun',),
'่ฐ':('die',),
'ๆถ':('yong',),
'่':('mo',),
'ๆ':('xi',),
'่':('geng',),
'ๆ':('qiao',),
'ๅฎ':('guan',),
'้ข':('e',),
'ๅ':('xi',),
'ๆธก':('du',),
'ไพ ':('xia',),
'่ช':('fang',),
'ๅฑ':('zhu',),
'่ดท':('dai',),
'ๆบถ':('rong',),
'่ป':('qing',),
'ๆฝ':('lian',),
'่ท':('die',),
'ๅ':('tan',),
'ๆฟ ':('hao',),
'่กฅ':('bu',),
'้ฒ':('chan',),
'็ต':('ling',),
'ๆฉน':('lu',),
'่':('mao',),
'้':('kai',),
'ๅธ':('pei',),
'่':('xiang',),
'้ฃ':('ban',),
'็ค':('liu',),
'้ชง':('xiang',),
'ๆญ':('tian',),
'่ฌ':('ban',),
'ๆฆฑ':('cui',),
'็น':('zhen',),
'ๅบ':('chang',),
'ๅดพ':('yao',),
'่':('lian',),
'ๆ':('wu',),
'็':('quan',),
'็ญ':('tong',),
'ๆ':('niu', 'ao'),
'ๆป':('yan',),
'่ต':('zan',),
'็ฃ':('ban',),
'ๅค':('jin',),
'่ซ':('jing', 'keng'),
'ๆฌ':('yang',),
'ๆ':('chou',),
'่':('yu',),
'้ข':('yi',),
'็ผ':('lv',),
'ๆ':('mei',),
'ๆถ':('lai',),
'ๅฃ':('qin',),
'็ข':('fen', 'bin'),
'ๆซ':('fu',),
'ๆฏ':('shi',),
'่ฎ':('sou',),
'่ฎฒ':('jiang',),
'็ท':('ye',),
'ๅธ':('xian',),
'็ ป':('long',),
'ๅฅ':('qi',),
'็ฃ':('pan',),
'ๅฝ':('lu',),
'็ป':('ku',),
'ๆ':('jiao',),
'็ฅ':('pi',),
'ๅฟช':('song', 'zhong'),
'่ญ':('hong',),
'ๆฎ':('ga',),
'่ฝต':('zhi',),
'ๅนฟ':('guang',),
'่':('chong',),
'ๆฎ':('shang',),
'่ธ':('yong',),
'ๆ':('rang',),
'่พ':('pi', 'bi'),
'ๅฒฉ':('yan',),
'่ฌ':('cha',),
'่คฐ':('qian',),
'้ธ':('cha',),
'้ฌผ':('gui',),
'ๅฐพ':('wei', 'yi'),
'่':('xie',),
'ๆก':('kuang',),
'ๅ':('diao',),
'ๅ':('shan',),
'้ฃ':('biao',),
'ๆง':('jian', 'kan'),
'่':('xu',),
'็ฃ':('zhi',),
'ๅค':('kun',),
'้นช':('jiao',),
'่ทณ':('tiao',),
'ไปต':('wu',),
'ๆฝด':('zhu',),
'็ธ':('ji',),
'ๆ':('tai',),
'่':('bang', 'pang'),
'ๅ':('jiu',),
'็ ':('ya',),
'้ฆ':('xiu',),
'ๅช':('mei',),
'ๆ':('bei',),
'ไผ':('wei',),
'้ก':('qun',),
'ๅฃ':('zao',),
'ๆซ':('su',),
'็บช':('ji',),
'ๆฏ':('shu', 'zhu'),
'ๆดณ':('ru',),
'็ท':('you',),
'้ถ':('bei',),
'็คป':('shi',),
'่':('bo', 'fan'),
'ๆ':('bing',),
'้':('qiu',),
'ๅ':('qian',),
'ๅก':('su',),
'ๅน':('mu',),
'็ฟ':('xiang',),
'่ฏ':('shen',),
'ๅปช':('lin',),
'ๅท':('juan',),
'้ฉฝ':('nu',),
'่':('zhi',),
'ไธ':('zhang',),
'็':('ju',),
'ๅ':('guan',),
'้':('sha',),
'ๅฐ':('er',),
'้ขง':('quan',),
'ๆถต':('han',),
'่ฐด':('qian',),
'็ฆฝ':('qin',),
'่':('jing',),
'ๆญ':('xin',),
'ๅ':('xun',),
'ๆต':('zhuo',),
'้':('zhong', 'chong'),
'ๅ':('gen',),
'ๅป':('kuo',),
'้ข':('cu', 'zuo'),
'ๅค':('ling',),
'่ซ':('shan',),
'ๆฌ':('tian',),
'ๆฑด':('bian',),
'ๅน':('pei',),
'็ธ':('gui',),
'้ซป':('ji',),
'็กผ':('peng',),
'ๆ':('chi',),
'ๆผ':('lu',),
'็ฌ':('xiao',),
'้ช':('qi',),
'ๅฆ':('du',),
'้ฒ':('wei',),
'ๆ':('ye',),
'ๅฃ':('beng',),
'้ฅ':('zhui',),
'็ฎฆ':('ze',),
'็จป':('dao',),
'ๅขผ':('ji',),
'็ฐฟ':('bu',),
'ๅ':('mian',),
'ๅญ':('jie',),
'่':('yang',),
'ๆฅ':('lian',),
'ไฟ':('yu',),
'็ฅ':('lve',),
'้ค':('ting', 'ding'),
'็ฝฉ':('zhao',),
'่ญ':('e',),
'ไนณ':('ru',),
'ๆงฒ':('hu',),
'่ตต':('zhao',),
'ๅท':('tou',),
'้ฅฝ':('bo',),
'ๅฑฟ':('yu',),
'ๆฆ':('chen',),
'็':('wang',),
'่ฐ':('yi',),
'็':('shu',),
'้':('yan',),
'ๅด':('cui',),
'่ถ':('tang',),
'ๅก':('wu',),
'้ฃ':('rou',),
'็ผจ':('ying',),
'ๆญ':('qian',),
'่ฌ':('pao',),
'่ดด':('tie',),
'ๅถ':('zhi',),
'้ธ':('ba',),
'่':('cong',),
'ๆ':('xun',),
'ๅ':('you',),
'ๆฑ':('cha',),
'้ซ':('du',),
'็ก':('xi',),
'้ณ':('xue',),
'ๆฏ':('mao',),
'่':('shen', 'ren'),
'ๅ ':('zhan',),
'็ฃ':('yi',),
'้ข':('ta',),
'ๅค':('pi',),
'็งง':('yang',),
'่ฟณ':('jing',),
'ๆตด':('yu',),
'ๅน':('ling',),
'ๆ':('jiao',),
'่':('bo', 'bao'),
'่พ':('bei',),
'้':('kun',),
'ๅ':('hao',),
'ๅข':('shang',),
'ๆ':('dou',),
'ๅฃ':('sheng',),
'ๅคง':('da', 'dai'),
'็ชฆ':('dou',),
'้ธฉ':('zhen',),
'ๆซ':('cuo',),
'็ฒช':('fen',),
'ๆฏ':('zhi',),
'ๆผณ':('zhang',),
'ๅธ':('huan',),
'้บ':('yue',),
'้บพ':('hui',),
'ๆ':('mao',),
'ไน':('yi',),
'้':('liu',),
'ๅฑ':('zhan',),
'่ง':('zui', 'zi'),
'ไป':('ren',),
'็ก':('mu',),
'ๅซฆ':('chang',),
'ๅณช':('yu',),
'ไฝณ':('jia',),
'ๅท':('ju',),
'้กฝ':('wan',),
'็ปพ':('wan',),
'ๆ':('kun',),
'่':('feng',),
'็':('zhang',),
'ๅ':('piao',),
'็':('pian',),
'็ข':('dui',),
'ไพ':('yi',),
'้ฃ':('na',),
'ๅฅ':('jue',),
'ๅฆฉ':('wu',),
'ๆญ':('xiao',),
'่ฐฐ':('lan',),
'ไผฒ':('ni',),
'ๆฎต':('duan',),
'็น':('xi',),
'้ธ':('yi',),
'ๅบ':('qu', 'ou'),
'็พฝ':('yu',),
'่':('gao',),
'ๆ':('za',),
'่':('jian',),
'ไป':('chou', 'qiu'),
'ๆต':('jiang',),
'่ง':('jiao', 'jue'),
'็':('jian',),
'่':('qu',),
'ๆณ':('fan',),
'่':('jia',),
'ไน':('nie', 'mie'),
'่ฅ':('bi',),
'้ข':('mian',),
'็ปง':('ji',),
'่ฏ':('ben',),
'ๆฑฐ':('tai',),
'่ฃณ':('shang',),
'้ท':('lei',),
'ๅน':('ao',),
'็ธ':('jiu',),
'ๅฟฝ':('hu',),
'่':('wan',),
'่ข':('jia',),
'็':('kou',),
'้ฒ':('tai',),
'ๅพ':('tu',),
'็ฌ':('jian',),
'้ก':('xi',),
'็ข':('meng',),
'ๅธง':('zhen',),
'็ถฆ':('qi',),
'็ฎช':('dan',),
'ๆ ณ':('lao',),
'็จฟ':('gao',),
'ๆ':('gan',),
'่':('dan',),
'ๆ':('zhu',),
'่น':('jian',),
'ๅ':('tan',),
'ๅญ':('yun',),
'้ ':('kai',),
'ๅทฆ':('zuo',),
'้ฃจ':('xiang',),
'้น':('lei',),
'ๅฉฟ':('xu',),
'่ฐ':('zhun',),
'็':('shu', 'pi', 'ya'),
'่':('tang',),
'้ฃ':('tang',),
'ๅขฉ':('dun',),
'ๆฒฑ':('tuo',),
'่ดฐ':('er',),
'ไธฒ':('chuan',),
'็น':('dian',),
'ๅบ':('ci',),
'็บฝ':('niu',),
'่':('zhen',),
'ๆฑ':('cuan',),
'่ฃ':('ken',),
'้':('yong',),
'ๅง':('xing',),
'่':('cao',),
'ไฝ':('zuo',),
'้ข':('yuan',),
'ๅค':('lu',),
'่งณ':('hu',),
'็ธ':('bian',),
'ๆ':('ge',),
'ๆ ':('zhi',),
'ๆ':('han',),
'่':('pa', 'ba'),
'ๆ':('fu',),
'่ธ':('huai',),
'ๅผง':('hu',),
'ๆซ':('jue',),
'ๆฏ':('zhen',),
'่ฎ':('ang',),
'่บฒ':('duo',),
'็ท':('juan',),
'้ถ':('ke',),
'ๅพผ':('jiao',),
'ๆ':('yu',),
'ๅ':('jiao',),
'ๅฑ':('xie',),
'ๅฉ':('jie',),
'ๆ':('yang',),
'่ฟ':('yuan',),
'ๅข':('tuan',),
'ๅณฆ':('luan',),
'ๆฎ':('dan',),
'ๅท':('ke',),
'็ถ':('ping',),
'็บ':('yin',),
'็':('gao',),
'่ค':('bian',),
'ๅ':('tong',),
'ๅธ':('zhang',),
'่':('lang',),
'ๆ':('zhan',),
'ๅก':('fa',),
'็ ':('ji',),
'ๅฅ':('ming',),
'้ฒง':('gun',),
'ๆญ':('duan',),
'ๆฝ':('ying',),
'่ฏ':('su',),
'ๅ':('xiang',),
'ๆ':('pi',),
'่':('ding',),
'้ข':('jue',),
'้ฅช':('ren',),
'่ฏ':('kua',),
'ไฟฑ':('ju',),
'ๆกด':('fu',),
'้ท':('ru',),
'ๅน':('tan',),
'้ปป':('fu',),
'ๅทฝ':('xun',),
'็ฑผ':('xian',),
'่':('shi',),
'่':('yi',),
'็':('meng',),
'้':('liao',),
'็ผ':('gou',),
'้ข':('han',),
'ไธ':('cong',),
'่ด':('bei',),
'ๅฐง':('yao',),
'ๆฏ':('qie',),
'็ธป':('mi',),
'้บ':('xun',),
'่ฑ':('jiang',),
'้':('lou',),
'้':('e',),
'็ป':('dai',),
'ๅฅ':('yi',),
'็ฃ':('zhe',),
'่':('qing',),
'็ก':('shui',),
'้ ':('chou',),
'่ฝฑ':('gu',),
'ๅท':('xia',),
'็ถ':('ji',),
'ๅนป':('huan',),
'็ฏพ':('mie',),
'่ ':('lian',),
'็':('xun',),
'ๅค':('kui',),
'่พ':('xin',),
'็ ':('mian',),
'ๅฅ':('tang',),
'็ฌจ':('ben',),
'ๆญ':('bo',),
'่ฌ':('shu',),
'ๆบฑ':('zhen',),
'ๆขต':('fan',),
'่คด':('lan',),
'ๅถ':('si',),
'็น':('you',),
'ๅบ':('zhai',),
'็ฒฝ':('zong',),
'ๅ':('man', 'mai'),
'ๆก':('juan',),
'้':('pi',),
'ๅ':('jian',),
'้ป':('hei',),
'ๅฏ':('yu',),
'้ฃ':('sou',),
'ๆ':('qi',),
'่':('ju',),
'ๆง':('bing', 'bin'),
'ๅ ':('zhui',),
'้นฆ':('ying',),
'ๆฌ':('jing',),
'่ฏณ':('kuang',),
'้ท':('men',),
'ๅน':('bao',),
'ๆ':('chang',),
'ไบ':('le', 'liao'),
'้ฆ':('zhuan',),
'ๆ':('nuan',),
'ไผ':('yu',),
'ๆ':('song',),
'่ฐ':('pian',),
'้ก':('qian',),
'ๅฃ':('xia',),
'ๅดง':('song',),
'็บฆ':('yue',),
'ๆซ':('mo',),
'่ฎ':('rui',),
'็ท':('nan',),
'้ถ':('li',),
'้บ':('ye',),
'่':('quan',),
'่ต':('qiu',),
'้':('yi',),
'่':('song',),
'ๆฑ':('ru',),
'่ญ':('jia',),
'ๆฎ':('jie',),
'ๅท':('lang',),
'ๅฝป':('che',),
'ๆฒ':('wo',),
'่':('cui',),
'่':('li',),
'ๅจ':('li',),
'ๆฐ':('dao',),
'่':('gang',),
'้ฒฃ':('jian',),
'็จ':('kun',),
'ๆต':('xiao',),
'ๅผบ':('qiang', 'jiang'),
'ๅพ':('tuo',),
'่ง':('jian',),
'่ฟ':('xun',),
'่':('jie', 'ji'),
'้':('jin',),
'ๅ':('xiao',),
'็':('ao',),
'่':('zui',),
'ๅจ':('zhou',),
'ๆฐ':('qia',),
'ๆด':('qing',),
'ๅฝ':('ji',),
'ๆค':('guo',),
'ๆ':('huo',),
'่':('kai',),
'้ช':('yan',),
'้':('ge',),
'็ฎข':('yuan',),
'้ฉ':('juan',),
'็ช':('deng',),
'ๆธฏ':('gang',),
'่ฒ':('lian',),
'็จท':('ji',),
'ๅฒธ':('an',),
'็ฟ':('ying',),
'ๆฃ':('jian',),
'่':('ting',),
'้น':('peng',),
'ๅ':('dui',),
'็':('kui',),
'ๆต':('zhe',),
'ไฟ':('li',),
'่':('mu',),
'้จ':('quan',),
'ๆทฎ':('huai',),
'่ฑ':('jian',),
'ๆฒ':('she', 'die'),
'ๅฉท':('ting',),
'ๅป':('lou', 'lv'),
'ๆ':('duo',),
'่':('qi',),
'่':('tang',),
'่':('ming',),
'ๅบฅ':('xiu',),
'็ผค':('bin',),
'ๅฉ':('mie',),
'็จ':('yong',),
'ๆฑ':('fei',),
'่ด':('yi',),
'็ฒน':('cui',),
'ๅพ':('wu',),
'่ฃ':('cai',),
'่':('rong',),
'ๅง':('shi',),
'ๅฟ':('chan',),
'้':('zheng',),
'ๅ':('sang',),
'็':('die',),
'้':('diao',),
'ๆฏ':('pi',),
'ๅฅ ':('dian',),
'ๅฝค':('tong',),
'็ง':('yao',),
'่งซ':('su',),
'้ช':('pei',),
'ๆฐ':('wei',),
'ๆธ':('qing',),
'่พ':('zhe',),
'็จ':('shao',),
'็':('pu',),
'้':('hu',),
'็':('jia',),
'้':('man', 'wan'),
'่ฐ':('an',),
'ๆ':('zhuang',),
'้ธฅ':('ou',),
'ๆผฏ':('luo', 'ta'),
'้พ':('jia',),
'่ต':('zang',),
'ไน':('jiu',),
'่':('xing',),
'็ฏ':('hou',),
'้':('jia',),
'่':('cai',),
'ๅฆ':('jiu',),
'้จ':('men',),
'ไฝฏ':('yang',),
'ๆณฎ':('pan',),
'่ฑ':('ao',),
'ๅป':('yang',),
'็ปบ':('liu',),
'ๆบ':('kui', 'hui'),
'่ค':('gua',),
'่':('mao',),
'้ผ':('ding',),
'็':('hao',),
'ๆ':('lian',),
'ๅฎก':('shen',),
'็จ ':('chou',),
'ๅฉ':('e',),
'ๆฆญ':('xie',),
'่ฐ':('jian',),
'็พน':('geng',),
'้ธธ':('er',),
'้ผ':('bi',),
'ๅพ':('ji',),
'่ฏ':('zheng',),
'ไป':('ding',),
'ๆฅ':('cha', 'zha'),
'ๆ':('kun',),
'ไน':('cheng', 'sheng'),
'ๆ':('zhao',),
'่ต':('zhuan',),
'็ปฃ':('xiu',),
'็ง':('shao',),
'ๅจ':('qi',),
'้ช':('qiang',),
'ๆฐ':('rao',),
'่ณ':('si',),
'ๅฝ':('han',),
'็ผ':('zhuo',),
'้ฟ':('keng',),
'ๆ':('you',),
'้ข':('he',),
'ๅ':('mao',),
'้':('men',),
'ๆฎ':('shi', 'zhi'),
'ๆ':('cong', 'zong'),
'ๆฐฏ':('lv',),
'็ ท':('shen',),
'ๅบธ':('yong',),
'็ป':('yao',),
'ๆณ':('xie',),
'ๅ':('bei',),
'็':('hu',),
'ๅ':('tao',),
'ๆ':('ni',),
'่':('sheng',),
'ๅฆ':('ling',),
'้จ':('yu',),
'ๅช':('qin',),
'ๆฟฎ':('pu',),
'้ฝ':('yan',),
'็พ':('fan',),
'่':('yu',),
'ๅ':('fu',),
'้':('di',),
'ๅ':('wu',),
'่':('ting',),
'็ฌ ':('li',),
'็จ':('zha',),
'ๆขญ':('suo',),
'ๆฑ':('bao',),
'ๆต':('nian',),
'ๅจถ':('qu',),
'็บน':('wen',),
'ๅฐบ':('chi',),
'็ฝ':('chi',),
'ไฟ':('cu',),
'ๆก':('gui',),
'่':('ling',),
'ๆ':('tan',),
'้':('you',),
'ไฝ':('she',),
'่ฑ':('tun',),
'่':('yu',),
'็ง':('sui',),
'ๅจ':('tuo',),
'่ฏซ':('jie',),
'่ทฏ':('lu',),
'ๆด':('zheng',),
'ๆฐ':('chang',),
'่ถ':('ju', 'qie'),
'่':('yuan',),
'็ ':('kan',),
'่ฒ':('yo', 'yu'),
'้ชถ':('di',),
'้ฒบ':('shi',),
'ๅผ':('xian',),
'้พ':('nan',),
'ๆท':('zi',),
'ๅก':('cheng',),
'ๅ':('jian',),
'็':('fa',),
'้':('jing',),
'ๅ':('ou',),
'็':('zhi',),
'่ฟ':('hai', 'huan'),
'ๆ':('jia',),
'ๅช':('ne', 'na'),
'่ต':('kui',),
'้ฝ':('ju',),
'็พ':('dun',),
'ๆข':('ting',),
'ๆ':('xia',),
'ๅธ':('xi',),
'ๅ':('tu',),
'่':('shan',),
'ๅฎฅ':('you',),
'้ง':('sui',),
'่ธฌ':('zhi',),
'่ด':('ze',),
'็ฆน':('yu',),
'็ฝ':('gu',),
'้ผ':('zuo',),
'่ฏ':('zu',),
'ๅณ':('xun',),
'ๆป':('bi',),
'่ต':('lai',),
'ๆ':('ni',),
'ๅฑ ':('tu',),
'้ฆ':('dun',),
'้ช':('mou',),
'ไฟญ':('jian',),
'่ณ':('ge',),
'ๅงน':('cha',),
'ๅฝ':('sao',),
'ๆด':('jie',),
'ๆ':('yu',),
'็ผ':('duo',),
'็':('zeng',),
'้':('shan',),
'ๅ':('lei',),
'็':('mi',),
'่ค':('bei',),
'ๆฎ':('dan',),
'ๅง':('mi',),
'ไบฌ':('jing',),
'ๆจฏ':('qiang',),
'ๅผ':('jiao', 'jue'),
'็ฟ':('pan',),
'็ป':('chu',),
'้':('dao',),
'ๅ':('dan', 'chan', 'shan'),
'่ฃ':('qiu',),
'ๅฆ':('suo',),
'้จ':('bu',),
'ๅช':('zhi',),
'่ฝญ':('e',),
'ๅป':('shen',),
'ๅฟ':('jiao', 'chao'),
'ๆ':('pi',),
'่':('bei', 'bi'),
'ๅ':('tang',),
'้':('qu',),
'่พ':('zhan',),
'่':('xue',),
'็ด ':('su',),
'็ฌค':('tiao',),
'ๅฉ':('nuo',),
'ๆต':('di',),
'่ด':('hui',),
'็ขน':('xuan',),
'ๅพ':('qing',),
'ๆ':('bai',),
'้ป':('shu',),
'็ก':('xing',),
'้':('lao',),
'็งฃ':('mo',),
'ๅญค':('gu',),
'็ง':('shen',),
'ๅจ':('quan',),
'่ทซ':('qiong',),
'่ฏฏ':('wu',),
'ๆฐ':('shu', 'shuo'),
'่ณ':('zhi',),
'ๅฃน':('yi',),
'็ฅธ':('huo',),
'้ป':('wen',),
'ๅฝ':('guo',),
'่':('ge',),
'้พ':('wo',),
'ไผ':('zhong',),
'็บข':('hong',),
'ๆดซ':('xu',),
'ไพฌ':('nong',),
'้ฒถ':('nian',),
'็ป':('hua',),
'้ชบ':('hou',),
'็ฟ':('yuan',),
'้พ':('zhu',),
'ๆท':('dian',),
'่':('ying',),
'ๆ':('xie',),
'็ฟ':('yi',),
'้ญ':('wei',),
'ๅฆ':('o', 'e'),
'่นญ':('ceng',),
'ๆฃฎ':('sen',),
'่ฑ':('cong',),
'ๆฒ':('qu',),
'ๅฝท':('pang',),
'้กน':('xiang',),
'ๅป':('chi',),
'็ซบ':('zhu',),
'ๅฟ':('qing',),
'ไธ':('yi',),
'ๆ':('pie',),
'่':('ji',),
'้':('lang',),
'ๅ':('jue',),
'ๆจ':('tang',),
'ๆ':('sou',),
'่':('wo',),
'ๅพก':('yu',),
'้ชฃ':('zhan',),
'ๅฆฅ':('tuo',),
'้ง':('jun',),
'ๅฉ':('jiu',),
'็จ':('cuan',),
'่ฐฌ':('miu',),
'ๅคบ':('duo',),
'็ฝ':('ju',),
'ๅพ':('bian',),
'่ฟ':('qian',),
'่ง':('mi',),
'ๆ':('gan',),
'ๆ':('qian', 'qie'),
'ๆณ':('si',),
'ๆ':('xun',),
'ๆ':('wei',),
'่':('qiao',),
'็ซฃ':('jun',),
'้ช':('lao',),
'ๆฐ':('xi',),
'ๅฏน':('dui',),
'็ฉธ':('xi',),
'ๅฝ':('sou',),
'ๆค':('yi',),
'่ข':('ao',),
'ๆ':('an',),
'่':('mi',),
'้':('yin',),
'็':('huan',),
'ๅจฃ':('di',),
'ๅง':('ba',),
'ๆธซ':('xie',),
'่ฎฎ':('yi',),
'ๆณ':('chuo',),
'ๅชธ':('chi',),
'ๆ':('nian',),
'้น':('miao',),
'็ณ':('zan',),
'ๅ':('keng',),
'็':('yan',),
'ๅ':('si',),
'ๆญ':('xi', 'she'),
'ๆ':('bi',),
'็ฝก':('gang',),
'้ป ':('xia',),
'ๅทข':('chao',),
'็ฅฅ':('xiang',),
'่ตญ':('zhe',),
'ๅป':('sai',),
'ๅฟ':('chang',),
'ๆฎ':('yang',),
'่ฐ':('shen',),
'ๆ':('wen',),
'็ฆ':('fu',),
'็ผ ':('chan',),
'็คค':('ca',),
'ๅฉ':('zhu',),
'ๆฒญ':('shu',),
'่ดฌ':('bian',),
'้ผ':('sou',),
'ๆฑ':('qiu',),
'่ฃ':('zhuang',),
'้ณ':('qi',),
'็ง':('sha',),
'่ฟซ':('po', 'pai'),
'่งฏ':('zhi',),
'ๆฐ':('jie',),
'ๅฝ':('geng',),
'ๆ':('zha', 'shan'),
'ๅ':('zhou',),
'่':('luo',),
'้ธก':('ji',),
'็ฒข':('ci', 'zi'),
'ๅง':('ji',),
'ๆผซ':('man',),
'่ฒ':('pu',),
'ๅฎธ':('chen',),
'ๆฟ':('ji',),
'ๅ':('xie',),
'็':('fan',),
'ไป':('ta',),
'ๆฉ':('cheng',),
'่ฏ':('jie',),
'่':('li',),
'็ฉ':('wu',),
'่ฑ':('shi',),
'็ปถ':('shou',),
'้ฉน':('ju',),
'ๅป':('di', 'chi'),
'็ฃบ':('huang',),
'็':('li',),
'ๆฒ':('da', 'ta'),
'ไธ':('qie',),
'ๅ':('hui',),
'้ข':('zhuan',),
'ๅฎ':('bao',),
'้บ':('lin',),
'ๆ ':('dai',),
'ๆฐจ':('an',),
'ๅฎ':('shou',),
'ๅฌฒ':('niao',),
'ๆน':('fang',),
'่ธ':('yu',),
'็ฅ':('xian',),
'่ง':('qu',),
'่ฟ':('wu',),
'็':('gou',),
'ๅ':('yuan', 'yun'),
'็ป':('tong',),
'่ฅฆ':('ru',),
'้ซ':('pi',),
'ๅญ':('ba',),
'้ฏ':('wen',),
'ๅฟต':('nian',),
'่ท':('gan',),
'ๆธ':('mo',),
'ๆฉผ':('yuan',),
'้':('yao',),
'ๅ':('chang',),
'ๅฆ':('zhuang',),
'้ฒ':('lu',),
'ๅพ':('huai',),
'ๆผ':('cao',),
'่บ':('chan',),
'ๅ':('lie',),
'้':('cuo',),
'ๆข':('tan',),
'่ฅ':('xi',),
'่ดฉ':('fan',),
'ๅฌ':('qu',),
'ๆท':('hu',),
'่บ':('luo',),
'่ถพ':('zhi',),
'ๅ':('yun',),
'็':('hu',),
'็ณ':('xu',),
'่':('long',),
'ๆ':('ban',),
'่':('lu',),
'ไฝ':('he',),
'ๆป':('tao',),
'็':('liu',),
'้':('ye',),
'ๅ':('guo',),
'็ญ':('zheng',),
'ๅท':('zhou',),
'ๆฅ':('lai',),
'่ค':('ha', 'ge'),
'ๆตฉ':('hao',),
'ไฟช':('li',),
'้ฅฑ':('bao',),
'่น':('weng',),
'็':('tuan',),
'ๅ':('yu',),
'ๆ':('bie',),
'่':('wei',),
'ๆถ':('juan',),
'ๅ':('long',),
'็':('gan',),
'้พ':('jun', 'gui', 'qiu'),
'ๆ ':('yun',),
'่ฎง':('hong',),
'็ญ':('quan',),
'ๅฎ':('shun',),
'็ขฑ':('jian',),
'ๅจฒ':('wa',),
'็บต':('zong',),
'่ธ':('dou',),
'้':('yu',),
'้':('ya',),
'ๅฏ':('kou',),
'่':('cang',),
'ๅญ':('zi',),
'็ฟ':('di', 'zhai'),
'็ฌ':('xie',),
'็ฝด':('pi',),
'ๆญผ':('jian',),
'ๅ':('jue',),
'็':('dan',),
'ๅบ':('chuang',),
'ๆ':('shu',),
'ๆ ':('shu',),
'่พ':('pei',),
'ๅ':('bei',),
'็':('dun',),
'ๅผ':('di',),
'็ฒ':('xi',),
'ๆข':('qiang',),
'ๆขฆ':('meng',),
'่ฐฉ':('man',),
'ๆบช':('xi',),
'็ฏ':('mi',),
'้ฎ':('niu',),
'่บ':('ran',),
'้ญ':('yan',),
'ๅฑ':('ti',),
'่':('ye',),
'ๆง':('huai',),
'่ต':('geng',),
'ๅ':('xi',),
'็':('nao',),
'ๅ':('qiu',),
'ๆฑฉ':('gu',),
'่ทจ':('kua',),
'ๅซ':('ba',),
'ไปช':('yi',),
'ๅฏ':('pi',),
'็ฎ':('weng',),
'ๆถ':('huang',),
'ไฝฟ':('shi',),
'ๆณพ':('jing',),
'ๅ':('dao',),
'้':('e',),
'ๅ':('ge',),
'็ข':('ding',),
'ๅธ':('shi',),
'่ค':('bao',),
'้':('han',),
'็':('lou',),
'้ช':('wu',),
'็ ':('feng',),
'้ฒ':('xun',),
'่ฃ':('gong',),
'ไพฉ':('kuai',),
'ๅฎ':('pi',),
'็ฎฑ':('xiang',),
'่ธ':('zhan',),
'่ ผ':('qu',),
'ๆ':('li',),
'่ฏ':('yi',),
'ไป':('cang',),
'ๅ':('jue',),
'็ณ':('zao',),
'่ญฆ':('jing',),
'ๅญ':('ping',),
'็ฌ':('biao',),
'้ฏ':('se',),
'็ฉฐ':('rang',),
'็ฑด':('di',),
'็':('li',),
'ๅ':('jiong',),
'็ผ':('ji', 'qi'),
'้บ':('zhu',),
'ๆ':('min',),
'่':('ji',),
'่ฒ':('pi',),
'็':('jiu',),
'็':('jiu',),
'็พ':('xiu',),
'ๆถช':('fu',),
'ๅฌ':('cui',),
'่ถ':('xian',),
'่พพ':('da',),
'ๅ':('shi',),
'็ป':('zhong',),
'่':('meng',),
'ๆณ':('gan',),
'ๅ':('qu',),
'็ฅ':('zhu',),
'ๆก':('jian',),
'่ ':('min',),
'็ฎ':('ai',),
'ๅนณ':('ping',),
'ๆถ':('ye',),
'ๆบ':('wang',),
'ๅ':('yun',),
'้':('shi',),
'ๅผ':('yi',),
'ๆ ':('nao',),
'ไบฉ':('mu',),
'็ญ':('tan',),
'ๆน':('xian',),
'ไธพ':('ju',),
'ๅ':('ai',),
'้':('qian', 'yan'),
'็ฉ':('mu',),
'้ป':('hong',),
'่':('gua',),
'้น':('yao',),
'่ฑช':('hao',),
'ๅญ':('jian',),
'้ฏ':('chuang',),
'ๅซฑ':('qiang',),
'่ฏป':('du',),
'ๆฅผ':('lou',),
'็':('lang',),
'้พ':('yin',),
'ๆ':('zuo',),
'ๆฐ':('pie',),
'่ถ':('lie',),
'้':('que',),
'่ก':('chuan',),
'่ฅ':('ying',),
'่ธฉ':('cai',),
'็ซ':('mao',),
'ๆฒช':('hu',),
'ๅฌ':('dong',),
'็ฏ':('ning',),
'ๅฎฐ':('zai',),
'็ผณ':('huan',),
'่บ':('ji',),
'ๅ':('duo',),
'็':('po',),
'็':('zha',),
'ๅ':('xu',),
'ๅ':('duo',),
'็ก':('xiao',),
'่ ':('bo',),
'่ค':('teng',),
'่ฟจ':('dai',),
'ๅซ':('wei',),
'้ญ':('zao',),
'็ฎ':('mu',),
'้ฉฑ':('qu',),
'ๅฝณ':('chi',),
'็ฃฒ':('qu',),
'ๆถ':('za', 'zan'),
'่น':('hong', 'jiang'),
'่กฝ':('ren',),
'่':('sa',),
'ๆข':('zi',),
'้':('wei', 'kui'),
'ๅ':('xie',),
'้ฒ':('jiao',),
'้ช':('shan',),
'ๆ ':('shuo',),
'ๆค':('gai',),
'่ธ':('song',),
'ๆ':('shen',),
'่ท':('pao',),
'่ฏ':('shi',),
'็':('dao',),
'้':('biao',),
'็ซ':('jing',),
'่ตฆ':('she',),
'้ซ':('qiao', 'tiao', 'yao', 'diao'),
'ๅญ':('dai',),
'ๅทฑ':('ji',),
'็ฉด':('xue',),
'ๆธ':('tong',),
'ๅ':('chui',),
'้ข':('jing', 'geng'),
'ๆ':('fu',),
'่':('beng', 'bang'),
'็':('jie',),
'้':('bi',),
'ๅจ':('juan',),
'่ก':('cai',),
'ๆข':('shu',),
'ไธง':('sang',),
'ๆถฆ':('run',),
'ๆฎช':('yi',),
'้ฎ':('jiao',),
'่บ':('ying',),
'ๅ':('zhao', 'zhou'),
'ๅ':('sa',),
'็':('di',),
'้น':('xian',),
'ๅฅ':('feng',),
'่':('shou',),
'ๆณ':('le',),
'ๅ':('sou',),
'้ป':('chu',),
'่ ':('li',),
'่ค':('pie',),
'่ฃจ':('pi', 'bi'),
'ๅฏ':('fu',),
'็ฎ':('xie',),
'ๆถ':('shi',),
'่น':('fu',),
'่ฝฝ':('zai',),
'้':('he',),
'ๆ':('zhai',),
'ๆ':('tao',),
'็':('dao', 'tao'),
'็ผ':('zhen',),
'่ฃ':('zhi',),
'ๆค':('ji',),
'ๅฎ':('luo',),
'็ฒฑ':('liang',),
'่ธ':('lian',),
'ๆบฝ':('ru',),
'้':('tie',),
'็':('chu',),
'่ข':('guo',),
'้ซ':('yan',),
'ๅญ':('yuan',),
'็ฌ':('cai',),
'่ทป':('ji',),
'ไปฝ':('fen',),
'ๆฝผ':('tong',),
'่ฏฟ':('wei',),
'็ ':('hua',),
'้ฆ':('kui',),
'ๆฐ':('min',),
'ไบ':('hu',),
'ๅด':('jue',),
'ไผง':('cang',),
'ๆฒฆ':('lun',),
'็ซ':('fu',),
'้ฎ':('you',),
'ๅฎด':('yan',),
'ๆดป':('huo',),
'้':('ling',),
'่':('ren',),
'ๆฏ':('bi',),
'ๆกฉ':('zhuang',),
'ๅฏ':('mao',),
'ๅฅณ':('nv',),
'็ปฒ':('gun',),
'้ฉต':('zang',),
'ๅ':('di',),
'้':('li',),
'ๅจ':('luan',),
'่':('jing',),
'ไธ':('gai',),
'้':('chai',),
'ๅถ':('deng',),
'้ข':('man',),
'ๆ ':('ying',),
'ๆจจ':('xi',),
'ๅฎ':('kui',),
'่ธ':('ge',),
'ๅ':('bo',),
'ๆ':('shao',),
'่ฟ':('jin',),
'ๆต':('hu', 'xu'),
'้':('jing',),
'็ป':('jiang',),
'้':('fen',),
'ๆฃ':('jian',),
'่ฝฆ':('ju', 'che'),
'้ฏ':('tan',),
'ๅฟฑ':('chen',),
'็':('sheng', 'xing'),
'ๅ':('wei',),
'้ช':('pian',),
'ๅฆ':('ren',),
'่':('lou',),
'่บ':('lie',),
'ๅ':('ma',),
'ๅธ':('bo',),
'่ก':('la',),
'็ซ':('tan',),
'ๅฌ':('yao',),
'็จณ':('wen',),
'่บ':('ying',),
'่ฎพ':('she',),
'ๅ':('chong',),
'ไฝ':('you',),
'้ณ':('gui',),
'ๅฏ':('mo',),
'ๆก':('tiao',),
'่ค':('yin',),
'ไฟฆ':('chou',),
'ๅซ':('yi',),
'้ญ':('lei',),
'้ฅต':('er',),
'ๆ':('fang',),
'ไผ':('fa',),
'ๆฎ':('lian',),
'่ฐ':('ye',),
'้พ':('kan',),
'ๆค':('fen',),
'้ฌ':('tan',),
'ๅฎ':('gua',),
'็บฑ':('sha',),
'ๆน':('mo', 'ma'),
'่ธ':('rong',),
'ๆฒฝ':('gu',),
'่ดผ':('zei',),
'้':('yan',),
'้':('zhi',),
'่':('shi',),
'ๆ':('za', 'zha'),
'่ฃ':('yu',),
'็ง':('zu',),
'ๆฃ':('chuai',),
'่นฆ':('beng',),
'้ซ':('yun',),
'ๅญ':('ku',),
'ๆตผ':('mei',),
'็':('cui',),
'ๅ':('za',),
'ๅบ':('qing',),
'ๆธ':('mian', 'sheng'),
'่พ':('fu',),
'็':('wan',),
'ๅผ':('chi',),
'ๅค':('gou',),
'่ฅ':('tang',),
'็ฏ':('zhuo',),
'้ฎ':('luan',),
'็ฌณ':('jia',),
'้ฒฒ':('kun',),
'่ถ':('han',),
'่บ':('fei',),
'ๆคฟ':('chun',),
'ๅฉ':('wan',),
'ๆ':('jing',),
'ๆง':('gao',),
'็':('ya',),
'ๅ':('liao',),
'ๆก':('bu',),
'่ ':('pan',),
'่ฏจ':('hun',),
'ๅซ':('dian',),
'็ฎ':('shi',),
'้นฑ':('hu',),
'ๅญณ':('zi',),
'้กต':('ye',),
'่น':('cao',),
'ไฝป':('tiao',),
'ๆบ':('xing',),
'ๅ ':('jue', 'ku'),
'ๅ':('he', 'ge'),
'ๆบ':('tang',),
'้ช':('pian',),
'ๅถ':('lin',),
'้':('qiong',),
'ๅ':('cuo',),
'ไพฅ':('jiao', 'yao'),
'ๆธค':('bo',),
'็ฎญ':('jian',),
'ๅดฎ':('gu',),
'็ต':('lin',),
'้ด':('yin',),
'ไผบ':('si', 'ci'),
'ๅ':('yong',),
'่ฏ':('ci',),
'ๆต':('ji',),
'็':('zhu',),
'ๅ':('wan',),
'็':('meng',),
'้':('zu',),
'ๆณฃ':('qi',),
'ๆง':('ning',),
'้ปฏ':('an',),
'็ฐ':('hui',),
'้ณ':('chong',),
'ๆผ':('e',),
'็ค':('jiao',),
'้ข':('qi',),
'็ผ':('mian',),
'้':('zhen',),
'ๆ':('zhen',),
'็พ':('ling',),
'ๅ':('du',),
'่คก':('da',),
'ๆช':('qiang',),
'็ณ':('lin',),
'ๆป':('zong',),
'ๅฅ':('lian',),
'็ป':('zu',),
'ๅ':('lin',),
'้ณ':('min',),
'ๅกซ':('tian',),
'้ฑ':('jiang',),
'ๅณ':('wei',),
'็ฒ':('hun',),
'่ฝ':('sui',),
'ๅผ':('long', 'nong'),
'็':('shuai', 'lv'),
'้':('ni',),
'้':('chang',),
'่ฐ':('huang',),
'ๆ':('lan',),
'่':('shu',),
'ๅฒ':('ao',),
'้':('quan',),
'ๅ':('quan',),
'็':('cai',),
'ๆค ':('qian',),
'่ถฃ':('qu',),
'ไบฅ':('hai',),
'ๆผค':('lan',),
'็ชญ':('ju',),
'็ฑ':('tai',),
'ๆฒน':('you',),
'่ดธ':('mao',),
'ไธบ':('wei',),
'ๆฝ':('chou',),
'่ผ':('tong',),
'้':('xuan',),
'ไฟ':('qiao',),
'ๆ':('pa', 'ba'),
'ๅต':('rong',),
'้น':('ci',),
'ๅ':('bo', 'bu'),
'ไฝค':('wa',),
'้ณ':('hong',),
'ๅต':('lun',),
'่ฏท':('qing',),
'ๆตธ':('jin',),
'่ป':('zhen',),
'็ ':('ma',),
'้ฆ':('yu',),
'้พ':('ling',),
'็':('liu',),
'ๆ':('xin',),
'ๅฌ':('shan',),
'ๅ':('tu',),
'็':('pu',),
'่ ก':('li',),
'ๆฆ':('yue',),
'็ผฏ':('zeng',),
'้ฒฎ':('ling',),
'ๅฐ':('bing',),
'็ณ':('shen',),
'ๅด':('nao',),
'ๆฌท':('xi',),
'่บบ':('tang',),
'็ง':('xiu',),
'ๅน':('fu',),
'็':('yong',),
'ๆ':('rou',),
'็':('pin',),
'ๅ':('mou',),
'ๆกก':('rao',),
'่ฟค':('yi',),
'ๆฉ':('mo', 'ma'),
'้ฉญ':('yu',),
'็ฒ':('sun',),
'้ต':('ling',),
'่ฑน':('bao',),
'่ฝ':('jiao',),
'ๅฐ':('she',),
'ๅ':('cao',),
'้':('lve',),
'ๆฒ':('qi',),
'่':('mi',),
'้ข':('ke',),
'ๅพ':('xi',),
'้':('zhong',),
'ๆฐค':('yin',),
'ๆจ':('zuo',),
'็ต':('ci',),
'ๆพน':('dan', 'tan'),
'่':('ou',),
'ๅ':('ting',),
'้':('fei',),
'ๆง':('ju',),
'่ฆ':('luo',),
'้ซซ':('tiao',),
'็กฌ':('ying',),
'ๅฑ':('chi',),
'้ณ':('li',),
'ๅต':('tong',),
'่ป':('fu',),
'ๆผ':('nao',),
'่ฟ':('xu',),
'้ช':('tai', 'dai'),
'ๅฆ':('ru',),
'ๅ':('li',),
'็':('mei',),
'้':('wei',),
'ๅ':('han',),
'ๆค':('zhi',),
'ๆ':('tiao',),
'้ผ':('pi',),
'ๅ':('chuang',),
'ๅ':('yin',),
'่ดฅ':('bai',),
'่ฉ':('tiao',),
'็ณ':('chou',),
'ๅด':('hui',),
'ๆ ท':('yang',),
'่ฎถ':('ya',),
'่ถบ':('fu',),
'ๆฟ':('ge',),
'้น':('juan',),
'้':('yu',),
'็':('ying',),
'ๆ':('guai',),
'้ป':('mo',),
'้':('gao',),
'ๆฉ':('ma',),
'้ฅญ':('fan',),
'ๅฑฏ':('tun',),
'็ฟฎ':('he',),
'่ฝน':('li',),
'่ฝ':('wa',),
'ๅ':('quan', 'juan'),
'ๆฎ':('can',),
'่':('lei',),
'้ฆ':('kui',),
'ๅบ':('miao',),
'็ผ':('yuan',),
'ๆฌ ':('qian',),
'่พฃ':('la',),
'ๆจ':('mu',),
'ๅธฎ':('bang',),
'้ด':('jie', 'kai'),
'้ซ':('ke',),
'ๅฟ':('xin',),
'้ณ':('qiu',),
'ๅ':('song',),
'้':('zhi',),
'ๆน':('mian',),
'่':('yuan',),
'ๆ':('heng',),
'ๅฝ':('zhi',),
'ๅ':('dou',),
'่กข':('qu',),
'้ณ':('yin',),
'็ด':('chou',),
'ๆฝธ':('shan',),
'่ฟ':('huo',),
'ไบ':('yu',),
'ๆ':('wo',),
'็ช':('jiao',),
'้':('shi',),
'ๆขข':('shao',),
'่ฐฅ':('shi',),
'่ฉ':('fan',),
'็ฌซ':('zi',),
'็ดฏ':('lei',),
'ๆฟ':('zheng',),
'ๅฑ':('ju',),
'ๆง':('chui',),
'่ต':('shang',),
'ๆ':('di', 'ti'),
'่':('yu',),
'ๅซ':('piao',),
'้':('fu',),
'ๅ':('hui',),
'ๆฉก':('xiang',),
'่ฏ ':('quan',),
'่ทค':('jiao',),
'ๅตฏ':('cuo',),
'็ปฎ':('qi',),
'ๅณ':('ao',),
'ๆณบ':('luo',),
'ๆพ':('zeng', 'ceng'),
'ๅจ':('lou',),
'็':('huang',),
'้':('gao',),
'ไธ':('ji',),
'่':('yan',),
'ๅฆ':('miao',),
'็ ':('dun',),
'้':('tai',),
'่ง':('jie',),
'ๆจ':('yuan',),
'้ธฌ':('lu',),
'ๅคฎ':('yang',),
'็ฑ':('pao',),
'่ผ':('mi',),
'็ฝ':('ying',),
'้':('you',),
'็ป':('hang',),
'ๅก':('tang',),
'ๅ':('lu',),
'้':('tai',),
'ๆฃฃ':('di',),
'่ช':('sun',),
'ๅฟญ':('bian',),
'้ซฏ':('ran',),
'็ฐ':('gui',),
'ๅต':('po',),
'ๅพ':('cu',),
'้ช':('jiao',),
'็':('yan',),
'้':('miao',),
'ๅ':('jun',),
'่ดก':('gong',),
'ๆช':('cuo',),
'้ฒ':('gou',),
'่ฎบ':('lun',),
'ๆฟ':('fang',),
'้':('long',),
'ๅ':('guang',),
'็':('lie',),
'ไฝ':('wei',),
'ๆ':('huan',),
'ๆ':('ba',),
'้ฃ':('piao',),
'ๆญฅ':('bu',),
'่จ':('dong',),
'้ฑ':('yi',),
'่ฝ':('chun',),
'ๅข':('qiang',),
'ๆฌค':('yu',),
'็บญ':('yun',),
'้ผฌ':('you',),
'้ฐ':('meng',),
'ๅฒ':('chao',),
'้ซ':('lou',),
'ๆก':('zhi',),
'่':('suo',),
'ๆ':('shai',),
'่':('tiao', 'shao'),
'ๅฅ':('zhuang',),
'็':('tong',),
'ๅ':('li',),
'่ฟท':('mi',),
'ๆฅธ':('qiu',),
'่ป':('zao',),
'ๆผ':('zhu',),
'ๅ':('pao',),
'ๆธ':('zi',),
'ๆ':('qiang',),
'ๅผ':('fu',),
'ๅ':('ma',),
'้':('fu', 'yi'),
'่ฐก':('su',),
'ๆบข':('yi',),
'่ฉ':('jiang',),
'็ดซ':('zi',),
'ๆป':('gong',),
'่พ':('shen',),
'ๅฑ':('pi',),
'่ต':('fu',),
'ไน':('zha',),
'่':('fei',),
'ๆฑก':('wu',),
'ๆฉฅ':('zhu',),
'่ฏค':('zheng',),
'ๆฉ':('en',),
'ๅตซ':('zi',),
'้นญ':('lu',),
'ๅณ':('guan',),
'็ฒ':('mang',),
'ๆณถ':('xue',),
'่ฝ':('sui',),
'็ช':('qie',),
'็':('xuan',),
'้':('ting',),
'ๅ':('yi',),
'ๆข':('gu',),
'ๆ':('zhua',),
'ๅฎ':('zhou',),
'ๅ':('he',),
'ๆธ ':('qu',),
'็ต':('shang',),
'ไผถ':('ling',),
'ๆฝ':('shi',),
'่ผ':('e',),
'ๅ':('wa',),
'ไป':('jie',),
'็':('xuan',),
'่':('zhang',),
'็ณ':('qiu',),
'็':('cheng', 'sheng'),
'ๅ':('wu',),
'็':('yan',),
'ไน ':('xi',),
'่ตข':('ying',),
'ๅต':('kan', 'qu'),
'็ผ':('zi',),
'ๅถ':('zhang',),
'็ค':('dun',),
'้ข':('yu',),
'ๅ':('lu',),
'่ฒ':('mao',),
'ๆ':('e',),
'ๅ':('ma',),
'็ฆ':('zhuo',),
'้':('de',),
'ๅ':('jie',),
'่คฅ':('ru',),
'ๅฒฌ':('jia',),
'่พถ':('chuo',),
'ๆฟ':('yi',),
'็ป':('gan',),
'ๅ':('hui',),
'็':('bei',),
'่น':('ta',),
'ๆณ':('bi', 'mi'),
'ๅฟ':('cun',),
'้ซ':('gao',),
'ๅง':('yao',),
'้':('tong',),
'่ฃค':('ku',),
'ๆฉ':('lie',),
'็ฏฎ':('lan',),
'้ต':('jiao',),
'ๅผ':('kai',),
'ๅค':('chu',),
'้':('lang',),
'ๅ':('xiong',),
'้':('xun',),
'ๅ':('si',),
'็':('tian',),
'ไบก':('wang',),
'ๆผ ':('mo',),
'่ฎฃ':('fu',),
'ๆคค':('luo',),
'ๆจ':('ai',),
'็ฑ':('zhou',),
'้ฐ':('xian',),
'ๅฒ':('yin',),
'ไธถ':('zhu',),
'่ผ':('ai',),
'่':('mou',),
'ๆ':('bing',),
'ๅญ':('cun',),
'็':('mao',),
'ๅ':('chuo',),
'ไฝ ':('ni',),
'่ฑข':('huan',),
'ๆง':('jiu',),
'่ช':('cong',),
'ๅณญ':('qiao',),
'ๅฑ':('cong',),
'ๅต':('jiang',),
'่ทท':('qiao',),
'้พ':('chen',),
'ๅฒ':('qi',),
'้ฆ':('hun',),
'ๅ':('hao',),
'ๆฐ':('qu',),
'ๆ':('pa',),
'็ข':('bei',),
'็ผซ':('sao',),
'้ฒ':('zheng',),
'ๆฟ':('yuan',),
'่พ':('fei', 'fu'),
'้ญ':('ba',),
'้':('zhen',),
'็':('jia',),
'ๆท':('tang',),
'ๆ':('mou',),
'ๆ':('lin',),
'ๅป':('liao',),
'ๆกฅ':('qiao',),
'ๆฉ':('kuo',),
'ๅณ':('ji',),
'้ต':('zun',),
'่ฉน':('zhan',),
'ๆฃบ':('guan',),
'ๆพ':('shi',),
'ๆ':('huai',),
'่':('xi',),
'ๅ':('ti',),
'้ฆ':('jin',),
'่':('bo',),
'ๆ':('liao',),
'่':('fei',),
'่ด':('zhen',),
'็ฅ':('pie',),
'ๅค':('ying',),
'่ซ':('zhun',),
'่ฏ':('qiu',),
'ๆฎ':('nang',),
'ๆฌฒ':('yu',),
'ไพต':('qin',),
'็ถ':('ran',),
'ๅน':('kui',),
'็บ':('tiao',),
'ๅฎฝ':('kuan',),
'็':('zhan',),
'ๆ':('zhai',),
'่ฏ':('hua',),
'ไป':('qian',),
'ๆญ':('jie',),
'้ถ':('mei',),
'็น':('bi',),
'ๅธ':('pei',),
'่':('cui',),
'ๆ':('dian',),
'่จ':('hong',),
'ๆฆ':('yu',),
'้':('bu',),
'ๅ':('pu',),
'้':('yan',),
'ๅผ':('yin',),
'ๆ':('qiang',),
'ๅข':('xiang',),
'้ฒจ':('sha',),
'ๅพช':('xun',),
'่ญ':('jiao',),
'ๆฌ':('tai',),
'ๆขฐ':('xie',),
'ๆบด':('xiu',),
'ๅท':('shua',),
'้น':('qiao',),
'่ง':('shi',),
'็':('jing',),
'็ฝ':('han',),
'ๆ':('you',),
'ๆท':('song',),
'็งฆ':('qin',),
'ๅตฉ':('song',),
'ๆซ':('dong',),
'ๆฏ':('jing',),
'่ฃฐ':('duo',),
'ๅถ':('xiong',),
'้ธ':('zhu',),
'ๅบ':('ji',),
'ๅทพ':('jin',),
'ๆ':('qiao',),
'่ฒ':('hao', 'he'),
'้ข':('pin',),
'็ผ':('di',),
'่':('chao',),
'ๆ':('zun',),
'้ธช':('gu',),
'่ฏ':('ao',),
'่ถณ':('zu',),
'ไบต':('xie',),
'ๆ':('zhan',),
'่ต':('zhen',),
'ไฟ':('si', 'qi'),
'้ก':('jun',),
'ๅฃ':('si',),
'ๆญ':('shi',),
'ๆฃฑ':('ling', 'leng'),
'ไฝด':('nai',),
'ๅธ':('xiao',),
'็ซฝ':('yu',),
'่ฌ':('jian',),
'ๆข':('bang',),
'ๅ':('chu',),
'้':('fei',),
'็':('ying',),
'ๅ ':('qian',),
'็ช':('zhi',),
'ๅธ':('pa',),
'ๅข':('mai',),
'่ญ':('bi',),
'ๆฌ':('ju',),
'่ธต':('zhong',),
'็ธ':('da', 'dan'),
'ๆ':('nen',),
'่ฃ':('dang',),
'ๆฑ':('han',),
'็':('du',),
'ๅ':('shuang',),
'้':('duo',),
'ๅง':('jie',),
'ๆณ':('ning',),
'้ฃ':('liao',),
'็ปช':('xu',),
'ๆฏ':('bei',),
'้ธ':('zha',),
'ๅบ':('gu',),
'่':('hao',),
'่พ':('hui',),
'้':('ti',),
'ๅ':('yong',),
'ๅบ':('ku',),
'่':('wei', 'yu'),
'ๅ ':('shan',),
'้ข':('yan',),
'็ฅ':('jie',),
'่ซ':('yan', 'yuan'),
'่ฏ':('kuai',),
'็บ':('ding',),
'่น':('dao',),
'ๅ':('kan',),
'็ป':('jie',),
'่ฎ':('fu',),
'็งฝ':('hui',),
'ๆฎ':('dai',),
'ไผ':('kang',),
'็':('xia',),
'่ข':('wa',),
'ๆฐ':('fu',),
'ๅข':('zhong',),
'้ค':('qian',),
'็ ง':('zhen',),
'ๅฎฆ':('huan',),
'ๆฌ':('xuan',),
'่ดต':('gui',),
'้น':('pi',),
'ๅฐฟ':('niao', 'sui'),
'่':('bei', 'bi'),
'ๆ':('gu',),
'่ฏ':('shi', 'zhi'),
'็ญ':('zhu',),
'ๆ':('yi',),
'ๆฟ':('bi',),
'ๅก':('ka', 'qia'),
'ๅฅ':('sha',),
'็ฏฆ':('bi',),
'ๅฝฉ':('cai',),
'ๆซ':('sao',),
'่ฌ':('nu',),
'ๅถ':('xie', 'ye'),
'ๅฟพ':('kai',),
'ๅ':('ya',),
'ๅ':('re', 'nuo'),
'้ช':('qi',),
'้ฒ':('er',),
'่ธ':('ju',),
'ๅ ':('jiang',),
'้ข':('feng',),
'ๅค':('huan',),
'็บฉ':('kuang',),
'่ซ':('mo',),
'่ฏ':('shu',),
'ๆ ฒ':('kao',),
'็ถ':('fu',),
'่':('xun',),
'ๆ':('ling',),
'้':('zhuo',),
'ๅ':('ye',),
'่':('wa',),
'็ ':('liao',),
'้ฅ':('diu',),
'้ปฉ':('du',),
'็ฝจ':('yan',),
'ๆญ':('can',),
'่ฎ':('ji',),
'่กฒ':('na',),
'ๆณต':('beng',),
'็น':('dun',),
'้นพ':('cuo',),
'่':('shen',),
'ไธ':('san',),
'ๅ':('bei',),
'้':('jian',),
'็':('jiao',),
'ๅจ':('suo',),
'้ผ':('gu',),
'ๅฐ':('ga',),
'ๆฌ':('jing',),
'่ฐต':('zhan',),
'ๆ':('bian', 'pian'),
'่':('shen',),
'ๆ':('shu',),
'ๅ':('ling',),
'้':('ju',),
'็ฉ':('se',),
'ๅฏ':('mei',),
'้ฃ':('sa',),
'ๆ':('tuo',),
'ๆป':('zhi',),
'ๅก':('yan', 'shan'),
'้ฅง':('xing', 'tang'),
'็ซฆ':('song',),
'่ฌ':('nie',),
'่ฏฐ':('gao',),
'็ญป':('gang',),
'ๅบ':('bu',),
'ๅปพ':('gong',),
'ๆ':('chan',),
'่':('li',),
'ๆ ':('zhan',),
'ๅ':('za', 'ze', 'zha'),
'้':('an',),
'็':('la',),
'ๅข':('mu',),
'็จ':('ren',),
'่':('yin',),
'ไธ':('dong',),
'็ก':('yang',),
'ๅ ':('fei',),
'ๅค':('pan',),
'่ฏ':('xin',),
'้ฒป':('zi',),
'ๅพฝ':('hui',),
'็คพ':('she',),
'่':('shao',),
'ๆป':('dian',),
'้':('mo',),
'ๅฝ':('gui',),
'่ฟ':('wei',),
'่ฎ':('sai',),
'ๆทฑ':('shen',),
'่ฝฒ':('ke',),
'ๆฏต':('san',),
'้ถ':('xiang',),
'็น':('xian',),
'ๅนผ':('you',),
'่':('kao',),
'ๆฎ':('shu',),
'่':('zhou',),
'ๆฐ':('fen',),
'่บ':('zuan', 'cuo'),
'ๆจ':('zhang',),
'้ ':('na',),
'็ฃ':('xie',),
'ๅข':('gou',),
'ๅฎช':('xian',),
'่ดฑ':('jian',),
'้น':('zou',),
'็ธ':('zha',),
'ๅฐป':('kao',),
'้ผฝ':('yan', 'qui'),
'ๆ':('tong',),
'่ท':('tai',),
'่ฏ':('zhen',),
'้':('ye',),
'ๅซ':('pin',),
'ๆ':('yi',),
'ไฝ':('gou',),
'ๅก':('fei',),
'้ฃ':('qian',),
'็ข':('shi',),
'้นง':('zhe',),
'็ฏช':('chi',),
'ๆฏ':('che',),
'้ธ':('dan',),
'็ฑป':('lei',),
'้ปผ':('fu',),
'็ฉฟ':('chuan',),
'ๆ':('pan',),
'ๅ':('die', 'zha'),
'้':('huang',),
'้ฒ':('gui', 'xie'),
'ๅฆ':('ji',),
'็ฌ':('bi',),
'ๅ ':('lao',),
'้ข':('wu',),
'ๆธฒ':('xuan',),
'็ถ':('pa',),
'็ พ':('li',),
'็':('jue',),
'ๅ':('wei',),
'้ญ':('mo',),
'่':('zuo',),
'ๆฝ':('qian',),
'้ก':('zha',),
'ๅฟง':('you',),
'็ฅจ':('piao',),
'ๆณฑ':('yang',),
'่นฒ':('dun',),
'็น':('peng',),
'ๅธ':('dian',),
'็ปฝ':('zhan',),
'ๅฝผ':('bi',),
'้กพ':('gu',),
'ๆ':('zan',),
'ๆฒ':('hang',),
'้':('feng',),
'็':('yan',),
'ๅฐ':('shao',),
'ๆ':('lang',),
'่':('biao',),
'ๆด':('luo',),
'่พ':('gu',),
'็ผง':('lei',),
'้ฆจ':('xin',),
'ๅชช':('ao',),
'่ฐฑ':('pu',),
'ๆฎด':('ou',),
'้ธฝ':('ge',),
'็พผ':('chan',),
'ๆ':('en',),
'ๆก':('an',),
'็':('ban',),
'้':('lang',),
'่':('huang',),
'่ต':('sai',),
'ๆ':('pan',),
'ไน':('jiu',),
'้ฃ':('han',),
'ๅฅ':('bo', 'bao'),
'ๆซ':('jiao',),
'ไปฒ':('zhong',),
'่ฏด':('shui', 'yue', 'tuo', 'shuo'),
'ๅบ':('shao',),
'ๆ':('yun',),
'ๆ':('ou',),
'้ฆ':('nang',),
'็ ':('yan',),
'่':('jiao', 'jue'),
'ๆถ':('lao',),
'้ข':('gu',),
'็ฎฉ':('luo',),
'่ฏ':('ken',),
'ๆดฒ':('zhou',),
'้ท':('po',),
'็ถ':('kuang',),
'้บป':('ma',),
'็ฌพ':('bian',),
'ไฝ':('dan',),
'่ฉ':('li',),
'ๅ':('hua',),
'ๆ':('tuo',),
'่ท':('ju',),
'ๆฑ':('si',),
'ๆญ':('xu',),
'่ฎ':('zhou',),
'ไนฐ':('mai',),
'้ถ':('ba',),
'่ฐ':('sui',),
'้':('xi',),
'้':('tou',),
'็':('ding',),
'ๅผ':('shi',),
'ๅค':('xi',),
'่':('pi', 'bi'),
'็ฐง':('huang',),
'้ชจ':('gu',),
'ๅฆช':('yu',),
'ๆฌ':('qiao',),
'็ธ':('ma',),
'ๅธป':('ze',),
'็ฒผ':('lin',),
'ๆญ':('qian',),
'่ง':('ji',),
'ๅ':('pai',),
'็ฝ':('wang',),
'่':('zhu',),
'ๅก':('po',),
'็ข':('li',),
'ๅฅ':('ru',),
'็ฟฆ':('jian',),
'ๅญฉ':('hai',),
'่ฌ':('peng',),
'ๆฏ':('xi',),
'่ฃด':('pei',),
'็ท':('ai',),
'ๅบ':('chu',),
'่':('yi',),
'ๆ':('zui',),
'่ช':('yu',),
'ๅ':('xing',),
'้':('tu',),
'ๅฎ':('mi',),
'่':('nie',),
'้ธฆ':('ya',),
'่ซ':('shi',),
'ๆฎ':('lu',),
'่ฎณ':('hui',),
'ๆฐฒ':('yun',),
'ๅฒฝ':('dong',),
'ๆท':('qi',),
'้':('juan',),
'้ฝ':('qi',),
'็ฏ':('lou',),
'้ฅ':('yong',),
'ๆต':('bang',),
'ๅฃ':('kou',),
'็ค':('mei',),
'ๅทง':('qiao',),
'่ฎ':('gen',),
'ไฝฐ':('bai',),
'ๆฃต':('ke',),
'้ถ':('tao',),
'ๅธ':('xie',),
'้ฉพ':('jia',),
'่':('fan',),
'ๆ':('liao',),
'ๆบ':('xu',),
'ๅ':('ming',),
'ๅธ':('tang',),
'้ฌ':('bin',),
'็ฒ':('li',),
'ๅ ':('duo',),
'่':('xin', 'shen'),
'ๆค':('du',),
'้ ':('ju',),
'็ดง':('jin',),
'ๅบฆ':('du', 'duo'),
'่ธฑ':('duo',),
'ๆฆด':('liu',),
'ๅท':('yo',),
'ๆ':('chao',),
'ๅ':('ai',),
'ๅฟ':('tan',),
'็ก':('shuo',),
'ๅง':('wei',),
'ๆ':('hu',),
'่ฅ':('jin',),
'็ข':('piao',),
'ๅฅ':('jian',),
'็ปฆ':('tao',),
'่ฟฐ':('shu',),
'่ฆ':('yao',),
'ๆ ':('zhi',),
'่พ':('fu',),
'ๅบ':('xu',),
'้':('xu',),
'ๅ ':('hou',),
'ๅจ':('dun',),
'่ขซ':('bei',),
'้ช':('yin',),
'่บฏ':('qu',),
'ๆดฎ':('tao',),
'ๆฒ':('suo',),
'้ชท':('ku',),
'้ป':('lin',),
'็ผบ':('que',),
'็พ':('zai',),
'่ก':('xue', 'xie'),
'่น':('ti',),
'ๆ':('mu',),
'็ฃ':('cuo',),
'็ป':('jing',),
'้':('xia',),
'ๅ':('zu',),
'้':('gai',),
'่':('ba',),
'้ฉ':('shuan',),
'้ฅถ':('rao',),
'่ฐ':('tiao', 'diao'),
'ๆฎ':('cu',),
'ๆจ':('chu',),
'่ฒ':('mo',),
'ๆ':('da',),
'่':('wu',),
'ๆ':('xing',),
'้ข ':('dian',),
'ๅฎข':('ke',),
'้จ':('wu',),
'ๆชฌ':('meng',),
'่ฑ':('tuo',),
'ๆด':('pu', 'bao'),
'ๅฟ':('hei',),
'่ฏ':('gu',),
'ไป':('ze',),
'่':('yi',),
'ๆ':('mi',),
'่':('mao',),
'ๅซ':('xian',),
'ๅ':('nang',),
'ๆกซ':('suo',),
'่ฐ':('yi',),
'่ด':('qi',),
'็ฉท':('qiong',),
'ๅพ':('sou',),
'่ข':('yuan',),
'่บ':('zhu',),
'่':('kui',),
'้':('yi',),
'็':('wu',),
'่ ':('huo',),
'ๆฒ':('sha',),
'็ขก':('zhou',),
'็บฅ':('he', 'ge'),
'้ฆ':('li',),
'็ฉ':('qu',),
'้ช':('ban',),
'ๆฐฎ':('dan',),
'่':('xun',),
'็ง':('qiu',),
'ๅฑ':('shi',),
'้':('gan',),
'็ฅ ':('ci',),
'้ปฅ':('qing',),
'้ฉ':('sha',),
'่ฒ':('shai', 'se'),
'้กถ':('ding',),
'่':('ba',),
'้':('qin',),
'็':('kao',),
'ไบ':('ya',),
'่':('you',),
'ๆ':('qi',),
'ไผฏ':('bai', 'bo'),
'่ฑ':('mo',),
'ๅฌท':('ma',),
'ๆก':('heng',),
'ไฟ':('e',),
'่':('dan',),
'่':('gu',),
'ๅฏ':('fu',),
'้ณ':('ta',),
'ๅ':('pu', 'bu'),
'ๆป':('gun',),
'ๆ':('sou',),
'็ฆ':('fan',),
'ๆฅซ':('ji',),
'่ฏฌ':('wu',),
'็ญท':('kuai',),
'ๅฃถ':('hu',),
'ๅปบ':('jian',),
'็ฟ':('rui',),
'่ถ':('chao',),
'้ฆ':('mo',),
'็ ':('qi',),
'ๅ':('ca', 'cha'),
'ไธ':('qiu',),
'ๆ':('dui',),
'่':('qi',),
'็ฎก':('guan',),
'ๅจ ':('shen',),
'ๅฐค':('you',),
'็ฉ':('zhu',),
'้ช':('huo',),
'ๅฝ':('lie',),
'้ฟ':('tian', 'dian'),
'็ซ':('li',),
'่ฟ':('zhe',),
'ๆน':('xiang',),
'ๅง':('seng',),
'้ฉ':('han',),
'ๆงญ':('qi',),
'่ฝฎ':('lun',),
'ๅนธ':('xing',),
'ๆฆ':('gai',),
'ๅค':('fu',),
'็พ':('mei',),
'ๆ ':('li',),
'ๆ':('jia',),
'่':('rong',),
'้ช ':('biao', 'piao'),
'้ฒค':('li',),
'ๅฆ':('sha', 'xia'),
'่ฑ':('zhu',),
'้ผน':('yan',),
'็บธ':('zhi',),
'ๅป':('ke',),
'็ผ':('lian',),
'่ง':('guan',),
'ๆต':('qian',),
'ๆ':('sha', 'shan'),
'ๅ':('meng',),
'ๅ':('bi',),
'่ก':('jie',),
'ๆฏ':('bi',),
'ไฝ':('yu',),
'ๆ':('zuo', 'zha'),
'้นฃ':('jian',),
'ๅฉ':('gan',),
'็ช':('huan',),
'ไฟฎ':('xiu',),
'็กท':('jian',),
'ไพ':('kan',),
'่ฒ':('xiu',),
'ๆด':('hui',),
'ๆ':('yue',),
'ๅฎ':('song',),
'้ข':('ying',),
'ๅ':('gai',),
'้':('nv',),
'ไผ':('you',),
'่ฐ':('yan',),
'ๆ':('kui',),
'็ฒฅ':('zhou',),
'ๅดค':('yao',),
'ๅจ':('zai',),
'่ฎซ':('qi',),
'่ณ':('tang',),
'ๅบน':('tuo',),
'็พ':('yin',),
'่ต':('zi',),
'่':('guo',),
'้':('bin',),
'ๆฝ':('pan',),
'่':('zhi',),
'้ซก':('kun',),
'็ญ ':('yun', 'jun'),
'้ฉ':('yu',),
'็จ':('ni',),
'ๆฑ':('gong',),
'้ฉถ':('shi',),
'็ฝ':('feng',),
'ๅผ':('jian',),
'ๅ ':('tu',),
'ๅ':('xing',),
'้':('ni',),
'็':('chao',),
'ๅ':('lv',),
'่พ':('lu',),
'ๆ':('luan',),
'่':('mo',),
'็ผฃ':('jian',),
'ๅช':('mi',),
'่ฑ':('cang',),
'ๆฐ':('bai',),
'ๅคท':('yi',),
'้ธน':('gua',),
'็พธ':('lei',),
'ๅป':('yi',),
'ๅฟ':('hu',),
'่ฃ':('lie',),
'่':('weng',),
'้ป':('li',),
'็':('xia',),
'ๅ':('chen',),
'ไน':('yi',),
'่':('rao',),
'ๅฑฅ':('lv',),
'ๅฉ':('e',),
'ๆตฏ':('wu',),
'่ฐ':('gu',),
'ๆณ':('yao',),
'็ฅท':('dao',),
'็ฟ':('ji',),
'ๅพ':('tu',),
'่ถ':('chen',),
'็':('suo',),
'ๆ':('zhi',),
'่':('cuo',),
'้ผข':('fen',),
'้ฆ':('jin',),
'ๅจ':('ju',),
'ๅฆน':('mei',),
'้ป':('zuan',),
'็ฌบ':('jian',),
'้น':('an',),
'ๅฅ':('kui',),
'ไป':('zhang',),
'ๆฑ':('han',),
'็ฝ':('xi',),
'ๅผ':('pi',),
'้พ':('shi',),
'่ ':('luo',),
'ๆ':('pou',),
'ๅ':('zuo',),
'็':('man',),
'ๅ':('bi',),
'ๆธ':('shen',),
'ๆ':('jian',),
'่':('ya',),
'ๆ':('ji',),
'้ฒ ':('geng',),
'็ ฃ':('tuo',),
'้ชค':('zhou',),
'ๆฐ':('zhuan',),
'่ต':('yin',),
'ๅธท':('wei',),
'ๅป':('wen',),
'ๅฟ':('gui',),
'ๆต':('liu',),
'่ฟ':('yu',),
'่':('ju',),
'ๅ':('kuang',),
'ๆท':('nao',),
'่':('ge',),
'่':('chong',),
'ๅญฅ':('nu',),
'้ง':('nie',),
'็ฆ':('wu',),
'่ฐ':('xi',),
'ๆณ':('ken',),
'่ด':('ju',),
'ๅฏบ':('si',),
'็ฟ':('gu',),
'่':('rou',),
'ๆ':('yu',),
'็ผ':('si',),
'ๅฎ':('hong',),
'่ฐ':('xuan',),
'ๆ':('ming',),
'้ธข':('yuan',),
'็ชฅ':('kui',),
'ไบญ':('ting',),
'่ฎฏ':('xun',),
'่ณ':('shan',),
'่ต':('zi',),
'่':('jing',),
'้':('hao', 'gao'),
'ไฟ':('su',),
'่ฃ':('qun',),
'ๆ':('du',),
'ๅงฃ':('jiao',),
'็จ':('wei',),
'ไฝฌ':('lao',),
'่กฎ':('gun',),
'ๅฅธ':('jian',),
'้ฉบ':('zou',),
'ๅผ':('che',),
'่':('zhe',),
'ๆ':('ba',),
'้ฌ':('jiu',),
'็ข':('sui',),
'ๅ':('xiang',),
'ๆ':('xie',),
'ๅฆ':('yi',),
'ๅช':('nu',),
'่ฑ':('xuan',),
'ๆฐ':('xin',),
'่ต':('duo',),
'ๆด':('guai', 'guo'),
'้ฝ':('yan', 'dian'),
'ๅฟ':('ni',),
'ๆฑ':('zhi',),
'่':('e',),
'ๅฟ':('ji',),
'้ฃ':('feng',),
'้':('luo',),
'ๅ':('shu',),
'ๆณ':('mao',),
'่':('hui',),
'ๅฑก':('lv',),
'้ฅฃ':('si', 'shi'),
'็ปข':('juan',),
'็ฆ':('wa',),
'่พ':('quan',),
'่':('li',),
'ๆ':('ge',),
'ๅบ':('gui',),
'็ฐ':('su',),
'้':('da',),
'็':('yu',),
'ๅ':('yi',),
'่ค':('chu', 'zhu'),
'็พก':('xian',),
'ๅ ค':('di',),
'็ฉ':('wan',),
'ๅจ':('pao', 'bao'),
'่บซ':('shen',),
'่ณ':('fang',),
'้ฒท':('diao',),
'็ผถ':('fou',),
'ๅฎน':('rong',),
'็คบ':('shi',),
'ๅฝ':('lou',),
'็พ':('hua',),
'่น':('die',),
'่ก':('nv',),
'็ป':('fu',),
'้':('xian',),
'่ฏ':('hui',),
'ๆฉ':('ju',),
'ๅซฃ':('yan',),
'ๅง':('chi',),
'ๆฑ':('zhu',),
'่ฒ':('qi',),
'ๆต':('meng',),
'้ฅบ':('jiao',),
'ๅผ':('hu',),
'ไผ':('qi',),
'ๆถ':('tu',),
'็':('chuang',),
'่':('suan',),
'็จฃ':('su',),
'้ขค':('zhan', 'chan'),
'ๅฆ':('ken',),
'ไธซ':('ya',),
'่ดญ':('gou',),
'่ฑ':('pi',),
'ๆด':('cui',),
'ๅป':('xi',),
'ไป':('shi', 'shen'),
'่':('ma',),
'ๆ':('zhuo',),
'่':('sao',),
'็':('jian',),
'ๆ':('an',),
'ๅฝก':('shan',),
'ๅฅฅ':('ao',),
'ๅฉ':('jie',),
'ๆนซ':('qiu',),
'่ฐ':('zhe',),
'ๆณ':('ban',),
'่ด':('dong',),
'็ป':('deng',),
'้ผ':('lai',),
'็ฟ':('can',),
'่บ':('zao',),
'่ข':('niao',),
'ๆ':('qie',),
'ๅพ':('lv',),
'้ฒ':('bao',),
'้':('yong',),
'่':('wu',),
'็บก':('yu',),
'ๅผ ':('zhang',),
'็ขฅ':('bian',),
'ๅคค':('yin',),
'่พซ':('bian',),
'่ณ':('shi',),
'็ บ':('li',),
'ๅฝ':('yan', 'ye'),
'่ฅ':('xiang',),
'ๅ':('m',),
'่':('lan',),
'็ซน':('zhu',),
'้กบ':('shun',),
'ๅผ':('ti',),
'ไธ':('ding',),
'ๆฒ':('yi',),
'้ผ':('yuan',),
'ๆด':('xi', 'xian'),
'ๆ':('wang',),
'่':('bi',),
'ๆ':('gan',),
'้พ ':('yue',),
'ๅฒข':('ke',),
'ไผซ':('zhu',),
'่ฐญ':('tan',),
'ๆฐ':('ping',),
'่ต':('ding',),
'็ฎธ':('zhu',),
'ๅป':('qi',),
'็ผ':('teng',),
'ๅฟ':('er',),
'ๆก':('wei',),
'่':('qu',),
'ๆ':('fei',),
'็ฑ':('ji',),
'้':('er', 'keng'),
'ๆฃ':('peng',),
'ๅนก':('fan',),
'ๅกฅ':('ge',),
'้ง':('lan',),
'ๅฉ':('sheng',),
'่ทฌ':('kui',),
'่ด':('zhi',),
'็ตท':('zhi',),
'ๅปถ':('yan',),
'้ผ':('ta',),
'ๅพ':('gou',),
'้':('yang',),
'ๆ':('xu', 'qu'),
'่พ':('yuan',),
'ๆธ':('yu',),
'็':('ge',),
'็ฎ':('qian',),
'ๅจ':('nuo', 'na'),
'ๆฅ':('li',),
'ๆถฉ':('se',),
'้ฒณ':('chang',),
'ๅพต':('cheng', 'zheng', 'zhi'),
'่ท':('zhi',),
'ๆบ':('ji',),
'ๆดพ':('pai',),
'้':('fu',),
'ๆ':('wan',),
'ๆณ':('hong',),
'่ฅ':('huang',),
'ๆค':('wu',),
'ๆฑจ':('mi',),
'็ญ':('ban',),
'ๅนด':('nian',),
'ๆฏฝ':('jian',),
'่':('die',),
'็พ':('yang',),
'่ฐ':('wei',),
'ๆฎ':('yun',),
'ๅ':('mi',),
'้ช':('ao',),
'่ ':('chang',),
'่ค':('zao',),
'ไพช':('chai',),
'็ฌ':('quan',),
'็ขฐ':('peng',),
'่น':('lian',),
'ๆธ':('lu',),
'ไธฟ':('pie',),
'ๆ':('han',),
'่ฏ':('yi',),
'ไป':('zi',),
'ๅญ':('xiao',),
'่ฃ':('dong',),
'็ซ':('huo',),
'็ฏ':('qu',),
'็ฑณ':('mi',),
'ๆนฟ':('shi',),
'ๅ':('nan',),
'้':('yu',),
'็':('mian',),
'ๅฎ':('yu',),
'่':('shao',),
'ๆค':('ju',),
'ๆผ':('yan',),
'้':('ti',),
'้':('kan',),
'็ช':('wo',),
'่ข':('shan',),
'ๆฒฉ':('wei',),
'่ธช':('zong',),
'ๅญ':('shao',),
'้ฏ':('jian',),
'็ฎ':('cong',),
'ๅบต':('an',),
'่ท':('huo',),
'็':('xian',),
'ๅ':('ji',),
'็ฏ':('pian',),
'ๆ':('mao',),
'ๆ':('bai', 'bo'),
'ๅ':('shou',),
'้':('nao',),
'่ก':('hu',),
'็ญ':('xia',),
'็ปต':('mian',),
'่บ':('hui',),
'้':('zeng',),
'ๅฐ':('wei', 'yu'),
'ไธ':('pi',),
'็':('yi',),
'็ผ':('gao',),
'ๆฌง':('ou',),
'ๅฏ':('yi',),
'้ธต':('tuo',),
'ๅ':('jing',),
'ๆ':('cai',),
'ๆก':('sang',),
'ๅ':('guo',),
'ๆข':('long',),
'ๅฎ':('xiao',),
'ๆท':('jie',),
'่ฏผ':('zhuo',),
'้พ':('yu',),
'ไบ':('qi',),
'ๆฐ':('qi',),
'ๅ':('xu',),
'้':('mao',),
'ๆฅ':('chi',),
'่ฌฆ':('qing',),
'ไธจ':('shu',),
'่ดช':('tan',),
'้ฏ':('ba',),
'่ท':('qian',),
'ๆผพ':('yang',),
'้น':('ti',),
'ๅต':('sheng',),
'่':('zhi',),
'ๆป':('zi',),
'่ก':('han',),
'่ฟฉ':('er',),
'ๅฌ':('zong',),
'็ฏฑ':('li',),
'้ฅฒ':('si',),
'่บ':('ji',),
'ๆงฝ':('cao',),
'่ฝพ':('zhi',),
'้':('kun',),
'่':('dan',),
'้':('ai',),
'้ข':('yan',),
'็ฐ':('dian',),
'ๅซ':('bie',),
'็ฌ':('ju',),
'ๅฏ':('qi',),
'็
':('chang',),
'ๅ':('hong',),
'่ง':('jiao', 'jue'),
'้':('yi',),
'ไฝฉ':('pei',),
'่ธ':('xiao', 'shao'),
'่ฃผ':('ti', 'xi'),
'ไฟพ':('bi',),
'ๅ':('la',),
'้':('jin',),
'็':('zhen',),
'ๅฆ':('fu',),
'็ผ':('miao',),
'็':('xi',),
'ๅค':('ye',),
'ๆก':('lun',),
'่ข':('lao',),
'่ฐช':('zhe',),
'ๅฒต':('hu',),
'ๆบ':('xie',),
'่ถฟ':('ta',),
'้':('fei',),
'ๅฉ':('po',),
'ๅฑ':('jie',),
'่':('yan', 'a'),
'ๆ':('zu',),
'่ต':('pei',),
'็ฅ':('hu',),
'่ก':('yi',),
'่ฅ':('ying',),
'็ญ':('re',),
'ๅฌ':('gong',),
'ๅญฐ':('shu',),
'ๅตด':('ji',),
'ๆน':('cao',),
'่บ':('qi',),
'ๅ':('diao',),
'็ช':('bian',),
'้':('qiao', 'shao'),
'็ฌ':('di',),
'ๅ':('dong',),
'ๅบ':('pang',),
'่ ':('you',),
'่ค':('xie',),
'่พจ':('bian',),
'้ญ':('huan',),
'็ฌ':('ao',),
'ๅฏ':('wei',),
'ๆธ':('ju', 'gou'),
'ๅ':('a',),
'้ณ':('bian',),
'ๆ':('huang',),
'่':('li',),
'่ฃ':('pou',),
'ๆฑ':('shan',),
'้':('xu',),
'ๅ':('qin',),
'็ป':('jiao',),
'่ฃ':('you',),
'ไนฉ':('ji',),
'ๆณช':('lei',),
'ๆท':('kang',),
'ๆญป':('si',),
'ๅบ':('bi',),
'่':('rong',),
'ๆ':('chang',),
'่ถ':('zi',),
'ๆฐ':('di',),
'้':('qing',),
'็':('xi',),
'ๅ':('tang',),
'็พ':('di',),
'ๅธ':('zhi',),
'ๆก':('wo',),
'่ข':('meng',),
'ๆฅ':('jie',),
'่ดฆ':('zhang',),
'้ซ':('fang',),
'ๅญ':('ya',),
'็ผฒ':('qiao',),
'ๅฎต':('xiao',),
'ๆถ':('shou',),
'ๆบ':('ting',),
'็':('ting',),
'ๅ':('ban',),
'็ป':('zhi',),
'ๅ':('wei',),
'่ฅ':('xi',),
'ๆกจ':('jiang',),
'็ญ':('duan',),
'ๅฌ':('shi',),
'้ฎ':('tong',),
'ๅฉด':('ying',),
'ๅ':('yan',),
'้':('tao',),
'ๅ':('bao',),
'่':('qi',),
'ๆ':('zeng',),
'่ ':('meng',),
'็':('yi',),
'ๅ':('hou',),
'ๅฆ':('niu',),
'่ค':('kou',),
'ๅซ':('han',),
'้ญ':('ai',),
'ๆธ':('ji',),
'ๅ':('ai',),
'็ฅ':('zhi',),
'ๆต':('hun',),
'็':('ya',),
'ๅฝ':('yi',),
'ๆข':('di',),
'ๆทฆ':('gan',),
'ๆฏช':('mu',),
'็ฏ':('gu',),
'ๆท':('gui',),
'่ธ':('xiong',),
'ๅพ':('xun',),
'้ข':('jie', 'xie'),
'่':('pi',),
'็':('zhi',),
'ๆฅ':('bao',),
'่ฐฆ':('qian',),
'ๅฒฑ':('dai',),
'ๅชต':('ying',),
'่ฎฟ':('fang',),
'ๆ พ':('luan',),
'้':('du',),
'็':('ran',),
'ๆ':('xuan',),
'ๅฉ':('biao',),
'่':('ke',),
'ๆ':('miao',),
'่ต':('ci',),
'ไน':('ping',),
'ๅท':('chuan',),
'้ป':('you',),
'ๅฏ':('cha',),
'่ฅ':('ruo',),
'้ฎ':('zhe',),
'่ถ':('ting',),
'ๆน':('re',),
'่บ':('yi',),
'ไฝผ':('jiao',),
'่กพ':('qin',),
'ๅ':('xu', 'yu'),
'็':('zao',),
'่':('long',),
'่ค':('bao',),
'ๅ':('ga', 'ka'),
'ๅข':('zeng',),
'ๆฃ':('leng',),
'็ฌ':('shun',),
'็ฎด':('zhen',),
'่ธฝ':('ju',),
'็':('mao',),
'้ณ':('fu',),
'่':('tai',),
'่ฝง':('zha', 'ya'),
'้ฌ':('tao',),
'็ฏ':('xun',),
'ๅฎ':('tong',),
'ๆท':('pa',),
'็':('yu',),
'้':('pei',),
'ๆ':('zhan',),
'่พ':('ji',),
'ๆธ':('jian',),
'้':('lin',),
'ๅ':('liu',),
'ๅฐ':('ga',),
'่คช':('tun', 'tui'),
'้ฏ':('han',),
'็ฎ':('xian',),
'ๆฌพ':('kuan',),
'้':('tuo',),
'็':('niu',),
'้':('chuan',),
'็ณ':('hou',),
'่ก':('xian',),
'ๅ':('hua',),
'ๅป':('chan',),
'ๆค':('xu',),
'่ฏฉ':('xu',),
'็ฟฑ':('ao',),
'่ถ':('die',),
'่บ':('xian',),
'ๅฌ':('xi',),
'็ฆ':('xi',),
'ๆ':('ji',),
'ไผ':('xiu',),
'้ฒ':('xian',),
'็ ':('zuo', 'zha'),
'ๅฎ':('shi',),
'่ค':('fu',),
'ๆฐง':('yang',),
'ไพฆ':('zhen',),
'้ญ':('ding',),
'็บฐ':('pi',),
'็ขด':('cha',),
'่น':('ru',),
'ไธป':('zhu',),
'่ดฝ':('zhi',),
'ๆฒผ':('zhao',),
'็':('zheng',),
'็':('duan',),
'็ญ':('deng',),
'้น':('mei',),
'ๅต':('lou',),
'ๆข':('ji',),
'็ซ':('dian',),
'้ฌ':('luo', 'ge'),
'็ฏ':('deng',),
'ๅฎ':('ding',),
'ๅทฒ':('yi',),
'็ฌ':('ji',),
'้':('he',),
'้':('tong',),
'็ฒ':('li',),
'ๆชฉ':('lin',),
'้ซ':('ju',),
'ๅญ':('ji',),
'้ฏ':('xi',),
'่ท':('ru',),
'ๆถ':('chang',),
'ๆจพ':('yue',),
'้ญ':('xiao',),
'ๆฏ':('yu',),
'้ณ':('shan',),
'ๆ ':('gang',),
'่ฅ':('xu',),
'ๅฌ':('se',),
'็ปฑ':('shang',),
'ๅฝฐ':('zhang',),
'ๅฅด':('nu',),
'ๆณฝ':('ze',),
'ๅ':('qi',),
'ๅจ':('ping',),
'่':('zang',),
'ไธ':('chou',),
'็ผ':('ru',),
'ๆฃ':('zheng',),
'ๆดง':('wei',),
'ไบฆ':('yi',),
'่ฎจ':('tao',),
'็ฌ':('li',),
'้ธฑ':('chi',),
'็พฐ':('tang',),
'่น':('chuan',),
'ๆธ':('dan',),
'้':('bo',),
'ๅ':('qi',),
'่':('zhu',),
'ไฟ':('li',),
'ๆก':('jiu',),
'้':('tang',),
'็':('ge', 'gai'),
'็ซ':('jing',),
'่ฃ':('rong',),
'ๆข':('zhuo',),
'่ตง':('nan',),
'ๆปฆ':('luan',),
'็ซ':('jie',),
'ๆท':('fu',),
'่ธ':('yan',),
'้ฆ':('nei',),
'็ ':('dang',),
'้พ':('bao',),
'ไบ':('kui',),
'่':('bin',),
'ๆ':('xi',),
'ๅฐ':('chen',),
'้':('ke', 'kua', 'guo'),
'ไธค':('liang',),
'ๆฉ':('zhan',),
'ๅฑ':('hu',),
'้ณ':('qian',),
'ๆพ':('fang',),
'้น':('hu', 'gu'),
'ๅ':('zhao',),
'ๆฃ':('qi',),
'ๅ':('fang',),
'ๆป':('fu',),
'่':('qian',),
'ๅ':('xin',),
'ๆก ':('ya',),
'่ฟฅ':('jiong',),
'่ฉ':('fan',),
'ๆทน':('yan',),
'่ฝบ':('yao',),
'ๆฝ':('lan',),
'่พ':('lei',),
'ๅ':('ai',),
'่ธ':('ta',),
'ๆถ':('xian',),
'ๆ':('pai',),
'็จ':('bai',),
'ๅฆ':('yao',),
'็':('ying',),
'็':('wen',),
'็ชฌ':('yu',),
'้ฑ':('po',),
'็ด':('cun',),
'่ดน':('fei',),
'ๆฒธ':('fei',),
'ๆผ':('ya',),
'็ฅ':('qi',),
'็ฝ':('xia',),
'ๅ':('ha',),
'่ท':('tuo',),
'ๆ':('she',),
'้':('liu',),
'้':('zhi',),
'็':('pi',),
'ไฝฅ':('qian',),
'ๆฆ':('nuo',),
'็กซ':('liu',),
'ๅทฎ':('chai', 'ci', 'cha'),
'ๅฒ':('shi',),
'ๆป':('ce',),
'ไฟบ':('an',),
'้ข':('ban',),
'็ผ':('jian',),
'้':('ding',),
'ไพ':('zhu',),
'่':('can',),
'ๆ':('shuo',),
'ๆบฅ':('pu',),
'ไผค':('shang',),
'่ฆ':('ou',),
'ๆฉ':('liao',),
'่ฎท':('ne',),
'่ป':('shan',),
'ๆธบ':('miao',),
'้ฝ':('zha',),
'่':('fu',),
'้ณ':('yong',),
'ๅ':('lin',),
'ๅ':('yu',),
'ๆฅ ':('nan',),
'็ซญ':('jie',),
'ๅญฌ':('nao',),
'ๅฐ':('lan',),
'ๆฝ':('zhuai',),
'ๅธ':('shuai',),
'ๅ':('ji',),
'่':('man', 'wan'),
'ๆ':('shu',),
'ๅบ':('geng',),
'็':('zhen',),
'่จ':('hong',),
'ๅคซ':('fu',),
'็ฎฌ':('ruo',),
'ๅผฏ':('wan',),
'ๅณ':('li',),
'้ต':('zhen',),
'ๆถธ':('he',),
'ๆผ':('guan',),
'้ซ':('qia',),
'ๅฟ':('xin',),
'่':('ran',),
'ๆ':('yun',),
'ๅฑ':('e',),
'็ป':('xuan',),
'็':('die',),
'่ตฃ':('gan',),
'่ง':('ying',),
'ๆฆ':('dian',),
'่ทธ':('bi',),
'ๆฟ':('ban',),
'็จ':('xi',),
'ๆ ':('lu',),
'ๆ':('cheng',),
'ๆ':('sao',),
'ๅธ':('lian',),
'ๅ':('ti',),
'้':('xia',),
'ๆฎก':('bin',),
'็ผฎ':('shan',),
'้ณ':('hui',),
'ๅต':('miao',),
'่ขท':('jia', 'qia'),
'่ฟ':('hao',),
'็ป':('lian',),
'ๅฅ':('huan',),
'้':('chen',),
'ๅ':('a',),
'่ก':('xing', 'hang'),
'ไฝ':('di',),
'ๅ':('yo',),
'่ฏก':('gui',),
'ไปฃ':('dai',),
'ๆฑค':('tang',),
'ๆจ':('chen',),
'็งญ':('zi',),
'ๅกฌ':('yuan',),
'้ฒ':('cheng',),
'่พ':('rong',),
'ๅ':('yu',),
'่ฐ':('jian',),
'ๆพ':('peng',),
'็ ':('che',),
'ๆธฃ':('zha',),
'ๆง':('xing',),
'่จ':('fu',),
'็ฐ':('qiu',),
'้ต':('qiang',),
'ๆผ':('dao',),
'ๅณ':('yi',),
'่ฟ':('ying',),
'ๆ':('chi',),
'ๅฟฎ':('zhi',),
'้ด':('tang',),
'ๆกท':('jue',),
'่ผ':('pian',),
'้ช':('xiao',),
'้ฒ':('ba',),
'็':('dan',),
'่ข':('pao',),
'่':('qi',),
'ๅผ':('hong',),
'็':('qiang',),
'่ ข':('chun',),
'ๆฒฅ':('li',),
'ๅฑ':('li',),
'ๆพ':('xian',),
'ๆฏ':('wu',),
'ๅ':('gao',),
'ไน':('hu',),
'่':('jian',),
'ๆ':('ran',),
'ๅง':('shan',),
'็ฝ':('fu',),
'ๅ':('pan',),
'้':('hao',),
'็':('yu',),
'ๅ':('jie',),
'ไฟฃ':('yu',),
'่ฉ':('qiong',),
'ๆจ':('yang',),
'็ฑ':('yu',),
'ๅฐ':('yin',),
'ๆปน':('hu',),
'่ฑบ':('chai',),
'่พ':('xia',),
'ๅจ':('wei',),
'ๅ':('jia',),
'่':('ting',),
'ๆ':('yi',),
'็':('meng',),
'ๅฒ':('lan',),
'็':('sheng',),
'่ฎ ':('yan',),
'ๆง':('kui',),
'่จ':('man',),
'็ฐ':('xian',),
'ๅณ':('zhen',),
'็ด':('ke',),
'่ธน':('chuai',),
'่ฝ':('dan',),
'้':('ta', 'she', 'tuo', 'shi', 'yi'),
'่ป':('wei',),
'ๆ':('pu',),
'็ซ':('shu',),
'้':('xuan',),
'่ง':('shu',),
'ๆฆ':('lan',),
'้ด':('jian',),
'ๆฅท':('kai',),
'่ฟธ':('beng',),
'ๆป':('nian',),
'่ผ':('jiu',),
'ๅข':('jing',),
'้':('zui',),
'่พ':('chuo',),
'ๆธ':('lu',),
'ๆ':('chu',),
'้':('yin',),
'ๆถก':('wo', 'guo'),
'ๆฉ':('qi',),
'้ฒซ':('ji',),
'ๅพญ':('yao',),
'ๅฑ':('li',),
'้ณ':('pi',),
'ๆฌบ':('qi',),
'ๅ':('shang',),
'ๅ':('ban',),
'็':('dian',),
'ๅ':('mu',),
'ๆฑ ':('chi',),
'่ฏฅ':('gai',),
'ๆจ':('hen',),
'้ฅฎ':('yin',),
'็ฑ':('fei',),
'่ตถ':('gan',),
'ๆฝ':('cheng',),
'่พ':('teng',),
'็':('jiang',),
'่ฐ':('mou',),
'ไผ':('wu',),
'้ข':('ti',),
'็':('chen',),
'ๅฎ':('ding',),
'้':('zhang',),
'ๆง':('mei',),
'็บฌ':('wei',),
'้ฑ':('zi',),
'็ด':('an',),
'็ญ':('xian',),
'้':('ao',),
'ๆฝ':('wei',),
'่ง':('yu',),
'ๆ':('jiu',),
'่':('mang',),
'ๆ':('bu',),
'้น':('e',),
'็ฏ':('fei',),
'ๅ':('ba',),
'่กฃ':('yi',),
'ๆฆ':('dan',),
'ๆช':('jiu',),
'้ฐ':('jiao',),
'่ฃธ':('luo',),
'ๆฟ':('cheng',),
'้ฒ':('lu',),
'ๅฆ':('fei',),
'้ช':('hua',),
'็ฌ':('ji',),
'ๅ':('yuan', 'huan'),
'้':('cheng',),
'ๆฒก':('mo', 'mei'),
'่ธข':('ti',),
'่ฆ':('ying',),
'่ช':('hang',),
'ๅบญ':('ting',),
'ๅฑ':('zan',),
'็ง':('tu',),
'ๅน':('mi',),
'้ญ':('po',),
'ๅ':('dai',),
'ๆท':('lin',),
'ๅ':('xue', 'xiao'),
'ๆฏ':('mei',),
'่':('li',),
'ๅฟ':('zhi',),
'้ฃ':('biao',),
'ๅ':('sou',),
'ๆต ':('xi',),
'ๆญค':('ci',),
'่ฉ':('ka',),
'ๆจ':('kai',),
'็ปญ':('xu',),
'ๅฝฌ':('bin',),
'้ฒ':('chui',),
'ๅด':('que',),
'ๆฃน':('zhao',),
'ๅฐ':('feng',),
'็บ':('zuan',),
'ๅจ':('ya',),
'้':('zhen',),
'็':('du',),
'ไธ':('bu',),
'่':('nong',),
'็ผ':('min',),
'ๅฒ':('qu',),
'้ฆ':('guo',),
'ๅช':('mei',),
'ไบข':('kang',),
'่ฎค':('ren',),
'่จ':('peng',),
'้ธญ':('ya',),
'็ฐ':('zhu',),
'่ น':('du',),
'็ฑ':('lai',),
'ๅฏ':('ji',),
'้':('mao',),
'ๆน':('tuan',),
'่ฃ':('cheng',),
'็ณ':('tang',),
'้':('yong',),
'ๆปข':('ying',),
'ๆฆ':('ca',),
'ๅฃฎ':('zhuang',),
'้ฐ':('run',),
'็ณ':('hu',),
'้ฆ':('xian',),
'ไบ':('shi',),
'ๆ':('zheng',),
'ๅจ':('niang',),
'็':('shan',),
'ๆพก':('zao',),
'่ดข':('cai',),
'ๆฉ':('yan',),
'็ฌฎ':('ze',),
'็ฒ':('jia',),
'ๅต':('kuang',),
'ๆผถ':('huan',),
'่ฟ':('zhong',),
'ๆป':('zi',),
'่ฑ':('wan',),
'่':('nve',),
'ๆ':('ta', 'tuo'),
'่':('lian',),
'ๅ':('si',),
'็':('yu',),
'่งฅ':('gong',),
'ๆกค':('qi',),
'่ฉ':('pu',),
'้ฒ':('cha',),
'่ฝถ':('yi',),
'ๆฏน':('shu',),
'ๅผ':('bian',),
'็':('xiong',),
'้ช':('zhi',),
'็':('chi',),
'้':('ju',),
'่ขค':('mao',),
'่จ':('ji',),
'ๅณ':('ku',),
'่ฝ':('bi',),
'ๆผ':('han',),
'้':('shi',),
'ๆต':('hui',),
'่ฏ':('qu',),
'ๆ':('cun',),
'ๆ':('mu',),
'ๅญ':('sun',),
'้':('bi',),
'็':('luo',),
'ๅฏฎ':('liao',),
'ๅฒ':('dia',),
'้ด':('chen',),
'่ผ':('liao',),
'ๆฟ':('yong',),
'็ผ':('zhui',),
'ๅฎ':('ta',),
'้ข':('lu',),
'ๅ':('la',),
'ไพ':('li',),
'ๆด':('lie',),
'ๆ':('qu',),
'ไผ ':('zhuan', 'chuan'),
'่ฐข':('xie',),
'่ช':('pang',),
'ๅฒญ':('ling',),
'ๆพ':('li',),
'ๅฑ':('ceng',),
'็':('zheng',),
'่ต':('du',),
'่':('qiang',),
'็ญ':('kou',),
'้':('lv',),
'็':('xu', 'chu'),
'่ฃฅ':('jian',),
'ๅตฌ':('wei',),
'้ฉฎ':('duo', 'tuo'),
'็ฑ':('xu',),
'ๅด':('xing',),
'่นถ':('jue',),
'่พ':('yi', 'ai'),
'ๅธ':('bi',),
'้ฌ':('zong',),
'้':('tie', 'e'),
'็':('jie',),
'็':('chui',),
'ๆช':('qin',),
'ๆ':('sa',),
'ๅบ':('pao',),
'ๅข':('liang',),
'ๅ':('ban',),
'ๆฌฃ':('xin',),
'่จ':('liang', 'lang'),
'ๅคฏ':('hang',),
'้ฑ':('jing',),
'็ฐ':('kan',),
'้ต':('kui',),
'่ฝ':('la', 'lao', 'luo'),
'ๆผ':('wu', 'yu'),
'็น':('po', 'fan'),
'็ก':('gui',),
'้':('ji',),
'ๅ':('you',),
'้':('jiao',),
'ๆ':('shu',),
'้':('dian',),
'็':('ning',),
'ไนก':('xiang',),
'ๆณข':('bo',),
'ๆฆ':('xi',),
'็ฅฏ':('zhen',),
'ๅฒ':('zhe',),
'ๆตท':('hai',),
'ไปถ':('jian',),
'่ฏธ':('zhu',),
'่ผ':('sheng',),
'่ฟ':('reng',),
'่':('yi',),
'่':('di',),
}
PinYinDict.update({
'ใ':('-',),
'๏ผ':('-',),
'๏ผ':('',),
'ใ':('-',),
'ใ':('',),
'ใ':('',),
})
PinYinDict = dict([( ord(k.decode('utf-8')), v[0].decode('utf-8')) for k, v in PinYinDict.items()])
def hanzi2pinyin(hanzi):
""" hanzi should be unicode string"""
pinyin = ''
for char in hanzi:
char_ord = ord(char)
if char_ord in PinYinDict:
pinyin += PinYinDict[char_ord]
else:
pinyin += char
return pinyin
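# Illustrative usage sketch (not part of the original module; Python 2 is assumed to
# match the str.decode calls above). Characters present in PinYinDict are replaced by
# their first pinyin reading, anything else is passed through unchanged.
if __name__ == '__main__':
    sample = u'\u4e2d\u6587 abc'    # two hanzi (written as escapes) plus plain ASCII
    print hanzi2pinyin(sample)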
| {
"repo_name": "hexuotzo/khufu",
"path": "pinyin.py",
"copies": "1",
"size": "119018",
"license": "bsd-2-clause",
"hash": -8219895344353602000,
"line_mean": 14.4946378728,
"line_max": 99,
"alpha_frac": 0.2781780947,
"autogenerated": false,
"ratio": 1.6788488475741754,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.24570269422741753,
"avg_score": null,
"num_lines": null
} |
# 0000
# 0000 @NOP No Operation
# 0001
# CCCC AAAA AAAA @JCN Jump Condition
# 0010
# RRR0 DDDD DDDD @FIM Fetch Immediate
# RRR1 @SRC Send Register Control
# 0011
# RRR0 @FIN Fetch Indirect
# RRR1 @JIN Jump Indirect
# 0100
# AAAA AAAA AAAA @JUN Jump Unconditional
# 0101
# AAAA AAAA AAAA @JMS Jump to Subroutine
# 0110
# RRRR @INC Increment
# 0111
# RRRR AAAA AAAA @ISZ Increment and Skip
# 1000
# RRRR ADD Add
# 1001
# RRRR SUB Subtract
# 1010
# RRRR @LD Load
# 1011
# RRRR @XCH Exchange
# 1100
# DDDD @BBL Branch Back and Load
# 1101
# DDDD LDM Load Immediate
# 1110
# 0000 @WRM Write Main Memory
# 0001 @WMP Write RAM Port
# 0010 @WRR Write ROM Port
# 0100 @WR0 Write Status Char 0
# 0101 @WR1 Write Status Char 1
# 0110 @WR2 Write Status Char 2
# 0111 @WR3 Write Status Char 3
# 1000 SBM Subtract Main Memory
# 1001 RDM Read Main Memory
# 1010 @RDR Read ROM Port
# 1011 ADM Add Main Memory
# 1100 RD0 Read Status Char 0
# 1101 RD1 Read Status Char 1
# 1110 RD2 Read Status Char 2
# 1111 RD3 Read Status Char 3
# 1111
# 0000 @CLB Clear Both
# 0001 @CLC Clear Carry
# 0010 @IAC Increment Accumulator
# 0011 CMC Complement Carry
# 0100 CMA Complement
# 0101 @RAL Rotate Left
# 0110 RAR Rotate Right
# 0111 TCC Transfer Carry and Clear
# 1000 DAC Decrement Accumulator
# 1001 TCS Transfer Carry Subtract
# 1010 @STC Set Carry
# 1011 DAA Decimal Adjust Accumulator
# 1100 KBP Keyboard Process
# 1101 @DCL Designate Command Line
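# Illustrative decoding sketch (not part of the original emulator): each opcode byte
# splits into a high nibble that selects the instruction family tabulated above and a
# low nibble that carries the register / condition / data field.
_example_op = 0xA3                                   # 1010 0011 -> LD, register 3
assert (_example_op >> 4, _example_op & 0x0F) == (0xA, 0x3)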
ROM = [None] * 256
ROM[0x000:0x03D] = [
0xE2, # 0 WRR
0xCF, # 1 BBL, 15
0x2A, 0x41, # 2 FIM, 5, 4, 1
0x50, 0xDE, # 4 JMS (LD MK)
0x50, 0xE5, # 6 JMS (CK IDX)
0x30, # 8 FIN 0
0xFE, # 9 254
0x50, 0xEE, # 10 JMS (CK FIN)
0x50, 0xE5, # 12 JMS (CK IDX)
0x50, 0xEE, # 14 JMS (CK FIN)
0x50, 0xE5, # 16 JMS (CK IDX)
0x2A, 0x42, # 18 FIM 5, 4, 2
0x5F, 0xFF, # 20 JMS 15, 255
0x57, 0x1A, # 22 JMS 7, 26
0x48, 0x24, # 24 JUN 8, 36
0x5F, 0xFF, # 26 JMS 15, 255
0x53, 0x20, # 28 JMS 3, 32
0x4C, 0x18, # 30 JUN 12, 24
0x5F, 0xFF, # 32 JMS 15, 255
0x4F, 0xFF, # 34 JUN 15, 255
0x22, 0xCB, # 36 FIM 1, 12, 11
0xF0, # 38 CLB
0x2B, # 39 SRC 5
0xE1, # 40 WMP
0x21, # 41 SRC 0
0xE0, # 42 WRM
0xF2, # 43 IAC
0x71, 0x29, # 44 ISZ 1, 41
0xE4, # 46 WR0
0xF2, # 47 IAC
0xE5, # 48 WR1
0xF2, # 49 IAC
0xE6, # 50 WR2
0xF2, # 51 IAC
0xE7, # 52 WR3
0x60, # 53 INC 0
0x72, 0x29, # 54 ISZ 2, 41
0xFA, # 56 STC
0x50, 0xF7, # 57 JMS (CK CDL)
0x73, 0x39, # 59 ISZ 3, 57
0x25, # 61 SRC 2
0xFA, # 62 STC
0xF5, # 63 RAL
0xE1, # 64 WMP
0x1A, 0x47, # 65 JCN C=0, 71
0x1C, 0x4F, # 67 JCN A!=0, 79
0x19, 0x50, # 69 JCN T=1, 80
0x12, 0x50, # 71 JCN C=1, 80
0x14, 0x52, # 73 JCN A=0, 82
0x11, 0x43, # 75 JCN T=0, 67
0x40, 0x45, # 77 JUN 0, 69
0xF0, # 79 CLB
0x40, 0x3F, # 80 JUN 0, 63
]
ROM[0x0DE:] = [
# LD MK
0x2B, # 222 SRC 5
0xAB, # 223 LD 11
0xF1, # 224 CLC
0xE1, # 225 WMP
0xF5, # 226 RAL
0xBB, # 227 XCH 11
0xC0, # 228 BBL, 0
# CK IDX
0x21, # 229 SRC 0
0x23, # 230 SRC 1
0x25, # 231 SRC 2
0x27, # 232 SRC 3
0x29, # 233 SRC 4
0x2B, # 234 SRC 5
0x2D, # 235 SRC 6
0x2F, # 236 SRC 7
0xC0, # 237 BBL, 0
# CK FIN
0x32, # 238 FIN 1
0x34, # 239 FIN 2
0x36, # 240 FIN 3
0x38, # 241 FIN 4
0x3A, # 242 FIN 5
0x3C, # 243 FIN 6
0x3E, # 244 FIN 7
0x30, # 245 FIN 0
0xC0, # 246 BBL, 0
# CK CDL
0xA4, # 247 LD 4
0xF5, # 248 RAL
0xFD, # 249 DCL
0xB4, # 250 XCH 4
0xEA, # 251 RDR
0xC0, # 252 BBL, 0
0x00, # 253
0xFF, # 254
0x00, # 255
]
class I4004:
def __init__(self):
self.accumulator = 0x0
self.registers = [0x0] * 16
self.pc_stack = [0x000]
self.rom_port = 0x0
self.ram_port = [0x0] * 4
self.ram_address = 0x00
self.carry = 0
self.rom = ROM
self.ram = [0x0] * 1024
self.ram_status = [0x0] * 64
self.ram_bank = 0
self.test = 0
def next(self):
return self.rom[self.pc_stack[0]]
def increment_pc(self):
self.pc_stack[0] += 1
self.pc_stack[0] &= 0xFF # @@@ just for exerciser
def run(self):
while True:
print self.pc_stack
op = self.next()
if op is None:
print "registers =", self.registers
print "accumulator =", self.accumulator
print "unknown op", op
break
if op >> 4 == 0x0:
if op % 0x10 == 0:
self.NOP()
else:
self.unimplemented_op()
elif op >> 4 == 0x1:
self.JCN(op % 0x10)
elif op >> 4 == 0x2:
if op % 2 == 0:
self.FIM(op % 0x10)
else:
self.SRC((op % 0x10) - 1)
elif op >> 4 == 0x3:
if op % 2 == 0:
self.FIN(op % 0x10)
else:
self.JIN((op % 0x10) - 1)
elif op >> 4 == 0x4:
self.JUN(op % 0x10)
elif op >> 4 == 0x5:
self.JMS(op % 0x10)
elif op >> 4 == 0x6:
self.INC(op % 0x10)
elif op >> 4 == 0x7:
self.ISZ(op % 0x10)
elif op >> 4 == 0xA:
self.LD(op % 0x10)
elif op >> 4 == 0xB:
self.XCH(op % 0x10)
elif op >> 4 == 0xC:
self.BBL(op % 0x10)
elif op == 0xE0:
self.WRM()
elif op == 0xE1:
self.WMP()
elif op == 0xE2:
self.WRR()
elif op == 0xE4: # |
self.WRx(0) # |
elif op == 0xE5: # |
self.WRx(1) # combine
elif op == 0xE6: # |
self.WRx(2) # |
elif op == 0xE7: # |
self.WRx(3) # |
elif op == 0xEA:
self.RDR()
elif op == 0xF0:
self.CLB()
elif op == 0xF1:
self.CLC()
elif op == 0xF2:
self.IAC()
elif op == 0xF5:
self.RAL()
elif op == 0xFA:
self.STC()
elif op == 0xFD:
self.DCL()
elif op == 0xFE:
self.unimplemented_op()
else:
print "registers =", self.registers
print "accumulator =", self.accumulator
print "unknown op %02X" % op
break
def unimplemented_op(self):
self.increment_pc()
def BBL(self, data):
self.increment_pc()
print "BBL", data
if len(self.pc_stack) > 1:
self.pc_stack = self.pc_stack[1:]
self.accumulator = data
def CLB(self):
self.increment_pc()
print "CLB"
self.carry = 0
self.accumulator = 0
def CLC(self):
self.increment_pc()
print "CLC"
self.carry = 0
def DCL(self):
self.increment_pc()
self.ram_bank = {
0: 0, 1: 1, 2: 2, 4: 3,
3: 4, 5: 5, 6: 6, 7: 7,
}[self.accumulator & 0x7]
def FIM(self, pair):
self.increment_pc()
self.registers[pair: pair + 2] = divmod(self.next(), 0x10)
print "FIM", pair >> 1, self.registers[pair: pair + 2]
self.increment_pc()
def FIN(self, pair):
self.increment_pc()
address = (self.pc_stack[0] & 0xF00) + (self.registers[0] << 4) + self.registers[1]
data = self.rom[address]
self.registers[pair: pair + 2] = divmod(data, 0x10)
print "FIN", pair >> 1, self.registers[pair: pair + 2]
def IAC(self):
self.increment_pc()
self.carry, self.accumulator = divmod(self.accumulator + 1, 0x10)
print "IAC", self.accumulator
def INC(self, register):
self.increment_pc()
self.registers[register] = (self.registers[register] + 1) & 0xF
print "INC", register, self.registers[register]
def ISZ(self, register):
self.increment_pc()
address = self.next()
self.increment_pc()
self.registers[register] = (self.registers[register] + 1) & 0xF
print "ISZ", register, self.registers[register], address
if self.registers[register] != 0:
self.pc_stack[0] = address
def JCN(self, condition):
self.increment_pc()
address = self.next()
self.increment_pc()
c1 = ((condition & 0x8) == 0x8)
        c2 = ((condition & 0x4) == 0x4)
c3 = ((condition & 0x2) == 0x2)
c4 = ((condition & 0x1) == 0x1)
c = (c2 and self.accumulator == 0) or (c3 and self.carry == 1) or (c4 and self.test == 1)
if c ^ c1:
self.pc_stack[0] = address
print "JCN", condition, c, c1, c2, c3, c4
# def JIN(self, pair):
# self.increment_pc()
# address = (self.pc_stack[0] & 0xF00) + (self.registers[pair] << 4) + self.registers[pair + 1]
# self.pc_stack[0] = address
# print "JIN", pair >> 1, address
def JMS(self, a3):
self.increment_pc()
address = (a3 << 8) + self.next()
address &= 0xFF # @@@ just for exerciser
print "JMS", address
self.increment_pc()
self.pc_stack.insert(0, address)
def JUN(self, a3):
self.increment_pc()
address = (a3 << 8) + self.next()
address &= 0xFF # @@@ just for exerciser
print "JUN", address
self.pc_stack[0] = address
def LD(self, register):
self.increment_pc()
self.accumulator = self.registers[register]
print "LD", self.accumulator
def NOP(self):
self.increment_pc()
def RAL(self):
self.increment_pc()
old_acc = self.accumulator
self.carry, self.accumulator = divmod((self.accumulator << 1) + self.carry, 0x10)
print "RAL", old_acc, self.accumulator, self.carry
def RDR(self):
self.increment_pc()
# @@@ no selection of ROM chip considered, nor I/O distinction
print "RDR", self.rom_port
self.accumulator = self.rom_port
def SRC(self, pair):
self.increment_pc()
self.ram_address = (self.registers[pair] << 4) + self.registers[pair + 1]
print "SRC", pair >> 1, self.ram_address
def STC(self):
self.increment_pc()
print "STC"
self.carry = 1
def WMP(self):
self.increment_pc()
print "WMP", self.ram_address >> 6, self.accumulator
self.ram_port[self.ram_address >> 6] = self.accumulator
def WRM(self):
self.increment_pc()
print "WRM", self.ram_address, self.accumulator
self.ram[self.ram_address] = self.accumulator
def WRR(self):
self.increment_pc()
print "WRR", self.accumulator
# @@@ no selection of ROM chip considered, nor I/O distinction
self.rom_port = self.accumulator
def WRx(self, status):
self.increment_pc()
print "WR", self.ram_address >> 4, status
self.ram_status[((self.ram_address >> 4) << 2) + status] = self.accumulator
def XCH(self, register):
self.increment_pc()
acc_buffer = self.accumulator
self.accumulator = self.registers[register]
self.registers[register] = acc_buffer
print "XCH", register, self.accumulator, acc_buffer
i4004 = I4004()
i4004.run()
| {
"repo_name": "jtauber/pycpu",
"path": "i4004.py",
"copies": "1",
"size": "12895",
"license": "mit",
"hash": -5353150625355330000,
"line_mean": 29.7023809524,
"line_max": 103,
"alpha_frac": 0.4665374176,
"autogenerated": false,
"ratio": 3.0717008099094807,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4038238227509481,
"avg_score": null,
"num_lines": null
} |
''' 0015. 3Sum - LeetCode
https://leetcode.com/problems/3sum/'''
class Solution:
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums = sorted(nums)
ret = []
for i in range(len(nums)-2):
if nums[i] > 0:
break
if i > 0 and nums[i] == nums[i-1]:
continue
l = i + 1
r = len(nums) - 1
while l < r:
if nums[i] + nums[l] + nums[r] == 0:
ret.append([nums[i], nums[l], nums[r]])
l += 1
r -= 1
while l < r and nums[l] == nums[l-1]:
l += 1
while l < r and nums[r] == nums[r+1]:
r -= 1
elif nums[i] + nums[l] + nums[r] < 0:
l += 1
else:
r -= 1
return ret
s = Solution()
print(s.threeSum([-1, 0, 1, 2, -1, -4]))
| {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/0015-3sum.py",
"copies": "1",
"size": "1030",
"license": "mit",
"hash": 3218575320785151000,
"line_mean": 28.4285714286,
"line_max": 59,
"alpha_frac": 0.3485436893,
"autogenerated": false,
"ratio": 3.6267605633802815,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4475304252680281,
"avg_score": null,
"num_lines": null
} |
import Simulation
import Frameworks.PlayerStatsPrebuilts as Stats
def GetSpread():
balanced = Simulation.Player(1)
focused_stam_high = Simulation.Player(1)
focused_stam_low = Simulation.Player(1)
focused_stam_mid = Simulation.Player(1)
specialist_stam_high = Simulation.Player(1)
specialist_stam_low = Simulation.Player(1)
balanced.SetStats(Stats.balanced)
focused_stam_high.SetStats(Stats.turtle)
focused_stam_low.SetStats(Stats.passer)
focused_stam_mid.SetStats(Stats.sprinter)
specialist_stam_high.SetStats(Stats.blitzer)
specialist_stam_low.SetStats(Stats.striker)
spread = {}
spread["balanced"] = balanced
spread["focused_stam_high"] = focused_stam_high
spread["focused_stam_low"] = focused_stam_low
spread["focused_stam_mid"] = focused_stam_mid
spread["specialist_stam_high"] = specialist_stam_high
spread["specialist_stam_low"] = specialist_stam_low
return spread
hit_results = {}
harmony_results = {}
spread = GetSpread()
for key in spread.keys():
hit_results[key] = []
harmony_results[key] = []
for t in range(10000):
hits = 0.0
spread = GetSpread()
while(spread):
hits += 1.0
done = []
for p in spread.keys():
harmony = 1 * (1.0 + 0.1*spread[p].run)*(1.0 + 0.1*spread[p].throw)*(1.0 + 0.1*spread[p].pick)
harmony_results[p].append(harmony)
spread[p].TakeDamage()
if spread[p].run == 0 or spread[p].throw == 0 or spread[p].pick == 0:
hit_results[p].append(hits)
done.append(p)
for d in done:
del spread[d]
average_hits = {}
average_harmony = {}
for key in hit_results.keys():
average_hits[key] = sum(hit_results[key]) / len(hit_results[key])
average_harmony[key] = sum(harmony_results[key]) / len(harmony_results[key])
print(t)
print("hits: \n\t", average_hits)
print("harmony: \n\t", average_harmony)
| {
"repo_name": "jedislight/Shockball",
"path": "Test/AverageHitsSimulator.py",
"copies": "1",
"size": "2028",
"license": "mit",
"hash": 4129844176294368000,
"line_mean": 33.3728813559,
"line_max": 106,
"alpha_frac": 0.6188362919,
"autogenerated": false,
"ratio": 2.90961262553802,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.402844891743802,
"avg_score": null,
"num_lines": null
} |
# https://punkapi.com/
# http://docs.python-requests.org/en/master/
import time, re, requests, os, errno, json, sqlite3
def getDatos(url,i):
directorio = 'archivitos-de-cache'
ids=[]
print ('Buscando datos...')
ids.append(i)
urls = url + str(i)
request = requests.get(urls)
id = request.json()[0]['id']
name = request.json()[0]['name']
description = request.json()[0]['description']
image_url = request.json()[0]['image_url']
abv= request.json()[0]['abv']
target_fg = request.json()[0]['target_fg']
tagline = request.json()[0]['tagline']
first_brewed = request.json()[0]['first_brewed']
ibu = request.json()[0]['ibu']
ph = request.json()[0]['ph']
qery = setDatos(id,name,description,image_url,abv,target_fg,tagline,first_brewed,ibu,ph)
return(qery)
def setDatos(id,name,description,image_url,abv,target_fg,tagline,first_brewed,ibu,ph):
#ids = [1, 68, 28, 50, 200]
#parametros = '|'.join([str(id) for id in ids])
conn = sqlite3.connect(':memory:')
conn = sqlite3.connect("refrigerador.db")
cursor = conn.cursor()
if not os.path.isfile("refrigerador.db"):
conn = sqlite3.connect("refrigerador.db")
else:
pass
cursor.execute('''
CREATE TABLE IF NOT EXISTS chelas (
id INTEGER PRIMARY KEY,
name TEXT,
description TEXT,
image_url TEXT,
abv TEXT,
target_fg TEXT,
tagline TEXT,
first_brewed TEXT,
ibu TEXT,
ph TEXT
)'''
)
conn.commit()
cursor.execute(''' INSERT INTO chelas VALUES (?,?,?,?,?,?,?,?,?,?)''', (id,name,description,image_url,abv,target_fg,tagline,first_brewed,ibu,ph))
conn.commit()
queryto = cursor.execute('''
SELECT * FROM chelas
ORDER BY name''')
qery= show(queryto)
conn.close()
return(qery)
def show(queryto):
for row in queryto:
return('\nID: {} \nNombre: {} \nDescripcion {} \nImagen: {} \nAbv: {} \nTarget: {} \nTag: {} \nFirst Brewed: {} \nIbu: {} \nPH: {} '\
.format(row[0],row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9]))
if __name__ == '__main__':
url = 'https://api.punkapi.com/v2/beers/'
i=0
for i in range(1,235):
        # Range of 235 so the loop runs 234 times, which is the number of beers available.
qery = getDatos(url,i)
if qery:
print(" {}.- Datos almacenados.".format(i))
else:
            print("Fallo, la tabla está vacía.")
#print(qery)
        # Printing qery shows the SELECT results in the order they were stored.
i+=1
print("Listo.")
| {
"repo_name": "AnhellO/DAS_Sistemas",
"path": "Ago-Dic-2018/Andres Lopez/Practica 4/Practica 2 Parcial 2.py",
"copies": "1",
"size": "2430",
"license": "mit",
"hash": -2752291328423617000,
"line_mean": 31.7972972973,
"line_max": 146,
"alpha_frac": 0.6361763494,
"autogenerated": false,
"ratio": 2.56553911205074,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37017154614507397,
"avg_score": null,
"num_lines": null
} |
__author__ = 'Libao Jin'
__date__ = 'December 18, 2015'
import time
class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
# start_time = time.time()
# n = len(nums)
# i = 0
# while i < n - 1:
# j = i + 1
# if nums[i] > target:
# continue
# while j < n:
# if nums[i] + nums[j] > target:
# break
# if nums[i] + nums[j] == target:
# end_time = time.time()
# periods = end_time - start_time
# print(periods)
# return [i + 1, j + 1]
# else:
# j += 1
# i += 1
start_time = time.time()
n = len(nums)
i = 0
while i < n - 1:
t = target - nums[i]
if t in nums:
j = nums.index(t)
if j <= i:
j = i + 1
if nums[i] > target:
continue
while j < n:
if nums[i] + nums[j] == target:
end_time = time.time()
periods = end_time - start_time
print(periods)
return [i + 1, j + 1]
else:
j += 1
i += 1
continue
else:
end_time = time.time()
periods = end_time - start_time
print(periods)
return [i + 1, j + 1]
i += 1
if __name__ == '__main__':
s = Solution()
nums = [1, 2, 3, 4, 5]
print(s.twoSum(nums, 3))
print(s.twoSum(nums, 4))
print(s.twoSum(nums, 5))
print(s.twoSum(nums, 6))
print(s.twoSum(nums, 7))
print(s.twoSum(nums, 8))
print(s.twoSum(nums, 9))
nums = [-1, -2, -3, -4, -5]
print(s.twoSum(nums, -8))
| {
"repo_name": "jinlibao/LeetCode-Solutions",
"path": "solutions/001_Two_Sum.py",
"copies": "2",
"size": "2151",
"license": "mit",
"hash": 2515048178935872000,
"line_mean": 28.0675675676,
"line_max": 59,
"alpha_frac": 0.3454207345,
"autogenerated": false,
"ratio": 3.76707530647986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.02026010126714032,
"num_lines": 74
} |
# 002. Add Two Numbers - LeetCode
# https://leetcode.com/problems/add-two-numbers/description/
from helper.linked_list import ListNode, LinkedList, traversal
# Non Empty, reversed
# class Solution(object):
# def addTwoNumbers(self,l1,l2):
# def linkedlist_to_integer(l):
# ret = 0
# count = 0
# head = l
# while head:
# ret += head.val * ( 10 ** count )
# count += 1
# head = head.next
# return ret
# msum = linkedlist_to_integer(l1) + linkedlist_to_integer(l2)
# ret = ListNode( msum % 10 )
# msum //= 10
# head = ret
# while msum > 0:
# head.next = ListNode( msum % 10 )
# msum //= 10
# head = head.next
# return ret
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
a = l1
b = l2
c = 0
while True:
res = a.val + b.val + c
c = 0
if res > 9:
c = 1
res -= 10
a.val = res
if a.next is None:
a.next = b.next
break
elif b.next is None:
break
a = a.next
b = b.next
while c > 0:
if a.next is None:
a.next = ListNode(1)
c = 0
elif a.next.val < 9:
a.next.val += 1
c = 0
else:
a.next.val = 0
a = a.next
return l1
ans = [
([1,2,3],[3,2,1],[4,4,4]),
([1,2,3],[1],[2,2,3]),
([1],[1,2,3],[2,2,3]),
([2,4,3],[5,6,4],[7,0,8]), #WA1
([5],[5],[0,1]), # WA2
([5],[5,1],[0,2]),
([1],[9,9],[0,0,1]), # WA3
([0],[0],[0]),
]
s = Solution()
for i in ans:
ret = s.addTwoNumbers(LinkedList(i[0]).head,LinkedList(i[1]).head)
print( "O" if traversal(ret) == i[2] else "X", traversal(ret), i[2] ) | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/002_add-two-numbers.py",
"copies": "1",
"size": "2124",
"license": "mit",
"hash": -6848560599147613000,
"line_mean": 24.9146341463,
"line_max": 73,
"alpha_frac": 0.4058380414,
"autogenerated": false,
"ratio": 3.189189189189189,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4095027230589189,
"avg_score": null,
"num_lines": null
} |
# 002 - Add Two Numbers (Medium)
# https://leetcode.com/problems/add-two-numbers/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
carry = 0
# A node to store the answer and a node to keep appending nodes to.
res_node, cur_node = None, None
# Loop while either linked list has still something to consume.
while l1 is not None or l2 is not None:
# If a linked list has been exhausted just assign value 0.
val_1 = l1.val if l1 is not None else 0
val_2 = l2.val if l2 is not None else 0
val_res = (val_1 + val_2) + carry
# The values have been consumed so advance the linked lists.
l1 = l1.next if l1 is not None else None
l2 = l2.next if l2 is not None else None
carry = 1 if val_res > 9 else 0
val_res %= 10
if res_node is None:
# Initialization of the first node.
res_node = ListNode(val_res)
cur_node = res_node
else:
# Keep advancing nodes in the result linked list.
new_node = ListNode(val_res)
cur_node.next = new_node
cur_node = new_node
# If there's a carry at the end, add a new node with that carry.
if carry == 1:
cur_node.next = ListNode(1)
        # The original node reference was never advanced, so it still points to the start of the list.
return res_node
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/02_Medium/lc_002.py",
"copies": "1",
"size": "1752",
"license": "mit",
"hash": 8937769810065111000,
"line_mean": 33.3529411765,
"line_max": 77,
"alpha_frac": 0.5496575342,
"autogenerated": false,
"ratio": 3.816993464052288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48666509982522876,
"avg_score": null,
"num_lines": null
} |
# 006_cleaner.py
#####################################################################
##################################
# Import modules and add the working path for relative imports
import sys
sys.path.insert(0 , 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/')
from voca import AddLog , StringFormatter , OutFileCreate , OdditiesFinder
##################################
# Initialize the paths and file names
missionName = '006'
AddLog('title' , '{} : Début du nettoyage du fichier'.format(missionName))
work_dir = 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/raw/{}_raw/'.format(missionName)
# Name of the source file
raw_file = 'src'
##################################
# retrieving the raw string
raw_string_with_tabs = open(work_dir + raw_file , 'r').read()
# replacing tabs with carriage return
raw_string_with_cr = raw_string_with_tabs.replace( '\t', '\n' )
# turning the string into a list
raw_list = raw_string_with_cr.splitlines()
# going through oddities finder
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( raw_list )
# going through string formatter
ref_list = []
AddLog('subtitle' , 'Début de la fonction StringFormatter')
for line in list_without_oddities:
ref_list.append( StringFormatter( line ) )
##################################
# Save the output files
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/out/','{}_src'.format(missionName),ref_list,'prenoms feminins italiens')
| {
"repo_name": "sighill/shade_app",
"path": "apis/raw/006_raw/006_cleaner.py",
"copies": "1",
"size": "1624",
"license": "mit",
"hash": 3086637915699015700,
"line_mean": 41.7837837838,
"line_max": 147,
"alpha_frac": 0.637037037,
"autogenerated": false,
"ratio": 3.0393996247654784,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41764366617654786,
"avg_score": null,
"num_lines": null
} |
# 003 - Longest Substring Without Repeating Characters (Medium)
# https://leetcode.com/problems/longest-substring-without-repeating-characters/
from collections import defaultdict
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
positions = defaultdict(lambda: -1)
# A moving index of the current substring.
start_idx = 0
# The length of the longest substring.
max_length = 0
for idx in range(len(s)):
char = s[idx]
# If the current character has a previous recorded
# position and the starting index is behind that,
# then we have to move the starting index to the
# position right after the current character.
if start_idx <= positions[char]:
start_idx = positions[char] + 1
# Otherwise, increase the length of the max substring.
else:
max_length = max(max_length, idx - start_idx + 1)
# Store the current index as the latest position where
# the current character has been seen.
positions[char] = idx
return max_length
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/02_Medium/lc_003.py",
"copies": "1",
"size": "1232",
"license": "mit",
"hash": 3789780809355710500,
"line_mean": 35.2352941176,
"line_max": 79,
"alpha_frac": 0.5957792208,
"autogenerated": false,
"ratio": 4.546125461254612,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5641904682054613,
"avg_score": null,
"num_lines": null
} |
#[0,0,5, 1,0,9, 4,7,3,
# 0,0,9, 5,0,0, 8,0,6,
# 1,4,0, 8,0,0, 2,0,0,
#
# 4,0,0, 0,0,0, 0,6,0,
# 0,0,6, 7,0,2, 5,0,0,
# 0,8,0, 0,0,0, 0,0,1,
#
# 0,0,4, 0,0,1, 0,2,8,
# 5,0,2, 0,0,8, 6,0,0,
# 3,9,8, 2,0,7, 1,0,0]
s = [0,0,5,1,0,9,4,7,3,0,0,9,5,0,0,8,0,6,1,4,0,8,0,0,2,0,0,4,0,0,0,0,0,0,6,0,0,0,6,7,0,2,5,0,0,0,8,0,0,0,0,0,0,1,0,0,4,0,0,1,0,2,8,5,0,2,0,0,8,6,0,0,3,9,8,2,0,7,1,0,0]
def findUsedForLocal(n):
local_x = n % 9 #Finds the first position in the y axis.
local_y = n // 9
start_y = local_y * 9 #Finds the first position in the x axis.
anchor = n - local_x % 3 - (local_y % 3) * 9 #Finds the first position in the group.
brukt = 0
    for i in range(0, 9): #Test all x, y and group values; it does not matter if it also checks the value in the n position.
brukt |= 1 << s[start_y + i] #Gather info from the y axis.
brukt |= 1 << s[i * 9 + local_x] #Gather info from the x axis.
brukt |= 1 << s[anchor + (i // 3) * 9 + (i % 3)] #Gather info from the group.
    return brukt #Returns all the values it cannot be. Any bit not set is still a possibility.
def findSolution(n):
    while n < 81 and s[n] != 0: #Skip cells that already have values.
n += 1
    if n == 81: #Solution found; returning False here instead would enumerate every possible combination.
return True
    brukt = findUsedForLocal(n) #Find the values already taken; their complement is what we can try.
    for i in range(1, 10): #Check whether value(i) is possible,
if brukt & (1 << i) == 0: # 0 means that it is not a conflict on x, y or g, and might be a posibility.
            s[n] = i #Tentatively set i as a possible value.
if findSolution(n + 1):
return True
            s[n] = 0 #None of the possible values worked. Reset the cell.
return False #solution not found, yet.
findSolution(0)
print(s)
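# Illustrative sketch of the bitmask bookkeeping used above (not part of the solver):
# bit i of 'brukt' marks digit i as already used on the row, column or 3x3 group,
# so a candidate digit is legal exactly when its bit is still clear.
_used = (1 << 5) | (1 << 9)      # digits 5 and 9 already seen
assert _used & (1 << 5) != 0     # 5 would conflict
assert _used & (1 << 3) == 0     # 3 is still allowed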
| {
"repo_name": "oddbear/Algorithms",
"path": "Sudoku/Solvers/Python3/Standard Sudoku/alg06/main.py",
"copies": "1",
"size": "1695",
"license": "mit",
"hash": -3184779239254468000,
"line_mean": 33.5918367347,
"line_max": 167,
"alpha_frac": 0.5970501475,
"autogenerated": false,
"ratio": 2.077205882352941,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7882046440979169,
"avg_score": 0.05844191777475442,
"num_lines": 49
} |
# 005_cleaner.py
#####################################################################
##################################
# Import modules and add the working path for relative imports
import sys
sys.path.insert(0 , 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/')
from voca import AddLog , StringFormatter , OutFileCreate , OdditiesFinder
##################################
# Initialize the paths and file names
missionName = '005'
AddLog('title' , '{} : Début du nettoyage du fichier'.format(missionName))
work_dir = 'C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/raw/{}_raw/'.format(missionName)
# Name of the source file
raw_file = 'src'
##################################
# retrieving the raw string
raw_string_with_tabs = open(work_dir + raw_file , 'r').read()
# replacing tabs with carriage return
raw_string_with_cr = raw_string_with_tabs.replace( '\t', '\n' )
# turning the string into a list
raw_list = raw_string_with_cr.splitlines()
# going through oddities finder
AddLog('subtitle' , 'Début de la fonction OdditiesFinder')
list_without_oddities = OdditiesFinder( raw_list )
# going through string formatter
ref_list = []
AddLog('subtitle' , 'Début de la fonction StringFormatter')
for line in list_without_oddities:
ref_list.append( StringFormatter( line ) )
##################################
# Save the output files
AddLog('subtitle' , 'Début de la fonction OutFileCreate')
OutFileCreate('C:/Users/WILLROS/Perso/Shade/scripts/LocalWC-Shade-App/apis/out/','{}_src'.format(missionName),ref_list,'prenoms masculins italiens')
| {
"repo_name": "sighill/shade_app",
"path": "apis/raw/005_raw/005_cleaner.py",
"copies": "1",
"size": "1625",
"license": "mit",
"hash": -8571346310990103000,
"line_mean": 41.8108108108,
"line_max": 148,
"alpha_frac": 0.63726095,
"autogenerated": false,
"ratio": 3.041275797373358,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4178536747373358,
"avg_score": null,
"num_lines": null
} |
# 006. ZigZag Conversion
# The idea is based on Min Priority Queue.
# However, in python, this turned out to be really slow.
import unittest
import queue
class Solution(object):
def convert(self, s, numRows):
"""
:type s: str
:type numRows: int
:rtype: str
"""
if numRows == 1:
return s
q = queue.PriorityQueue()
for i,c in enumerate(s):
p = i % ((numRows-1) * 2)
if p >= numRows:
p = numRows * 2 - 2 - p
q.put(((p,i), c))
ret = ""
while not q.empty():
_,c = q.get()
ret = ret+c
return ret
class SolutionUnitTest(unittest.TestCase):
def setup(self):
pass
def tearDown(self):
pass
def testsingleNumber(self):
s = Solution()
self.assertEqual(
s.convert("perowkjahsdmnzmqweuiryakjdshvzkljaptipoaadkjfh", 4),
"pjnujkikekamzeikdzltpdjrwhdmwrasvjpoafosqyhaah")
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "hanlin-he/UTD",
"path": "leetcode/py/006.queue.py",
"copies": "1",
"size": "1053",
"license": "mit",
"hash": -2570669376171554300,
"line_mean": 25.325,
"line_max": 75,
"alpha_frac": 0.5327635328,
"autogenerated": false,
"ratio": 3.463815789473684,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44965793222736844,
"avg_score": null,
"num_lines": null
} |
# 01-02-04
#v1.0.2
#
# Date Utils
# By Fuzzyman see www.voidspace.org.uk/atlantibots/pythonutils.html
# Written for the Victory Day program for Jesus Fellowship Church
# www.jesus.org.uk
# These are various functions for dealing with dates (including leap years and so on)
# Useful especially for situations where you have to arrange appointments.
# (e.g. second Tuesday of the month etc...)
# None of these functions are designed to handle BC dates.........
# They will also only work with dates from the Gregorian (modern) calendar.
# They usually assume that given dates are *possible* dates.
# (Although there is a function to explicitly check a date).
# Help and inspiration was taken from :
# http://users.aol.com/s6sj7gt/mikecal.htm and
# http://mathforum.org/library/drmath/view/62338.html
# If you have any bug reports or suggestions please contact me.
# If you would like to be notified of bug fixes / updates then please contact me.
# E-mail fuzzyman AT atlantibots DOT org DOT uk (or michael AT foord DOT me DOT uk )
# Code maintained at http://www.voidspace.org.uk/atlantibots/pythonutils.html
# Copyright Michael Foord
# Not for use in commercial projects without permission.
# If you use them in a non-commercial project then please credit me and include a link back.
# If you release the project non-commercially then let me know (and include this message with my code !)
# No warranty express or implied for the accuracy, fitness to purpose or otherwise for this code....
# Use at your own risk !!!
from time import localtime
##############################
# First set up some useful values
monthslower = [ 'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december' ]
dayslower =[ 'sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday',
'saturday' ]
monthdict = { 'january' : 31, 'february' : 28, 'march' : 31, 'april' : 30, 'may' : 31,
'june' : 30, 'july' : 31, 'august' : 31, 'september' : 30, 'october' : 31,
'november' : 30, 'december' : 31 }
monthdictleap = { 'january' : 31, 'february' : 29, 'march' : 31, 'april' : 30, 'may' : 31,
'june' : 30, 'july' : 31, 'august' : 31, 'september' : 30, 'october' : 31,
'november' : 30, 'december' : 31 }
monthlist = [ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ]
monthlistleap = [ 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 ]
days =[ 'Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday' ]
months = [ 'January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December' ]
#############################
# Next the functions
"""
There are various useful 'constants' defined in dateutils :
monthslower, dayslower = lowercase lists of the days and months
monthdict, monthdictleap = dictionaries keyed by month - value is the number of days in the month (monthdictleap is for a leap year)
monthlist, monthlistleap = a list of the number of days in the month (monthlistleap is for a leapyear)
days, months = capitalised lists of the days and months
dateformcon = a dictionary with the standard config settings for the formatted date function.
The Following functions are defined in dateutils :
(Some of the functions depend on each other - so it's better to import the ones you want rather than cut and paste :-)
realdate(day, month, year):
Returns true if the supplied date is a possible date
and False if it isn't :-) (Note - it only tests that the *year* is greater than zero).
isleapyear(year):
Given a year as an integer (e.g. 2004) it returns True if the year is a leap year,
and False if it isn't.
daysinmonth(year, month):
Given a year and a month it returns how many days are in that month.
datetoday(day, month, year):
Passed in a date, in integers, it returns the day of the week.
Output is expressed as an integer from 0-6.
0 is Sunday, 1 is Monday.......
datestringtoints(datestring):
Passed in a datestring - in the form 'yyyymmdd'
(e.g. 20040122 being 22nd January 2004) -
it returns an integer tuple ( yyyy, mm, dd ).
If the datestring is of the wrong length it returns None.
(It assumes a four figure year).
intstodatestring(day, month, year):
Given three integers for day, month and year
it returns a datestring 'yyyymmdd' (for easy storage).
returndate():
Returns the local date using the localtime function
from the time module.
Returns integers - ( yyyy, mm, dd ).
nearestday(day, month, year, dayofweek = 2, afteronly = 0):
Given a date as three integers (year, month and day) it returns the nearest
date that is 'dayofweek'. (dayofweek should be an integer from 0 - 6. 0 is Sunday, 1 Monday etc..)
    If afteronly is set to 1 then it finds the nearest date of that day, on or *after* the specified date.
Returns integers - ( yyyy, mm, dd ).
dayofweek defaults to Tuesday (2) and afteronly defaults to 0 as they are the defaults I'm using for the Victory Day program this is written for.
This is used for : e.g find the nearest Tuesday to a given date, or find the nearest Tuesday *after* a given date !
addnumdays(day, month, year, modifier):
Given a date as three integers (year, month and day) and a number of days to add or subtract
to that date (the integer modifier, positive or negative value) - it returns the correct date
as a tuple of integers - ( yyyy, mm, dd ).
incdate(day, month, year):
Given a date it adds one day to the date and returns the new date.
decdate(day, month, year):
Given a date it subtracts one day from the date and returns the new date.
adddate(day1, month1, year1, day2, month2, year2):
Given a date as three integers (year1, month1 and day1) and another number of days (day2), months (month2)
and years (year2) to add to that date (or subtract from it) - it returns the new date as a tuple of integers - ( yyyy, mm, dd ).
Note :
Feb 28th + 1 month = March 31st
Feb 29th + 1 month = March 31st
    January 29th to 31st + 1 month = Feb 28th/29th
August 31st + 1 month = September 30th
We add the years together, then the months, then correct for the 'end of month' (e.g. we change Sep 31st to Sep 30th)
Finally we add any extra days on.
daycount(year, month, day)
This is an implementation of the Julian Day system. This
is a continuous count of days from January 1, 4713 B.C.
    Given a date in integers it returns an integer value for the date.
    This represents its Julian Day number as above.
    This only works for dates represented using the Gregorian
calendar which was adopted in the US/UK on Oct. 15, 1582 - but
at different times elsewhere (so historical dates may not be in this system....).
counttodate(daycount)
Given the number for a date using the Julian Day System,
it returns that date as integer tuple (year, month, day).
daysbetween(day1, month1, year1, day2, month2, year2)
Given two dates it returns the number of days between them.
If date1 is earlier than date2 then the result will be positive.
def dayfinish(day)
Takes an integer day and returns the correct finish for it
    1 = 'st', 2 = 'nd', 3 = 'rd', 4-20 = 'th' etc....
def formatteddate(day, month, year, configdict = {}, **configs)
    Given a date in integers, it returns the date as a nicely formatted string :
e.g. 24th January 1997 or 2nd February 1948
configs accepts the following keywords :
dayofweek, addzero, addcom, fullstop, monthfirst
e.g. print(formatteddate(12, 8, 1974, dayofweek=1, addzero=0, addcom=1, fullstop=1, monthfirst=0))
Monday 12th August, 1974.
If dayofweek is set to 1 then the day of the week will also be printed :
e.g. Monday 24th January 1997
If addzero is set to 1 then days 1-9 will have an additional zero :
e.g. 02nd February 1948
If addcom is set to 1 then there will be a comma between the month and the year :
e.g. 24th January, 1997
If fullstop is set to 1 then there will be a fullstop after the year :
e.g. 24th January 1997.
    If monthfirst is set to 1 then the month will be put before the day :
e.g. January 24th 1997
If the year is set to zero then it will be missed off.
(and the dayofweek will be treated as 0 in this case as well).
There is a dictionary called dateformcon defined in the dateutils module with all the config values
defined and some good standard settings :-)
This dictionary can be passed in instead of the individual settings.
"""
#############################
def realdate(day, month, year):
"""Returns true if the supplied date is a possible date
and False if it isn't :-) (Note - it *only* tests that the year is greater than zero)."""
if month > 12 or year < 1 or day < 1 or month < 1:
return False
elif month == 2: # if it's february we need to know if it's a leap year
if isleapyear(year):
numdays = 29
else:
numdays = 28
else:
numdays = monthlist[ month-1 ] # -1 because in the list January is 0
if day > numdays:
return False
else:
return True
def isleapyear(year):
"""Given a year as an integer (e.g. 2004) it returns True if the year is a leap year,
and False if it isn't."""
if year%4 != 0:
return False
elif year%100 !=0:
return True
elif year%400 == 0:
return True
else:
return False
def daysinmonth(year, month):
"""Given a year and a month it returns how many days are in that month."""
if month == 2: # if it's february we need to know if it's a leap year
if isleapyear(year):
numdays = 29
else:
numdays = 28
else:
numdays = monthlist[ month-1 ] # -1 because in the list January is 0
return numdays
def datetoday(day, month, year):
"""Passed in a date, in integers, it returns the day of the week.
Output is expressed as an integer from 0-6.
0 is Sunday, 1 is Monday....... """
# dayofweek would have been a better name for this function :-(
d = day
m = month
y = year
if m < 3:
z = y-1
else:
z = y
dayofweek = ( 23*m//9 + d + 4 + y + z//4 - z//100 + z//400 )
if m >= 3:
dayofweek -= 2
dayofweek = dayofweek%7
return dayofweek
def datestringtoints(datestring):
"""Passed in a datestring - in the form 'yyyymmdd'
(e.g. 20040122 being 22nd January 2004) -
it returns an integer tuple ( yyyy, mm, dd ).
If the datestring is of the wrong length it returns None.
(It assumes a four figure year)."""
if len(datestring) != 8: # badly formed datestring
return None
return (int(datestring[:4]), int(datestring[4:6]), int(datestring[6:8]))
def intstodatestring(day, month, year):
"""Given three integers for day, month and year
it returns a datestring 'yyyymmdd' (for easy storage)."""
y = str(year)
while len(y) < 4:
y = '0' + y
m = str(month)
d = str(day)
if len(m) < 2:
m = '0' + m
if len(d) < 2:
d = '0' + d
return y+m+d
def returndate():
"""Returns the local date using the localtime function
from the time module.
Returns integers - ( yyyy, mm, dd )."""
try: # because this function doesn't work on some platforms
datetuple = localtime()
except:
return (2004, 1, 31)
return ( datetuple[0], datetuple[1], datetuple[2] )
def nearestday(day, month, year, dayofweek = 2, afteronly = 0):
"""Given a date as three integers (year, month and day) it returns the nearest
date that is 'dayofweek'. (dayofweek should be an integer from 0 - 6. 0 is Sunday, 1 Monday etc..)
    If afteronly is set to 1 then it finds the nearest date of that day, on or *after* the specified date.
Returns integers - ( yyyy, mm, dd ).
dayofweek defaults to Tuesday (2) and afteronly defaults to 0 as they are the defaults I'm using for the Victory Day program this is written for.
This is used for : e.g find the nearest Tuesday to a given date, or find the nearest Tuesday *after* a given date !"""
thisday = datetoday(day, month, year)
if thisday == dayofweek:
return (year, month, day)
if thisday < dayofweek: # this 'if else test' tells us the number of days between the two days of the week
forward = dayofweek - thisday
backward = 7 - forward
else:
backward = thisday - dayofweek
forward = 7 - backward
if afteronly or forward < backward:
difference = forward
else:
difference = -backward
return addnumdays(day, month, year, difference)
def addnumdays(day, month, year, modifier):
"""Given a date as three integers (year, month and day) and a number of days to add or subtract
to that date (the integer modifier, positive or negative value) - it returns the correct date
as a tuple of integers - ( yyyy, mm, dd )."""
if modifier > 0: # damn - different rules for negative modifiers and hard to make generic
if month == 2 and isleapyear(year) and day == 29: # special case
modifier -= 1
month = 3
day = 1
while modifier >= 365: # add any years on
if month <= 2 and isleapyear(year) or month > 2 and isleapyear(year+1):
numdays = 366
else:
numdays = 365
if modifier >= numdays:
year += 1
modifier -= numdays
else:
break
while modifier >= 28: #add any full months on
if month == 2: # if it's february we need to know if it's a leap year
if isleapyear(year):
numdays = 29
else:
numdays = 28
else:
numdays = monthlist[ month-1 ] # -1 because in the list January is 0
if modifier >= numdays:
modifier -= numdays
if month != 12:
month += 1
else:
month = 1
year += 1
else:
break
# now we need to correct if the new 'day' value is greater than the number of days in the new month......
if month == 2: # if it's february we need to know if it's a leap year
if isleapyear(year):
numdays = 29
else:
numdays = 28
else:
numdays = monthlist[ month-1 ] # -1 because in the list January is 0
if day > numdays:
if month != 12:
month += 1
else:
month = 1
year += 1
day = day - numdays
while modifier > 0:
year, month, day = incdate(day, month, year)
modifier -= 1
elif modifier < 0: # we have to subtract days
modifier = -modifier # easier to deal with positive numbers :-)
if month == 2 and isleapyear(year) and day == 29: # special case
modifier -= 1
day = 28
while modifier >= 365: # take any years off
if month > 2 and isleapyear(year) or month <= 2 and isleapyear(year-1):
numdays = 366
else:
numdays = 365
if modifier >= numdays:
year -= 1
modifier -= numdays
else:
break
while modifier >= 28: # subtract any full months on
if month == 2:
if isleapyear(year):
numdays = 29
else:
numdays = 28
else:
numdays = monthlist[month-1]
adjuster = numdays - day # how many days before the end of the month is it
if day > numdays:
modifier -= numdays
if month != 1:
month -=1
else:
month = 12
year -= 1
if month == 2:
if isleapyear(year):
numdays = 29
else:
numdays = 28
else:
numdays = monthlist[month-1]
                day = numdays - adjuster # if we've gone back a whole month it's now the same number of days before the end of the month
else:
break
while modifier > 0:
year, month, day = decdate(day, month, year)
modifier -= 1
return ( year, month, day )
def incdate(day, month, year):
"""Given a date it adds one day to the date and returns the new date."""
if month == 2: # if it's february we need to know if it's a leap year
if isleapyear(year):
numdays = 29
else:
numdays = 28
else:
numdays = monthlist[ month-1 ] # -1 because in the list January is 0
if day < numdays:
day += 1
else: # of course, here day should equal numdays or the date is invalid :-)
if month == 12:
month = 1
year +=1
day = 1
else:
month += 1
day = 1
return ( year, month, day )
def decdate(day, month, year):
"""Given a date it subtracts one day from the date and returns the new date."""
if day > 1:
day -= 1
elif month == 1: # 1st January
year -=1
day = 31
month = 12
elif month == 3: # 1st March
if isleapyear(year):
day = 29
else:
day = 28
month = 2
else:
day = monthlist[ month-2 ]
month -= 1
return ( year, month, day )
def adddate(day1, month1, year1, day2, month2, year2):
"""Given a date as three integers (year1, month1 and day1) and another number of days (day2), months (month2)
and years (year2) to add to that date (or subtract from it) - it returns the new date as a tuple of integers - ( yyyy, mm, dd ).
Note :
Feb 28th + 1 month = March 31st
Feb 29th + 1 month = March 31st
    January 29th to 31st + 1 month = Feb 28th/29th
August 31st + 1 month = September 30th
We add the years together, then the months, then correct for the 'end of month' (e.g. we change Sep 31st to Sep 30th)
Finally we add any extra days on."""
year = year1 + year2
month = month1 + month2
while month < 1:
year -= 1
month += 12
while month > 12:
year += 1
month -=12
numdays = daysinmonth(year, month)
if day1 > numdays:
day1 = numdays
if day2 < 0:
day2 = -day2
thisfunc = decdate
else:
thisfunc = incdate
while day2 > 0:
year, month, day1 = thisfunc(day1, month, year)
day2 -= 1
return year, month, day1
def daycount(year, month, day):
""""This is an implementation of the Julian Day system. This
is a continuous count of days from January 1, 4713 B.C.
Given a date in in integers it returns an integer value for the date
This represents it's Julian Day number as above.
This only works for dates represented using the the Gregorian
calendar which was adopted in the US/UK on Oct. 15, 1582 - but
at different times elsewhere (so historical dates may not be in this system....)."""
if month < 3:
year = year - 1
month = month + 13
else:
month = month + 1
A = int(year/100)
B = 2 - A + int(A/4)
return int(365.25*year) + int(30.6001*month) + B + day + 1720995
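# Worked example of the formula above: daycount(2000, 1, 1) uses year=1999,
# month=14, A=19, B=-13, giving
# int(365.25*1999) + int(30.6001*14) - 13 + 1 + 1720995 = 730134 + 428 - 13 + 1 + 1720995 = 2451545,
# the Julian Day number commonly quoted for 2000-01-01 (noon).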
def counttodate(daycount):
"""Given the number for a date using the Julian Day System,
it returns that date as integer tuple (year, month, day)."""
# note - slow and badly implemented... but fast enough :-)
daycount = daycount - 2453030
return addnumdays(25, 1, 2004, daycount)
def daysbetween(day1, month1, year1, day2, month2, year2):
"""Given two dates it returns the number of days between them.
If date1 is earlier than date2 then the result will be positive."""
return daycount(year2, month2, day2) - daycount(year1, month1, day1)
def dayfinish(day):
"""Takes an integer day and returns the correct finish for it
    1 = 'st', 2 = 'nd', 3 = 'rd', 4-20 = 'th' etc....
if day > 3 and day < 21:
return 'th' # special cases
daystr = str(day)
if len(daystr) > 1:
daystr = daystr[-1]
if daystr == '1':
return 'st'
elif daystr == '2':
return 'nd'
elif daystr == '3':
return 'rd'
else:
return 'th'
def formatteddate(day, month, year, configdict = {}, **configs):
"""Given a date in in integers, it returns the date as a nicely formatted string :
e.g. 24th January 1997 or 2nd February 1948
configs accepts the following keywords :
dayofweek, addzero, addcom, fullstop, monthfirst
e.g. print(formatteddate(12, 8, 1974, dayofweek=1, addzero=0, addcom=1, fullstop=1, monthfirst=0))
Monday 12th August, 1974.
If dayofweek is set to 1 then the day of the week will also be printed :
e.g. Monday 24th January 1997
If addzero is set to 1 then days 1-9 will have an additional zero :
e.g. 02nd February 1948
If addcom is set to 1 then there will be a comma between the month and the year :
e.g. 24th January, 1997
If fullstop is set to 1 then there will be a fullstop after the year :
e.g. 24th January 1997.
    If monthfirst is set to 1 then the month will be put before the day :
e.g. January 24th 1997
If the year is set to zero then it will be missed off.
(and the dayofweek will be treated as 0 in this case as well).
There is a dictionary called dateformcon defined in the dateutils module with all the config values
defined and some good standard settings :-)
This dictionary can be passed in instead of the individual settings.
"""
keywordlist = ['dayofweek', 'addzero', 'addcom', 'fullstop', 'monthfirst']
if configdict != {} and isinstance(configdict, dict):
configs = configdict
for member in keywordlist:
        if member not in configs:
configs[member] = 0
outstring = ''
if configs['dayofweek'] and year:
outstring = days[datetoday(day, month, year)] +' '
if day < 10 and configs['addzero']:
daystr = '0' + str(day)
else:
daystr = str(day)
if not configs['monthfirst']:
outstring += daystr + dayfinish(day) + ' ' + months[month-1]
else:
outstring += months[month-1] + ' ' + daystr + dayfinish(day)
if configs['addcom'] and year:
outstring += ','
if year:
outstring += ' ' + str(year)
if configs['fullstop']:
outstring += '.'
return outstring
dateformcon = { 'dayofweek' : 1, 'addzero' : 0, 'addcom' : 1, 'fullstop' : 1, 'monthfirst' : 0 }
############################################################
if __name__ == "__main__":
print(returndate())
year, month, day = returndate()
test = daycount(year, month, day)
print(test)
print(counttodate(test))
while True:
x = raw_input("Enter Year of date (Enter to quit) >> ")
if x=='':
break
y = raw_input("Enter Month >> ")
z = raw_input("Enter Day >> ")
test = daycount(int(x), int(y), int(z))
print(test)
print(counttodate(test))
print(realdate(32, 1, 2004))
while True:
x = raw_input("Enter Modifier (0 to quit) >> ")
if x=='0':
break
print(addnumdays(31, 3, 2004, -int(x) ))
while True:
x = raw_input("Enter Day of Week 0-6 (7 to quit) >> ")
if x=='7':
break
print(nearestday(24, 1, 2004, int(x)))
while True:
x = raw_input("Enter Years to Add (Enter to quit) >> ")
if x=='':
break
y = raw_input("Enter Months to Add >> ")
z = raw_input("Enter Days To Add >> ")
print(adddate(24, 1, 2004, int(z), int(y), int(x)))
year, month , day = adddate(24, 1, 2004, int(z), int(y), int(x))
print("The nearest Tuesday after that date is ", nearestday(day, month, year))
"""
Versionlog
01-02-04 Version 1.0.2
Corrected bug in intstodatestring.
Created lowercase day list and capitalised month list.
Added formatteddate and dayfinish function.
Put a try: except: catch in returndate - mainly so I can test on the pocketpc.
"""
| {
"repo_name": "amir-zeldes/rstWeb",
"path": "modules/dateutils.py",
"copies": "1",
"size": "24933",
"license": "mit",
"hash": -1155343091978499600,
"line_mean": 37.6558139535,
"line_max": 150,
"alpha_frac": 0.6003288814,
"autogenerated": false,
"ratio": 3.683409661693012,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47837385430930124,
"avg_score": null,
"num_lines": null
} |
# 01/02/2017
import re
def get_inputs(inputFile="encoded.txt"):
f = open(inputFile, 'r')
temp = f.readline().strip().split()
key = {}
for x, y in zip(temp[::2], temp[1::2]):
key[y] = x
return (f.read(), key)
def decode(args):
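    # Grow a window over the message until it matches one of the stored codes
    # (strings of 'g'/'G'); any character that is not part of a code is passed
    # through unchanged.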
message, key, output = args[0], args[1], []
start, finish, keys = 0, 1, key.keys()
while finish <= len(message):
if message[start:finish] in keys:
output.append(key[message[start:finish]])
start = finish
elif not message[start] in "gG":
output.append(message[start])
start = finish
finish += 1
return ''.join(output)
def encode(inputFile="input.txt", outputFile="encoded.txt"):
fi = open(inputFile, 'r')
text = fi.read()
    nbits, key = len(set(re.sub(r"\W", "", text))).bit_length(), {}
outputText, outputCode = [], []
for char in text:
if (not char in key.keys()) and (char.isalpha()):
key[char] = '{:0{length}b}'.format(len(key.keys()),
length = nbits).replace("0", "g").replace("1", "G")
outputText.append(char if not char.isalpha() else key[char])
outputText = ''.join(outputText)
for alpha, code in key.items():
outputCode.append("{} {} ".format(alpha, code))
outputCode = ''.join(outputCode + ['\n'])
fo = open(outputFile, 'w')
fo.write(''.join((outputCode, outputText)))
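# Illustrative usage sketch; assumes an "input.txt" file exists alongside this
# script (the default expected by encode() above).
if __name__ == "__main__":
    encode()                     # writes encoded.txt from input.txt
    print(decode(get_inputs()))  # reads encoded.txt back and decodes it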
| {
"repo_name": "tlgs/dailyprogrammer",
"path": "Python/intermediate/i245.py",
"copies": "2",
"size": "1423",
"license": "unlicense",
"hash": -2240604543171092200,
"line_mean": 32.0930232558,
"line_max": 75,
"alpha_frac": 0.5551651441,
"autogenerated": false,
"ratio": 3.388095238095238,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49432603821952376,
"avg_score": null,
"num_lines": null
} |
# 01/05/2017
# Does not implement a very robust SBD system at this moment.
# An improvement would be to follow Wikipedia's "vanilla" approach of
# using a hand-compiled list of abreviations but I was unable to find a good one.
# https://en.wikipedia.org/wiki/Sentence_boundary_disambiguation
#
# Does not group singular/plural words, e.g.: car and cars
import re
with open(r"..\\..\\other\\en_stopwords.txt", "r") as f_stops:
STOP_WORDS = {word.rstrip() for word in f_stops.readlines()}
def tokenize_sentence(sentence):
'''Creates a list of words from a sentence'''
return [re.sub(r"\W+", "", word) for word in sentence.split()]
def create_bag_of_words(sentences):
'''Returns a bag of words and their frequency'''
bag = {}
for s in sentences:
for word in tokenize_sentence(s):
word = word.lower()
if word in bag:
bag[word] += 1
elif word not in STOP_WORDS:
bag[word] = 1
return bag
def score_sentence(sentence, bag):
'''Scores a sentence based on the presence of words from the bag provided'''
sentence = [x.lower() for x in tokenize_sentence(sentence)]
return sum([1 for w in bag if w in sentence])
def summarize_text(filename="text.txt", n_top_words=4, n_sentences=2):
with open(filename, "r") as f_text:
text = re.sub(r"\n(?=\w)", " ", f_text.read())
sentences = [s.rstrip() for s in re.split(r"(?<=[\.!?]) ", text)]
bag = create_bag_of_words(sentences)
top_words = sorted(bag, key=bag.get, reverse=True)[0:n_top_words]
ranked = {k: v for k, v in zip(sentences, [score_sentence(s, top_words) for s in sentences])}
summary_sentences = sorted(ranked, key=ranked.get, reverse=True)[0:n_sentences]
return ' '.join(sorted(summary_sentences, key=sentences.index))
if __name__ == "__main__":
print(summarize_text())
| {
"repo_name": "tlseabra/dailyprogrammer",
"path": "Python/hard/h312.py",
"copies": "2",
"size": "1883",
"license": "mit",
"hash": 2848616043031693000,
"line_mean": 36.66,
"line_max": 97,
"alpha_frac": 0.6394052045,
"autogenerated": false,
"ratio": 3.2804878048780486,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9901450666854797,
"avg_score": 0.003688468504650147,
"num_lines": 50
} |
# 01.06.2007, c
# last revision: 25.02.2008
from __future__ import absolute_import
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/various_formats/small2d.mesh'
material_1 = {
'name' : 'coef',
'values' : {'coef' : 1.0},
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Left',
'select' : 'vertices in (x < -0.499)',
'kind' : 'facet',
}
region_2 = {
'name' : 'Right',
'select' : 'vertices in (x > 0.499)',
'kind' : 'facet',
}
region_22 = {
'name' : 'Bottom',
'select' : 'vertices in (y < -0.499)',
'kind' : 'facet',
}
region_23 = {
'name' : 'Top',
'select' : 'vertices in (y > 0.499)',
'kind' : 'facet',
}
field_1 = {
'name' : '2_displacement',
'dtype' : 'real',
'shape' : (2,),
'region' : 'Omega',
'approx_order' : 2,
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '2_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '2_displacement',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
ebcs = {}
epbc_10 = {
'name' : 'rl',
'region' : ['Left', 'Right'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_y_line',
}
epbc_12 = {
'name' : 'tb',
'region' : ['Top', 'Bottom'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_x_line',
}
from sfepy.discrete.fem.periodic import match_x_line, match_y_line
functions = {
'match_x_line' : (match_x_line,),
'match_y_line' : (match_y_line,),
}
from sfepy.base.testing import TestCommon
##
# 01.06.2007, c
class Test( TestCommon ):
##
# 01.06.2007, c
def from_conf( conf, options ):
from sfepy.discrete import Problem
problem = Problem.from_conf(conf, init_equations=False)
test = Test( problem = problem,
conf = conf, options = options )
return test
from_conf = staticmethod( from_conf )
##
# c: 01.06.2007, r: 18.02.2008
def test_pbc( self ):
from sfepy.discrete import Variables, Conditions
problem = self.problem
conf = self.conf
ebcs = Conditions.from_conf(conf.ebcs, problem.domain.regions)
epbcs = Conditions.from_conf(conf.epbcs, problem.domain.regions)
variables = Variables.from_conf(conf.variables, problem.fields)
variables.equation_mapping(ebcs, epbcs, None, problem.functions)
state = variables.create_state_vector()
variables.apply_ebc(state)
return variables.has_ebc(state)
| {
"repo_name": "lokik/sfepy",
"path": "tests/test_periodic_bc_2d.py",
"copies": "5",
"size": "2902",
"license": "bsd-3-clause",
"hash": -7282990991255301000,
"line_mean": 20.9848484848,
"line_max": 72,
"alpha_frac": 0.5344589938,
"autogenerated": false,
"ratio": 2.8065764023210833,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.020950126577965467,
"num_lines": 132
} |
# 01.06.2007, c
# last revision: 25.02.2008
filename_mesh = 'database/tests/small2d.mesh'
material_1 = {
'name' : 'coef',
'mode' : 'here',
'region' : 'Omega',
'coef' : 1.0,
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Left',
'select' : 'nodes in (x < -0.499)',
}
region_2 = {
'name' : 'Right',
'select' : 'nodes in (x > 0.499)',
}
region_22 = {
'name' : 'Bottom',
'select' : 'nodes in (y < -0.499)'
}
region_23 = {
'name' : 'Top',
'select' : 'nodes in (y > 0.499)'
}
field_1 = {
'name' : '2_displacement',
'dim' : (2,1),
'flags' : (),
'domain' : 'Omega',
'bases' : {'Omega' : '2_3_P2'}
}
field_2 = {
'name' : 'pressure',
'dim' : (1,1),
'flags' : (),
'domain' : 'Omega',
'bases' : {'Omega' : '2_3_P1'}
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '2_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '2_displacement',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
ebcs = {}
epbc_10 = {
'name' : 'rl',
'region' : ['Left', 'Right'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_y_line',
}
epbc_12 = {
'name' : 'tb',
'region' : ['Top', 'Bottom'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_x_line',
}
fe = {
'chunk_size' : 1000
}
from sfepy.fem.periodic import *
from sfepy.base.testing import TestCommon
##
# 01.06.2007, c
class Test( TestCommon ):
##
# 01.06.2007, c
def from_conf( conf, options ):
from sfepy.fem import ProblemDefinition
problem = ProblemDefinition.from_conf( conf, init_equations = False )
test = Test( problem = problem,
conf = conf, options = options )
return test
from_conf = staticmethod( from_conf )
##
# c: 01.06.2007, r: 18.02.2008
def test_pbc( self ):
problem = self.problem
conf = self.conf
problem.variables.equation_mapping( conf.ebcs, conf.epbcs,
problem.domain.regions,
None, conf.funmod )
state = problem.create_state_vector()
problem.apply_ebc( state )
return problem.variables.has_ebc( state )
| {
"repo_name": "certik/sfepy",
"path": "tests/test_periodic_bc_2d.py",
"copies": "1",
"size": "2548",
"license": "bsd-3-clause",
"hash": 7189296440212312000,
"line_mean": 19.8852459016,
"line_max": 77,
"alpha_frac": 0.4839089482,
"autogenerated": false,
"ratio": 2.7846994535519127,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3768608401751913,
"avg_score": null,
"num_lines": null
} |
# 0 1 2
# 3 4 5
# 6 7 8
board = [[0,1,2],[3,4,5],[6,7,8]]
def unlockPattern(board):
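    # Pairs of dots with another dot directly between them on the 3x3 grid
    # (e.g. 0 -> 2 jumps over 1); such moves are skipped in the search below.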
not_reachable = {0:[2,6,8], 1:[7], 2:[0, 6,8], 3:[5], 5:[3], 6:[2,0,8], 7:[1], 8:[0,6,2]}
def dfs(currentVal, board, temp, final_res):
if len(temp) == 9:
if temp not in final_res:
final_res.append(temp)
return
if len(temp) >= 4:
if sorted(temp) not in final_res:
final_res.append(sorted(temp))
return
if currentVal != 'X':
for c in xrange(9):
if c == currentVal:
continue
if board[c/3][c%3] == 'X':
continue
if currentVal == 4:
tmp = board[currentVal/3][currentVal%3]
board[currentVal/3][currentVal%3] = 'X'
dfs(c, board, temp+[currentVal], final_res)
board[currentVal/3][currentVal%3] = tmp
else:
if c in not_reachable[currentVal]:
continue
else:
tmp = board[currentVal/3][currentVal%3]
board[currentVal/3][currentVal%3] = 'X'
dfs(c, board, temp+[currentVal], final_res)
board[currentVal/3][currentVal%3] = tmp
final_res = []
for i in xrange(9):
dfs(i, board, [], final_res)
return final_res
print len(unlockPattern(board)) | {
"repo_name": "quake0day/oj",
"path": "androidunlock.py",
"copies": "1",
"size": "1555",
"license": "mit",
"hash": 905474984499131300,
"line_mean": 32.8260869565,
"line_max": 93,
"alpha_frac": 0.4308681672,
"autogenerated": false,
"ratio": 3.7290167865707433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4659884953770743,
"avg_score": null,
"num_lines": null
} |
# 014 - Longest Common Prefix (Easy)
# https://leetcode.com/problems/longest-common-prefix/
class PrefixTree(object):
def __init__(self, string=""):
# Initialize empty dict of children nodes.
self.children = {}
self.terminal = False
def insert(self, string):
char = string[0]
# With the key of the first character of the string, create a new
# prefix tree that starts with the rest of the string, or if it's
# a path that already exists, just insert into it.
if char not in self.children:
self.children[char] = PrefixTree()
if len(string[1:]) > 0:
# Insert from the next position onwards.
self.children[char].insert(string[1:])
else:
# If the string has finished being consumed, mark this node as a
# terminal node.
self.terminal = True
# Recursively go down the prefix tree as long as there's only one path
# to go (meaning it's a shared prefix) and no string has been fully
# consumed.
def traverse(self, acum=""):
if len(self.children) == 1:
key = list(self.children.keys())[0]
child = self.children[key]
# A string finished here, so we can't find longer prefixes.
if self.terminal == True:
return acum + key
return child.traverse(acum + key)
else:
return acum
class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
base_tree = PrefixTree()
for string in strs:
# Input strings can have length 0. If any string is empty then we
# can't find any common prefix at all.
if len(string) > 0:
base_tree.insert(string)
else:
return ""
return base_tree.traverse()
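# Illustrative usage:
# obj = Solution()
# print(obj.longestCommonPrefix(["flower", "flow", "flight"]))  # -> "fl"
# print(obj.longestCommonPrefix(["dog", "racecar", "car"]))     # -> ""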
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_014.py",
"copies": "1",
"size": "1939",
"license": "mit",
"hash": 3384986309768517600,
"line_mean": 33.0175438596,
"line_max": 77,
"alpha_frac": 0.5667870036,
"autogenerated": false,
"ratio": 4.270925110132159,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5337712113732158,
"avg_score": null,
"num_lines": null
} |
"""01 create table
Revision ID: 9b78889aa13e
Revises:
Create Date: 2016-01-20 15:12:50.987583
"""
# revision identifiers, used by Alembic.
revision = '9b78889aa13e'
down_revision = None
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('orders',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', sa.Integer(), nullable=True),
sa.Column('id', sa.String(length=64), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('order_detail',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('deleted_at', sa.DateTime(), nullable=True),
sa.Column('deleted', sa.Integer(), nullable=True),
sa.Column('id', sa.String(length=64), nullable=False),
sa.Column('order_id', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['order_id'], ['orders.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('order_detail')
op.drop_table('orders')
### end Alembic commands ###
| {
"repo_name": "swaygently/clapton",
"path": "clapton/db/alembic/versions/9b78889aa13e_01_create_table.py",
"copies": "1",
"size": "1420",
"license": "apache-2.0",
"hash": 6846571144626326000,
"line_mean": 29.8695652174,
"line_max": 63,
"alpha_frac": 0.6647887324,
"autogenerated": false,
"ratio": 3.4466019417475726,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46113906741475724,
"avg_score": null,
"num_lines": null
} |
# 01HandParse.py vers 000
#------------------------
# Eric Shinn
#------------------------
# Will accept a hand-type and return a specific hand that's useable by
# subsequent scripts and routines.
#------------------------
import copy, random, secrets, time
deck0 = ['AS','2S','3S','4S','5S','6S','7S','8S','9S','TS','JS','QS','KS','AH'
,'2H','3H','4H','5H','6H','7H','8H','9H','TH','JH','QH','KH','AC','2C','3C'
,'4C','5C','6C','7C','8C','9C','TC','JC','QC','KC','AD','2D','3D','4D','5D'
,'6D','7D','8D','9D','TD','JD','QD','KD']
suits = ['s','h','c','d']
secretSpeed = 0
randomSpeed = 0
def acceptHand(hand):
# Accepts a 'hand' as a string. The first two characters denote the value of
# card. The third character indicates suitedness. If a lowercase 's' or 'o'
# are used, they denote suited and off-suited, respectively. If an uppercase
# 'S', 'H', 'C', or 'D' are used, they will denote spade, heart, club, and
# diamond, respectively.
# Pocket pairs
if(hand[0]==hand[1]):
# Choose suits
# We considered using secrets.choice() but it is about three times slower
# than random.choice(). | 4.66e-06 vs. 1.18e-06 over 10^6 samples.
suit0 = random.choice(suits)
suit1 = random.choice(suits)
while(suit0==suit1):
suit1 = random.choice(suits)
print(suit0,suit1)
# Suited hands
# Non-suited hands
acceptHand('AA')
| {
"repo_name": "musandsmrts/poker",
"path": "01HandParse.py",
"copies": "1",
"size": "1379",
"license": "mit",
"hash": 7323624697976335000,
"line_mean": 33.475,
"line_max": 78,
"alpha_frac": 0.583031182,
"autogenerated": false,
"ratio": 2.7690763052208833,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8617096616935764,
"avg_score": 0.047002174057023685,
"num_lines": 40
} |
# 0-1 Knapsack backtracking, recursive
# By James Lao
import time
import sys
import threading
def knapsack(vw, limit):
def bound(v, w, j):
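        # Upper bound on the best value reachable from item j onward: greedily
        # take whole items (vw is sorted by value density) and then a fraction
        # of the first item that no longer fits (LP relaxation).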
if j >= len(vw) or w > limit:
return -1
else:
while j < len(vw) and w + vw[j][1] <= limit:
v, w, j = v + vw[j][0], w + vw[j][1], j + 1
if j < len(vw):
v += (limit - w) * vw[j][0] / (vw[j][1] * 1.0)
return v
def traverse(v, w, j):
nonlocal maxValue
# print(v,w,j, bound(v,w,j))
if bound(v, w, j) >= maxValue: # promising w/ j
if w + vw[j][1] <= limit:
maxValue = max(maxValue, v + vw[j][0])
traverse(v + vw[j][0], w + vw[j][1], j + 1)
if j < len(vw) - 1: # promising w/o j
traverse(v, w, j + 1)
return
maxValue = 0
traverse(0, 0, 0)
return maxValue
def main():
with open(sys.argv[1] if len(sys.argv) > 1 else sys.exit(1)) as f:
limit, n = map(int, f.readline().split())
vw = [] # value, weight, value density
for ln in f.readlines():
vl, wl = tuple(map(int, ln.split()))
vw.append([vl, wl, vl / (wl * 1.0)])
start = time.time()
A = knapsack(sorted(vw, key=lambda x: x[2], reverse=True), limit)
end = time.time()
print(A)
print(end - start)
if __name__ == "__main__":
threading.stack_size(67108864) # 64MB stack
sys.setrecursionlimit(20000)
thread = threading.Thread(target=main)
thread.start()
| {
"repo_name": "jameslao/Knapsack-in-Python",
"path": "knapsack_bt_recursive.py",
"copies": "2",
"size": "1572",
"license": "mit",
"hash": -1363736623440270300,
"line_mean": 27.0714285714,
"line_max": 70,
"alpha_frac": 0.4866412214,
"autogenerated": false,
"ratio": 2.949343339587242,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4435984560987242,
"avg_score": null,
"num_lines": null
} |
# 0-1 Knapsack Branch and Bound with Heapq
# By James Lao.
from heapq import *
import time
import sys
def knapsack(vw, limit):
def bound(v, w, j):
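        # Upper bound on the best value reachable from item j onward: greedily
        # take whole items (vw is sorted by value density) and then a fraction
        # of the first item that no longer fits (LP relaxation).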
if j >= len(vw) or w > limit:
return -1
else:
while j < len(vw) and w + vw[j][1] <= limit:
v, w, j = v + vw[j][0], w + vw[j][1], j + 1
if j < len(vw):
v += (limit - w) * vw[j][0] / (vw[j][1] * 1.0)
return v
maxValue = 0
PQ = [[-bound(0, 0, 0), 0, 0, 0]] # -bound to keep maxheap
counter = 0
while PQ:
counter += 1
b, v, w, j = heappop(PQ)
if b <= -maxValue: # promising w/ j
if w + vw[j][1] <= limit:
maxValue = max(maxValue, v + vw[j][0])
heappush(PQ, [-bound(v + vw[j][0], w + vw[j][1],
j + 1), v + vw[j][0], w + vw[j][1], j + 1])
if j < len(vw) - 1:
heappush(PQ, [-bound(v, w, j + 1), v, w, j + 1])
print("Total nodes:", counter)
return maxValue
if __name__ == "__main__":
with open(sys.argv[1] if len(sys.argv) > 1 else sys.exit(1)) as f:
limit, n = map(int, f.readline().split())
vw = [] # value, weight, value density
for ln in f.readlines():
vl, wl = tuple(map(int, ln.split()))
vw.append([vl, wl, vl / (wl * 1.0)])
start = time.time()
A = knapsack(sorted(vw, key=lambda x: x[2], reverse=True), limit)
end = time.time()
print(A)
print(end - start)
| {
"repo_name": "jameslao/Algorithmic-Pearls",
"path": "0-1-Knapsack/knapsack_bnb_pq.py",
"copies": "2",
"size": "1557",
"license": "mit",
"hash": 6769685474102678000,
"line_mean": 31.4375,
"line_max": 80,
"alpha_frac": 0.4463712267,
"autogenerated": false,
"ratio": 2.846435100548446,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4292806327248446,
"avg_score": null,
"num_lines": null
} |
"""0/1 Knapsack Problem (0/1 means that items cannot be divided)
Given a bag which can only hold a weight W and a list of items,
each one with a weight Wi and a price Pi, which items should be
put on the bag to maximize the total value of the bag?
Example:
Input:
W = 4
i1 = (W1 = 2, P1 = 1)
i2 = (W2 = 1, P2 = 2)
i3 = (W3 = 3, P3 = 3)
i4 = (W4 = 2, P4 = 3)
Solutions:
i2, i4 => (W = 3, P = 5)
i2, i3 => (W = 4, P = 5)
"""
from collections import namedtuple
def knapsack(max_weight, items):
"""
0 1 2 3 ... w
0 no item
1 item 1
2 item 2
...
n item n
"""
n = len(items) + 1
m = max_weight + 1
dp = [[0] * m for _ in range(n)]
for i_index in range(1, n):
current_item = items[i_index - 1]
for current_weight in range(1, m):
if current_item.w <= current_weight:
dp[i_index][current_weight] = max(
current_item.v + dp[i_index - 1][current_weight - current_item.w],
dp[i_index - 1][current_weight]
)
else:
                # Item does not fit: inherit the best value without it (row above).
                dp[i_index][current_weight] = dp[i_index - 1][current_weight]
return dp[n - 1][m - 1]
Item = namedtuple("Item", ["w", "v"]) # w = weight, v = value
if __name__ == "__main__":
max_weight = 7
items = [
Item(1, 1),
Item(3, 4),
Item(4, 5),
Item(5, 7)
]
max_value = knapsack(max_weight, items)
print("Max value = ", max_value)
assert max_value == 9
| {
"repo_name": "rcanepa/cs-fundamentals",
"path": "python/dynamic_programming/knapsack.py",
"copies": "1",
"size": "1599",
"license": "mit",
"hash": -8186323048662680000,
"line_mean": 23.2272727273,
"line_max": 86,
"alpha_frac": 0.4828017511,
"autogenerated": false,
"ratio": 3.069097888675624,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40518996397756235,
"avg_score": null,
"num_lines": null
} |
"""0-1 Knapsack Problem
Given weights and values of n "non-splittable" items, put these items in a
knapsack of capacity to get the maximum total value in the knapsack.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def _knapsack01_recur(val, wt, wt_cap, n):
"""0-1 Knapsack Problem by naive recursion.
Time complexity: O(2^n), where n is the number of items.
Space complexity: O(n).
"""
if n < 0 or wt_cap == 0:
return 0
if wt[n] > wt_cap:
# Cannot be put.
max_val = _knapsack01_recur(val, wt, wt_cap, n - 1)
else:
# Can be put: to put or not to put.
val_in = val[n] + _knapsack01_recur(val, wt, wt_cap - wt[n], n - 1)
val_ex = _knapsack01_recur(val, wt, wt_cap, n - 1)
max_val = max(val_in, val_ex)
return max_val
def knapsack01_recur(val, wt, wt_cap):
"""0-1 Knapsack Problem by naive recursion.
Time complexity: O(2^n), where n is the number of items.
Space complexity: O(n).
"""
n = len(wt) - 1
return _knapsack01_recur(val, wt, wt_cap, n)
def _knapsack01_memo(val, wt, wt_cap, T, n):
if n < 0 or wt_cap == 0:
return 0
if T[n][wt_cap]:
return T[n][wt_cap]
if wt[n] > wt_cap:
# Cannot be put.
max_val = _knapsack01_memo(val, wt, wt_cap, T, n - 1)
else:
# Can be put: to put or not to put.
val_in = val[n] + _knapsack01_memo(val, wt, wt_cap - wt[n], T, n - 1)
val_ex = _knapsack01_memo(val, wt, wt_cap, T, n - 1)
max_val = max(val_in, val_ex)
T[n][wt_cap] = max_val
return max_val
def knapsack01_memo(val, wt, wt_cap):
"""0-1 Knapsack Problem by top-down dynamic programming w/ memoization.
Time complexity: O(nC), where
- n is the number of items, and
- C is the weight capacity.
Space complexity: O(nC).
"""
n = len(wt) - 1
# Create tabular T of (n+1)x(wt_cap+1).
T = [[None] * (wt_cap + 1) for i in range(n + 1)]
# For empty cap, no value can be added.
for i in range(n + 1):
T[i][0] = 0
return _knapsack01_memo(val, wt, wt_cap, T, n)
def knapsack_dp(val, wt, wt_cap):
"""0-1 Knapsack Problem by bottom-up dynamic programming.
Time complexity: O(nC), where
- n is the number of items, and
- C is the weight capacity.
Space complexity: O(nC).
"""
n = len(wt)
# Create tabular T of n x (wt_cap+1).
T = [[None] * (wt_cap + 1) for i in range(n)]
# For empty cap, no value can be added.
for i in range(n):
T[i][0] = 0
    # For the 1st item only.
for j in range(1, wt_cap + 1):
if wt[0] <= j:
T[0][j] = val[0]
else:
T[0][j] = 0
for i in range(1, n):
for j in range(1, wt_cap + 1):
if wt[i] <= j:
# Can be put: to put or not to put.
T[i][j] = max(val[i] + T[i - 1][j - wt[i]], T[i - 1][j])
else:
# Cannot be put.
T[i][j] = T[i-1][j]
return T
def item_list(T, wt, wt_cap):
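    """Recover which items were put by backtracking through DP table T (1 = taken)."""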
n = len(wt)
items = [0] * n
w = wt_cap
for i in range(n - 1, -1, -1):
if i >= 1 and T[i][w] > T[i - 1][w]:
# Item i, i >= 1, is put.
items[i] = 1
w -= wt[i]
elif i == 0 and T[i][w] != 0:
# Item 0 is put.
items[i] = 1
return items
def main():
import time
val = [6, 3, 5, 4, 6]
wt = [2, 5, 4, 2, 3]
wt_cap = 10
# Output: 17
start_time = time.time()
print('By recur: {}'.format(knapsack01_recur(val, wt, wt_cap)))
print('Time: {}'.format(time.time() - start_time))
start_time = time.time()
print('By memo: {}'.format(knapsack01_memo(val, wt, wt_cap)))
print('Time by memo: {}'.format(time.time() - start_time))
start_time = time.time()
T = knapsack_dp(val, wt, wt_cap)
print('By DP: {}'.format(T[-1][-1]))
print('Time: {}'.format(time.time() - start_time))
print('Items: {}'.format(item_list(T, wt, wt_cap)))
if __name__ == '__main__':
main()
| {
"repo_name": "bowen0701/algorithms_data_structures",
"path": "alg_knapsack01.py",
"copies": "1",
"size": "4159",
"license": "bsd-2-clause",
"hash": 7049605746672697000,
"line_mean": 25.3227848101,
"line_max": 77,
"alpha_frac": 0.5210387112,
"autogenerated": false,
"ratio": 2.79502688172043,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.381606559292043,
"avg_score": null,
"num_lines": null
} |
# 01 - Saving The Universe Again
t = int(input())
def laser_strength(instructions):
strength = 1
energy = 0
for i in range(0, len(instructions)):
char = instructions[i]
if (char == "S"):
energy += strength
elif (char == "C"):
strength *= 2
return energy
for case in range(1, t + 1):
# Each line contains the shield energy and laser firing sequence.
energy, instructions = input().strip().split(" ")
energy = int(energy)
swaps = 0
swapped = False
success = False
num = len(instructions)
while True:
# Stop swapping once the laser energy has been reduced enough to
# not destroy the shield.
if (laser_strength(instructions) <= energy):
success = True
break
swapped = False
# Iterate from the end of the string finding a position to swap,
# this "strategy" makes it so we focus on the right-most elements,
# as they are going to have the highest energy.
for i in range(num - 1, 0, -1):
pre, pos = instructions[i - 1], instructions[i]
# In the case of a C(harge) preceding a S(hoot), swap them!
if (pre == "C" and pos == "S"):
ins_array = bytearray(instructions, 'utf8')
ins_array[i - 1] = ord('S')
ins_array[i] = ord('C')
instructions = ins_array.decode('utf8')
swapped = True
swaps += 1
break
# If nothing was swapped, it means its no longer possible to swap
# anything, and if it hasn't exited yet it means its impossible
# to do swaps to prevent the shield being destroyed.
if (not swapped):
success = False
break
result = "Case #%d: " % (case)
result += str(swaps) if (success) else "IMPOSSIBLE"
print(result)
| {
"repo_name": "Zubieta/CPP",
"path": "Google_Code_Jam/2018/Qualifier_Round/Saving_The_Universe_Again.py",
"copies": "3",
"size": "1925",
"license": "mit",
"hash": -7416631262157481000,
"line_mean": 34.6481481481,
"line_max": 74,
"alpha_frac": 0.5527272727,
"autogenerated": false,
"ratio": 4.069767441860465,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006397135656770546,
"num_lines": 54
} |
# 01 - Vestigium
# Vestigium means "trace" in Latin. In this problem we work with Latin squares
# and matrix traces. The trace of a square matrix is the sum of the values on
# the main diagonal (which runs from the upper left to the lower right).
# An N-by-N square matrix is a Latin Square if each cell contains one of N
# different values, and no value is repeated within a row or a column. In this
# problem we'll deal only with "natural Latin Squares", in which the N values
# are the integers between 1 and N.
# Given a matrix that contains only integers between 1 and N, we want to
# compute its trace and check whether it is a natural Latin square. To give
# some additional information, instead of simply telling us whether the matrix
# is a natural Latin Square or not, please compute the number of rows and the
# number of columns that contain repeated values.
from collections import Counter
num_cases = int(input())
for t in range(1, num_cases + 1):
# The dimension of the matrix.
n = int(input())
# Now read the matrix.
matrix = []
for i in range(n):
row = list(map(lambda x: int(x), input().split(" ")))
matrix.append(row)
# 1. Compute the trace.
trace = 0
for i in range(n):
trace += matrix[i][i]
# 2. Compute how many rows and how many columns have repeated values.
# Build a set of values for each row and column, the set should have N
# values, anything lower than that means the row/column has duplicates.
rows_w_dups = 0
for i in range(n):
row = set(matrix[i])
if (len(row) < n):
rows_w_dups += 1
cols_w_dups = 0
for i in range(n):
col = set([matrix[j][i] for j in range(n)])
if (len(col) < n):
cols_w_dups += 1
print("Case #{}: {} {} {}".format(t, trace, rows_w_dups, cols_w_dups))
| {
"repo_name": "Zubieta/CPP",
"path": "Google_Code_Jam/2020/Qualifier_Round/01_Vestigium.py",
"copies": "2",
"size": "1832",
"license": "mit",
"hash": -1416390280962659300,
"line_mean": 35.64,
"line_max": 78,
"alpha_frac": 0.663209607,
"autogenerated": false,
"ratio": 3.502868068833652,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5166077675833652,
"avg_score": null,
"num_lines": null
} |
#02-03a.py
#------------------------------------------------------------
# first compute and plot the outlier fraction as a function
# of max_depth
max_depth_array = np.arange(1, 21)
train_error = np.zeros(len(max_depth_array))
cv_error = np.zeros(len(max_depth_array))
for i, max_depth in enumerate(max_depth_array):
# print progress update
print '%i / %i' % (max_depth, max_depth_array[-1])
clf = DecisionTreeRegressor(max_depth=max_depth)
clf.fit(X_train, y_train)
y_train_pred = clf.predict(X_train)
y_cv_pred = clf.predict(X_cv)
train_error[i] = compute_outlier_fraction(y_train_pred, y_train)
cv_error[i] = compute_outlier_fraction(y_cv_pred, y_cv)
pl.figure()
pl.plot(max_depth_array, cv_error, label='cross-val error')
pl.plot(max_depth_array, train_error, label='training error')
pl.legend(loc=0)
pl.xlabel('max depth')
pl.ylabel('error')
# select the value of max_depth which led to the best results
max_depth = max_depth_array[np.argmin(cv_error)]
print "max_depth = %i" % max_depth
| {
"repo_name": "vtesin/sklearn_tutorial",
"path": "doc/notebooks/soln/02-03a.py",
"copies": "4",
"size": "1034",
"license": "bsd-3-clause",
"hash": -6348206193150569000,
"line_mean": 30.3333333333,
"line_max": 68,
"alpha_frac": 0.6528046422,
"autogenerated": false,
"ratio": 2.92090395480226,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003243579489913801,
"num_lines": 33
} |
#02-03b.py
#------------------------------------------------------------
# compute and plot the outlier fraction
# as a function of number of samples
n_samples_array = np.linspace(50, Ntrain, 20).astype(int)
train_error_2 = np.zeros(n_samples_array.shape)
cv_error_2 = np.zeros(n_samples_array.shape)
for i, n_samples in enumerate(n_samples_array):
# print progress update
print ' %i / %i' % (n_samples, Ntrain)
clf = DecisionTreeRegressor(max_depth=max_depth)
clf.fit(X_train[:n_samples], y_train[:n_samples])
y_train_pred = clf.predict(X_train[:n_samples])
y_cv_pred = clf.predict(X_cv)
train_error_2[i] = compute_outlier_fraction(y_train_pred,
y_train[:n_samples])
cv_error_2[i] = compute_outlier_fraction(y_cv_pred, y_cv)
pl.figure()
pl.plot(n_samples_array, cv_error_2, label='cross-val error')
pl.plot(n_samples_array, train_error_2, label='training error')
pl.legend(loc=0)
pl.xlabel('number of samples')
pl.ylabel('error')
pl.title('max_depth = %s' % max_depth)
| {
"repo_name": "rain1024/sklearn_tutorial",
"path": "doc/notebooks/soln/02-03b.py",
"copies": "4",
"size": "1053",
"license": "bsd-3-clause",
"hash": -2466935906201194500,
"line_mean": 31.90625,
"line_max": 63,
"alpha_frac": 0.6210826211,
"autogenerated": false,
"ratio": 2.9661971830985916,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5587279804198592,
"avg_score": null,
"num_lines": null
} |
# 02/06/2017
import itertools
from collections import Counter
class Card(object):
def __init__(self, value, suit):
self.value = value
self.suit = suit
def rank(self):
return "234567890JQKA".index(self.value)
class Hand(object):
def __init__(self, cards):
assert len(cards) == 5
self.cards = cards
self.get_type()
self.sort_cards()
def get_suits(self):
return [card.suit for card in self.cards]
def get_values(self):
return [card.value for card in self.cards]
def get_type(self):
if len(set(self.get_suits())) == 1 and self.have_consecs():
self.name = "Straight Flush"
self.tier = 9
elif max(Counter(self.get_values()).values()) == 4:
self.name = "Four of a Kind"
self.tier = 8
elif set(Counter(self.get_values()).values()) == {3, 2}:
self.name = "Full House"
self.tier = 7
elif len(set(self.get_suits())) == 1:
self.name = "Flush"
self.tier = 6
elif self.have_consecs():
self.name = "Straight"
self.tier = 5
elif set(Counter(self.get_values()).values()) == {3, 1}:
self.name = "Three of a Kind"
self.tier = 4
elif list(Counter(self.get_values()).values()).count(2) == 2:
self.name = "Two Pairs"
self.tier = 3
elif len(set(self.get_values())) == 4:
self.name = "Pair"
self.tier = 2
else:
self.name = "Highest Card"
self.tier = 1
def sort_cards(self):
if self.name in ["Straight Flush", "Straight"]:
self.cards.sort(key=Card.rank, reverse=True)
if 'A' in self.get_values() and '2' in self.get_values():
self.cards = self.cards[1:] + [self.cards[0]]
elif self.name in ["Four of a Kind", "Full House", "Three of a Kind", "Pair"]:
x_of_this = Counter(self.get_values()).most_common(1)[0][0]
tmp = [card for card in self.cards if card.value == x_of_this]
self.cards = tmp + sorted([card for card in self.cards if card.value != x_of_this],
key=Card.rank, reverse=True)
elif self.name in ["Flush", "Highest Card"]:
self.cards.sort(key=Card.rank, reverse=True)
elif self.name == "Two Pairs":
pairs = [v for v, _ in Counter(self.get_values()).most_common(2)]
tmp = sorted([card for card in self.cards if card.value in pairs], key=Card.rank, reverse=True)
self.cards = tmp + [card for card in self.cards if card.value not in pairs]
def have_consecs(self):
value_list = "A234567890JQKA"
possibles = []
for i in range(1+len(value_list)-5):
possibles.append(value_list[i:i+5])
sorted_values = sorted(self.get_values(), key=lambda x: "234567890JQKA".index(x))
if 'A' in self.get_values() and '2' in self.get_values():
sorted_values = [sorted_values[-1]] + sorted_values[:-1]
return ''.join(sorted_values) in possibles
def __eq__(self, other):
if self.tier == other.tier:
for card_s, card_o in zip(self.cards, other.cards):
if card_s.rank() != card_o.rank():
return False
return True
return False
def __lt__(self, other):
if self.tier < other.tier:
return True
elif self.tier == other.tier:
for card_s, card_o in zip(self.cards, other.cards):
if card_s.rank() < card_o.rank():
return True
elif card_s.rank() > card_o.rank():
return False
return False
def get_available_cards(flop, hands):
deck = [Card(v, s) for v in "234567890JQKA" for s in "CSDH"
if not any(c.suit == s and c.value == v for c in flop + hands)]
return deck
def parse_cards(string):
hand = [Card(v, s) for v, s in zip(string[::2], string[1::2])]
return hand
def get_best_hand(cards):
best_hand = None
for hand in itertools.combinations(cards, 5):
this_hand = Hand(list(hand))
if best_hand is None or this_hand > best_hand:
best_hand = this_hand
return best_hand
if __name__ == "__main__":
#flop = parse_cards(input("Flop cards: "))
#player_cards = []
#for i in range(4):
# player_cards.append(parse_cards(input("Player {} cards: ".format(i+1))))
flop = parse_cards("3D5C9C")
player_cards = []
player_cards.append(parse_cards("3C7H"))
player_cards.append(parse_cards("AS0S"))
player_cards.append(parse_cards("9S2D"))
player_cards.append(parse_cards("KCJC"))
remaining = get_available_cards(flop, [item for sublist in player_cards for item in sublist])
player_wins = {'1': 0, '2': 0, '3': 0, '4': 0}
totals = 0
for turn in remaining:
for river in set(remaining) - set([turn]):
player_hands = {}
for i in range(4):
table_cards = flop + [turn] + [river]
player_hands[str(i)] = get_best_hand(player_cards[i] + table_cards)
winner = max(player_hands, key=player_hands.get)
if any([player_hands[x] == player_hands[winner] for x in player_hands if x != winner]):
totals += 1
else:
winner = str(int(winner) + 1)
player_wins[winner] += 1
totals += 1
for i in "1234":
print("{}: {:.1f}%".format(i, player_wins[i]/totals * 100)) | {
"repo_name": "tlseabra/dailyprogrammer",
"path": "Python/hard/h317.py",
"copies": "2",
"size": "5674",
"license": "mit",
"hash": 8808928666491495000,
"line_mean": 35.3782051282,
"line_max": 107,
"alpha_frac": 0.5384208671,
"autogenerated": false,
"ratio": 3.416014449127032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.992093614884459,
"avg_score": 0.006699833476488517,
"num_lines": 156
} |
# 020 - Valid Parentheses (Easy)
# https://leetcode.com/problems/valid-parentheses/
class Solution(object):
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
# A stack for symbols.
stack = []
for idx in range(len(s)):
symbol = s[idx]
# If it's an opening symbol, append it into the stack.
if symbol in ["(", "[", "{"]:
stack.append(symbol)
# If it's anything else...
else:
# If there's nothing in the stack, definitely a bad string.
if len(stack) == 0:
return False
last_symbol = stack.pop()
# If there's something in the stack, and the current symbol
# is not what's expected to close that...
cond_1 = last_symbol == "(" and symbol != ")"
cond_2 = last_symbol == "[" and symbol != "]"
cond_3 = last_symbol == "{" and symbol != "}"
# ... for any of the possible pairs, then it's an invalid string.
if any([cond_1, cond_2, cond_3]):
return False
# If the string was so far "valid" but not enough symbols closed it.
if len(stack):
return False
# If the string was fully consumed, or was empty to begin with.
return True
# obj = Solution()
# print(obj.isValid("()"))
# print(obj.isValid("()[]{}"))
# print(obj.isValid("(]"))
# print(obj.isValid("([)]"))
# print(obj.isValid("{[]}"))
# print(obj.isValid("{"))
# print(obj.isValid("}"))
# print(obj.isValid(""))
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_020.py",
"copies": "1",
"size": "1656",
"license": "mit",
"hash": 1810337038955149800,
"line_mean": 33.5,
"line_max": 81,
"alpha_frac": 0.4951690821,
"autogenerated": false,
"ratio": 4.068796068796069,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5063965150896069,
"avg_score": null,
"num_lines": null
} |
# 021 - Merge Two Sorted Lists (Easy)
# https://leetcode.com/problems/merge-two-sorted-lists/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
# Merge two lists in place. Take a base list to splice
# the other list into it.
# l1 will be the base list to splice nodes into.
# l2 will be the list we'll remove nodes from.
# Store the reference to the start of l1.
res = l1
# Edge case, when any starts empty, return the other..
if l1 is None:
return l2
if l2 is None:
return l1
# While there's still a l2 to remove stuff from...
while l2 is not None:
# Let's assume the lower value is in l1, doesn't really matter
# if we switch this.
cur_l1, cur_l2 = sorted([l1.val, l2.val])
l1.val, l2.val = cur_l1, cur_l2
# Assume the next l1 value is really large, unless there's actually
# a next value.
nex_l1 = 2<<31
if l1.next is not None:
nex_l1 = l1.next.val
# If the current value of the node of l2 is in-between
# the current value of the node of l1 and the next value
# of l1, then splice the current l2 node!
if cur_l1 <= cur_l2 and cur_l2 <= nex_l1:
l2_temp = l2.next
l2.next = l1.next
l1.next = l2
l2 = l2_temp
else:
l1 = l1.next
return res
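# Illustrative driver (a sketch, assuming the ListNode shape from the header
# comment above): splicing 1->2->4 into 1->3->4 in place should yield
# 1->1->2->3->4->4.
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, val=0, next=None):
            self.val, self.next = val, next

    def _build(vals):
        # Build a singly-linked list from a Python list.
        head = None
        for v in reversed(vals):
            head = ListNode(v, head)
        return head

    _merged = Solution().mergeTwoLists(_build([1, 2, 4]), _build([1, 3, 4]))
    _out = []
    while _merged is not None:
        _out.append(_merged.val)
        _merged = _merged.next
    assert _out == [1, 1, 2, 3, 4, 4]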
# Recursive solution:
# O(m + n) time, as well as space because of the stack frames created.
# class Solution:
# def mergeTwoLists(self, l1, l2):
# if l1 is None:
# return l2
# elif l2 is None:
# return l1
# elif l1.val < l2.val:
# l1.next = self.mergeTwoLists(l1.next, l2)
# return l1
# else:
# l2.next = self.mergeTwoLists(l1, l2.next)
# return l2 | {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_021.py",
"copies": "1",
"size": "2263",
"license": "mit",
"hash": 8331326784094632000,
"line_mean": 31.3428571429,
"line_max": 79,
"alpha_frac": 0.5240830756,
"autogenerated": false,
"ratio": 3.4761904761904763,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4500273551790476,
"avg_score": null,
"num_lines": null
} |
# 026 - Remove Duplicates From Sorted Array (Easy)
# https://leetcode.com/problems/remove-duplicates-from-sorted-array/submissions/
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# Deduplicate elements in a sorted list, in place. This is done by
# copying values to earlier in the list, the remainder at the end won't
# matter because we'll return the index at which the array should "end".
# An index to keep track of the last moved unique number.
unique_idx = 0
last_uniq_number = -2<<31
for idx in range(len(nums)):
number = nums[idx]
# If we stopped going over duplicated numbers...
if number != last_uniq_number:
# Copy the current number to the latest unique position...
nums[unique_idx] = number
# And update the last unique number.
last_uniq_number = number
unique_idx += 1
return unique_idx
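# A quick illustrative check (sketch assuming the Solution class above): the
# call rewrites the unique values into the front of the list in place and
# returns how many there are.
if __name__ == "__main__":
    _nums = [1, 1, 2, 2, 3]
    _k = Solution().removeDuplicates(_nums)
    assert _k == 3 and _nums[:_k] == [1, 2, 3]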
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_026.py",
"copies": "1",
"size": "1073",
"license": "mit",
"hash": -145369881276076200,
"line_mean": 37.3214285714,
"line_max": 80,
"alpha_frac": 0.5908667288,
"autogenerated": false,
"ratio": 4.433884297520661,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5524751026320661,
"avg_score": null,
"num_lines": null
} |
# 028 - Implement strStr() (Easy)
# https://leetcode.com/problems/implement-strstr/
class Solution(object):
def strStr(self, haystack, needle):
"""
:type haystack: str
:type needle: str
:rtype: int
"""
if needle == "":
return 0
# Build the initial prefix-suffix table with the length of the needle.
lps = [0 for _ in range(len(needle))]
# We'll always start comparing with position 0.
idx_needle_j = 0
        # ... but start from position 1; by default, position 0 keeps the value 0.
# This loop takes O(len(needle)).
for idx_needle_i in range(1, len(needle)):
char_i = needle[idx_needle_i]
char_j = needle[idx_needle_j]
if char_i == char_j:
# Store the index of the matching character + 1, and advance the initial index.
lps[idx_needle_i] = idx_needle_j + 1
idx_needle_j += 1
else:
# Move j to the index of the value of the previous character, until it either
# matches or goes back all the way to the start.
while idx_needle_j != 0:
idx_needle_j = lps[idx_needle_j - 1]
# And do the same previous operations at new position.
char_i = needle[idx_needle_i]
char_j = needle[idx_needle_j]
if char_i == char_j:
lps[idx_needle_i] = idx_needle_j + 1
idx_needle_j += 1
break
idx_needle = 0
idx_haystack = 0
# This loop takes O(len(haystack)).
while idx_haystack < len(haystack):
char_haystack = haystack[idx_haystack]
char_needle = needle[idx_needle]
if char_haystack == char_needle:
idx_needle += 1
else:
# Look in the prefix-suffix table how further back we want to go,
# because it is redundant to check for existing prefix-suffixes.
if idx_needle > 0:
idx_needle = lps[idx_needle - 1]
# Step back the haystack index so that the next iteration we compare
# again with the current character with the character at the new
# needle index.
idx_haystack -= 1
if idx_needle == len(needle):
return idx_haystack - idx_needle + 1
# Keep advancing the index of the haystack.
idx_haystack += 1
# Meaning no index was found.
return -1
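# An illustrative check (sketch assuming the Solution class above): for the
# needle "abab" the prefix-suffix table built above comes out as [0, 0, 1, 2],
# which is what lets the search skip redundant comparisons in the haystack.
if __name__ == "__main__":
    _s = Solution()
    assert _s.strStr("hello", "ll") == 2
    assert _s.strStr("aaaaa", "bba") == -1
    assert _s.strStr("mississippi", "issip") == 4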
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_028.py",
"copies": "1",
"size": "2677",
"license": "mit",
"hash": 5631125527481553000,
"line_mean": 38.9552238806,
"line_max": 95,
"alpha_frac": 0.5136346657,
"autogenerated": false,
"ratio": 3.965925925925926,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9965250462497321,
"avg_score": 0.002862025825720902,
"num_lines": 67
} |
# 02 - Nesting Depth
# tl;dr Given a string of digits S, insert a minimum number of opening and
# closing parentheses into it such that the resulting string is balanced and
# each digit d is inside exactly d pairs of matching parentheses.
num_cases = int(input())
for t in range(1, num_cases + 1):
numbers = input()
opens = 0
to_close = 0
result = ""
for i in range(len(numbers)):
# Iterate over each digit...
digit = int(numbers[i])
        # Now we need to add opens as the difference between this digit's value
        # and the current amount of opens.
to_open = digit - opens
# If this value is negative, it means we need to actually add closes.
if (to_open > 0):
# Add the opens to the result string and to the count of opens.
result += to_open * "("
opens += to_open
else:
            # Add closes to the result string and subtract that from the count
            # of opens.
result += abs(to_open) * ")"
opens -= abs(to_open)
result += str(digit)
# At the end of processing if there were still x opens, just add x closes.
if (opens > 0):
result += opens * ")"
print("Case #{}: {}".format(t, result))
| {
"repo_name": "zubie7a/Algorithms",
"path": "Google_Code_Jam/2020/Qualifier_Round/02_Nesting_Depth.py",
"copies": "2",
"size": "1232",
"license": "mit",
"hash": -3387141469388816000,
"line_mean": 33.2222222222,
"line_max": 78,
"alpha_frac": 0.6006493506,
"autogenerated": false,
"ratio": 3.85,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.54506493506,
"avg_score": null,
"num_lines": null
} |
#02_reaction_game.py
# Written for Simon Monk's Electronics Starter Kit for the Raspberry Pi by Henry Budden (@pi_tutor)
#Thanks to Ben Nuttall from the Raspberry Pi Foundation for the GPIO Zero library
#Import relevant libraries
from gpiozero import *
from time import sleep
import random
#Set pin numbers
led_1 = LED(22)
led_2 = LED(4)
rgb = RGBLED(red=14, green=17, blue=27)
button_1 = Button(24)
button_2 = Button(2)
#Input player names
player_1 = raw_input("Enter the name of Player 1: ")
player_2 = raw_input("Enter the name of Player 2: ")
#Initialise score variables
player_1_score = 0
player_2_score = 0
#Initialise round number variable
round_num = 0
#Start the game
while True:
trick = False #Set trick mode
round_num = round_num +1 #Increment round number by one for each iteration
wait = 0
time = random.uniform(0, 10) #Randomly generate time delay between 0 and 10 seconds
color_2 = random.uniform(0, 1) #Randomly generate green colour value (on or off)
color_3 = random.uniform(0, 1) #Randomly generate blue colour value (on or off)
fix = random.randint(0,3) #Randomly generate number for trick question between 0 and 3 inclusive
if color_2 == 0 and color_3 == 0: #If all colours are off by chance
color_2 = 1 #Set green pixel to on
print " " #Print blank line for better formatting
print "Round", str(round_num) + ":" #Print round number
print "Get Ready!"
sleep(time) #Wait for random amount of time
if fix == 3: #Approx 25% of the time
trick = True #Set trick mode
rgb.color = (1, 0, 0) #Make RGB LED Red
else:
rgb.color = (0, color_2, color_3) #Turn on RGB LED to random colour
while True:
if button_1.is_pressed and trick == False: #If player 1 button is pressed
print player_1, "wins!" #Display winning message
player_1_score = player_1_score + 1 #Increment score by one
rgb.color = (0,0,0) #Turn LED off
print player_1, ":", player_1_score #Display scores
print player_2, ":", player_2_score
wait = 0 #Reset wait value
break
elif button_1.is_pressed and trick == True:
print player_1, "Oh dear!" #Display winning message
player_1_score = player_1_score - 1 #Increment score by one
rgb.color = (0,0,0) #Turn LED off
print player_1, ":", player_1_score #Display scores
print player_2, ":", player_2_score
wait = 0
break
if button_2.is_pressed and trick == False:
print player_2, "wins!"
player_2_score = player_2_score + 1
rgb.color = (0,0,0)
print player_1, ":", player_1_score
print player_2, ":", player_2_score
wait = 0
break
elif button_2.is_pressed and trick == True:
print player_2, "Oh dear!"
player_2_score = player_2_score - 1
rgb.color = (0,0,0)
print player_1, ":", player_1_score
print player_2, ":", player_2_score
wait = 0
break
if trick == True and wait == 100000: #If on trick question time out after approx 5 seconds
print "Well Done"
rgb.color = (0,0,0)
print player_1, ":", player_1_score
print player_2, ":", player_2_score
wait = 0
break
wait = wait + 1 #Increment wait value by one
if player_1_score > player_2_score: #If player one is winning overall
led_1.on() #Turn on player one LED and turn player two LED off
led_2.off()
elif player_2_score > player_1_score:
led_2.on()
led_1.off()
else: #If they are drawing
led_1.on() #Turn both LEDs on
led_2.on()
| {
"repo_name": "henrybudden/rpesk-advanced",
"path": "02_reaction_game.py",
"copies": "1",
"size": "4465",
"license": "mit",
"hash": -2397144569582046000,
"line_mean": 38.5132743363,
"line_max": 125,
"alpha_frac": 0.5126539754,
"autogenerated": false,
"ratio": 3.979500891265597,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4992154866665597,
"avg_score": null,
"num_lines": null
} |
# 02 - Trouble Sort
t = int(input())
for case in range(1, t + 1):
n = int(input())
nums = list(map(int, input().split(" ")))
    # Trouble sort is like bubble sort, but it compares pairs of elements that
    # are two positions apart. This means that only elements in even positions
    # are ever compared with other elements in even positions, and the same
    # holds for odd positions.
evens = []
odds = []
for i in range(0, n):
if (i % 2):
odds.append(nums[i])
else:
evens.append(nums[i])
# After having separated even and odd index elements, sort that list any
# way you want. No matter how much Trouble Sort runs, it will only leave
# sorted evens and odds in their positions.
evens.sort()
odds.sort()
final = []
evens_iterator = 0
odds_iterator = 0
for i in range(0, n):
if (i % 2):
final.append(odds[odds_iterator])
odds_iterator += 1
else:
final.append(evens[evens_iterator])
evens_iterator += 1
pos = -1
    # After merging back, the first element that's not in order marks where
    # the sorting is broken, because Trouble Sort will never be able to fix it.
for i in range(1, n):
if (final[i] < final[i - 1]):
pos = i - 1
break
result = "Case #%d: " % (case)
result += str(pos) if (pos != -1) else "OK"
print(result)
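# A worked example of the reasoning above (illustrative sketch, independent of
# the stdin loop): for [3, 9, 5, 2, 7] the even-index values [3, 5, 7] are
# already sorted and the odd-index values [9, 2] sort to [2, 9], so the
# interleaved result is [3, 2, 5, 9, 7] and the first out-of-order pair sits
# at index 0, which is what this program would print for such a case.
_evens, _odds = sorted([3, 5, 7]), sorted([9, 2])
_merged = [_evens[i // 2] if i % 2 == 0 else _odds[i // 2] for i in range(5)]
assert _merged == [3, 2, 5, 9, 7]  # breaks at index 0, since 2 < 3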
| {
"repo_name": "Zubieta/CPP",
"path": "Google_Code_Jam/2018/Qualifier_Round/Trouble_Sort.py",
"copies": "3",
"size": "1451",
"license": "mit",
"hash": 7265070357121993000,
"line_mean": 31.9772727273,
"line_max": 81,
"alpha_frac": 0.5761543763,
"autogenerated": false,
"ratio": 3.582716049382716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.008070315721934163,
"num_lines": 44
} |
# 03.10.2007, c
from __future__ import absolute_import
from sfepy import data_dir
from sfepy.mechanics.matcoefs import stiffness_from_lame
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
# Whole domain $Y$.
region_1000 = {
'name' : 'Y',
'select' : 'all',
}
# Domain $Y_1$.
region_1 = {
'name' : 'Y1',
'select' : 'cells of group 1',
}
# Domain $Y_2$.
region_2 = {
'name' : 'Y2',
'select' : 'cells of group 2',
}
# Domain $Y_3$.
region_3 = {
'name' : 'Y3',
'select' : 'vertices in (x > %f) & (x < %f) & (y > %f) & (y < %f)'\
% (-0.3, 0.3, -0.48, -0.3),
}
wx = wy = 0.499
region_10 = {
'name' : 'Bottom',
'select' : 'vertices in (y < %f)' % -wy,
'kind' : 'facet',
}
region_11 = {
'name' : 'Top',
'select' : 'vertices in (y > %f)' % wy,
'kind' : 'facet',
}
material_1 = {
'name' : 'solid',
'values' : {
'D' : stiffness_from_lame(2, 1e1, 1e0),
'density' : 1e-1,
},
}
field_1 = {
'name' : '2_displacement',
'dtype' : 'real',
'shape' : 'vector',
'region' : 'Y',
'approx_order' : 2,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '2_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '2_displacement',
'dual' : 'u',
}
ebc_1 = {
'name' : 'Fix',
'region' : 'Bottom',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.all' : 0.2},
}
lcbc_1 = {
'name' : 'rigid1',
'region' : 'Y2',
'dofs' : {'u.all' : None},
'kind' : 'rigid',
}
lcbc_2 = {
'name' : 'rigid2',
'region' : 'Y3',
'dofs' : {'u.all' : None},
'kind' : 'rigid',
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'balance' : """dw_lin_elastic.i.Y( solid.D, v, u ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from tests_basic import TestLCBC
output_name = 'test_lcbc_2d.vtk'
##
# 03.10.2007, c
class Test( TestLCBC ):
pass
| {
"repo_name": "BubuLK/sfepy",
"path": "tests/test_lcbc_2d.py",
"copies": "5",
"size": "2172",
"license": "bsd-3-clause",
"hash": 5617036390332742000,
"line_mean": 16.1023622047,
"line_max": 71,
"alpha_frac": 0.4700736648,
"autogenerated": false,
"ratio": 2.3155650319829424,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5285638696782943,
"avg_score": null,
"num_lines": null
} |
# 03.10.2007, c
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
# Whole domain $Y$.
region_1000 = {
'name' : 'Y',
'select' : 'all',
}
# Domain $Y_1$.
region_1 = {
'name' : 'Y1',
'select' : 'cells of group 1',
}
# Domain $Y_2$.
region_2 = {
'name' : 'Y2',
'select' : 'cells of group 2',
}
# Domain $Y_3$.
region_3 = {
'name' : 'Y3',
'select' : 'vertices in (x > %f) & (x < %f) & (y > %f) & (y < %f)'\
% (-0.3, 0.3, -0.48, -0.3),
}
wx = wy = 0.499
region_10 = {
'name' : 'Bottom',
'select' : 'vertices in (y < %f)' % -wy,
'kind' : 'facet',
}
region_11 = {
'name' : 'Top',
'select' : 'vertices in (y > %f)' % wy,
'kind' : 'facet',
}
material_1 = {
'name' : 'solid',
'values' : {
'lam' : 1e1,
'mu' : 1e0,
'density' : 1e-1,
},
}
field_1 = {
'name' : '2_displacement',
'dtype' : 'real',
'shape' : 'vector',
'region' : 'Y',
'approx_order' : 2,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '2_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '2_displacement',
'dual' : 'u',
}
ebc_1 = {
'name' : 'Fix',
'region' : 'Bottom',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.all' : 0.2},
}
lcbc_1 = {
'name' : 'rigid1',
'region' : 'Y2',
'dofs' : {'u.all' : None},
'kind' : 'rigid',
}
lcbc_2 = {
'name' : 'rigid2',
'region' : 'Y3',
'dofs' : {'u.all' : None},
'kind' : 'rigid',
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'balance' : """dw_lin_elastic_iso.i.Y( solid.lam, solid.mu, v, u ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from tests_basic import TestLCBC
output_name = 'test_lcbc_2d.vtk'
##
# 03.10.2007, c
class Test( TestLCBC ):
pass
| {
"repo_name": "RexFuzzle/sfepy",
"path": "tests/test_lcbc_2d.py",
"copies": "1",
"size": "2086",
"license": "bsd-3-clause",
"hash": 6218837036810237000,
"line_mean": 15.4251968504,
"line_max": 78,
"alpha_frac": 0.4506232023,
"autogenerated": false,
"ratio": 2.297356828193833,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8144353113704582,
"avg_score": 0.020725383357850107,
"num_lines": 127
} |
#034 The versatile else clause and a brief intro to the with statement
# else paired with exception handling
# if <condition>: *** else: ***
'''
Either it works out or it doesn't (if/else);
only when the work finishes cleanly do you get the follow-up (a loop's else);
if nothing went wrong, then go ahead (try/except/else).
'''
def showMaxFactor(num):
    count = num // 2
    while count > 1:
        if num % count == 0:
            print("The largest factor of %d is %d" % (num, count))
            break
        count -= 1
    else:
        print("%d is a prime number!" % num)
num = int(input("Please enter a number: "))
showMaxFactor(num)
try:
    int('abc')
except ValueError as reason:
    print("An error occurred: " + str(reason))
else:
    print("No exception was raised!")
# Using the with statement abstracts away the try/finally pattern; with closes the file automatically.
try:
    f = open('data.txt', 'r')  # open for reading so the loop below can iterate over the lines
    for each_line in f:
        print(each_line)
except OSError as reason:
    print("An error occurred: " + str(reason))
finally:
    f.close()
# The statements above can be rewritten as the statements below.
try:
    with open('data.txt', 'r') as f:  # with closes the file automatically when the block ends, so no explicit close is needed
        for each_line in f:
            print(each_line)
except OSError as reason:
    print("An error occurred: " + str(reason))
| {
"repo_name": "icelip/python170829",
"path": "034ไธฐๅฏ็else่ฏญๅฅๅ็ฎไป็with่ฏญๅฅๆต่ฏ.py",
"copies": "1",
"size": "1156",
"license": "epl-1.0",
"hash": 6245428356949404000,
"line_mean": 14,
"line_max": 56,
"alpha_frac": 0.5622222222,
"autogenerated": false,
"ratio": 2.0089285714285716,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.795192884037758,
"avg_score": 0.023844390650198274,
"num_lines": 60
} |
# 038 - Count and Say (Easy)
# https://leetcode.com/problems/count-and-say/
class Solution(object):
def countAndSay(self, n):
"""
:type n: int
:rtype: str
"""
# "1": "11" -> 1 one.
# "11" : "21" -> 2 ones.
# "21" : "1211" -> 1 two, 1 one.
# ...etc.
def recursive(n):
# Base case of the recursion.
if n == 1:
return "1"
# Build the character sequence recursively.
seq_n = recursive(n - 1)
# To store the counts of consecutive identical characters.
counts = []
for char in seq_n:
# Initialize counts with the first character found.
if len(counts) == 0:
counts.append((char, 1))
else:
# Get the last group of consecutive identical characters.
last_char, last_count = counts[-1]
# If current character is yet another of the same, update the count.
# ... otherwise create a new group from scratch with count one.
if char == last_char:
counts[-1] = (last_char, last_count + 1)
else:
counts.append((char, 1))
result = ""
# Create a result string where you put the count and the character.
for (char, count) in counts:
result += "{}{}".format(count, char)
return result
return recursive(n)
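# A quick illustrative check (sketch assuming the Solution class above): the
# sequence starts "1", "11", "21", "1211", "111221".
if __name__ == "__main__":
    _s = Solution()
    assert [_s.countAndSay(i) for i in range(1, 6)] == [
        "1", "11", "21", "1211", "111221"]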
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_038.py",
"copies": "1",
"size": "1573",
"license": "mit",
"hash": 782754866886537300,
"line_mean": 33.1956521739,
"line_max": 88,
"alpha_frac": 0.4691671964,
"autogenerated": false,
"ratio": 4.5201149425287355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5489282138928736,
"avg_score": null,
"num_lines": null
} |
# 03 - Cryptopangrams
# I don't know how to do it for N <= 10^100, only N <= 10^4.
# The "factorize first value" thing will obviously explode for
# such a large N.
num_cases = int(input())
for t in range(1, num_cases + 1):
# Parse the input.
n, length = [int(x) for x in input().split(" ")]
values = [int(x) for x in input().split(" ")]
# Take out the first value to factorize only it.
first_value = values[0]
first_factor = 2
while first_value % first_factor != 0:
first_factor += 1
second_factor = first_value//first_factor
prime_factors = {}
prime_values = []
def try_result(pos, carry_factor, factors_list, create_result=False):
# Copy so we can modify this list locally.
factors = factors_list[:]
# Append the carried factor to the copied list.
if create_result == False:
factors.append(carry_factor)
# If we already know the mappings, append the character instead.
else:
factors.append(prime_factors[carry_factor])
# We have reached the end, return the factors list.
if pos == len(values):
return factors
next_value = values[pos]
        # We could've carried a bad factor earlier because of ambiguity.
if next_value % carry_factor != 0:
# This was a bad path.
return []
# Now get what the next factor is.
next_factor = next_value//carry_factor
return try_result(pos + 1, next_factor, factors, create_result)
# Now with both initial factors, we don't really know which one
# to start carrying (and there can even be ambiguity), so let's
# try both paths.
real_first, real_second = -1, -1
# Start carrying the first factor.
l1 = try_result(1, first_factor, [second_factor])
# Start carrying the second factor.
l2 = try_result(1, second_factor, [first_factor])
# One list will come empty, the other won't.
if len(l1) > len(l2):
prime_values = list(sorted(set(l1)))
real_first = first_factor
real_second = second_factor
else:
prime_values = list(sorted(set(l2)))
real_first = second_factor
real_second = first_factor
# Now that for sure we have 26 unique sorted primes, let's map them
# to characters of the alphabet in increasing order.
prime_factors = {
prime_values[i] : chr(ord('A') + i) for i in range(len(prime_values))
}
# Now the function instead of building a list of primes, will build
# a list of the characters mapped with the already found primes.
res = try_result(1, real_first, [prime_factors[real_second]], create_result=True)
res = "".join(res)
# Case #1: CJQUIZKNOWBEVYOFDPFLUXALGORITHMS
# Case #2: SUBDERMATOGLYPHICFJKNQVWXZ
print("Case #{}: {}".format(t, res))
| {
"repo_name": "zubie7a/CPP",
"path": "Google_Code_Jam/2019/Qualifier_Round/03_Cryptopangrams.py",
"copies": "3",
"size": "2858",
"license": "mit",
"hash": -506474193357529900,
"line_mean": 35.641025641,
"line_max": 85,
"alpha_frac": 0.6231630511,
"autogenerated": false,
"ratio": 3.576971214017522,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0010818425099061175,
"num_lines": 78
} |
#03
import random
class Jugador(object):
def __init__(self, nombre="Jugador" ):
self.nombre= nombre
self.hp_max=random.randrange(45,55)
self.mp_max=random.randrange(40,50)
self.fuerza=random.randrange(3,7)
self.inteligencia=random.randrange(2,5)
self.hp=self.hp_max
self.mp=self.mp_max
self.habilidades=[Bola_de_fuego(),Golpe_letal(),Golpiar()]
def __str__(self):
return str(self.nombre)+" HP" +str(self.hp_max)+"/"+str(self.hp)
def stats(self):
print self.nombre
print "Hp: ",self.hp_max,"(max) / ",self.hp
print "Mp: ",self.mp_max, "(max)/ ", self.mp
print "Fuerza:",self.fuerza
print "Inteligencia:",self.inteligencia
def eleccion(self):
print "Elija una habilidad"
print "0-Bola de fuego **10mp"
print "1-Golpe Letal **5mp"
print "2-Golpiar **no requiere mp)"
x= input("? ")
return x
class AI(object):
def __init__(self):
self.hp_max=random.randrange(45,55)
self.mp_max=random.randrange(40,50)
self.fuerza=random.randrange(3,7)
self.inteligencia=random.randrange(2,5)
self.hp=self.hp_max
self.mp=self.mp_max
self.habilidades=[Bola_de_fuego(),Golpe_letal(),Golpiar()]
def __str__(self):
return "AI: " + " HP" +str(self.hp_max)+"/"+str(self.hp)
def stats(self):
print "Hp: ",self.hp_max,"(max) / ",self.hp
print "Mp: ",self.mp_max, "(max)/ ", self.mp
print "Fuerza",self.fuerza
print "Inteligencia",self.inteligencia
def eleccion(self):
x=random.randrange(0,3)
return x
class Bola_de_fuego(object):
def __init__(self):
self.dano=0
self.nombre="BOLA DE FUEGO"
def devolver_ataque(self,origen):
if origen.mp<10:
return 0
else:
self.dano=random.randrange(13,19)+origen.inteligencia
origen.mp+= -10
return self.dano
class Golpe_letal(object):
def __init__(self):
self.nombre="GOLPE LETAL"
self.dano=0
def devolver_ataque(self,origen):
if origen.mp<5:
return 0
else:
self.dano=random.randrange(7,15)+origen.fuerza
origen.mp-=5
return self.dano
class Golpiar(object):
def __init__(self):
self.nombre="Golpiar"
self.dano=0
def devolver_ataque(self,origen):
self.dano=origen.fuerza+origen.inteligencia
return self.dano
def main():
print " BIENVENIDOS A CHELINGAME\n"
print "Modo de juego"
print "1-Single Player"
print "2-Multiplayer"
modo=input("? ")
if modo==1:
j2=AI()
if modo==2:
print "ingrese su nombre j2"
name=raw_input("? ")
j2=Jugador(name)
print "ingrese su nombre j1"
name=raw_input("? ")
j1=Jugador(name)
print"STATS J1"
j1.stats()
tiempo=raw_input("..,.")
print "STATS J2"
j2.stats()
tiempo=raw_input(".....")
while j1.hp>0 and j2.hp>0:
print "Turno J1"
print j1
print "mp",j1.mp
print j2
elec1=j1.eleccion()
print "Se utilizo la hablidad",j1.habilidades[elec1].nombre
dano1=j1.habilidades[elec1].devolver_ataque(j1)
print "dano efectuado",dano1
j2.hp-=dano1
if j1.hp<=0 or j2.hp<=0:
break
tiempo=raw_input(".....")
print "...."
print "Turno J2"
print j1
print j2
print "mp",j2.mp
elec2=j2.eleccion()
print "Se utilizo la hablidad",j2.habilidades[elec2].nombre
dano2=j2.habilidades[elec2].devolver_ataque(j2)
print "dano efectuado",dano2
j1.hp-=dano2
tiempo=raw_input(".....")
    if j1.hp>0:
        print "Player 1 wins"
    else:
        print "Player 2 wins"
main()
| {
"repo_name": "chelinho139/Tutoriales-Python",
"path": "tut32.py",
"copies": "1",
"size": "3899",
"license": "apache-2.0",
"hash": -587982775870114300,
"line_mean": 26.0763888889,
"line_max": 72,
"alpha_frac": 0.5586047705,
"autogenerated": false,
"ratio": 2.8294629898403483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8755207944838425,
"avg_score": 0.026571963100384746,
"num_lines": 144
} |
# 03 - Parenting Partening Returns
# Scheduling kid activities and domestic necessities is a challenge for a
# couple of parents of a 3-year-old child. They have a list of N activities to
# take care of during the day; each activity happens during a specified
# interval of the day. They need to assign each activity to one of the parents,
# so that neither of them is responsible for 2 activities that overlap. An
# activity that ends at time t is not considered to overlap with an activity
# that starts at time t. The parents' names are Jamie and Cameron. Output who
# will do each activity
# from the given input in a way that someone doesn't have to do two activities
# that overlap, or show that it's impossible if there's no arrangement for it.
num_cases = int(input())
for t in range(1, num_cases + 1):
# The number of activities to perform per test case.
n = int(input())
# An array to hold the input activities.
activities = []
for i in range(n):
# Input is two values, starting and ending minute in the day.
# Convert into a list the starting and ending minute.
activity = list(map(lambda x: int(x), input().split(" ")))
# Append the index of the current activity as given in input, since
# we'll sort this list, it's so that we are able to restore the original
# order of activities as given.
activity.append(i)
# Append a partner by default to handle all activities.
activity.append("C")
# Now add this activity to the list of activities.
activities.append(activity)
activities = sorted(activities)
# Keep track of the most recent activity assigned to each parent.
c_last_activity, j_last_activity = activities[0], []
possible = True
# Check activities pair by pair as their real order in the day is.
for i in range(0, len(activities) - 1):
cur_activity = activities[i]
next_activity = activities[i + 1]
# Compare an activity with the one chronologically coming after it.
cur_ends = cur_activity[1]
next_starts = next_activity[0]
# This means that another parent has to do it.
if next_starts < cur_ends:
cur_parent = cur_activity[3]
# Given the parent of the current activity that overlaps with the
# next activity, try to reassign the next activity to the other.
# If the other already has an activity that overlaps with next,
# then it's impossible.
if cur_parent == "C":
# Try to give next activity to "J".
# If "J" didn't already have an activity, give them next.
if len(j_last_activity) != 0:
j_last_ends = j_last_activity[1]
if next_starts < j_last_ends:
# It's impossible because neither can take it.
possible = False
break
# If it's possible to give "J" the next activity, just give it.
next_activity[3] = "J"
j_last_activity = next_activity
activities[i + 1] = next_activity
elif cur_parent == "J":
# Try to give next activity to "C"
c_last_ends = c_last_activity[1]
if next_starts < c_last_ends:
# It's impossible because neither can take it.
possible = False
break
next_activity[3] = "C"
c_last_activity = next_activity
activities[i + 1] = next_activity
else:
# If there's no overlap between current pair... See that there's no
# overlap with last "C" and last "J"
cur_parent = cur_activity[3]
c_last_ends = c_last_activity[1]
if next_starts < c_last_ends:
# Try giving it to "J".
j_last_ends = j_last_activity[1]
if next_starts < j_last_ends:
# Neither "J" can take it, so it's impossible.
possible = False
break
next_activity[3] = "J"
j_last_activity = next_activity
activities[i + 1] = next_activity
else:
# Remember to update "C" last activity!
c_last_activity = next_activity
original_order = sorted(activities, key=lambda activity: activity[2])
parents_result = [activity[3] for activity in original_order]
result_string = "".join(parents_result)
if not possible:
result_string = "IMPOSSIBLE"
print("Case #{}: {}".format(t, result_string))
| {
"repo_name": "Zubieta/CPP",
"path": "Google_Code_Jam/2020/Qualifier_Round/03_Parenting_Partnering_Returns.py",
"copies": "2",
"size": "4723",
"license": "mit",
"hash": -5517275407275062000,
"line_mean": 44.4134615385,
"line_max": 80,
"alpha_frac": 0.5864916367,
"autogenerated": false,
"ratio": 4.28195829555757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5868449932257569,
"avg_score": null,
"num_lines": null
} |
# 03_rgb_cheerlights.py
# Set the color to the latest color that
# anyone in the world tweeted as #cheerlights
from squid import *
import urllib, time
squid = Squid(18, 23, 24)
cheerlights_url = "http://api.thingspeak.com/channels/1417/field/1/last.txt"
# set up a map data structure with the RGB values for the different colors
color_map = {"red":(100,0,0),
"green":(0,100,0),
"blue":(0,0,100),
"cyan":(0,50,100),
"white":(50,50,50),
"warmwhite":(100,100,100),
"purple":(50,0,100),
"magenta":(100,0,100),
"yellow":(100,100,0),
"orange":(100,50,0),
"pink":(100,0,100),
"oldlace":(100,100,100)}
try:
while True:
cheerlights = urllib.urlopen(cheerlights_url) # Open cheerlights file via URL
chosen_color = cheerlights.read() # Read the last cheerlights colour
cheerlights.close() # Close cheerlights file
print(chosen_color)
color = color_map.get(chosen_color, (0, 0, 0)) # LED off if color name not found
squid.set_color(color)
time.sleep(2)
finally:
GPIO.cleanup()
| {
"repo_name": "teknoteacher/raspirobot3",
"path": "02_rgb_cheerlights.py",
"copies": "1",
"size": "1247",
"license": "mit",
"hash": 290496986510902000,
"line_mean": 33.6388888889,
"line_max": 88,
"alpha_frac": 0.5485164395,
"autogenerated": false,
"ratio": 3.3164893617021276,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.916825942373348,
"avg_score": 0.0393492754937295,
"num_lines": 36
} |
# 03_rgb.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
# Based on Recipe 9.9 in The Raspberry Pi Cookbook by Simon Monk.
from Tkinter import * # tkinter provides the graphical user interface (GUI)
import RPi.GPIO as GPIO
import time
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
# Start Pulse Width Modulation (PWM) on the red, green and blue channels to
# control the brightness of the LEDs.
# Follow this link for more info on PWM: http://en.wikipedia.org/wiki/Pulse-width_modulation
pwmRed = GPIO.PWM(18, 500)
pwmRed.start(100)
pwmGreen = GPIO.PWM(23, 500)
pwmGreen.start(100)
pwmBlue = GPIO.PWM(24, 500)
pwmBlue.start(100)
# group together all of the GUI code into a class called App
class App:
# this function gets called when the app is created
def __init__(self, master):
# A frame holds the various GUI controls
frame = Frame(master)
frame.pack()
# Create the labels and position them in a grid layout
Label(frame, text='Red').grid(row=0, column=0)
Label(frame, text='Green').grid(row=1, column=0)
Label(frame, text='Blue').grid(row=2, column=0)
# Create the sliders and position them in a grid layout
        # the 'command' attribute specifies a method to call when
# a slider is moved
scaleRed = Scale(frame, from_=0, to=100,
orient=HORIZONTAL, command=self.updateRed)
scaleRed.grid(row=0, column=1)
scaleGreen = Scale(frame, from_=0, to=100,
orient=HORIZONTAL, command=self.updateGreen)
scaleGreen.grid(row=1, column=1)
scaleBlue = Scale(frame, from_=0, to=100,
orient=HORIZONTAL, command=self.updateBlue)
scaleBlue.grid(row=2, column=1)
    # These methods are called whenever a slider moves
def updateRed(self, duty):
# change the led brightness to match the slider
pwmRed.ChangeDutyCycle(float(duty))
def updateGreen(self, duty):
pwmGreen.ChangeDutyCycle(float(duty))
def updateBlue(self, duty):
pwmBlue.ChangeDutyCycle(float(duty))
# Set the GUI running, give the window a title, size and position
root = Tk()
root.wm_title('RGB LED Control')
app = App(root)
root.geometry("200x150+0+0")
try:
root.mainloop()
finally:
print("Cleaning up")
GPIO.cleanup() | {
"repo_name": "simonmonk/pi_starter_kit",
"path": "03_rgb.py",
"copies": "1",
"size": "2518",
"license": "mit",
"hash": 4361744856778059000,
"line_mean": 32.5866666667,
"line_max": 92,
"alpha_frac": 0.6707704527,
"autogenerated": false,
"ratio": 3.388963660834455,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9495537764828734,
"avg_score": 0.012839269741144087,
"num_lines": 75
} |
# 04.06.2007, c
# last revision: 25.02.2008
from __future__ import absolute_import
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/various_formats/small3d.mesh'
material_1 = {
'name' : 'coef',
'values' : {'coef' : 1.0},
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Left',
'select' : 'vertices in (x < -0.499)',
'kind' : 'facet',
}
region_2 = {
'name' : 'Right',
'select' : 'vertices in (x > 0.499)',
'kind' : 'facet',
}
region_3 = {
'name' : 'Near',
'select' : 'vertices in (y < -0.499)',
'kind' : 'facet',
}
region_4 = {
'name' : 'Far',
'select' : 'vertices in (y > 0.499)',
'kind' : 'facet',
}
region_5 = {
'name' : 'Bottom',
'select' : 'vertices in (z < -0.499)',
'kind' : 'facet',
}
region_6 = {
'name' : 'Top',
'select' : 'vertices in (z > 0.499)',
'kind' : 'facet',
}
field_1 = {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : 2,
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
ebcs = {}
epbc_10 = {
'name' : 'rl',
'region' : ['Left', 'Right'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_x_plane',
}
epbc_12 = {
'name' : 'tb',
'region' : ['Top', 'Bottom'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_z_plane',
}
epbc_13 = {
'name' : 'nf',
'region' : ['Near', 'Far'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_y_plane',
}
from sfepy.discrete.fem.periodic import match_x_plane, match_y_plane, match_z_plane
functions = {
'match_x_plane' : (match_x_plane,),
'match_y_plane' : (match_y_plane,),
'match_z_plane' : (match_z_plane,),
}
from test_periodic_bc_2d import Test
| {
"repo_name": "BubuLK/sfepy",
"path": "tests/test_periodic_bc_3d.py",
"copies": "5",
"size": "2331",
"license": "bsd-3-clause",
"hash": 4944190231392829000,
"line_mean": 18.9230769231,
"line_max": 83,
"alpha_frac": 0.4826254826,
"autogenerated": false,
"ratio": 2.4745222929936306,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.545714777559363,
"avg_score": null,
"num_lines": null
} |
# 04.06.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/various_formats/small3d.mesh'
material_1 = {
'name' : 'coef',
'values' : {'coef' : 1.0},
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Left',
'select' : 'nodes in (x < -0.499)',
}
region_2 = {
'name' : 'Right',
'select' : 'nodes in (x > 0.499)',
}
region_3 = {
'name' : 'Near',
'select' : 'nodes in (y < -0.499)',
}
region_4 = {
'name' : 'Far',
'select' : 'nodes in (y > 0.499)',
}
region_5 = {
'name' : 'Bottom',
'select' : 'nodes in (z < -0.499)'
}
region_6 = {
'name' : 'Top',
'select' : 'nodes in (z > 0.499)'
}
field_1 = {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : (3,),
'region' : 'Omega',
'approx_order' : 2,
}
field_2 = {
'name' : 'pressure',
'dtype' : 'real',
'shape' : (1,),
'region' : 'Omega',
'approx_order' : 1,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
ebcs = {}
epbc_10 = {
'name' : 'rl',
'region' : ['Left', 'Right'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_x_plane',
}
epbc_12 = {
'name' : 'tb',
'region' : ['Top', 'Bottom'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_z_plane',
}
epbc_13 = {
'name' : 'nf',
'region' : ['Near', 'Far'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_y_plane',
}
fe = {
'chunk_size' : 1000
}
from sfepy.fem.periodic import *
functions = {
'match_x_plane' : (match_x_plane,),
'match_y_plane' : (match_y_plane,),
'match_z_plane' : (match_z_plane,),
}
from test_periodic_bc_2d import Test
| {
"repo_name": "olivierverdier/sfepy",
"path": "tests/test_periodic_bc_3d.py",
"copies": "1",
"size": "2123",
"license": "bsd-3-clause",
"hash": -8523634816320629000,
"line_mean": 17.6228070175,
"line_max": 65,
"alpha_frac": 0.4696184644,
"autogenerated": false,
"ratio": 2.4070294784580497,
"config_test": false,
"has_no_keywords": true,
"few_assignments": false,
"quality_score": 0.8266830191953722,
"avg_score": 0.021963550180865498,
"num_lines": 114
} |
# 04.06.2007, c
# last revision: 25.02.2008
filename_mesh = 'database/tests/small3d.mesh'
material_1 = {
'name' : 'coef',
'mode' : 'here',
'region' : 'Omega',
'coef' : 1.0,
}
region_1000 = {
'name' : 'Omega',
'select' : 'all',
}
region_1 = {
'name' : 'Left',
'select' : 'nodes in (x < -0.499)',
}
region_2 = {
'name' : 'Right',
'select' : 'nodes in (x > 0.499)',
}
region_3 = {
'name' : 'Near',
'select' : 'nodes in (y < -0.499)',
}
region_4 = {
'name' : 'Far',
'select' : 'nodes in (y > 0.499)',
}
region_5 = {
'name' : 'Bottom',
'select' : 'nodes in (z < -0.499)'
}
region_6 = {
'name' : 'Top',
'select' : 'nodes in (z > 0.499)'
}
field_1 = {
'name' : '3_displacement',
'dim' : (3,1),
'flags' : (),
'domain' : 'Omega',
'bases' : {'Omega' : '3_4_P2'}
}
field_2 = {
'name' : 'pressure',
'dim' : (1,1),
'flags' : (),
'domain' : 'Omega',
'bases' : {'Omega' : '3_4_P1'}
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
variable_3 = {
'name' : 'p',
'kind' : 'unknown field',
'field' : 'pressure',
'order' : 1,
}
variable_4 = {
'name' : 'q',
'kind' : 'test field',
'field' : 'pressure',
'dual' : 'p',
}
ebcs = {}
epbc_10 = {
'name' : 'rl',
'region' : ['Left', 'Right'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_x_plane',
}
epbc_12 = {
'name' : 'tb',
'region' : ['Top', 'Bottom'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_z_plane',
}
epbc_13 = {
'name' : 'nf',
'region' : ['Near', 'Far'],
'dofs' : {'u.all' : 'u.all', 'p.0' : 'p.0'},
'match' : 'match_y_plane',
}
fe = {
'chunk_size' : 1000
}
from sfepy.fem.periodic import *
from sfepy.base.testing import TestCommon
##
# 01.06.2007, c
class Test( TestCommon ):
##
# 01.06.2007, c
def from_conf( conf, options ):
from sfepy.fem import ProblemDefinition
problem = ProblemDefinition.from_conf( conf, init_equations = False )
test = Test( problem = problem,
conf = conf, options = options )
return test
from_conf = staticmethod( from_conf )
##
# c: 01.06.2007, r: 18.02.2008
def test_pbc( self ):
problem = self.problem
conf = self.conf
problem.variables.equation_mapping( conf.ebcs, conf.epbcs,
problem.domain.regions,
None, conf.funmod )
state = problem.create_state_vector()
problem.apply_ebc( state )
return problem.variables.has_ebc( state )
| {
"repo_name": "certik/sfepy",
"path": "tests/test_periodic_bc_3d.py",
"copies": "1",
"size": "2843",
"license": "bsd-3-clause",
"hash": 8882517256896980000,
"line_mean": 19.9044117647,
"line_max": 77,
"alpha_frac": 0.4766092156,
"autogenerated": false,
"ratio": 2.7076190476190476,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8556173455837444,
"avg_score": 0.02561096147632093,
"num_lines": 136
} |
#04_07_hangman_print_word
import random
words = ['chicken', 'dog', 'cat', 'mouse', 'frog']
lives_remaining = 14
guessed_letters = ''
def play():
word = pick_a_word()
while True:
guess = get_guess(word)
if process_guess(guess, word):
print('You win! Well Done!')
break
if lives_remaining == 0:
print('You are Hung!')
print('The word was: ' + word)
break
def pick_a_word():
return random.choice(words)
def get_guess(word):
print_word_with_blanks(word)
print('Lives Remaining: ' + str(lives_remaining))
guess = input(' Guess a letter or whole word?')
return guess
def process_guess(guess, word):
global lives_remaining
global guessed_letters
lives_remaining = lives_remaining - 1
guessed_letters = guessed_letters + guess
return False
def print_word_with_blanks(word):
display_word = ''
for letter in word:
if guessed_letters.find(letter) > -1:
# letter found
display_word = display_word + letter
else:
# letter not found
display_word = display_word + '-'
print(display_word)
play() | {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "04_07_hangman_print_word.py",
"copies": "1",
"size": "1042",
"license": "mit",
"hash": 8603108288111661000,
"line_mean": 20.7291666667,
"line_max": 50,
"alpha_frac": 0.6775431862,
"autogenerated": false,
"ratio": 2.902506963788301,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40800501499883013,
"avg_score": null,
"num_lines": null
} |
# 04.08.2009
#!
#! Homogenization: Linear Elasticity
#! =================================
#$ \centerline{Example input file, \today}
#! Homogenization of heterogeneous linear elastic material
from sfepy.fem.periodic import *
from sfepy.mechanics.matcoefs import stiffness_tensor_youngpoisson
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part
def recovery_le( pb, corrs, macro ):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'u', dim )
out['u_mic'] = Struct( name = 'output_data',
mode = 'vertex', data = mic_u,
var_name = 'u', dofs = None )
stress_Ym, strain_Ym = compute_stress_strain_u( pb, 'i1', 'Ym', 'matrix.D', 'u', mic_u )
stress_Ym += compute_mac_stress_part( pb, 'i1', 'Ym', 'matrix.D', 'u', macro['strain'] )
stress_Yc, strain_Yc = compute_stress_strain_u( pb, 'i1', 'Yc', 'reinf.D', 'u', mic_u )
stress_Yc += compute_mac_stress_part( pb, 'i1', 'Yc', 'reinf.D', 'u', macro['strain'] )
strain = macro['strain'] + strain_Ym + strain_Yc
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress_Ym + stress_Yc,
dofs = None )
return out
#! Mesh
#! ----
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
dim = 3
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
#! Regions
#! -------
#! Regions, edges, ...
regions = {
'Y' : ('all', {}),
'Ym' : ('elements of group 1', {}),
'Yc' : ('elements of group 2', {}),
}
regions.update( define_box_regions( dim, region_lbn, region_rtf ) )
#! Materials
#! ---------
materials = {
'matrix' : ({'D' : stiffness_tensor_youngpoisson( dim, 0.7e9, 0.4 ) },),
'reinf' : ({'D' : stiffness_tensor_youngpoisson( dim, 70.0e9, 0.2 ) },),
}
#! Fields
#! ------
#! Scalar field for corrector basis functions.
fields = {
'corrector' : ('real', dim, 'Y', 1),
}
#! Variables
#! ---------
#! Unknown and corresponding test variables. Parameter fields
#! used for evaluation of homogenized coefficients.
variables = {
'u' : ('unknown field', 'corrector', 0),
'v' : ('test field', 'corrector', 'u'),
'Pi' : ('parameter field', 'corrector', 'u'),
'Pi1' : ('parameter field', 'corrector', '(set-to-None)'),
'Pi2' : ('parameter field', 'corrector', '(set-to-None)'),
}
#! Functions
functions = {
'match_x_plane' : (match_x_plane,),
'match_y_plane' : (match_y_plane,),
'match_z_plane' : (match_z_plane,),
}
#! Boundary Conditions
#! -------------------
#! Fixed nodes.
ebcs = {
'fixed_u' : ('Corners', {'u.all' : 0.0}),
}
if dim == 3:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'}, 'match_y_plane'),
'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'}, 'match_z_plane'),
}
else:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'}, 'match_y_plane'),
}
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim] ]
#! Integrals
#! ---------
#! Define the integral type Volume/Surface and quadrature rule.
integrals = {
'i1' : ('v', 'gauss_o2_d%d' % dim),
'i2' : ('s', 'gauss_o2_d%d' % (dim-1)),
}
#! Options
#! -------
#! Various problem-specific options.
options = {
'coefs' : 'coefs',
'requirements' : 'requirements',
'ls' : 'ls', # linear solver to use
'volume' : { 'variables' : ['u'],
'expression' : 'd_volume.i1.Y( u )' },
'output_dir' : 'output',
'coefs_filename' : 'output/coefs_le.h5',
'recovery_hook' : 'recovery_le',
}
#! Equations
#! ---------
#! Equations for corrector functions.
equation_corrs = {
'balance_of_forces' :
"""dw_lin_elastic.i1.Ym( matrix.D, v, u )
+ dw_lin_elastic.i1.Yc( reinf.D, v, u ) =
- dw_lin_elastic.i1.Ym( matrix.D, v, Pi )
- dw_lin_elastic.i1.Yc( reinf.D, v, Pi )"""
}
#! Expressions for homogenized linear elastic coefficients.
expr_coefs = """dw_lin_elastic.i1.Ym( matrix.D, Pi1, Pi2 )
+ dw_lin_elastic.i1.Yc( reinf.D, Pi1, Pi2 )"""
#! Coefficients
#! ------------
#! Definition of homogenized acoustic coefficients.
def set_elastic(variables, ir, ic, mode, pis, corrs_rs):
mode2var = {'row' : 'Pi1', 'col' : 'Pi2'}
val = pis.states[ir, ic]['u'] + corrs_rs.states[ir, ic]['u']
variables[mode2var[mode]].data_from_any(val)
coefs = {
'D' : {
'requires' : ['pis', 'corrs_rs'],
'expression' : expr_coefs,
'set_variables' : set_elastic,
'class' : cb.CoefSymSym,
},
'filenames' : {},
}
requirements = {
'pis' : {
'variables' : ['u'],
'class' : cb.ShapeDimDim,
},
'corrs_rs' : {
'requires' : ['pis'],
'ebcs' : ['fixed_u'],
'epbcs' : all_periodic,
'equations' : equation_corrs,
'set_variables' : [('Pi', 'pis', 'u')],
'class' : cb.CorrDimDim,
'save_name' : 'corrs_le',
'dump_variables' : ['u'],
},
}
#! Solvers
#! -------
#! Define linear and nonlinear solver.
solvers = {
'ls' : ('ls.umfpack', {}),
'newton' : ('nls.newton', {'i_max' : 1,
'eps_a' : 1e-4,
'problem' : 'nonlinear',
})
}
#! FE assembling parameters
#! ------------------------
#! 'chunk_size' determines maximum number of elements to assemble in one C
#! function call. Higher values mean faster assembling, but also more memory
#! usage.
fe = {
'chunk_size' : 1000
}
| {
"repo_name": "olivierverdier/sfepy",
"path": "examples/homogenization/linear_homogenization.py",
"copies": "1",
"size": "6107",
"license": "bsd-3-clause",
"hash": -2206429618670202400,
"line_mean": 30.9738219895,
"line_max": 107,
"alpha_frac": 0.5323399378,
"autogenerated": false,
"ratio": 2.8888363292336803,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8746705976160316,
"avg_score": 0.03489405817467272,
"num_lines": 191
} |
# 04.08.2009
#!
#! Homogenization: Linear Elasticity
#! =================================
#$ \centerline{Example input file, \today}
#! Homogenization of heterogeneous linear elastic material
import sfepy.discrete.fem.periodic as per
from sfepy.mechanics.matcoefs import stiffness_from_youngpoisson
from sfepy.homogenization.utils import define_box_regions
import sfepy.homogenization.coefs_base as cb
from sfepy import data_dir
from sfepy.base.base import Struct
from sfepy.homogenization.recovery import compute_micro_u, compute_stress_strain_u, compute_mac_stress_part
def recovery_le( pb, corrs, macro ):
out = {}
dim = corrs['corrs_le']['u_00'].shape[1]
mic_u = - compute_micro_u( corrs['corrs_le'], macro['strain'], 'u', dim )
out['u_mic'] = Struct( name = 'output_data',
mode = 'vertex', data = mic_u,
var_name = 'u', dofs = None )
stress_Y, strain_Y = compute_stress_strain_u( pb, 'i', 'Y', 'mat.D', 'u', mic_u )
stress_Y += compute_mac_stress_part( pb, 'i', 'Y', 'mat.D', 'u', macro['strain'] )
strain = macro['strain'] + strain_Y
out['cauchy_strain'] = Struct( name = 'output_data',
mode = 'cell', data = strain,
dofs = None )
out['cauchy_stress'] = Struct( name = 'output_data',
mode = 'cell', data = stress_Y,
dofs = None )
return out
#! Mesh
#! ----
filename_mesh = data_dir + '/meshes/3d/matrix_fiber.mesh'
dim = 3
region_lbn = (0, 0, 0)
region_rtf = (1, 1, 1)
#! Regions
#! -------
#! Regions, edges, ...
regions = {
'Y' : 'all',
'Ym' : 'cells of group 1',
'Yc' : 'cells of group 2',
}
regions.update( define_box_regions( dim, region_lbn, region_rtf ) )
#! Materials
#! ---------
materials = {
'mat' : ({'D' : {'Ym': stiffness_from_youngpoisson(dim, 7.0e9, 0.4),
'Yc': stiffness_from_youngpoisson(dim, 70.0e9, 0.2)}},),
}
#! Fields
#! ------
#! Scalar field for corrector basis functions.
fields = {
'corrector' : ('real', dim, 'Y', 1),
}
#! Variables
#! ---------
#! Unknown and corresponding test variables. Parameter fields
#! used for evaluation of homogenized coefficients.
variables = {
'u' : ('unknown field', 'corrector', 0),
'v' : ('test field', 'corrector', 'u'),
'Pi' : ('parameter field', 'corrector', 'u'),
'Pi1' : ('parameter field', 'corrector', '(set-to-None)'),
'Pi2' : ('parameter field', 'corrector', '(set-to-None)'),
}
#! Functions
functions = {
'match_x_plane' : (per.match_x_plane,),
'match_y_plane' : (per.match_y_plane,),
'match_z_plane' : (per.match_z_plane,),
}
#! Boundary Conditions
#! -------------------
#! Fixed nodes.
ebcs = {
'fixed_u' : ('Corners', {'u.all' : 0.0}),
}
if dim == 3:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Near', 'Far'], {'u.all' : 'u.all'}, 'match_y_plane'),
'periodic_z' : (['Top', 'Bottom'], {'u.all' : 'u.all'}, 'match_z_plane'),
}
else:
epbcs = {
'periodic_x' : (['Left', 'Right'], {'u.all' : 'u.all'}, 'match_x_plane'),
'periodic_y' : (['Bottom', 'Top'], {'u.all' : 'u.all'}, 'match_y_plane'),
}
all_periodic = ['periodic_%s' % ii for ii in ['x', 'y', 'z'][:dim] ]
#! Integrals
#! ---------
#! Define the integral type Volume/Surface and quadrature rule.
integrals = {
'i' : 2,
}
#! Options
#! -------
#! Various problem-specific options.
options = {
'coefs' : 'coefs',
'requirements' : 'requirements',
'ls' : 'ls', # linear solver to use
'volume' : { 'variables' : ['u'],
'expression' : 'd_volume.i.Y( u )' },
'output_dir' : 'output',
'coefs_filename' : 'coefs_le',
'recovery_hook' : 'recovery_le',
}
#! Equations
#! ---------
#! Equations for corrector functions.
equation_corrs = {
'balance_of_forces' :
"""dw_lin_elastic.i.Y(mat.D, v, u ) =
- dw_lin_elastic.i.Y(mat.D, v, Pi )"""
}
#! Expressions for homogenized linear elastic coefficients.
expr_coefs = """dw_lin_elastic.i.Y(mat.D, Pi1, Pi2 )"""
#! Coefficients
#! ------------
#! Definition of homogenized acoustic coefficients.
def set_elastic(variables, ir, ic, mode, pis, corrs_rs):
mode2var = {'row' : 'Pi1', 'col' : 'Pi2'}
val = pis.states[ir, ic]['u'] + corrs_rs.states[ir, ic]['u']
variables[mode2var[mode]].set_data(val)
coefs = {
'D' : {
'requires' : ['pis', 'corrs_rs'],
'expression' : expr_coefs,
'set_variables' : set_elastic,
'class' : cb.CoefSymSym,
},
'filenames' : {},
}
requirements = {
'pis' : {
'variables' : ['u'],
'class' : cb.ShapeDimDim,
},
'corrs_rs' : {
'requires' : ['pis'],
'ebcs' : ['fixed_u'],
'epbcs' : all_periodic,
'equations' : equation_corrs,
'set_variables' : [('Pi', 'pis', 'u')],
'class' : cb.CorrDimDim,
'save_name' : 'corrs_le',
'dump_variables' : ['u'],
},
}
#! Solvers
#! -------
#! Define linear and nonlinear solver.
solvers = {
'ls' : ('ls.scipy_direct', {}),
'newton' : ('nls.newton', {
'i_max' : 1,
'eps_a' : 1e-4,
})
}
| {
"repo_name": "RexFuzzle/sfepy",
"path": "examples/homogenization/linear_homogenization.py",
"copies": "1",
"size": "5287",
"license": "bsd-3-clause",
"hash": 5754898932409013000,
"line_mean": 28.8700564972,
"line_max": 107,
"alpha_frac": 0.5313031965,
"autogenerated": false,
"ratio": 2.912947658402204,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8776932754419711,
"avg_score": 0.03346362009649854,
"num_lines": 177
} |
#04_08_hangman_full
import random
words = ['chicken', 'dog', 'cat', 'mouse', 'frog']
lives_remaining = 14
guessed_letters = ''
def play():
word = pick_a_word()
while True:
guess = get_guess(word)
if process_guess(guess, word):
print('You win! Well Done!')
break
if lives_remaining == 0:
print('You are Hung!')
print('The word was: ' + word)
break
def pick_a_word():
return random.choice(words)
def get_guess(word):
print_word_with_blanks(word)
print('Lives Remaining: ' + str(lives_remaining))
guess = input(' Guess a letter or whole word?')
return guess
def print_word_with_blanks(word):
display_word = ''
for letter in word:
if guessed_letters.find(letter) > -1:
# letter found
display_word = display_word + letter
else:
# letter not found
display_word = display_word + '-'
print(display_word)
def process_guess(guess, word):
if len(guess) > 1:
return whole_word_guess(guess, word)
else:
return single_letter_guess(guess, word)
def whole_word_guess(guess, word):
global lives_remaining
if guess == word:
return True
else:
lives_remaining = lives_remaining - 1
return False
def single_letter_guess(guess, word):
global guessed_letters
global lives_remaining
if word.find(guess) == -1:
# letter guess was incorrect
lives_remaining = lives_remaining - 1
guessed_letters = guessed_letters + guess
if all_letters_guessed(word):
return True
return False
def all_letters_guessed(word):
for letter in word:
if guessed_letters.find(letter) == -1:
return False
return True
play() | {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "04_08_hangman_full.py",
"copies": "1",
"size": "1572",
"license": "mit",
"hash": -7337390986606939000,
"line_mean": 20.2567567568,
"line_max": 50,
"alpha_frac": 0.6844783715,
"autogenerated": false,
"ratio": 2.9165120593692024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.41009904308692025,
"avg_score": null,
"num_lines": null
} |
#04_09_hangman_full_solution
import random
words = ['chicken', 'dog', 'cat', 'mouse', 'frog']
lives_remaining = 14
guessed_letters = ''
def play():
word = pick_a_word()
while True:
guess = get_guess(word)
if process_guess(guess, word):
print('You win! Well Done!')
break
if lives_remaining == 0:
print('You are Hung!')
print('The word was: ' + word)
break
def pick_a_word():
return random.choice(words)
def get_guess(word):
print_word_with_blanks(word)
print('Lives Remaining: ' + str(lives_remaining))
guess = input(' Guess a letter or whole word?')
return guess
def print_word_with_blanks(word):
display_word = ''
for letter in word:
if guessed_letters.find(letter) > -1:
# letter found
display_word = display_word + letter
else:
# letter not found
display_word = display_word + '-'
print(display_word)
def process_guess(guess, word):
if len(guess) > 1 and len(guess) == len(word):
return whole_word_guess(guess, word)
else:
return single_letter_guess(guess, word)
def whole_word_guess(guess, word):
global lives_remaining
if guess.lower() == word.lower():
return True
else:
lives_remaining = lives_remaining - 1
return False
def single_letter_guess(guess, word):
global guessed_letters
global lives_remaining
if word.find(guess) == -1:
# letter guess was incorrect
lives_remaining = lives_remaining - 1
guessed_letters = guessed_letters + guess.lower()
if all_letters_guessed(word):
return True
return False
def all_letters_guessed(word):
for letter in word:
if guessed_letters.find(letter.lower()) == -1:
return False
return True
play() | {
"repo_name": "simonmonk/prog_pi_ed2",
"path": "04_09_hangman_full_solution.py",
"copies": "1",
"size": "1643",
"license": "mit",
"hash": 5408477614533430000,
"line_mean": 21.2162162162,
"line_max": 50,
"alpha_frac": 0.6828971394,
"autogenerated": false,
"ratio": 2.9339285714285714,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4116825710828571,
"avg_score": null,
"num_lines": null
} |
# 042. Trapping Rain Water
# Recursive solution; without tail-call optimization it may raise
# RuntimeError: maximum recursion depth exceeded
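# The approach (sketch): keep a stack of (height, index) pairs; when a taller
# bar arrives, pop the lower bars and add the water trapped above each popped
# bar, i.e. (lower bounding height - popped height) * gap width.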
import unittest
class Solution(object):
def __trap__(self, height, hstack, index):
# Base case, if no height left, no trap water.
if height == []:
return 0
# In the beginning, no height in stack, add one and continue.
if hstack == []:
hstack.append((height[index], index))
return self.__trap__(height[1:], hstack, index+1)
# In the processing, first pop last element.
last_h, last_i = hstack[len(hstack) - 1]
if last_h > height[0]:
hstack.append((height[0], index))
return self.__trap__(height[1:], hstack, index+1)
if last_h < height[0]:
base_h,_ = hstack.pop()
if hstack == []:
hstack.append((height[0], index))
return self.__trap__(height[1:], hstack, index+1)
increment = 0
while hstack != []:
last_h, last_i = hstack[len(hstack) - 1]
if last_h > height[0]:
increment = increment + (height[0] - base_h) * (index - last_i - 1)
break
if last_h == height[0]:
hstack.pop()
increment = increment + (height[0] - base_h) * (index - last_i - 1)
break
if last_h < height[0]:
increment = increment + (last_h - base_h) * (index - last_i - 1)
base_h,_ = hstack.pop()
hstack.append((height[0], index))
return increment + self.__trap__(height[1:], hstack, index+1)
hstack.pop()
hstack.append((height[0],index))
return self.__trap__(height[1:], hstack, index+1)
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
return self.__trap__(height, [], 0)
class SolutionUnitTest(unittest.TestCase):
def setup(self):
pass
def tearDown(self):
pass
def testTrap(self):
s = Solution()
# print "result:", s.__trap__([100,1,5,6,0,6], [], 0)
# print "result:", s.__trap__([0,1,0,2,1,0,1,3,2,1,2,1], [], 0)
self.assertEqual(s.trap([0,1,0,2,1,0,1,3,2,1,2,1]), 6)
self.assertEqual(s.trap([]), 0)
l1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 7, 6, 5, 4, 3, 2, 1, 1, 2, 2, 3, 3, 4,
4, 4, 6, 7, 8, 8, 8, 7, 6, 5, 5, 4, 6, 6, 3, 2, 9, 8, 10, 1, 2]
l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 7, 6, 5, 4, 3, 2, 1, 1, 2, 2, 3, 3, 4]
# self.assertEqual(s.trap(l1), 131)
self.assertEqual(s.trap(l2), 15)
def testTrap2(self):
s = Solution()
l = [5,5,1,7,1,1,5,2,7,6]
self.assertEqual(s.trap(l), 23)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "hanlin-he/UTD",
"path": "leetcode/py/042.rec.py",
"copies": "1",
"size": "2921",
"license": "mit",
"hash": 8189848700201871000,
"line_mean": 29.4270833333,
"line_max": 87,
"alpha_frac": 0.4816843547,
"autogenerated": false,
"ratio": 3.2564102564102564,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9200823356179053,
"avg_score": 0.007454250986240764,
"num_lines": 96
} |
# 042. Trapping Rain Water
# Iterative (stack-based) solution; the recursive version, lacking tail-call
# optimization, may raise RuntimeError: maximum recursion depth exceeded
import unittest
class Solution(object):
def trap(self, height):
"""
:type height: List[int]
:rtype: int
"""
ret = 0
index = 0
hstack = list()
while index < len(height):
cur = height[index]
try:
if hstack == []:
continue
last_h, _ = hstack[len(hstack) - 1]
if last_h > cur:
continue
if last_h == cur:
hstack.pop()
continue
base_h, _ = hstack.pop()
if hstack == []:
continue
while hstack != []:
last_h, last_i = hstack[len(hstack) - 1]
if last_h > cur:
ret += (cur - base_h) * (index - last_i - 1)
break
if last_h == cur:
hstack.pop()
ret += (cur - base_h) * (index - last_i - 1)
break
if last_h < cur:
ret += (last_h - base_h) * (index - last_i - 1)
base_h,_ = hstack.pop()
finally:
hstack.append((cur, index))
index += 1
return ret
class SolutionUnitTest(unittest.TestCase):
def setup(self):
pass
def tearDown(self):
pass
def testTrap(self):
s = Solution()
# print "result:", s.__trap__([100,1,5,6,0,6], [], 0)
# print "result:", s.__trap__([0,1,0,2,1,0,1,3,2,1,2,1], [], 0)
self.assertEqual(s.trap([0,1,0,2,1,0,1,3,2,1,2,1]), 6)
self.assertEqual(s.trap([]), 0)
l1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 7, 6, 5, 4, 3, 2, 1, 1, 2, 2, 3, 3, 4,
4, 4, 6, 7, 8, 8, 8, 7, 6, 5, 5, 4, 6, 6, 3, 2, 9, 8, 10, 1, 2]
l2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 7, 6, 5, 4, 3, 2, 1, 1, 2, 2, 3, 3, 4]
# self.assertEqual(s.trap(l1), 131)
self.assertEqual(s.trap(l2), 15)
def testTrap2(self):
s = Solution()
l = [5,5,1,7,1,1,5,2,7,6]
self.assertEqual(s.trap(l), 23)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "hanlin-he/UTD",
"path": "leetcode/py/042.py",
"copies": "1",
"size": "2392",
"license": "mit",
"hash": 9176467835250468000,
"line_mean": 26.8139534884,
"line_max": 79,
"alpha_frac": 0.4071906355,
"autogenerated": false,
"ratio": 3.3737658674189,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42809565029188995,
"avg_score": null,
"num_lines": null
} |
#04
import sqlite3
def create(): # create the table
    con = sqlite3.connect("aledger.db")
    cur = con.cursor()
    cur.execute("CREATE TABLE IF NOT EXISTS account(account_num TEXT PRIMARY KEY, name TEXT, password TEXT, money INTEGER)")
    # create the account table; account_num is the primary key
    con.commit()
    con.close()
def viewall(): # view all data in the DB
    con = sqlite3.connect("aledger.db")
    cur = con.cursor()
    cur.execute("SELECT * FROM account") # select all rows in the account table
    rows = cur.fetchall() # return all rows
    con.close()
    return rows
def search(account_num="",name="",password=""): # find the matching data using the given arguments
    con = sqlite3.connect("aledger.db")
    cur = con.cursor()
    cur.execute("SELECT * FROM account WHERE account_num=? AND name=? AND password=?",(account_num,name,password))
    # select the rows whose fields all match the given arguments
    rows = cur.fetchall() # return all matching rows
    con.close()
    return rows
def add(account_num, name, password, money): # add a record
    con = sqlite3.connect("aledger.db")
    cur = con.cursor()
    cur.execute("INSERT INTO account VALUES(?,?,?,?)",(account_num, name, password, money))
    # store the given values in the DB
    con.commit()
    con.close()
def update(name,account_num, money): # update a record for deposits and withdrawals
    con = sqlite3.connect("aledger.db")
    cur = con.cursor()
    cur.execute("UPDATE account SET money = '%d' WHERE name = '%s' AND account_num = '%s'" % (money, name, account_num))
    # save the new balance in the DB
    con.commit()
    con.close()
def update2(account_num, money): # update a record for account transfers
    con = sqlite3.connect("aledger.db")
    cur = con.cursor()
    cur.execute("UPDATE account SET money = '%d' WHERE account_num = '%s'" % (money, account_num))
    # save the new balance in the DB
    con.commit()
    con.close()
def delete(account_num): # delete a record
    con = sqlite3.connect("aledger.db")
    cur = con.cursor()
    cur.execute("DELETE FROM account WHERE account_num=?",(account_num,))
    # delete the record with the given account number
    con.commit()
    con.close()
create()
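# Example usage (illustrative only; the account number, name and password
# below are made-up values):
# add('110-1234', 'Alice', 'pw123', 1000)     # open an account holding 1000
# update('Alice', '110-1234', 1500)           # deposit: balance becomes 1500
# print(search('110-1234', 'Alice', 'pw123'))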
#print(search(category="social")) | {
"repo_name": "Martialhimanshu/Account-management-system",
"path": "ledger_bk.py",
"copies": "1",
"size": "2413",
"license": "mpl-2.0",
"hash": -4778451989475139000,
"line_mean": 32.6,
"line_max": 124,
"alpha_frac": 0.6466501241,
"autogenerated": false,
"ratio": 1.9300766283524904,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.30767267524524905,
"avg_score": null,
"num_lines": null
} |
# 04_thermometer_f.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
from Tkinter import * # tkinter provides the graphical user interface (GUI)
import RPi.GPIO as GPIO
import time, math
C = 0.38 # uF - Tweak this value around 0.33 to improve accuracy
R1 = 1000 # Ohms
B = 3800.0 # The thermistor constant - change this for a different thermistor
R0 = 1000.0 # The resistance of the thermistor at 25C -change for different thermistor
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# This project uses a thermistor, a component whose resistance varies with the temperature.
# To measure its resistance, the code records the time it takes for a capacitor to fill
# when supplied by a current passing through the resistor. The lower the resistance the faster
# it fills up.
#
# You can think of a capacitor as a tank of electricity, and as it fills with charge, the voltage
# across it increases. We cannot measure that voltage directly, because the Raspberry Pi
# does not have an analog to digital convertor (ADC or analog input). However, we can time how long it
# takes for the capacitor to fill with charge to the extent that it gets above the 1.65V or so
# that counts as being a high digital input.
#
# For more information on this technique take a look at:
# learn.adafruit.com/basic-resistor-sensor-reading-on-raspberry-pi
# The code here is based on that in the Raspberry Pi Cookbook (Recipes 12.1 to 12.3)
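# A rough sketch of the maths behind this timing trick (not used by the code
# below): an RC charging curve follows V(t) = Vcc * (1 - exp(-t / (R * C))).
# If the digital-HIGH threshold is taken as roughly half of the 3.3V supply,
# the charge time works out to about t = R * C * ln(2). The helper below is
# purely illustrative; the conversion factors inside read_resistance() further
# down are empirical.
def estimated_charge_time_us(r_ohms, c_uf):
    # with R in ohms and C in microfarads the result comes out in microseconds
    return r_ohms * c_uf * math.log(2)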
# Pin a charges the capacitor through a fixed 1k resistor and the thermistor in series
# pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
# return the time taken for the voltage on the capacitor to count as a digital input HIGH
# that means around 1.65V
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000 # microseconds
# Take an analog reading as the time taken to charge after first discharging the capacitor
def analog_read():
discharge()
t = charge_time()
discharge()
return t
# Convert the time taken to charge the capacitor into a value of resistance
# To reduce errors, do it lots of times and take the average.
def read_resistance():
n = 10
total = 0;
for i in range(0, n):
total = total + analog_read()
t = total / float(n)
T = t * 0.632 * 3.3
r = (T / C) - R1
return r
def read_temp_c():
R = read_resistance()
t0 = 273.15 # 0 deg C in K
t25 = t0 + 25.0 # 25 deg C in K
# Steinhart-Hart equation - Google it
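    # (B-parameter form: 1/T = 1/T25 + (1/B) * ln(R/R0), with temperatures in
    #  kelvin; inv_T below is the 1/T term, then converted back to deg C)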
inv_T = 1/t25 + 1/B * math.log(R/R0)
T = (1/inv_T - t0)
return T
# group together all of the GUI code into a class called App
class App:
# this function gets called when the app is created
def __init__(self, master):
self.master = master
# A frame holds the various GUI controls
frame = Frame(master)
frame.pack()
label = Label(frame, text='Temp F', font=("Helvetica", 32))
label.grid(row=0)
self.reading_label = Label(frame, text='12.34', font=("Helvetica", 110))
self.reading_label.grid(row=1)
self.update_reading()
# Update the temperature reading
def update_reading(self):
temp_c = read_temp_c()
temp_f = temp_c * 9.0 / 5.0 + 32
reading_str = "{:.2f}".format(temp_f)
self.reading_label.configure(text=reading_str)
self.master.after(500, self.update_reading) # schedule yourself to be called after 0.5 seconds
# Set the GUI running, give the window a title, size and position
root = Tk()
root.wm_title('Thermometer')
app = App(root)
root.geometry("400x300+0+0")
try:
root.mainloop()
finally:
print("Cleaning up")
GPIO.cleanup()
| {
"repo_name": "simonmonk/pi_starter_kit",
"path": "04_thermometer_f.py",
"copies": "1",
"size": "4117",
"license": "mit",
"hash": -646094627912686100,
"line_mean": 33.8898305085,
"line_max": 102,
"alpha_frac": 0.6803497692,
"autogenerated": false,
"ratio": 3.2726550079491257,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.939313097205993,
"avg_score": 0.011974761017839083,
"num_lines": 118
} |
# 04_thermometer.py
# From the code for the Electronics Starter Kit for the Raspberry Pi by MonkMakes.com
from Tkinter import * # tkinter provides the graphical user interface (GUI)
import RPi.GPIO as GPIO
import time, math
C = 0.38 # uF - Tweak this value around 0.33 to improve accuracy
R1 = 1000 # Ohms
B = 3800.0 # The thermistor constant - change this for a different thermistor
R0 = 1000.0 # The resistance of the thermistor at 25C -change for different thermistor
# Configure the Pi to use the BCM (Broadcom) pin names, rather than the pin positions
GPIO.setmode(GPIO.BCM)
# This project uses a thermistor, a component whose resistance varies with the temperature.
# To measure its resistance, the code records the time it takes for a capacitor to fill
# when supplied by a current passing through the resistor. The lower the resistance the faster
# it fills up.
#
# You can think of a capacitor as a tank of electricity, and as it fills with charge, the voltage
# across it increases. We cannot measure that voltage directly, because the Raspberry Pi
# does not have an analog to digital convertor (ADC or analog input). However, we can time how long it
# takes for the capacitor to fill with charge to the extent that it gets above the 1.65V or so
# that counts as being a high digital input.
#
# For more information on this technique take a look at:
# learn.adafruit.com/basic-resistor-sensor-reading-on-raspberry-pi
# The code here is based on that in the Raspberry Pi Cookbook (Recipes 12.1 to 12.3)
# Pin a charges the capacitor through a fixed 1k resistor and the thermistor in series
# pin b discharges the capacitor through a fixed 1k resistor
a_pin = 18
b_pin = 23
# empty the capacitor ready to start filling it up
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
# return the time taken for the voltage on the capacitor to count as a digital input HIGH
# that means around 1.65V
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000 # microseconds
# Take an analog reading as the time taken to charge after first discharging the capacitor
def analog_read():
discharge()
t = charge_time()
discharge()
return t
# Convert the time taken to charge the capacitor into a value of resistance
# To reduce errors, do it lots of times and take the average.
def read_resistance():
n = 10
total = 0;
for i in range(0, n):
total = total + analog_read()
t = total / float(n)
T = t * 0.632 * 3.3
r = (T / C) - R1
return r
def read_temp_c():
R = read_resistance()
t0 = 273.15 # 0 deg C in K
t25 = t0 + 25.0 # 25 deg C in K
# Steinhart-Hart equation - Google it
inv_T = 1/t25 + 1/B * math.log(R/R0)
T = (1/inv_T - t0)
return T
# group together all of the GUI code into a class called App
class App:
# this function gets called when the app is created
def __init__(self, master):
self.master = master
# A frame holds the various GUI controls
frame = Frame(master)
frame.pack()
label = Label(frame, text='Temp C', font=("Helvetica", 32))
label.grid(row=0)
self.reading_label = Label(frame, text='12.34', font=("Helvetica", 110))
self.reading_label.grid(row=1)
self.update_reading()
# Update the temperature reading
def update_reading(self):
temp_c = read_temp_c()
reading_str = "{:.2f}".format(temp_c)
self.reading_label.configure(text=reading_str)
self.master.after(500, self.update_reading) # schedule yourself to be called after 0.5 seconds
# Set the GUI running, give the window a title, size and position
root = Tk()
root.wm_title('Thermometer')
app = App(root)
root.geometry("400x300+0+0")
try:
root.mainloop()
finally:
print("Cleaning up")
GPIO.cleanup()
| {
"repo_name": "simonmonk/pi_starter_kit",
"path": "04_thermometer.py",
"copies": "1",
"size": "4074",
"license": "mit",
"hash": 5862957425255477000,
"line_mean": 33.8205128205,
"line_max": 102,
"alpha_frac": 0.6833578792,
"autogenerated": false,
"ratio": 3.2907915993537964,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4474149478553796,
"avg_score": null,
"num_lines": null
} |
# 04_thermometer.py
import RPi.GPIO as GPIO
import time, math
from RgbLedGPIO import RgbLed
GPIO.setmode(GPIO.BCM)
a_pin = 25
b_pin = 8
buzzer_pin = 7
fiddle_factor = 0.9;
def discharge():
GPIO.setup(a_pin, GPIO.IN)
GPIO.setup(b_pin, GPIO.OUT)
GPIO.output(b_pin, False)
time.sleep(0.01)
def charge_time():
GPIO.setup(b_pin, GPIO.IN)
GPIO.setup(a_pin, GPIO.OUT)
GPIO.output(a_pin, True)
t1 = time.time()
while not GPIO.input(b_pin):
pass
t2 = time.time()
return (t2 - t1) * 1000000
def analog_read():
discharge()
return charge_time()
def read_resistance():
n = 100
total = 0;
for i in range(1, n):
total = total + analog_read()
reading = total / float(n)
resistance = reading * 6.05 - 939
return resistance
def temp_from_r(R):
B = 3800.0
R0 = 1000.0
t0 = 273.15
t25 = t0 + 25.0
inv_T = 1/t25 + 1/B * math.log(R/R0)
T = 1/inv_T - t0
return T * fiddle_factor
def buzz(pitch, duration):
    GPIO.setup(buzzer_pin, GPIO.OUT)
period = 1.0 / pitch
delay = period / 2
cycles = int(duration * pitch)
for i in range(cycles):
GPIO.output(buzzer_pin, True)
time.sleep(delay)
GPIO.output(buzzer_pin, False)
time.sleep(delay)
try:
led = RgbLed(18, 23, 24)
led.changeColour(0,100,0)
led.toggle()
while True:
temp_c = temp_from_r(read_resistance())
if temp_c > 25:
led.changeColour(100,0,0)
buzz(500, 0.3)
else:
led.changeColour(0,100,0)
print "{:.2f}".format(temp_c)
except KeyboardInterrupt:
print 'end'
finally:
GPIO.cleanup()
| {
"repo_name": "jakubmikos/rpi-python",
"path": "thermometer_cmd_line.py",
"copies": "1",
"size": "1679",
"license": "mit",
"hash": -2407122687842134500,
"line_mean": 20.253164557,
"line_max": 47,
"alpha_frac": 0.5848719476,
"autogenerated": false,
"ratio": 2.7569786535303775,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.38418506011303777,
"avg_score": null,
"num_lines": null
} |
# 05-07-03
#v1.0.3
# ordereddict.py
# A class that is a drop in replacement for an ordinary dictionary.
# methods that would normally return values/items in a random manner return them
# in an ordered and consistent manner.
# Copyright Michael Foord
# Not for use in commercial projects without permission. (Although permission will probably be given).
# If you use this code in a project then please credit me and include a link back.
# If you release the project then let me know (and include this message with my code !)
# No warranty express or implied for the accuracy, fitness to purpose or otherwise for this code....
# Use at your own risk !!!
# E-mail or michael AT foord DOT me DOT uk
# Maintained at www.voidspace.org.uk/atlantibots/pythonutils.html
import sys # purely used for the version_info
####################################################################################
class dIter:
"""Implements a basic dictionary iterator with 3 modes.
If mode=0 (default) returns the keys (by returns I mean iterates over !)
If mode=1 returns the values
If mode=-1 returns the items - (key, value) tuple
mode=0 equates to the iterkeys method or __iter__ (for entry in dict)
mode=1 equates to the itervalues method.
mode=-1 equates to the iteritems method.
"""
def __init__(self, indict, mode=0):
self.thedict = indict
self.inseq = indict.keys()
self.index = 0
self.mode = mode
def next(self):
if self.index >= len(self.inseq): raise StopIteration
thekey = self.inseq[self.index]
self.index += 1
if not self.mode:
return thekey
elif self.mode == 1:
return self.thedict[thekey]
elif self.mode == -1:
return (thekey, self.thedict[thekey])
def __iter__(self):
return self
####################################################################################
class oDict:
"""An ordered dictionary. ordereddict = oDict(indict, order)"""
__doc__ = """ordereddict = oDict({'a' : 1, 'b' : 2}, True)
The dictionary can be initialised with an optional dictionary passed in as the first argument,
You can also pass in an order parameter which chooses the sort method.
order=True (default) means all ordered methods use the normal sort function.
order=False means all ordered methods use the reverse sort function.
order=None means no sort function.
keys, items, iter and pop methods are ordered - based on the key.
The ordering is implemented in the keys() function.
The iterators are returned using the custom iterator dIter (which will work in three different ways)."""
def __init__(self, indict={}, order=True):
self._thedict = {}
self._thedict.update(indict)
self._order = order
def __setitem__(self, item, value):
"""Setting a keyword"""
self._thedict[item] = value
def __getitem__(self, item):
"""Fetching a value."""
return self._thedict[item]
def __delitem__(self, item):
"""Deleting a keyword"""
del self._thedict[item]
def pop(self, item=[], default=None):
"""Emulates the pop method.
If item is not supplied it pops the first value in the dictionary.
This is different from the normal dict pop method."""
if item != []:
return self._thedict.pop(item, default)
else:
try:
return self._thedict.pop(self.keys()[0])
except IndexError:
raise KeyError(': \'pop(): dictionary is empty\'')
def popitem(self):
"""Emulates the popitem method - pops the first one in the list based on the chosen sort method."""
try:
theitem = self.keys()[0]
except IndexError:
raise KeyError(': \'popitem(): dictionary is empty\'')
return (theitem, self._thedict.pop(theitem))
def has_key(self, item):
"""Does the dictionary have this key."""
return self._thedict.has_key(item) # does the key exist
def __contains__(self, item):
"""Does the dictionary have this key."""
return self._thedict.has_key(item) # does the key exist
def setdefault(self, item, default=None):
"""Fetch an item if it exists, otherwise set the item to default and return default."""
return self._thedict.setdefault(item, default)
def get(self, item, default=None):
"""Fetch the item if it exists, otherwise return default."""
return self._thedict.get(item, default)
def update(self, indict):
"""Update the current oDdict with the dictionary supplied."""
self._thedict.update(indict)
def copy(self):
"""Create a new oDict object that is a copy of this one."""
return oDict(self._thedict)
def dict(self):
"""Create a dictionary version of this oDict."""
return dict.copy(self._thedict)
def clear(self):
"""Clear oDict."""
self._thedict.clear()
def __repr__(self):
"""An oDict version of __repr__ """
return 'oDict(' + self._thedict.__repr__() + ')'
def keys(self):
"""Return an ordered list of the keys of this oDict."""
thelist = self._thedict.keys()
if self._order == True:
thelist.sort()
elif self._order == False:
thelist.sort()
thelist.reverse()
return thelist
def items(self):
"""Like keys() but returns a list of (key, value)"""
return [(key, self._thedict[key]) for key in self.keys()]
def values(self):
"""Like keys() but returns an ordered list of values (ordered by key)"""
return [self._thedict[key] for key in self.keys()]
def fromkeys(cls, *args):
"""Return a new oDict initialised from the values supplied.
If sys.version_info > 2.2 this becomes a classmethod."""
return oDict(*args)
if (sys.version_info[0] + sys.version_info[1]/10.0) >= 2.2:
fromkeys = classmethod(fromkeys)
def __len__(self):
return len(self._thedict)
def __cmp__(self, other):
if hasattr(other, '_thedict'):
other = other._thedict
return cmp(self._thedict, other)
def __eq__(self, other):
if hasattr(other, '_thedict'):
other = other._thedict
return self._thedict.__eq__(other)
def __ne__(self, other):
if hasattr(other, '_thedict'):
other = other._thedict
return self._thedict.__ne__(other)
def __gt__(self, other):
if hasattr(other, '_thedict'):
other = other._thedict
return self._thedict.__gt__(other)
def __ge__(self, other):
if hasattr(other, '_thedict'):
other = other._thedict
return self._thedict.__ge__(other)
def __lt__(self, other):
if hasattr(other, '_thedict'):
other = other._thedict
return self._thedict.__lt__(other)
def __le__(self, other):
if hasattr(other, '_thedict'):
other = other._thedict
return self._thedict.__le__(other)
def __hash__(self):
"""This just raises a TypeError."""
self._thedict.__hash__()
def __iter__(self):
"""Return an ordered iterator for the oDict."""
return dIter(self)
def iteritems(self):
"""Return an ordered iterator over the the oDict - returning (key, value) tuples."""
return dIter(self, -1)
def iterkeys(self):
"""Return an ordered iterator over the keys the oDict."""
return dIter(self)
def itervalues(self):
"""Return an ordered iterator over the the values of the oDict - ordered by key."""
return dIter(self, 1)
def __str__(self):
"""An oDict version of __str__ """
return 'oDict(' + self._thedict.__str__() + ')'
############################################################################################
if __name__ == '__main__':
dictmethods = ['__class__', '__cmp__', '__contains__', '__delattr__', '__delitem__', '__doc__', '__eq__', '__ge__', '__getattribute__', '__getitem__', '__gt__', '__hash__', '__init__', '__iter__', '__le__', '__len__', '__lt__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__setitem__', '__str__', 'clear', 'copy', 'fromkeys', 'get', 'has_key', 'items', 'iteritems', 'iterkeys', 'itervalues', 'keys', 'pop', 'popitem', 'setdefault', 'update', 'values']
odict = oDict({'x' : 'a', 'y' : 'b', 'z' : 'c'})
print 'print oDict.__doc__ \n', oDict.__doc__
print
print
print 'Attribute Test.\nTesting against the full attribute list for a normal dictionary. (40 attributes)'
for entry in dictmethods:
if not hasattr(odict, entry):
print 'oDict doesn\'t have attribute \'%s\'' % entry
print 'See the docs as to why those attributes are missing !!'
print 'Method test.\nIf nothing prints below this then all the tests passed !\n'
dlist = []
for key in odict.iterkeys(): dlist.append(key)
if dlist != ['x', 'y', 'z']: print 'Order fail in iterkeys method.'
dlist = []
for value in odict.itervalues(): dlist.append(value)
if dlist != ['a', 'b', 'c']: print 'Order fail in itervalues method.'
dlist = []
for item in odict.iteritems(): dlist.append(item)
if dlist != [('x','a'), ('y','b'), ('z','c')]: print 'Order fail in iteritems method.'
if not odict.keys() == ['x', 'y', 'z']: print 'Order fail in keys method.'
if not odict.values() == ['a', 'b', 'c']: print 'Order fail in values method.'
if not odict.items() == [('x','a'), ('y','b'), ('z','c')]: print 'Order fail in items method.'
dlist = []
while odict:
dlist.append(odict.pop())
if len(dlist) > 4:
print 'Fail in pop to remove items'
break
if dlist != ['a', 'b', 'c']: print 'Order fail in pop method.'
if not odict.fromkeys({'test':'z', 'fish':4}, False) == oDict({'test':'z', 'fish':4}, False): print 'Odd behaviour in fromkeys method.'
"""
oDict is an ordered dictionary.
It behaves as a drop in replacement for an ordinary dictionary in almost every circumstance.
Many dictionary methods normally return a random value, or return values in a random order.
Those methods in oDict return values in an ordered and consistent manner.
The ordering is applied in the keys() method and uses the Python sort() method of lists to do the sorting.
You can additionally set it to apply the reverse method by passing in a parameter when you create the instance.
See the oDict docstring for more details.
An ordered dictionary is useful where, for example, a consistent return order for the iterators and pop methods is helpful.
I use it in FSDM markup structures (describing files and directories in a file structure) so that the markup files are built in a consistent order.
Methods which are now ordered are :
pop, popitem, keys, items, values
iteritems, iterkeys, itervalues, __iter__ ( for key in odict )
As oDict has methods defined for almost all the dictionary methods, and also has custom iterators,
it would be a good template for anyone else who wanted to create a new dictionary type with custom access methods etc.
Doesn't subclass dict or use the iter function, so I think it might be compatible with versions of Python pre 2.2?
Extra Methods, Not in a Normal dictionary :
'dict'
'pop' is slightly different to the normal dictionary method; it can be used without a parameter.
'str' and '__repr__' are modified to indicate this is an oDict rather than just a dictionary.
A lot of the methods that would return new dictionaries (copy, fromkeys) return new oDicts (hence the new dict method, which returns an ordinary dictionary copy of the oDict)
'Not Implemented Yet' Methods Include :
'__class__' : The default is fine.
'__getattribute__', '__setattr__', '__delattr__' : What the heck are these used for in a dict, probably raise errors. The standard 'classic' methods will be fine for us
'__new__' : not a new style class so don't need it
'__reduce__', '__reduce_ex__', : To do with pickling, I don't understand, hopefully Python can do something sensible anyway
The only time oDict won't act as a replacement for a dict is if the isinstance test is used.
In Python 2.2 you can fix this by making oDict a subclass of dict.
We could make oDict a bit more lightweight by overloading getattr.
Several methods that just call the same method on the underlying _thedict could all be replaced by one method that called getattr.
As one of the aims of oDict was to create a full dictionary like object, with all the methods defined, I won't do this. (__getitem__ and __setitem__ are two methods we could do away with).
TODO/ISSUES
CHANGELOG
05-07-04 Version 1.0.3
Fixed a bug in get.
Use some slightly more pythonic hasattr tests rather than isinstance.
Got rid of the slightly odd dum class.
Got rid of my suggested, nonsensical, __class__ stuff.
Changed the tests to use keys that would naturally be unordered !
20-06-04 Version 1.0.2
Slight change to the dum class, to give it a __cmp__ method.
17-06-04 Version 1.0.1
clear method is slightly better. (clears dictionary rather than rebinds)
Sorted out a few index errors where empty dictionaries are used. (raise KeyError rather than IndexError)
Made fromkeys a classmethod where Python Version > 2.2
16-06-04 Version 1.0.0
First version, appears to work fine.
"""
| {
"repo_name": "tschalch/pyTray",
"path": "src/util/ordereddict.py",
"copies": "1",
"size": "14100",
"license": "bsd-3-clause",
"hash": -3320459828320156000,
"line_mean": 39.7159763314,
"line_max": 491,
"alpha_frac": 0.5981560284,
"autogenerated": false,
"ratio": 4.086956521739131,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.518511255013913,
"avg_score": null,
"num_lines": null
} |
# 05.10.2007, c
# last revision: 25.02.2008
from __future__ import absolute_import
from sfepy import data_dir
from sfepy.mechanics.matcoefs import stiffness_from_lame
filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
# Whole domain $Y$.
region_1000 = {
'name' : 'Y',
'select' : 'all',
}
# Domain $Y_1$.
region_1 = {
'name' : 'Y1',
'select' : 'cells of group 1',
}
# Domain $Y_2$.
region_2 = {
'name' : 'Y2',
'select' : 'cells of group 2',
}
region_10 = {
'name' : 'Bottom',
'select' : 'vertices in (z < %f)' % -0.499,
'kind' : 'facet',
}
region_11 = {
'name' : 'Top',
'select' : 'vertices in (z > %f)' % 0.499,
'kind' : 'facet',
}
material_1 = {
'name' : 'solid',
'values' : {
'D' : stiffness_from_lame(3, 1e1, 1e0),
'density' : 1e-1,
},
}
field_1 = {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : 'vector',
'region' : 'Y',
'approx_order' : 1,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
ebc_1 = {
'name' : 'Fix',
'region' : 'Bottom',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.[0,1]' : 0.2, 'u.2' : 0.5},
}
lcbc_1 = {
'name' : 'rigid1',
'region' : 'Y2',
'dofs' : {'u.all' : None},
'kind' : 'rigid',
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'balance' : """dw_lin_elastic.i.Y( solid.D, v, u ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from tests_basic import TestLCBC
output_name = 'test_lcbc_3d.vtk'
##
# 03.10.2007, c
class Test( TestLCBC ):
pass
| {
"repo_name": "lokik/sfepy",
"path": "tests/test_lcbc_3d.py",
"copies": "5",
"size": "1935",
"license": "bsd-3-clause",
"hash": 7444949809907624000,
"line_mean": 15.9736842105,
"line_max": 64,
"alpha_frac": 0.484754522,
"autogenerated": false,
"ratio": 2.3713235294117645,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5356078051411765,
"avg_score": null,
"num_lines": null
} |
# 05.10.2007, c
# last revision: 25.02.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/3d/special/cube_sphere.mesh'
# Whole domain $Y$.
region_1000 = {
'name' : 'Y',
'select' : 'all',
}
# Domain $Y_1$.
region_1 = {
'name' : 'Y1',
'select' : 'cells of group 1',
}
# Domain $Y_2$.
region_2 = {
'name' : 'Y2',
'select' : 'cells of group 2',
}
region_10 = {
'name' : 'Bottom',
'select' : 'vertices in (z < %f)' % -0.499,
'kind' : 'facet',
}
region_11 = {
'name' : 'Top',
'select' : 'vertices in (z > %f)' % 0.499,
'kind' : 'facet',
}
material_1 = {
'name' : 'solid',
'values' : {
'lam' : 1e1,
'mu' : 1e0,
'density' : 1e-1,
},
}
field_1 = {
'name' : '3_displacement',
'dtype' : 'real',
'shape' : 'vector',
'region' : 'Y',
'approx_order' : 1,
}
variable_1 = {
'name' : 'u',
'kind' : 'unknown field',
'field' : '3_displacement',
'order' : 0,
}
variable_2 = {
'name' : 'v',
'kind' : 'test field',
'field' : '3_displacement',
'dual' : 'u',
}
ebc_1 = {
'name' : 'Fix',
'region' : 'Bottom',
'dofs' : {'u.all' : 0.0},
}
ebc_2 = {
'name' : 'Load',
'region' : 'Top',
'dofs' : {'u.[0,1]' : 0.2, 'u.2' : 0.5},
}
lcbc_1 = {
'name' : 'rigid1',
'region' : 'Y2',
'dofs' : {'u.all' : None},
'kind' : 'rigid',
}
integral_1 = {
'name' : 'i',
'order' : 2,
}
equations = {
'balance' : """dw_lin_elastic_iso.i.Y( solid.lam, solid.mu, v, u ) = 0""",
}
solver_0 = {
'name' : 'ls',
'kind' : 'ls.scipy_direct',
}
solver_1 = {
'name' : 'newton',
'kind' : 'nls.newton',
'i_max' : 1,
'eps_a' : 1e-10,
}
from tests_basic import TestLCBC
output_name = 'test_lcbc_3d.vtk'
##
# 03.10.2007, c
class Test( TestLCBC ):
pass
| {
"repo_name": "RexFuzzle/sfepy",
"path": "tests/test_lcbc_3d.py",
"copies": "1",
"size": "1853",
"license": "bsd-3-clause",
"hash": -1226128184962196000,
"line_mean": 15.2543859649,
"line_max": 78,
"alpha_frac": 0.4624932542,
"autogenerated": false,
"ratio": 2.3575063613231553,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33199996155231554,
"avg_score": null,
"num_lines": null
} |
# 053 - Maximum Subarray (Easy)
# https://leetcode.com/problems/maximum-subarray/submissions/
# Use Kadane's Algorithm!
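# Sketch of the recurrence: best_ending_here[i] = max(nums[i],
# best_ending_here[i-1] + nums[i]), and the answer is the max over all i.
# e.g. for [-2,1,-3,4,-1,2,1,-5,4] the best subarray is [4,-1,2,1], sum 6.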
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# Find the max subarray ending at each index.
# The max subarray ending at 0th index is just the value.
# What if array is empty?
# Apparently Leetcode judge isn't checking this case, the manual tester
# tells me that for input [] the result should be "-2147483648" but I
# return 0.
if len(nums) == 0:
return 0
# Now start computing...
max_local_subarrays = []
for idx in range(len(nums)):
value = nums[idx]
# The max subarray ending at current position will be either
# just the current value, or the current value + previous max
# local subarray.
if idx == 0:
max_local_subarrays.append(value)
else:
max_local_subarrays.append(max(value, value + max_local_subarrays[-1]))
return max(max_local_subarrays)
# Can also do it without extra arrays...
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# Find the max subarray ending at each index.
# The max subarray ending at 0th index is just the value.
# What if array is empty?
# Apparently Leetcode judge isn't checking this case, the manual tester
# tells me that for input [] the result should be "-2147483648" but I
# return 0.
if len(nums) == 0:
return 0
# Now start computing...
max_local_subarray_sum = nums[0]
max_global_subarray_sum = nums[0]
for idx in range(1, len(nums)):
value = nums[idx]
# The max subarray ending at current position will be either
# just the current value, or the current value + previous max
# local subarray.
max_local_subarray_sum = max(max_local_subarray_sum + value, value)
max_global_subarray_sum = max(max_local_subarray_sum, max_global_subarray_sum)
return max_global_subarray_sum
| {
"repo_name": "zubie7a/Algorithms",
"path": "LeetCode/01_Easy/lc_053.py",
"copies": "1",
"size": "2298",
"license": "mit",
"hash": 740295592660026200,
"line_mean": 33.8181818182,
"line_max": 90,
"alpha_frac": 0.5857267189,
"autogenerated": false,
"ratio": 4.0174825174825175,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5103209236382518,
"avg_score": null,
"num_lines": null
} |
# 053. Maximum Subarray
# The D&C solution, however, python is not suitable for recursion.
import unittest
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) == 0:
return 0
if len(nums) == 1:
return nums[0]
mid = len(nums) / 2
lmax = self.maxSubArray(nums[:mid])
rmax = self.maxSubArray(nums[mid:])
mmax = self.maxFromStart(nums[mid:]) + self.maxToEnd(nums[:mid])
return max(lmax, rmax, mmax)
def maxFromStart(self, nums):
if nums == []:
return 0
return max(nums[0], nums[0] + self.maxFromStart(nums[1:]))
def maxToEnd(self, nums):
if nums == []:
return 0
return max(nums[-1], nums[-1] + self.maxToEnd(nums[:-1]))
class SolutionUnitTest(unittest.TestCase):
def setup(self):
pass
def tearDown(self):
pass
def testMaxSubArray(self):
s = Solution()
self.assertEqual(s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]), 6)
self.assertEqual(s.maxSubArray([-2, 1]), 1)
self.assertEqual(s.maxSubArray([-1]), -1)
self.assertEqual(s.maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4, -2, 1,
-3, 4, -1, 2, 1, -5, 4, -2, 1, -3, 4, -1, 2, 1, -5, 4, -2, 1, -3,
4, -1, 2, 1, -5, 4]), 9)
if __name__ == '__main__':
unittest.main()
| {
"repo_name": "hanlin-he/UTD",
"path": "leetcode/py/053.dc.py",
"copies": "1",
"size": "1451",
"license": "mit",
"hash": -8517537093766426000,
"line_mean": 27.4509803922,
"line_max": 77,
"alpha_frac": 0.5127498277,
"autogenerated": false,
"ratio": 3.0483193277310923,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9040736284960207,
"avg_score": 0.004066574094177061,
"num_lines": 51
} |