code
stringlengths 20
13.2k
| label
stringlengths 21
6.26k
|
---|---|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4 # !/usr/bin/python
5 # Filename: str_format.py
6 age = 25
7 name = 'Swaroop'
8 print (' {0} is {1} years old'.format(name, age))
9 print('Why is {0} playing with that python?'.format(name))
10
11 print('{:3}'.format('hello'))
12 print('{:<11}'.format('hello'))
13 print('{:>11}'.format('hello'))
14 print('{:>15}'.format('huaibizai'))
15 print('{:^11}'.format('hello'))
16 print('{:^11}'.format('bizi'))
17 print ('{:_>11}'.format('hello'))
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2
3 from lxml import etree
4
5 __author__ = 'florije'
6
7 tree = etree.parse('citylist.xml')
8 root = tree.getroot()
9 for subroot in root:
10 for field in subroot:
11 print 'number:', field.get('d1'), 'name_cn:', field.get('d2'), 'name_en:', field.get(
12 'd3'), 'prov_name:', field.get('d4')
| 11 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 """
5 练习双引号使用
6 """
7
8 print("Kate Austen")
9 print("123 Full Circle Drive")
10 print("Asheville, NC 28899")
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 from Crypto.Hash import SHA512
5
6 import cgi, base64
7 from Crypto.PublicKey import RSA
8 from Crypto.Signature import PKCS1_v1_5
9 from Crypto.Hash import SHA
10
11 h = SHA512.new()
12
13 h.update(b'Hello')
14 print h.hexdigest()
15
16
17 """
18 先生成rsa的公私钥:
19 打开控制台,输入 openssl
20 再输入 genrsa -out private.pem 1024 来生成私钥
21 接着输入 rsa -in private.pem -pubout -out public.pem 来生成公钥
22 """
23
24 # 私钥文件
25 priKey = """-----BEGIN RSA PRIVATE KEY-----
26 MIICXgIBAAKBgQD4c1uYjQfxsxD/RWLWYPKUN1QPWIp1Vu/K3Do6DFh2tyhnN8PD
27 iMzNoIKh5f6QtRU45uHzQtw+WSVCks6hJvAWUAmPZEk7T0Wfl9yny4vJkkbgA5/E
28 UIVvHDkyLC5qcDly0n/wC/SF+NC4QGi+N8MTThseMcf06eBpEq2mpXpawwIDAQAB
29 AoGBALzvjOJPrZDiabSWYXlBtHd+M9CPtotRF32fSDBInyV4V6NWxup1p7lfrLfN
30 nW8SJhdljMJfP/mx9SHRXo0yfTQa/0X/bBF73Q6Bg0kNGA8SSSvrYTeh9SPTqrae
31 a6R2Y8WEvtcnTa4NE1fNE00kYjSGpC8Iit5dkYTZ5dBY0CjBAkEA//8D0HMnSO1E
32 iEL0AyZQyAWnkWOTVHBhKz4qGaJBo2tyUt8WcLyxUD2Wi31ni3EdGk1UO+QhRIPC
33 6bOkn9TA0QJBAPh0UFkyLG846uTvv5haUG9QfExnLgpeUHotc4u1k6RbiZfHapzt
34 8DKByS3+nDGuWD8KCHrgzT5SSpuH1FAgJ1MCQQC8M4plVFNcXPsWRkrIigG3m9ie
35 nZsx59C4DuK6p7wj3ZlV7aa8ySx+dljYQiC+tjEUJie4RDZk/Y1tbOGpk6sRAkEA
36 pc8yJCTI5L0uffTGf82eKnujSHX/kunYeYFFwHJAgwqX69QpAWwFxh85fNmTsdAx
37 kniGqkLGlpXitqNSfNrIgwJAJ6crblQCX9M+8QzgeYP3g+/DD6k8t5BPlNcA1IPs
38 A2w8mlNwTNuDzKqP17yyBBVivDu0sYYczSiOlvg0xhkLig==
39 -----END RSA PRIVATE KEY-----"""
40
41 # 公钥文件
42 pubKey = """-----BEGIN PUBLIC KEY-----
43 MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQD4c1uYjQfxsxD/RWLWYPKUN1QP
44 WIp1Vu/K3Do6DFh2tyhnN8PDiMzNoIKh5f6QtRU45uHzQtw+WSVCks6hJvAWUAmP
45 ZEk7T0Wfl9yny4vJkkbgA5/EUIVvHDkyLC5qcDly0n/wC/SF+NC4QGi+N8MTThse
46 Mcf06eBpEq2mpXpawwIDAQAB
47 -----END PUBLIC KEY-----"""
48
49 '''*RSA签名
50 * data待签名数据
51 * 签名用商户私钥,必须是没有经过pkcs8转换的私钥
52 * 最后的签名,需要用base64编码
53 * return Sign签名
54 '''
55
56
57 def sign(data):
58 key = RSA.importKey(priKey)
59 h = SHA.new(data)
60 signer = PKCS1_v1_5.new(key)
61 signature = signer.sign(h)
62 return base64.b64encode(signature)
63
64
65 '''*RSA验签
66 * data待签名数据
67 * signature需要验签的签名
68 * 验签用支付宝公钥
69 * return 验签是否通过 bool值
70 '''
71
72
73 def verify(data, signature):
74 key = RSA.importKey(pubKey)
75
76 h = SHA.new(data)
77 verifier = PKCS1_v1_5.new(key)
78 if verifier.verify(h, base64.b64decode(signature)):
79 return True
80 return False
81
82
83 raw_data = 'partner="2088701924089318"&seller="774653@qq.com"&out_trade_no="123000"&subject="123456"&body="2010新款NIKE 耐克902第三代板鞋 耐克男女鞋 386201 白红"&total_fee="0.01"¬ify_url="http://notify.java.jpxx.org/index.jsp"'
84 sign_data = sign(raw_data)
85 print "sign_data: ", sign_data
86 print verify(raw_data, sign_data)
| 14 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 '''
5 1.输入20个数
6 2.收入一个列表
7 3.按正负数分入两个列表
8 '''
9
10 source_list = []
11 positive_list = []
12 negative_list = []
13
14 for i in range(20):
15 num = int(input('Please input the number:'))
16 print(num)
17 source_list.append(num)
18
19 print('source list data is:')
20 print(source_list)
21
22 for item in source_list:
23 if item >= 0:
24 positive_list.append(item)
25 else:
26 negative_list.append(item)
27
28 print('positive data is:')
29 print(positive_list)
30 print('negative data is:')
31 print(negative_list)
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 import re
5 import json
6 import time
7 import random
8 import requests
9 import datetime
10 import requesocks
11 from bs4 import BeautifulSoup
12
13
14 def test_url_available(url):
15 proxies = {
16 'http': 'http://{url}'.format(url=url),
17 # 'http': 'http://222.88.142.51:8000',
18 }
19 try:
20 res = requests.get('http://www.luoo.net/', proxies=proxies)
21 except Exception as e:
22 return False
23 return res.status_code == 200
24
25 available_url = 'http://pachong.org/test/single/id/{id}'
26
27 proxies = {
28 "http": "socks5://127.0.0.1:1080",
29 "https": "socks5://127.0.0.1:1080"
30 }
31 user_agent = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36"
32 headers = {
33 "User-Agent": user_agent,
34 "Host": "pachong.org",
35 "Referer": "http://pachong.org/"
36 }
37
38
39 def get_var_list(page_content):
40 # res = 'var chick=958+2894;var bee=6981+6600^chick;var cock=8243+890^bee;var calf=8872+4397^cock;var fish=3571+2760^calf;'
41 # var dog=7544+4134;var worm=3554+5240^dog;var goat=1408+5265^worm;var cat=1981+8542^goat;var fish=7827+5906^cat;
42 # print page_content
43 pattern = re.compile(r'var \w+=\S+;var \w+=\S+;var \w+=\S+;var \w+=\S+;var \w+=\S+;')
44 match = pattern.search(page_content)
45 if match:
46 print match.group()
47 content = match.group()[:-1]
48 var_list = content.split(';') # ['var chick=958+2894']
49
50 map_dict = {}
51
52 # one
53 raw_one = var_list[0] # 'var chick=958+2894'
54 left_raw_one, right_raw_one = raw_one.split('=')[0], raw_one.split('=')[1]
55 ont_var = left_raw_one.split(' ')[1]
56 one_value = sum([int(value) for value in right_raw_one.split('+')])
57 print ont_var, one_value
58 map_dict[ont_var] = one_value
59
60 # two
61 raw_two = var_list[1] # 'var bee=6981+6600^chick'
62 left_raw_two, right_raw_two = raw_two.split('=')[0], raw_two.split('=')[1]
63 two_var = left_raw_two.split(' ')[1]
64 two_value = int(right_raw_two.split('+')[0]) + int(right_raw_two.split('+')[1].split('^')[0]) ^ map_dict.get(right_raw_two.split('+')[1].split('^')[1])
65 print two_var, two_value
66 map_dict[two_var] = two_value
67
68 # three
69 raw_three = var_list[2] # 'var cock=8243+890^bee'
70 left_raw_three, right_raw_three = raw_three.split('=')[0], raw_three.split('=')[1]
71 three_var = left_raw_three.split(' ')[1]
72 three_value = int(right_raw_three.split('+')[0]) + int(right_raw_three.split('+')[1].split('^')[0]) ^ map_dict.get(right_raw_three.split('+')[1].split('^')[1])
73 print three_var, three_value
74 map_dict[three_var] = three_value
75
76 # four
77 raw_four = var_list[3] # var calf=8872+4397^cock;
78 left_raw_four, right_raw_four = raw_four.split('=')[0], raw_four.split('=')[1]
79 four_var = left_raw_four.split(' ')[1]
80 four_value = int(right_raw_four.split('+')[0]) + int(right_raw_four.split('+')[1].split('^')[0]) ^ map_dict.get(right_raw_four.split('^')[1])
81 print four_var, four_value
82 map_dict[four_var] = four_value
83
84 # five
85 raw_five = var_list[4] # 'var fish=3571+2760^calf'
86 left_raw_five, right_raw_five = raw_five.split('=')[0], raw_five.split('=')[1]
87 five_var = left_raw_five.split(' ')[1]
88 five_value = int(right_raw_five.split('+')[0]) + int(right_raw_five.split('+')[1].split('^')[0]) ^ map_dict.get(right_raw_five.split('^')[1])
89 print five_var, five_value
90 map_dict[five_var] = five_value
91
92 return map_dict
93
94
95 def get_urls():
96
97 session = requesocks.Session(headers=headers, proxies=proxies)
98 page_content = session.get('http://pachong.org/').content
99 # print(page_content.content)
100
101 soup = BeautifulSoup(page_content)
102 # print soup.find("table", {"class": "tb"})
103 # var chick=958+2894;var bee=6981+6600^chick;var cock=8243+890^bee;var calf=8872+4397^cock;var fish=3571+2760^calf;
104
105 map_dict = get_var_list(page_content)
106
107 table = soup.find('table', attrs={'class': 'tb'})
108 table_body = table.find('tbody')
109 rows = table_body.find_all('tr')
110 data = []
111 for row in rows:
112 cols = row.find_all('td')
113
114 # url_res = session.get(available_url.format(id=cols[6].find('a').get('name'))).content
115 # json_url_res = json.loads(url_res)
116 json_url_res = {'ret': 0, 'data': {'all': 0}}
117 if json_url_res.get('ret') == 0:
118 raw_port = cols[2].text.strip() # document.write((11476^fish)+298);
119 port = raw_port[raw_port.index('(') + 1: raw_port.index(';') - 1] # (11476^fish)+298
120
121 port = (int(port.split('^')[0][1:]) ^ map_dict.get(port[port.index('^') + 1: port.index(')')])) + int(port.split('+')[1])
122
123 tmp = {'id': cols[0].text.strip(), 'ip': cols[1].text.strip(), 'port': port,
124 'url_id': cols[6].find('a').get('name'), 'con_time': json_url_res.get('data').get('all'), 'region': cols[3].text.strip()}
125 data.append(tmp)
126 print(tmp)
127 # time.sleep(random.randint(3, 7))
128
129 # cols = [ele.text.strip() for ele in row.find_all('td')]
130 # data.append([ele for ele in cols if ele])
131
132 return data
133
134
135 def save_data(json_data):
136 with open('data_{date}.json'.format(date=datetime.datetime.now().strftime('%Y%m%d%H%M%S')), mode='w') as f:
137 f.write(json_data)
138
139
140 if __name__ == '__main__':
141 res = get_urls()
142 data = []
143 for item in res:
144 if u'调试' not in item.get('region'):
145 if test_url_available('{ip}:{port}'.format(ip=item.get('ip'), port=item.get('port'))):
146 data.append(item)
147 save_data(json.dumps(data))
| 46 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 from Crypto.Cipher import AES
5 # Encryption
6 encryption_suite = AES.new('This is a key123', AES.MODE_CBC, 'This is an IV456')
7 cipher_text = encryption_suite.encrypt("The answer is no")
8 print cipher_text['ciphertext']
9
10 # Decryption
11 decryption_suite = AES.new('This is a key123', AES.MODE_CBC, 'This is an IV456')
12 plain_text = decryption_suite.decrypt(cipher_text)
13 print plain_text
| 8 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 '''输入3个整数,列表,求最小值'''
5 aint = int(input('please input a int:'))
6 bint = int(input('please input a int:'))
7 cint = int(input('please input a int:'))
8 alst = [aint, bint,cint]
9 print(min(alst))
10
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 """
5 根据用户输入打印正三角,比如用户输入3打印如下:
6
7 *
8
9 *
10 * *
11
12 *
13 * *
14 * * *
15
16 打印菱形
17
18
19 """
20
21
22 # 1. 三角形
23 # def show_triangle(num):
24 # """
25 # print triangle
26 # :param num:
27 # :return:
28 # """
29 # for i in range(num):
30 # # print('i%s' % i)
31 # print(' ' * (num - i - 1), end='')
32 # for j in range(i + 1):
33 # print('*', end=' ')
34 # print()
35 #
36 # if __name__ == '__main__':
37 # num = int(input('Please input the number:'))
38 # show_triangle(num)
39
40
41 # 2. 菱形
42 def show_diamond(num):
43 """
44 print diamond
45 :param num:
46 :return:
47 """
48 # 菱形的上半部分
49 for i in range(num):
50 print(' ' * (num - i) + '*' * (2 * i + 1))
51 # 菱形的正中
52 print('*' * (2 * num + 1))
53 # 菱形的下半部分
54 for i in range(num):
55 print(' ' * (i + 1) + '*' * (2 * (num - i - 1) + 1))
56
57
58
59 if __name__ == '__main__':
60 num = int(input('Please input the number:'))
61 show_diamond(num)
| 42 - warning: redefined-outer-name
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4 print 'Simple Assignment'
5 shoplist = ['apple', 'mango', 'carrot', 'banana']
6
7 mylist = shoplist
8
9 del shoplist[0]
10
11 print 'shoplist is', shoplist, id(shoplist)
12 print 'mylist is', mylist, id(mylist)
13
14 print 'Copy by making a full slice'
15 mylist = shoplist[:]
16
17 del mylist[0]
18
19 print 'shoplist is', shoplist, id(shoplist)
20 print 'mylist is', mylist, id(mylist)
| 4 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 min_salary = 30000.0
4 min_years = 2
5
6 salary = float(input('Enter your annual salary:'))
7 years_on_job = int(input('Enter the number of ' +
8 'years employed:'))
9 if salary >= min_salary and years_on_job >= min_years:
10 print('You qualify for the loan.')
11 else:
12 print('You do not qualify for this loan.')
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 """
5 遍历文件夹jsons,获取所有文件内容,并保存文件title到一个list里面并打印。
6 """
7
8 import os
9 import json
10
11 base_path = '{base_path}\\jsons'.format(base_path=os.getcwd())
12
13
14 def get_content(filename):
15 """
16 从filename读取数据
17 :param filename:
18 :return:
19 """
20 result = ''
21 with open(filename) as f:
22 for line in f:
23 result += line
24 return result
25
26
27 def get_json_data():
28 """
29 获取json数据的title的list
30 :param content:
31 :return: res_list
32 """
33 res_list = []
34 for item in os.listdir(base_path): # '.\\jsons'
35 res = get_content('{base_path}\\{filename}'.format(base_path=base_path, filename=item))
36 json_res = json.loads(res)
37 res_list.extend(data.get('title') for data in json_res)
38 return res_list
39
40
41 def print_data(res_list):
42 """
43
44 :param res_list:
45 :return:
46 """
47 for title in res_list:
48 print(title)
49
50
51 if __name__ == '__main__':
52 res_list = get_json_data()
53 print_data(res_list)
| 21 - warning: unspecified-encoding
33 - warning: redefined-outer-name
41 - warning: redefined-outer-name
|
1 # -*- coding: utf-8 -*-
2
3 import jpype
4
5 __author__ = 'florije'
6
7 lib_base_path = r'E:\fuboqing\files\company\po_upnp\libs'
8 jar_list = ['commons-codec-1.10.jar', 'log4j-1.2.17.jar', 'chinapaysecure.jar', 'bcprov-jdk15on-154.jar']
9 class_path = ';'.join(['%s\%s' % (lib_base_path, jar) for jar in jar_list])
10
11 jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % class_path)
12
13 secssUtil = jpype.JClass('com.chinapay.secss.PythonDelegate')()
14 prop = jpype.JClass('java.util.Properties')()
15 # jpype.java.lang.System.out.println("hello world")
16
17 configs_dict = {'sign.file.password': 'abcd1234', 'sign.cert.type': 'PKCS12', 'sign.invalid.fields': 'Signature,CertId',
18 'verify.file': 'E:\\fuboqing\\files\\company\\po_upnp\\pfx\\000001509184450.cer',
19 'sign.file': 'E:\\fuboqing\\files\\company\\po_upnp\\pfx\\000001509184450.pfx', 'log4j.name': 'cpLog',
20 'signature.field': 'Signature'}
21
22 for key, value in configs_dict.iteritems():
23 prop.setProperty(key, value)
24
25 try:
26 secssUtil.PyInit(prop)
27 if secssUtil.getErrCode() != '00':
28 print(secssUtil.getErrMsg())
29 except jpype.JavaException as e:
30 raise e
31
32 data = {'Version': '20140728', 'OrderAmt': str(1L), 'TranDate': '20160114', 'BusiType': '0001',
33 'MerBgUrl': 'http://182.48.115.36:8443/upnp/payNotify', 'MerPageUrl': 'http://182.48.115.35:20060/pay-success',
34 'MerOrderNo': '2016844500000009', 'TranTime': '170733', 'CurryNo': 'CNY', 'MerId': '000001509184450'}
35 data_map = jpype.JClass("java.util.HashMap")()
36 for key, value in data.iteritems():
37 data_map.put(key, value)
38
39 try:
40 secssUtil.PySign(data_map)
41 except jpype.JavaException as e:
42 print(e.message())
43
44 res = secssUtil.getSign()
45 print res
46
47 jpype.shutdownJVM()
| 32 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 # this program displays a person's name and address
5 print('Kate')
6 print('123 Full circle Drive')
7 print('Asheville, NC 28899')
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4 tabby_cat = "\tI'm tabbed in."
5 persian_cat = "I'm split non a line."
6 backslash_cat = "I'm \\ a \\cat."
7
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4 # Filename: mymodule_demo.py
5
6 import mymodule
7
8 mymodule.say_hi()
9 print('Version', mymodule.__version__)
10
11
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4
5 def pydanny_selected_numbers():
6 # If you multiple 9 by any other number you can easily play with
7 # numbers to get back to 9.
8 # Ex: 2 * 9 = 18. 1 + 8 = 9
9 # Ex: 15 * 9 = 135. 1 + 3 + 5 = 9
10 # See https://en.wikipedia.org/wiki/Digital_root
11 yield 9
12
13 # A pretty prime.
14 yield 31
15
16 # What's 6 * 7?
17 yield 42
18
19 # The string representation of my first date with Audrey Roy
20 yield "2010/02/20"
21
22
23 if __name__ == '__main__':
24 for item in pydanny_selected_numbers():
25 print item
| 25 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 """
5 1.判断用户输入的年月日,然后判断这一天是当年的第几天。
6 2.随机生成20个10以内的数字,放入数组中,然后去掉重复的数字。然后打印数组。
7 """
8
9
10 # 根据出生年先判断是平年还是闰年,得出2月份天数,各月份天数相加即可。
11
12
13
14
15 # year = int(input("请输入年份: "))
16 # march = int(input("请输入月份: "))
17 # days = int(input("请输入日子: "))
18 # def feb_days():
19 # for i in range():
20 # if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
21 # xadct.valus({'feb': 28})
22 # days = days + xadct.valus()
23 # return(days)
24 # else:
25 # return(days + 1)
26
27
28 def is_leap(year):
29 """
30 判断是否是瑞年
31 :param year:
32 :return:
33 """
34 if year % 4 == 0 and year % 100 != 0 or year % 400 == 0:
35 return True
36 else:
37 return False
38
39
40 def print_days_of_year(year, month, day):
41 """
42 打印当前天是这一年的第几天
43 :param year:
44 :param month:
45 :param day:
46 :return:
47 """
48 month_days = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
49 if is_leap(year):
50 month_days[2] = 29
51 full_month_day_count = 0
52 for i in range(1, month):
53 full_month_day_count += month_days[i]
54 total_days = full_month_day_count + day
55 print(total_days)
56
57 if __name__ == '__main__':
58 year = int(input('Please input year:'))
59 month = int(input('Please input month:'))
60 day = int(input('Please input day:'))
61 print_days_of_year(year, month, day)
| 28 - warning: redefined-outer-name
34 - refactor: simplifiable-if-statement
34 - refactor: no-else-return
40 - warning: redefined-outer-name
40 - warning: redefined-outer-name
40 - warning: redefined-outer-name
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 room =1002
5 print('I am staying in room number')
6 print(room)
7
8 room = 503
9 print('I am staying in room number', room)
10
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4 for i in range(1, 5, 2):
5 print(i)
6 else:
7 print('The for loop is over')
8
9 for i in range(1, 5):
10 print(i)
11 else:
12 print('The for loop is over')
| 6 - warning: useless-else-on-loop
11 - warning: useless-else-on-loop
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 first_name = 'ww'
5 last_name = 'rr'
6 print('I am a teacher', \
7 first_name, \
8 last_name)
9
10 print('a', 'b', 'c', end=';')
11 print('a', 'b', 'c', sep='**')
12
13 print(1, 2, 3, 4, sep='')
14 print('****\n*\n****\n* *\n****')
15 print('****')
16 print('*')
17 print('* *')
18 print('* *')
19
20
21 print('你好'.encode())
22 # print('你好'.decode())
23 test_dict = {'a': 3}
24 abc = test_dict.get('b')
25 print('~~~~~~~~')
26 print(abc)
27
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 """
5 <script type="text/javascript">var fish=1734+473;var frog=435+2716^fish;var seal=168+8742^frog;var chick=7275+2941^seal;var snake=3820+4023^chick;</script>
6 以上script脚本提取变量为字典。
7 """
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4
5 class Person:
6 def __init__(self, name):
7 self.name = name
8
9 def say_hi(self):
10 print 'Hello, my name is', self.name
11
12
13 p = Person('Swaroop')
14 p.say_hi()
| 10 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 """
5 2.2 输入,执行,输出
6
7 """
8
9 print('Hello world')
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4
5 def say(message, times=1):
6 print(message * times)
7
8
9 say('Hello')
10 say('World', 5)
11
12
13 def say(message, times=2):
14 print(message * times)
15
16
17 say('Hello', 2)
18 say('World', 4)
19
20
21 def say(message, times=1):
22 print(message * times)
23
24
25 say('Hate')
26 say('You', 1314)
| 13 - error: function-redefined
21 - error: function-redefined
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 import base64
5
6 from Crypto.Cipher import AES
7 from pkcs7 import PKCS7Encoder
8
9 # 使用256位的AES,Python会根据传入的Key长度自动选择,长度为16时使用128位的AES
10 key = 'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
11 mode = AES.MODE_CBC
12 iv = '1234567812345678' # AES的CBC模式使用IV
13
14 encoder = PKCS7Encoder()
15 text = "This is for test."
16
17 def encrypt(data):
18 encryptor = AES.new(key, AES.MODE_CBC, iv)
19 padded_text = encoder.encode(data)
20 encrypted_data = encryptor.encrypt(padded_text)
21
22 return base64.b64encode(encrypted_data)
23
24 def decrypt(data):
25 cipher = base64.b64decode(data)
26 decryptor = AES.new(key, AES.MODE_CBC, iv)
27 plain = decryptor.decrypt(cipher)
28
29 return encoder.decode(plain)
30
31 encrypted_text = encrypt(text)
32 clean_text = decrypt(encrypted_text)
33
34 print "encrypted_text:", encrypted_text
35 print "clean_text: ", clean_text | 34 - error: syntax-error
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4
5 def say_hi():
6 print('Hi,this is mymodule speaking.')
7
8
9 __version__ = '0.1'
10
11
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4
5 class Person:
6 pass
7
8
9 p = Person
10 print(p)
| 5 - refactor: too-few-public-methods
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 start_speed = 60 # Starting speed
4 end_speed = 131 # Ending speed
5 increment = 10 # Speed increment
6 conversion_factor = 0.6214 # Conversation factor
7 print('KPH\tMPH')
8 print('--------')
9
10 for kph in range(start_speed, end_speed, increment):
11 mph = kph * conversion_factor
12 print(kph, '\t', format(mph, '.1f'))
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 """
5 2.1 设计程序流程
6 设计程序-写代码-更正语法错误-测试程序-更正逻辑错误-继续循环
7
8 流程图的概念,稍后搜索学习。
9 """
10
11
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 first_name = 'Kathryn'
5 last_name = 'Marino'
6 print(first_name, last_name)
7 print(first_name, last_name)
8
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4
5 def reverse(text):
6 return text[::-1]
7
8
9 def is_palindrome(text):
10 return text == reverse(text)
11
12
13 something = raw_input("Enter input: ")
14 if is_palindrome(something):
15 print "Yes,it is a palindrome"
16 else:
17 print"No,it is not a palindrome"
| 15 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 print('This program displays a list of number')
4 print('and their squares.')
5 start = int(input('Enter the starting number: '))
6 end = int(input('How high should I go?'))
7 print()
8 print('Number\tSquare')
9 print('--------------')
10 for number in range(start, end + 1):
11 square = number ** 2
12 print(number, '\t', square)
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 # Program 2-1 练习单引号使用
5 print('Kate Austen')
6 print('123 Full Circle Drive')
7 print('Asheville, NC 28899')
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2
3 from Crypto.Cipher import AES
4 from Crypto import Random
5 from binascii import b2a_hex, a2b_hex
6
7 __author__ = 'florije'
8
9
10 class prpcrypt():
11 def __init__(self, key):
12 self.key = key
13 self.mode = AES.MODE_CBC
14
15 # 加密函数,如果text不足16位就用空格补足为16位,
16 # 如果大于16当时不是16的倍数,那就补足为16的倍数。
17 def encrypt(self, text):
18 cryptor = AES.new(self.key, self.mode, b'0000000000000000')
19 # 这里密钥key 长度必须为16(AES-128),
20 # 24(AES-192),或者32 (AES-256)Bytes 长度
21 # 目前AES-128 足够目前使用
22 length = 16
23 count = len(text)
24 if count < length:
25 add = (length - count)
26 # \0 backspace
27 text = text + ('\0' * add)
28 elif count > length:
29 add = (length - (count % length))
30 text = text + ('\0' * add)
31 self.ciphertext = cryptor.encrypt(text)
32 # 因为AES加密时候得到的字符串不一定是ascii字符集的,输出到终端或者保存时候可能存在问题
33 # 所以这里统一把加密后的字符串转化为16进制字符串
34 return b2a_hex(self.ciphertext)
35
36 # 解密后,去掉补足的空格用strip() 去掉
37 def decrypt(self, text):
38 cryptor = AES.new(self.key, self.mode, b'0000000000000000')
39 plain_text = cryptor.decrypt(a2b_hex(text))
40 return plain_text.rstrip('\0')
41
42
43 if __name__ == '__main__':
44 # pc = prpcrypt('keyskeyskeyskeys')
45 # import sys
46 #
47 # e = pc.encrypt(sys.argv[1])
48 # d = pc.decrypt(e)
49 # print "加密:", e
50 # print "解密:", d
51
52 key = '0123456789abcdef'
53 mode = AES.MODE_CBC
54 iv = Random.new().read(AES.block_size)
55 encryptor = AES.new(key, mode, iv)
56 text = 'j' * 64
57 ciphertext = encryptor.encrypt(text)
58 print ciphertext
59
60 """
61 上例中的key是16位, 还可以是24 或 32 位长度, 其对应为 AES-128, AES-196 和 AES-256.
62 解密则可以用以下代码进行:
63 """
64
65 decryptor = AES.new(key, mode, iv)
66 plain = decryptor.decrypt(ciphertext)
67 print plain
| 58 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 import time
5 import requests
6 from bs4 import BeautifulSoup
7
8
9 def sign_daily(username, password):
10 username = username # username
11 password = password # password
12 singin_url = 'https://v2ex.com/signin'
13 home_page = 'https://www.v2ex.com'
14 daily_url = 'https://www.v2ex.com/mission/daily'
15
16 user_agent = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36"
17
18 headers = {
19 "User-Agent": user_agent,
20 "Host": "www.v2ex.com",
21 "Referer": "https://www.v2ex.com/signin",
22 "Origin": "https://www.v2ex.com"
23 }
24
25 v2ex_session = requests.Session()
26
27 def make_soup(url, tag, name):
28 page = v2ex_session.get(url, headers=headers, verify=True).text
29 soup = BeautifulSoup(page)
30 soup_result = soup.find(attrs={tag: name})
31 return soup_result
32
33 once_vaule = make_soup(singin_url, 'name', 'once')['value']
34 print(once_vaule) # will show you the once
35
36 post_info = {
37 'u': username,
38 'p': password,
39 'once': once_vaule,
40 'next': '/'
41 }
42
43 resp = v2ex_session.post(singin_url, data=post_info, headers=headers, verify=True)
44
45 short_url = make_soup(daily_url, 'class', 'super normal button')['onclick']
46
47 first_quote = short_url.find("'")
48 last_quote = short_url.find("'", first_quote + 1)
49 final_url = home_page + short_url[first_quote + 1:last_quote]
50
51 page = v2ex_session.get(final_url, headers=headers, verify=True).content
52
53 suceessful = make_soup(daily_url, 'class', 'fa fa-ok-sign')
54 if suceessful:
55 print ("Sucessful.")
56 else:
57 print ("Something wrong.")
58
59
60 if __name__ == '__main__':
61 user_list = [{'username': 'florije', 'password': 'fuboqing'}, {'username': 'vob636', 'password': 'fuboqing1988'},
62 {'username': 'moioutoi@163.com', 'password': '19931221'}]
63 for user in user_list:
64 sign_daily(user.get('username'), user.get('password'))
65 time.sleep(60)
| 9 - refactor: too-many-locals
10 - warning: self-assigning-variable
11 - warning: self-assigning-variable
43 - warning: unused-variable
51 - warning: unused-variable
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4 import sys
5
6 print('The command line arguments are:')
7 for i in sys.argv:
8 print i
9
10 print '\n\nThe PRTHONPATH is', sys.path, '\n'
11
12
13
| 8 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 aint = int(input('please input a int:'))
5 bint = int(input('please input a int:'))
6 cint = int(input('please input a int:'))
7 dint = int(input('please input a int:'))
8 fint = int(input('please input a int:'))
9 gint = int(input('please input a int:'))
10 hint = int(input('please input a int:'))
11 lint = int(input('please input a int:'))
12 mint = int(input('please input a int:'))
13 nint = int(input('please input a int:'))
14
15 alst = [aint, bint, cint, dint, fint, gint, hint, lint, mint, nint]
16
17 print(sum(alst))
18 print(len(alst))
19 print(sum(alst)/len(alst))
20
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 high_score = 95
4 test1 = int(input('Enter the score for test 1:'))
5 test2 = int(input('Enter the score for test 2:'))
6 test3 = int(input('Enter the score for test 3:'))
7 average = (test1 + test2 + test3) / 3
8 print('The average score is', average)
9 if average >= high_score:
10 print('Congratulations!')
11 print('That is a great average!')
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 num1 = 127.899
4 num2 = 3465.148
5 num3 = 3.776
6 num4 = 264.821
7 num5 = 88.081
8 num6 = 799.999
9
10 print(format(num1, '7.2f'))
11 print(format(num2, '7.2f'))
12 print(format(num3, '7.2f'))
13 print(format(num4, '7.2f'))
14 print(format(num5, '7.2f'))
15 print(format(num6, '7.2f'))
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 # This program demonstrates a variable.
5 room = 503
6 print('I am staying in room number.')
7 print(room)
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 amount_due = 5000.0
4 monthly_payment = amount_due / 12
5 # \可加可不加
6 print('The monthly payment is ' +\
7 format(monthly_payment, '.2f'))
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 '''
4 总科目状态:1,未通过,2,补考,3,通过
5 1.输入两门成绩
6 2.第一门成绩大于60分即通过,不及格则未通过
7 3.第一门成绩及格而第二门成绩不及格,则显示‘补考’。
8 '''
9 '''
10 x = int(input('输入第一门课程成绩:'))
11 y = int(input('输入第二门课程成绩:'))
12
13 status = 0 # 总科目状态,0定义为未知。
14
15 if x >= 60:
16 if y < 60:
17 status = 2
18 else:
19 status = 3
20 else:
21 status = 1
22
23 if status == 1:
24 print('未通过')
25 elif status == 2:
26 print('补考')
27 elif status == 3:
28 print('通过')
29 '''
30
31 x = int(input('输入第一门课程成绩:'))
32 y = int(input('输入第二门课程成绩:'))
33 if x >= 60:
34 if y >= 60:
35 print('通过')
36 else:
37 print('补考')
38 else:
39 print('未通过')
40
| 9 - warning: pointless-string-statement
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 function_class = (lambda x: x).__class__
5 print(function_class)
6
7
8 def foo():
9 print("hello world.")
10
11
12 def myprint(*args, **kwargs):
13 print("this is my print.")
14 print(*args, **kwargs)
15
16 newfunc1 = function_class(foo.__code__, {'print': myprint})
17 newfunc1()
18
19 newfunc2 = function_class(compile("print('asdf')", "filename", "single"), {'print': print})
20 newfunc2()
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4
5 def total(initial=5, *numbers, **keywords):
6 count = initial
7 for number in numbers:
8 count += number
9 for key in keywords:
10 count += keywords[key]
11 return count
12
13
14 print(total(10, 1, 2, 3, vegetables=50, fruits=100))
15
16
17 def add(a, b, c):
18 if a == 1:
19 return a + b + c
20 else:
21 return a * b * c
22
23
24 print add(2, 2, 4)
| 24 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 first_name = input('Enter your first name: ')
5 last_name = input('Enter your last name: ')
6 print('Hello', first_name, last_name)
7
8
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 aint = int(input('please input a int:'))
4 bint = int(input('please input a int:'))
5 cint = int(input('please input a int:'))
6 dint = int(input('please input a int:'))
7 fint = int(input('please input a int:'))
8 alst = [aint, bint, cint, dint, fint]
9
10 print(alst[1:4:2])
11 print(alst[3:0:-2]) | Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 #.adct = {'a': 1, 'b': 2, 'c': 3}
4 #.print(adct)
5
6 # adct = {'a': 1, 'b': 2}
7 # adct.update({'c': 3})
8 # print(adct)
9
10
11 # d1,d2,d3是三种不同的表达形式,
12 d1 = dict((['a', 1], ['b', 2],['c', 3]))
13 d2 = dict(a=1, b=2,c=3)
14 d3 = {'a': 1, 'b': 2, 'c': 3}
15 print(d1)
16 print(d2)
17 print(d3)
18
19
| 13 - refactor: use-dict-literal
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 test1 = float(input('Enter the first test score: '))
4 test2 = float(input('Enter the second test score: '))
5 test3 = float(input('Enter the third test score: '))
6 average = (test1 + test2 + test3) / 3.0
7 print('The average score is', average)
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4
5 def func_outer():
6 x = 2
7 print('x is', x)
8
9 def func_inner():
10 nonlocal x # zhe li bu dui ya. bu dong.
11
12 x = 5
13
14 func_inner()
15 print('Changed local x to', x)
16
17
18 func_outer()
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 """
5 去除重复关键字,
6 '[a:1,b:2,c:3]','[a:7,c:2,m:7,r:4]','[a:3,b:2,c:7,o:5]'
7 """
8
9 myseq = """[a:1,b:2,c:3]
10 [a:3,b:3,c:8]
11 [a:7,c:2,m:7,r:4]
12 [a:2,c:4,m:6,r:4]
13 [a:3,b:2,c:7,o:5]"""
14
15 res_keys, res_strs = [], []
16
17 for item in myseq.split('\n'):
18 # 获取key
19 item_keys = [raw_item.split(':')[0] for raw_item in item[1: len(item) - 1].split(',')]
20 # 判断是否有key的列表,然后决定是否放进去。
21 for key_list in res_keys: # 已存['a', 'b', 'c']
22 list_r = [a for a in item_keys if a in key_list]
23 long_len = len(item_keys) if len(item_keys) >= len(key_list) else len(key_list)
24 if len(list_r) >= long_len:
25 break
26 else:
27 res_keys.append(item_keys)
28 res_strs.append(item)
29
30 print(', '.join(res_strs))
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 import binascii
5 import StringIO
6 from Crypto.Cipher import AES
7
8 KEY = 'ce975de9294067470d1684442555767fcb007c5a3b89927714e449c3f66cb2a4'
9 IV = '9AAECFCF7E82ABB8118D8E567D42EE86'
10 PLAIN_TEXT = "ciao"
11
12
13 class PKCS7Padder(object):
14 '''
15 RFC 2315: PKCS#7 page 21
16 Some content-encryption algorithms assume the
17 input length is a multiple of k octets, where k > 1, and
18 let the application define a method for handling inputs
19 whose lengths are not a multiple of k octets. For such
20 algorithms, the method shall be to pad the input at the
21 trailing end with k - (l mod k) octets all having value k -
22 (l mod k), where l is the length of the input. In other
23 words, the input is padded at the trailing end with one of
24 the following strings:
25 01 -- if l mod k = k-1
26 02 02 -- if l mod k = k-2
27 .
28 .
29 .
30 k k ... k k -- if l mod k = 0
31 The padding can be removed unambiguously since all input is
32 padded and no padding string is a suffix of another. This
33 padding method is well-defined if and only if k < 256;
34 methods for larger k are an open issue for further study.
35 '''
36
37 def __init__(self, k=16):
38 self.k = k
39
40 ## @param text: The padded text for which the padding is to be removed.
41 # @exception ValueError Raised when the input padding is missing or corrupt.
42 def decode(self, text):
43 '''
44 Remove the PKCS#7 padding from a text string
45 '''
46 nl = len(text)
47 val = int(binascii.hexlify(text[-1]), 16)
48 if val > self.k:
49 raise ValueError('Input is not padded or padding is corrupt')
50
51 l = nl - val
52 return text[:l]
53
54 ## @param text: The text to encode.
55 def encode(self, text):
56 '''
57 Pad an input string according to PKCS#7
58 '''
59 l = len(text)
60 output = StringIO.StringIO()
61 val = self.k - (l % self.k)
62 for _ in xrange(val):
63 output.write('%02x' % val)
64 return text + binascii.unhexlify(output.getvalue())
65
66
67 def encrypt(my_key=KEY, my_iv=IV, my_plain_text=PLAIN_TEXT):
68 """
69 Expected result if called without parameters:
70 PLAIN 'ciao'
71 KEY 'ce975de9294067470d1684442555767fcb007c5a3b89927714e449c3f66cb2a4'
72 IV '9aaecfcf7e82abb8118d8e567d42ee86'
73 ENCRYPTED '62e6f521d533b26701f78864c541173d'
74 """
75
76 key = binascii.unhexlify(my_key)
77 iv = binascii.unhexlify(my_iv)
78
79 padder = PKCS7Padder()
80 padded_text = padder.encode(my_plain_text)
81
82 encryptor = AES.new(key, AES.MODE_CFB, iv, segment_size=128) # Initialize encryptor
83 result = encryptor.encrypt(padded_text)
84
85 return {
86 "plain": my_plain_text,
87 "key": binascii.hexlify(key),
88 "iv": binascii.hexlify(iv),
89 "ciphertext": result
90 }
91
92
93 if __name__ == '__main__':
94 result = encrypt()
95 print "PLAIN %r" % result['plain']
96 print "KEY %r" % result['key']
97 print "IV %r" % result['iv']
98 print "ENCRYPTED %r" % binascii.hexlify(result['ciphertext'])
| 95 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 import json
5 import urllib.request
6
7 """
8 读取1.json文件内容,然后解析出来把mp3下载地址找到,然后下载mp3文件。
9
10 """
11
12 # import os
13 #
14 # for r, ds, fs in os.walk("/home/test"):
15 # for f in fs:
16 # fn = os.path.join(r, f)
17 # if f == "A":
18 # print(os.path.abspath(fn))
19 #
20 #
21 # def file_hdl(name='1.json'):
22 # f = open(name)
23 # for line in f:
24 # print(line)
25
26
27 def read_content(filename):
28 """
29 从filename读取数据
30 :param filename:
31 :return:
32 """
33 result = ''
34 f = open(filename)
35 for line in f:
36 result += line
37 f.close()
38 return result
39
40
41 def find_mp3_link(content):
42 """
43 从content里面找到mp3链接
44 :param content:
45 :return:
46 """
47 json_data = json.loads(content)
48 print(json_data)
49 res = []
50 for item in json_data:
51 res.append(item['mp3'])
52 return res # http://luoo-mp3.kssws.ks-cdn.com/low/luoo/radio1/1.mp3
53
54
55 def store_mp3(mp3_links):
56 """
57 根据mp3链接下载mp3文件,然后存储到本地
58 :param mp3_link:
59 :return:
60 """
61 for link in mp3_links:
62 res = urllib.request.urlopen(link).read()
63 f = open(link.split('/')[-1], 'wb')
64 f.write(res)
65 f.close()
66 print('{mp3} download success.'.format(mp3=link.split('/')[-1]))
67
68
69 if __name__ == '__main__':
70 content = read_content('1.json')
71 mp3_links = find_mp3_link(content)
72 store_mp3(mp3_links)
| 7 - warning: pointless-string-statement
34 - warning: unspecified-encoding
34 - refactor: consider-using-with
41 - warning: redefined-outer-name
55 - warning: redefined-outer-name
62 - refactor: consider-using-with
63 - refactor: consider-using-with
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 """
5 求 2/1+3/2+5/3+8/5+13/8.....前20项之和?
6 """
7
8 a = 1.0
9 b = 2.0
10 sum = 0
11 c = 0
12 for i in range(0, 20):
13 sum = sum + b/a
14 c = a + b
15 a = b
16 b = c
17 print(sum)
18
| 10 - warning: redefined-builtin
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 """
5 编写程序求 1+3+5+7+……+99 的和值。
6 """
7 '''
8 1.定义一组数列
9 2.求和
10 '''
11
12
13 def get_res():
14 source = range(1, 100, 2)
15 sum = 0
16 for i in source:
17 print(i)
18 sum += i
19 return sum
20
21 if __name__ == '__main__':
22 res = get_res()
23 print(res)
| 7 - warning: pointless-string-statement
15 - warning: redefined-builtin
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'florije'
3
4 a = [1, 2, 3]
5 b = [item for item in a]
6 tmp = []
7 for item in a:
8 tmp.append(item)
9 print b, tmp
| 9 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3
4 """
5 1,给定字符串,带分行,然后split(分裂)为list
6 2,分析list每个元素,判断是否有href关键字,保存有关键字的数据
7 3,提取剩余元素里面的href=开头的部分
8 """
9
10
11 def get_hrefs(str_data):
12 """
13 获取str_data里面所有超链接
14 :param str_data:
15 :return:
16 """
17 # split str_data to list
18 str_list = str_data.split('\n') # 分析原始数据看到有回车换行
19 href_list = []
20 for line in str_list:
21 if 'href=' in line:
22 line = line.strip() # 看到有的行有空格,就去掉
23 line = line[4: -5] # href带其他标签,规则去掉
24 line = line[0: line.find('>') + 1] # 看规则去掉字符串尾部的汉字
25 href_list.append(line)
26 #
27 return href_list # [href sre[0:href str.find('>') + 1] for href str in[line.strip()[4: -5]for]
28
29 if __name__ == '__main__':
30 str_data = """
31 <h3>联系我们</h3>
32 <p>联系人:王经理</p>
33 <p>电话:021-87017800</p>
34 <div>
35 <ul>
36 <li><a class="nav-first" href="/"首 页</li>
37 <li><a href="/lista.php">吸粮机</a></li>
38 <li><a href="/listb.php">灌包机</a></li>
39 <li><a href="/listc.php">汽油吸粮机</a></li>
40 <li><a href="/order/setorder.php">我要订购</a></li>
41 <li><a href="/about.php">关于我们</a></li>
42 </ul>
43 </div>
44 """
45 res = get_hrefs(str_data)
46 for item in res:
47 print(item)
48
| 11 - warning: redefined-outer-name
|
1 # -*- coding: utf-8 -*-
2 __author__ = 'manman'
3 for x in range(5):
4 print('Hello world')
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8-*-
2 __author__ = 'manman'
3
4
5 def func(a, b=5, c=10):
6 print 'a is', a, 'b is', b, 'c is', c
7
8
9 func(3, 7)
10 func(25, c=24)
11 func(c=50, a=100)
12
13 func(a=3)
14
15
16 def show_info(name, age=20, *args, **kwargs):
17 """
18 show student's info
19 :param name: student's name
20 :param age: student's age
21 :param score: student's score
22 :return: nothing
23 """
24 print name,
25 print age,
26 print args,
27 print kwargs
28
29 if __name__ == '__main__':
30 show_info('zhuzai')
31 print '-' * 40
32 show_info('zhuzai', 10)
33 print '-' * 40
34 show_info('zhuzai', age=11)
35 print '-' * 40
36 show_info('zhuzai', 12, 13, 14)
37 print '-' * 40
38 show_info('zhuzai', 15, *(16, 17), score=100)
39 print '-' * 40
40 show_info('zhuzai', 18, *(19, ), **{'score': 20})
41
| 6 - error: syntax-error
|
1 import json
2 import os
3 from math import sqrt
4 import re
5
6
7 def norm(vec):
8 dot = 0
9
10 for v in vec:
11 dot += v*v
12
13 return sqrt(dot)
14
15
16 def main():
17 filename = 'embedding.txt'
18 embedding = dict()
19
20 with open(filename, encoding='utf-8') as f:
21 args = f.readline().strip('\n').split(' ')
22 embedding['size'] = int(args[1])
23 embedding['embedding'] = dict()
24
25 for line in f:
26 word, *vector = line.strip().split(' ')
27
28 if not re.fullmatch(r'[а-яА-Я\-]+', word) or len(word) < 3:
29 continue
30
31 assert(len(vector) == embedding['size'])
32 vector = [float(v) for v in vector]
33 length = norm(vector)
34 vector = [v / length for v in vector]
35 embedding['embedding'][word] = vector
36
37 with open(filename + '.js', 'w', encoding='utf-8') as f:
38 f.write('const EMBEDDING = ' + json.dumps(embedding))
39
40
41 if __name__ == '__main__':
42 main()
| 18 - refactor: use-dict-literal
23 - refactor: use-dict-literal
2 - warning: unused-import
|
1 import pandas as pd
2 import json
3
4 def write_county_json():
5 tsv = pd.read_csv("../data/county_fips.tsv", sep='\t')
6 tsv = tsv[["FIPS", "Name"]]
7 print(tsv.head())
8 county_json = json.loads(tsv.to_json())
9 print(county_json)
10
11 with open('county_fips.json', 'w') as outfile:
12 json.dump(county_json, outfile)
13
14 def write_od_distribution_json():
15 csv = pd.read_csv("../data/county_health_rankings.csv")
16 csv = csv[["FIPS", "Drug Overdose Mortality Rate"]]
17 max_od = csv["Drug Overdose Mortality Rate"].max()
18 print(max_od)
19
20 csv["pctile"] = (csv["Drug Overdose Mortality Rate"] / max_od * 100)
21 csv["pctile"] = pd.to_numeric(csv["pctile"], downcast="unsigned")
22 print(csv.tail())
23
24 if __name__ == "__main__":
25 write_od_distribution_json() | 11 - warning: unspecified-encoding
|
1 """backend URL Configuration
2
3 The `urlpatterns` list routes URLs to views. For more information please see:
4 https://docs.djangoproject.com/en/2.2/topics/http/urls/
5 Examples:
6 Function views
7 1. Add an import: from my_app import views
8 2. Add a URL to urlpatterns: path('', views.home, name='home')
9 Class-based views
10 1. Add an import: from other_app.views import Home
11 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
12 Including another URLconf
13 1. Import the include() function: from django.urls import include, path
14 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
15 """
16 from django.contrib import admin
17 from django.urls import path
18
19 # from customers import views
20 from django.conf.urls import url
21 from gallery import views
22 from rest_framework_simplejwt import views as jwt_views
23 from django.conf import settings
24 from django.contrib.staticfiles.urls import static
25 from django.contrib.staticfiles.urls import staticfiles_urlpatterns
26
27 urlpatterns = [
28 path('admin/', admin.site.urls),
29
30 path('api/token/refresh/', jwt_views.TokenRefreshView.as_view(), name='token_refresh'),
31 path('api/token/verify/', jwt_views.TokenVerifyView.as_view(), name='token_verify'),
32
33 url(r'^getuser$', views.get_user),
34 url(r'^signup$', views.user_signup),
35 url(r'^login$', views.user_login),
36 url(r'^logout$', views.user_logout),
37
38 path('getimage/', views.get_image),
39 # path('getuserimage/<int:user_id>', views.get_imgs_owned_by_user),
40 path('gettags/', views.get_tags),
41 path('api/upsert', views.upsert_image),
42 path('api/updateuser', views.update_user),
43 path('api/updateavatar', views.update_avatar),
44
45
46 ] + staticfiles_urlpatterns() + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
47
48 # url(r'^api/customers/$', views.customers_list),
49 # url(r'^api/customers/(?P<pk>[0-9]+)$', views.customers_detail),
50 # url(r'^api/gallery/(?P<pageNo>[0-9]+)$', views.image_page),
| Clean Code: No Issues Detected
|
1 from django import forms
2 from .models import Image, Profile
3 from django.contrib.auth.models import User
4 from django.db import models
5
6 class UpsertImageForm(forms.ModelForm):
7 class Meta:
8 model = Image
9 fields = ["image_name", "image_desc", "image_file", "user", "tags"]
10
11 class UpdateUserForm(forms.ModelForm):
12 class Meta:
13 model = User
14 fields = ["email", "password"]
15
16
17 class UpdateAvatarForm(forms.ModelForm):
18 class Meta:
19 model = Profile
20 fields = ["avatar", "last_edit"]
| 2 - error: relative-beyond-top-level
7 - refactor: too-few-public-methods
6 - refactor: too-few-public-methods
12 - refactor: too-few-public-methods
11 - refactor: too-few-public-methods
18 - refactor: too-few-public-methods
17 - refactor: too-few-public-methods
4 - warning: unused-import
|
1 from django.test import TestCase
2
3
4 class SampleTest(TestCase):
5 def test_fail(self):
6 self.assertEqual(2, 1)
| 4 - refactor: too-few-public-methods
|
1 from .imports import *
2 from .torch_imports import *
3 from .core import *
4
5 import IPython, graphviz
6 from concurrent.futures import ProcessPoolExecutor
7
8 import sklearn_pandas, sklearn, warnings
9 from sklearn_pandas import DataFrameMapper
10 from sklearn.preprocessing import LabelEncoder, Imputer, StandardScaler
11 from pandas.api.types import is_string_dtype, is_numeric_dtype
12 from sklearn.ensemble import forest
13 from sklearn.tree import export_graphviz
14
15
16 def set_plot_sizes(sml, med, big):
17 plt.rc('font', size=sml) # controls default text sizes
18 plt.rc('axes', titlesize=sml) # fontsize of the axes title
19 plt.rc('axes', labelsize=med) # fontsize of the x and y labels
20 plt.rc('xtick', labelsize=sml) # fontsize of the tick labels
21 plt.rc('ytick', labelsize=sml) # fontsize of the tick labels
22 plt.rc('legend', fontsize=sml) # legend fontsize
23 plt.rc('figure', titlesize=big) # fontsize of the figure title
24
25 def parallel_trees(m, fn, n_jobs=8):
26 return list(ProcessPoolExecutor(n_jobs).map(fn, m.estimators_))
27
28 def draw_tree(t, df, size=10, ratio=0.6, precision=0):
29 s=export_graphviz(t, out_file=None, feature_names=df.columns, filled=True,
30 special_characters=True, rotate=True, precision=precision)
31 IPython.display.display(graphviz.Source(re.sub('Tree {',
32 f'Tree {{ size={size}; ratio={ratio}', s)))
33
34 def combine_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,
35 seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):
36 years = np.asarray(years) - 1970
37 months = np.asarray(months) - 1
38 days = np.asarray(days) - 1
39 types = ('<M8[Y]', '<m8[M]', '<m8[D]', '<m8[W]', '<m8[h]',
40 '<m8[m]', '<m8[s]', '<m8[ms]', '<m8[us]', '<m8[ns]')
41 vals = (years, months, days, weeks, hours, minutes, seconds,
42 milliseconds, microseconds, nanoseconds)
43 return sum(np.asarray(v, dtype=t) for t, v in zip(types, vals)
44 if v is not None)
45
46 def get_nn_mappers(df, cat_vars, contin_vars):
47 # Replace nulls with 0 for continuous, "" for categorical.
48 for v in contin_vars: df[v] = df[v].fillna(df[v].max()+100,)
49 for v in cat_vars: df[v].fillna('#NA#', inplace=True)
50
51 # list of tuples, containing variable and instance of a transformer for that variable
52 # for categoricals, use LabelEncoder to map to integers. For continuous, standardize
53 cat_maps = [(o, LabelEncoder()) for o in cat_vars]
54 contin_maps = [([o], StandardScaler()) for o in contin_vars]
55 return DataFrameMapper(cat_maps).fit(df), DataFrameMapper(contin_maps).fit(df)
56
57 def get_sample(df,n):
58 idxs = sorted(np.random.permutation(len(df)))
59 return df.iloc[idxs[:n]].copy()
60
61 def add_datepart(df, fldname):
62 fld = df[fldname]
63 targ_pre = re.sub('[Dd]ate$', '', fldname)
64 for n in ('Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
65 'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start'):
66 df[targ_pre+n] = getattr(fld.dt,n.lower())
67 df[targ_pre+'Elapsed'] = fld.astype(np.int64) // 10**9
68 df.drop(fldname, axis=1, inplace=True)
69
70 def is_date(x): return np.issubdtype(x.dtype, np.datetime64)
71
72 def train_cats(df):
73 for n,c in df.items():
74 if is_string_dtype(c): df[n] = c.astype('category').cat.as_ordered()
75
76 def apply_cats(df, trn):
77 for n,c in df.items():
78 if trn[n].dtype.name=='category':
79 df[n] = pd.Categorical(c, categories=trn[n].cat.categories, ordered=True)
80
81 def fix_missing(df, col, name, na_dict):
82 if is_numeric_dtype(col):
83 if pd.isnull(col).sum() or (name in na_dict):
84 df[name+'_na'] = pd.isnull(col)
85 filler = na_dict[name] if name in na_dict else col.median()
86 df[name] = col.fillna(filler)
87 na_dict[name] = filler
88 return na_dict
89
90 def numericalize(df, col, name, max_n_cat):
91 if not is_numeric_dtype(col) and ( max_n_cat is None or col.nunique()>max_n_cat):
92 df[name] = col.cat.codes+1
93
94 def scale_vars(df):
95 warnings.filterwarnings('ignore', category=sklearn.exceptions.DataConversionWarning)
96 map_f = [([n],StandardScaler()) for n in df.columns if is_numeric_dtype(df[n])]
97 mapper = DataFrameMapper(map_f).fit(df)
98 df[mapper.transformed_names_] = mapper.transform(df)
99 return mapper
100
101 def proc_df(df, y_fld, skip_flds=None, do_scale=False, na_dict=None,
102 preproc_fn=None, max_n_cat=None, subset=None):
103 if not skip_flds: skip_flds=[]
104 if subset: df = get_sample(df,subset)
105 df = df.copy()
106 if preproc_fn: preproc_fn(df)
107 y = df[y_fld].values
108 df.drop(skip_flds+[y_fld], axis=1, inplace=True)
109
110 if na_dict is None: na_dict = {}
111 for n,c in df.items(): na_dict = fix_missing(df, c, n, na_dict)
112 if do_scale: mapper = scale_vars(df)
113 for n,c in df.items(): numericalize(df, c, n, max_n_cat)
114 res = [pd.get_dummies(df, dummy_na=True), y, na_dict]
115 if do_scale: res = res + [mapper]
116 return res
117
118
119 def rf_feat_importance(m, df):
120 return pd.DataFrame({'cols':df.columns, 'imp':m.feature_importances_}
121 ).sort_values('imp', ascending=False)
122
123 def set_rf_samples(n):
124 forest._generate_sample_indices = (lambda rs, n_samples:
125 forest.check_random_state(rs).randint(0, n_samples, n))
126
127 def reset_rf_samples():
128 forest._generate_sample_indices = (lambda rs, n_samples:
129 forest.check_random_state(rs).randint(0, n_samples, n_samples))
130
| 26 - warning: bad-indentation
1 - error: relative-beyond-top-level
1 - warning: wildcard-import
2 - error: relative-beyond-top-level
2 - warning: wildcard-import
3 - error: relative-beyond-top-level
3 - warning: wildcard-import
17 - error: undefined-variable
18 - error: undefined-variable
19 - error: undefined-variable
20 - error: undefined-variable
21 - error: undefined-variable
22 - error: undefined-variable
23 - error: undefined-variable
31 - error: undefined-variable
34 - refactor: too-many-arguments
34 - refactor: too-many-positional-arguments
36 - error: undefined-variable
37 - error: undefined-variable
38 - error: undefined-variable
43 - error: undefined-variable
58 - error: undefined-variable
63 - error: undefined-variable
67 - error: undefined-variable
70 - error: undefined-variable
70 - error: undefined-variable
79 - error: undefined-variable
83 - error: undefined-variable
84 - error: undefined-variable
101 - refactor: too-many-arguments
101 - refactor: too-many-positional-arguments
114 - error: undefined-variable
120 - error: undefined-variable
124 - warning: protected-access
128 - warning: protected-access
8 - warning: unused-import
10 - warning: unused-import
|
1
2 import json
3 import csv
4 import re
5
6 def load_json(path):
7 '''
8 Loads collected data in json format, checks it and then converts to csv format
9
10 Input: path - path and file name to the collected json data (type: string)
11
12 Output: keys - list of features/keys of the dataframe (type: list of strings)
13 df_list - list containing all the dataframes from the json data (type: list of dataframes)
14 '''
15 if not path.endswith('.json'):
16 print('File path not JSON file...')
17 return None
18
19 with open(path, 'r', encoding='utf8') as handle:
20 df_list = [json.loads(line) for line in handle]
21
22 nr_keys = [len(df_list[i].keys()) for i in range(len(df_list))]
23 if not all(k == nr_keys[0] for k in nr_keys):
24 print('Some features missing, review the data!')
25 return None
26
27 else:
28 keys = df_list[0].keys()
29 return keys, df_list
30
31
32 def combine_and_label(paths, labels, train=True):
33 '''
34 Combining multiple collections of data files and adds corresponding label (i.e depressive or non-depressive).
35 List of labels in correct order with respect to the paths order must be specified manually
36
37 Input: paths - list containing all the paths to the json files (type: list of strings)
38 labels - list containing all the labels to the corresponding json files (type: list of strings)
39
40 Output: df_list - list of all the combined dataframes from the json data (type: list of dataframes)
41 '''
42
43 if not type(paths)==type(list()):
44 print('"paths" argument is not of type list! Please pass list of the paths to the collected data to be combined!')
45 return None
46 if train:
47 if not len(paths) == len(labels):
48 print(f'Number of datafile paths of {len(paths)} is not the same as number of labels of {len(labels)}!')
49 return None
50
51 df_list = []
52 for idx, path in enumerate(paths):
53 try:
54 curr_keys, curr_df_list = load_json(path)
55 except Exception as e:
56 print(f'Unable to load data from path "{path}", check path name and file!')
57 print(f'Exception:\n{e}')
58 return None
59 for df in curr_df_list:
60 if train:
61 df['label'] = labels[idx]
62 df_list.append(df)
63
64 return df_list
65
66
67
68 def datacleaning(paths, labels, hashtags_to_remove = [], save_path=None, train=True):
69 '''
70 Cleans the data based on unwanted hashtags, duplication of tweets occured due
71 to sharing of keywords, removal of mentions, urls, non-english alphabetic tokens
72 and empty tweets obtained after cleaning
73
74 Input: paths - list containing all the paths to the json files (type: list of strings)
75 labels - list containing all the labels to the corresponding json files (type: list of strings)
76 hashtags_to_remove - list containing hashtags wished to be removed (type: list of strings)
77 save_path - path and file name to were to save the cleaned dataset (type: string or None)
78 train - specify if it is training mode or not, i.e if to use labels or not (type: boolean)
79
80 Output: dataset_doc - list of all the text documents and corresponding labels if train (type: list of strings)
81 keys - list of features/keys of the dataframe (type: list of strings)
82
83 '''
84 if len(labels) > 0:
85 train = True
86
87 df_list = combine_and_label(paths, labels, train=train)
88
89 # Remove tweets with specific hashtags
90 nr_removed_tweets = 0
91 for idx, df in enumerate(df_list):
92 hashtags = df.copy()['hashtags']
93 if any([h in hashtags_to_remove for h in hashtags]):
94 df_list.pop(idx)
95 print(f'Tweet nr {idx} removed!')
96 nr_removed_tweets += 1
97
98 print(f'Removed total of {nr_removed_tweets} tweets')
99
100 # Removes duplicate of tweets
101 unique_ids = {}
102 for idx, df in enumerate(df_list):
103 tweet_id = df.copy()['id']
104 if not tweet_id in unique_ids:
105 unique_ids[str(tweet_id)] = 1
106 else:
107 print('Found douplicate of tweet id, removing the duplicate!')
108 df_list.pop(idx)
109
110
111 # Cleaning the tweet texts
112 for idx, df in enumerate(df_list):
113 tweet = df.copy()['tweet']
114 # Removing URLs
115 tweet = re.sub(r"http\S+", " ", tweet)
116 tweet = re.sub(r"\S+\.com\S", " ", tweet)
117
118 # Remove mentions
119 tweet = re.sub(r'\@\w+', ' ', tweet)
120
121 # Remove non-alphabetic tokens
122 tweet = re.sub('[^A-Za-z]', ' ', tweet.lower())
123
124 # Remove double spacings
125 tweet = re.sub(' +', ' ', tweet)
126
127 # Remove from dataset if tweet empty after cleaning
128 if tweet == 0:
129 df_list.pop(idx)
130 else:
131 df['tweet'] = tweet
132
133 print('Successfully cleaned data!')
134
135
136 # Saving list of tweet dicts to csv format
137
138 if save_path:
139 print(f'Saving data...')
140 if not save_path.endswith('.csv'):
141 print('Save path is missing .csv format extension!')
142 save_path = save_path + '.csv'
143 try:
144 with open(save_path, 'w', encoding='utf8', newline='') as output_file:
145 csv_file = csv.DictWriter(output_file,
146 fieldnames=df_list[0].keys(),
147 )
148
149 csv_file.writeheader()
150 csv_file.writerows(df_list)
151 print(f'Data succesfully saved to "{save_path}"')
152
153 except Exception as e:
154 print(f'Unable to save data to "{save_path}", check the path and data!')
155 print(f'Exception:\n{e}')
156
157 dataset_docs = [df['tweet'] for df in df_list]
158 keys = df_list[0].keys()
159 if train:
160 dataset_labels = [df['label'] for df in df_list]
161 return [dataset_docs, dataset_labels], keys
162 else:
163 return dataset_docs, keys
| 23 - refactor: no-else-return
43 - refactor: use-list-literal
55 - warning: broad-exception-caught
54 - warning: unused-variable
68 - warning: dangerous-default-value
68 - refactor: too-many-locals
93 - refactor: use-a-generator
139 - warning: f-string-without-interpolation
153 - warning: broad-exception-caught
159 - refactor: no-else-return
68 - refactor: too-many-branches
|
1 import torch
2 import torch.nn as nn
3 import preprocessing
4 import os
5 import numpy as np
6
7
8 class ModelUtils:
9 '''
10 A utility class to save and load model weights
11 '''
12 def save_model(save_path, model):
13 root, ext = os.path.splitext(save_path)
14 if not ext:
15 save_path = root + '.pth'
16 try:
17 torch.save(model.state_dict(), save_path)
18 print(f'Successfully saved to model to "{save_path}"!')
19 except Exception as e:
20 print(f'Unable to save model, check save path!')
21 print(f'Exception:\n{e}')
22 return None
23
24 def load_model(load_path, model):
25 try:
26 model.load_state_dict(torch.load(load_path))
27 print(f'Successfully loaded the model from path "{load_path}"')
28
29 except Exception as e:
30 print(f'Unable to load the weights, check if different model or incorrect path!')
31 print(f'Exception:\n{e}')
32 return None
33
34 class RNNModel(nn.Module):
35 '''
36 RNN classifier with different available RNN types (basic RNN, LSTM, GRU)
37 '''
38
39 def __init__(self, rnn_type, nr_layers, voc_size, emb_dim, rnn_size, dropout, n_classes):
40 '''
41 Initiates the RNN model
42
43 Input: rnn_type - specifies the rnn model type between "rnn", "lstm" or "gru" (type: string)
44 nr_layers - number of rnn layers (type: int)
45 voc_size - size of vocabulary of the encoded input data (type: int)
46 emb_dim - size of embedding layer (type: int)
47 rnn_size - number of hidden layers in RNN model (type: int)
48 dropout - probability of dropout layers (type: float in between [0, 1])
49 n_classes - number of different classes/labels (type: int)
50 '''
51 super().__init__()
52 self.rnn_size = rnn_size
53 self.rnn_type = rnn_type
54 self.nr_layers = nr_layers
55 self.embedding = nn.Embedding(voc_size, emb_dim)
56
57 if self.rnn_type == 'rnn':
58 self.rnn = nn.RNN(input_size=emb_dim, hidden_size=rnn_size, dropout=dropout if nr_layers > 1 else 0,
59 bidirectional=False, num_layers=nr_layers, batch_first=True)
60
61 elif self.rnn_type == 'lstm':
62 self.rnn = nn.LSTM(input_size=emb_dim, hidden_size=rnn_size, dropout=dropout if nr_layers > 1 else 0,
63 bidirectional=False, num_layers=nr_layers, batch_first=True)
64
65 elif self.rnn_type == 'gru':
66 self.rnn = nn.GRU(input_size=emb_dim, hidden_size=rnn_size, dropout=dropout if nr_layers > 1 else 0,
67 bidirectional=False, num_layers=nr_layers, batch_first=True)
68
69 else:
70 print('Invalid or no choice for RNN type, please choose one of "rnn", "lstm" or "gru"')
71
72
73 self.dropout = nn.Dropout(dropout)
74 self.linear = nn.Linear(in_features=rnn_size, out_features=n_classes)
75 self.sigmoid = nn.Sigmoid()
76
77 def forward(self, X, hidden):
78 '''
79 Forward propagation of the RNN model
80
81 Input: X - batch of input data (type: torch tensor)
82 hidden - batch of input to the hidden cells (type: torch tensor)
83
84 Output: out - model prediction (type: torch tensor)
85 hidden - output of the hidden cells (torch.tensor)
86 '''
87 self.batch_size = X.size(0)
88 embedded = self.embedding(X)
89
90
91 if self.rnn_type == 'rnn' or self.rnn_type == 'gru':
92 rnn_out, hidden = self.rnn(embedded, hidden)
93
94 elif self.rnn_type == 'lstm':
95 rnn_out, hidden = self.rnn(embedded, hidden)
96
97 else:
98 print(f'Invalid rnn type! Rebuild the model with a correct rnn type!')
99 return None
100
101 rnn_out = rnn_out.contiguous().view(-1, self.rnn_size)
102 drop = self.dropout(rnn_out)
103 out = self.linear(drop)
104 out = self.sigmoid(out)
105 # reshape such that batch size is first and get labels of last batch
106 out = out.view(self.batch_size, -1)
107 out = out[:, -1]
108
109 return out, hidden
110
111 def init_hidden(self, batch_size, device):
112 '''
113 Initializes hidden state
114 '''
115 # initialized to zero, for hidden state and cell state of LSTM
116 h0 = torch.zeros((self.nr_layers, batch_size, self.rnn_size)).to(device)
117 c0 = torch.zeros((self.nr_layers, batch_size, self.rnn_size)).to(device)
118 hidden = (h0, c0)
119 return hidden
120
121
122
| 2 - refactor: consider-using-from-import
12 - error: no-self-argument
15 - warning: self-cls-assignment
19 - warning: broad-exception-caught
20 - warning: f-string-without-interpolation
12 - refactor: inconsistent-return-statements
12 - refactor: useless-return
24 - error: no-self-argument
29 - warning: broad-exception-caught
30 - warning: f-string-without-interpolation
24 - refactor: inconsistent-return-statements
24 - refactor: useless-return
34 - refactor: too-many-instance-attributes
39 - refactor: too-many-arguments
39 - refactor: too-many-positional-arguments
91 - refactor: consider-using-in
98 - warning: f-string-without-interpolation
87 - warning: attribute-defined-outside-init
3 - warning: unused-import
5 - warning: unused-import
|
1 # NOTE: TWINT NEEDS TO BE INSTALLEED BY THE FOLLOWING COMMAND:
2 # pip install --user --upgrade git+https://github.com/twintproject/twint.git@origin/master#egg=twint
3 # OTHERWISE IT WON'T WORK
4
5
6 import twint
7 import nest_asyncio
8 nest_asyncio.apply()
9 from dateutil import rrule
10 from datetime import datetime, timedelta
11
12 def get_weeks(start_date, end_date):
13 '''
14 Finds collection of weeks chronologically from a starting date to a final date
15
16 Input: start_date - date of which to start collecting with format [year, month, day] (type: list of ints)
17 end_date - date of which to stop collecting with format [year, month, day] (type: list of ints)
18
19 Output: weeks - list containing the lists of starting and ending date for each week with format
20 "%Y-%m-%d %h-%m-%s" (type: list of lists of strings)
21 '''
22 start_year, start_month, start_day = start_date
23 final_year, final_month, final_day = end_date
24 start = datetime(start_year, start_month, start_day)
25 end = datetime(final_year, final_month, final_day)
26 dates = rrule.rrule(rrule.WEEKLY, dtstart=start, until=end)
27 nr_weeks = 0
28 for _ in dates:
29 nr_weeks+=1
30 weeks = []
31 for idx, dt in enumerate(dates):
32 if idx < nr_weeks-1:
33 week = [dates[idx].date().strftime('%Y-%m-%d %H:%M:%S'),
34 dates[idx+1].date().strftime('%Y-%m-%d %H:%M:%S')]
35 weeks.append(week)
36
37 return weeks
38
39
40
41 def collect_tweets(keywords = None, nr_tweets = None,
42 output_file=None, coord=None, timespan=[None, None]):
43 '''
44 Collectiing tweets using twint based on different attributes and save to json file
45
46 Input: keywords - keywords that the tweet should contain (type: string)
47 nr_tweets - number of tweets to collect (type: int)
48 output_file - path and name to where the file should be saved (type: string, extension: .json)
49 near - location or city of which the tweets were tweeted (type: string)
50 timespan - timespan of when the tweet was tweeted in format "%Y-%m-%d %h-%m-%s" (type: string)
51
52 Output: Returns twint object
53 '''
54 # configuration
55 config = twint.Config()
56 # Search keyword
57 config.Search = keywords
58 # Language
59 config.Lang = "en"
60 # Number of tweets
61 config.Limit = nr_tweets
62 #Dates
63 config.Since = timespan[0]
64 config.Until = timespan[1]
65 # Output file format (alternatives: json, csv, SQLite)
66 config.Store_json = True
67 # Name of output file with format extension (i.e NAME.json, NAME.csv etc)
68 config.Output = output_file
69
70 config.Geo = coord
71
72 # running search
73 twint.run.Search(config)
74 return twint
75
76
77 # EXAMPLE
78 def test():
79 config = twint.Config()
80 config.Search = None
81 config.Near = "london"
82 config.Lang = "en"
83 config.Limit = 10
84 config.Since = "2016-10-29 00:00:00"
85 config.Until = "2016-11-29 12:15:19"
86 config.Store_json = True
87 config.Output = "test2.json"
88
89 #running search
90 twint.run.Search(config)
91
92
93 #test()
| 12 - refactor: too-many-locals
31 - warning: unused-variable
41 - warning: dangerous-default-value
10 - warning: unused-import
|
1 import train
2 import preprocessing
3
4
5 def run():
6 '''
7 Training function to run the training process after specifying parameters
8 '''
9
10 preprocessing.config.paths = ['./training_data/depressive1.json',
11 './training_data/depressive2.json',
12 './training_data/depressive3.json',
13 './training_data/depressive4.json',
14 './training_data/depressive5.json',
15 './training_data/depressive6.json',
16 './training_data/non-depressive1.json',
17 './training_data/non-depressive2.json',
18 './training_data/non-depressive3.json',
19 './training_data/non-depressive4.json',
20 './training_data/non-depressive5.json',
21 './training_data/non-depressive6.json']
22
23 preprocessing.config.save_path = './training_data/all_training_data.csv'
24
25
26 preprocessing.config.labels = ['depressive', 'depressive', 'depressive', 'depressive', 'depressive', 'depressive',
27 'not-depressive', 'not-depressive', 'not-depressive', 'not-depressive',
28 'not-depressive', 'not-depressive']
29
30 preprocessing.config.keywords = ['depressed', 'lonely', 'sad', 'depression', 'tired', 'anxious',
31 'happy', 'joy', 'thankful', 'health', 'hopeful', 'glad']
32
33 preprocessing.config.nr_of_tweets = [1000, 1000, 1000, 1000, 1000, 1000,
34 1000, 1000, 1000, 1000, 1000, 1000]
35
36 history, early_stop_check = train.train_rnn(save_path='./weights/lstm_model_2.pth', collect=True) # Collect=False if already collected data
37
38 train.show_progress(history=history, save_name='./plots/training_progress.png')
39
40 train.animate_progress(history=history, save_path='./plots/training_animation_progress_REAL.gif',
41 early_stop_check=early_stop_check)
42
43 run()
44
45
| Clean Code: No Issues Detected
|
1 from dotenv import load_dotenv
2 import os
3 import redis
4
5 load_dotenv()
6
7 class ApplicationConfig:
8 SECRET_KEY = os.environ["SECRET_KEY"]
9
10 SQLALCHEMY_TRACK_MODIFICATIONS = False
11 SQLALCHEMY_ECHO = True
12 SQLALCHEMY_DATABASE_URI = r"sqlite:///./db.sqlite"
13
14 SESSION_TYPE = "redis"
15 SESSION_PERMANENT = False
16 SESSION_USE_SIGNER = True
17 SESSION_REDIS = redis.from_url("redis://127.0.0.1:6379") | 7 - refactor: too-few-public-methods
|
1 from flask import Flask, request, jsonify, session
2 from flask_bcrypt import Bcrypt
3 from flask_cors import CORS, cross_origin
4 from flask_session import Session
5 from config import ApplicationConfig
6 from models import db, User
7
8 app = Flask(__name__)
9 app.config.from_object(ApplicationConfig)
10
11 bcrypt = Bcrypt(app)
12 CORS(app, supports_credentials=True)
13 server_session = Session(app)
14 db.init_app(app)
15
16 with app.app_context():
17 db.create_all()
18
19 @app.route("/@me")
20 def get_current_user():
21 user_id = session.get("user_id")
22
23 if not user_id:
24 return jsonify({"error": "Unauthorized"}), 401
25
26 user = User.query.filter_by(id=user_id).first()
27 return jsonify({
28 "id": user.id,
29 "email": user.email
30 })
31
32 @app.route("/register", methods=["POST"])
33 def register_user():
34 email = request.json["email"]
35 password = request.json["password"]
36
37 user_exists = User.query.filter_by(email=email).first() is not None
38
39 if user_exists:
40 return jsonify({"error": "User already exists"}), 409
41
42 hashed_password = bcrypt.generate_password_hash(password)
43 new_user = User(email=email, password=hashed_password)
44 db.session.add(new_user)
45 db.session.commit()
46
47 session["user_id"] = new_user.id
48
49 return jsonify({
50 "id": new_user.id,
51 "email": new_user.email
52 })
53
54 @app.route("/login", methods=["POST"])
55 def login_user():
56 email = request.json["email"]
57 password = request.json["password"]
58
59 user = User.query.filter_by(email=email).first()
60
61 if user is None:
62 return jsonify({"error": "Unauthorized"}), 401
63
64 if not bcrypt.check_password_hash(user.password, password):
65 return jsonify({"error": "Unauthorized"}), 401
66
67 session["user_id"] = user.id
68
69 return jsonify({
70 "id": user.id,
71 "email": user.email
72 })
73
74 @app.route("/logout", methods=["POST"])
75 def logout_user():
76 session.pop("user_id")
77 return "200"
78
79 if __name__ == "__main__":
80 app.run(debug=True) | 3 - warning: unused-import
|
1 from flask_sqlalchemy import SQLAlchemy
2 from uuid import uuid4
3
4 db = SQLAlchemy()
5
6 def get_uuid():
7 return uuid4().hex
8
9 class User(db.Model):
10 __tablename__ = "users"
11 id = db.Column(db.String(32), primary_key=True, unique=True, default=get_uuid)
12 email = db.Column(db.String(345), unique=True)
13 password = db.Column(db.Text, nullable=False)
| 9 - refactor: too-few-public-methods
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Sat Mar 3 11:30:41 2018
4
5 @author: ERIC
6 """
7 import numpy as np
8 import lmfit
9 from epg import cpmg_epg_b1 as cpmg_epg_b1_c
10
11 from scipy import integrate
12
13 mxyz90 = np.fromfile( 'epg/mxyz90.txt', sep=' ' )
14 mxyz180 = np.fromfile('epg/mxyz180.txt', sep=' ')
15
16 mxyz90 = mxyz90.reshape(5,512)
17 mxyz180 = mxyz180.reshape(5,512)
18
19 offset=130
20 step=10
21 epg_slice_xxx =mxyz90[0][offset:-offset+step:step] # mm
22 epg_p90 = mxyz90[-1][offset:-offset+step:step] # degrees
23 epg_p180 = mxyz180[-1][offset:-offset+step:step] # degrees
24 epg_dx=epg_slice_xxx[1]-epg_slice_xxx[0]
25
26
27 def fit_cpmg_epg_muscle_philips_hargreaves_c( params, xxx, dx, p90_array, p180_array, yyy_exp=None):
28
29 parvals = params.valuesdict()
30
31 T1fat = parvals[ 'T1fat' ] # fixed
32 T1muscle = parvals[ 'T1muscle' ] # fixed
33 echo = parvals[ 'echo' ] # fixed
34 T2fat = parvals[ 'T2fat' ] # fixed/optimized
35 T2muscle = parvals['T2muscle'] # optimized
36 Afat = parvals[ 'Afat'] # optimized
37 Amuscle = parvals['Amuscle'] # optimized
38 B1scale = parvals['B1scale']
39
40 Nechos = len(xxx)
41 Ngauss = len(p90_array)
42
43 signal = np.zeros([Ngauss,Nechos])
44 fat_signal = np.zeros(Nechos)
45 muscle_signal = np.zeros(Nechos)
46
47 for i,(p90,p180) in enumerate(zip(p90_array,p180_array)):
48
49 cpmg_epg_b1_c( fat_signal, p90, p180, T1fat, T2fat, echo, B1scale )
50 cpmg_epg_b1_c( muscle_signal, p90, p180, T1muscle, T2muscle, echo, B1scale )
51
52 signal[i] = Afat*fat_signal+Amuscle*muscle_signal
53
54 int_signal = integrate.simps(signal, dx=dx,axis=0)
55 if isinstance(yyy_exp, np.ndarray):
56 return( int_signal-yyy_exp)
57 else:
58 return(int_signal)
59
60
61
62 def calculate_T2values_on_slice_muscleEPG(lmparams, yyy_exp):
63
64 # params = lmfit.Parameters()
65 # params.add('T2fat', value = 180.0, min=0, max=5000, vary=False)
66 # params.add('T2muscle', value = 35, min=0, max=100, vary=True )
67 # params.add('Afat', value = 0.01, min=0, max=10, vary=True )
68 # params.add('Amuscle', value = 0.1, min=0, max=10, vary=True )
69 # params.add('T1fat', value = 365.0, vary=False)
70 # params.add('T1muscle', value = 1400, vary=False)
71 # params.add('echo', value = 10.0, vary=False)
72
73 params = lmparams['epgt2fitparams']
74 echo_time = params['echo'].value
75 num_echoes = yyy_exp.size
76
77 parvals = params.valuesdict()
78
79 print("parvals")
80 for k,v in parvals.items():
81 print(k,v)
82
83 print("EPG echo time =", echo_time)
84 xxx = np.linspace( echo_time, echo_time*num_echoes, num_echoes)
85 dx = xxx[1]-xxx[0]
86
87
88 yyy_exp_max =yyy_exp.max()
89 if yyy_exp_max == 0:
90 yyy_exp_max = 1.0
91 yyy_exp_norm = yyy_exp/yyy_exp_max
92
93 fitModel = lmfit.Minimizer(fit_cpmg_epg_muscle_philips_hargreaves_c, lmparams['epgt2fitparams'], fcn_args=( xxx, dx, epg_p90, epg_p180, yyy_exp_norm))
94 results = fitModel.minimize()
95
96 fit_plot = np.zeros(num_echoes)
97
98 if results.success:
99 fit_plot = results.residual + yyy_exp_norm
100
101 return( fit_plot, yyy_exp_norm, results, xxx)
102
103
104
105
106 def calculate_T2values_on_slice_muscleAzz(lmparams, yyy_exp):
107
108 params = lmparams['azzt2fitparams']
109 echo_time = params['echo'].value
110 num_echoes = yyy_exp.size
111
112 model = lmfit.models.ExpressionModel('Afat * (c_l*exp(-x/t2_fl)+c_s*exp(-x/t2_fs)) + Amuscle * (exp(-x/T2muscle))')
113
114 parvals = params.valuesdict()
115 print("parvals")
116 for k,v in parvals.items():
117 print(k,v)
118
119 print("azzabou echo time", echo_time)
120
121 # saved_output = {'T2muscle_value': [],
122 # 'T2muscle_stderr': [],
123 # 'Amuscle_value': [],
124 # 'Amuscle_stderr': [],
125 # 'Afat_value': [],
126 # 'Afat_stderr': [],
127 # 'chisqr': [],
128 # 'redchi':[],
129 # 'AIC':[],
130 # 'BIC':[],
131 # 'slice':[],
132 # 'pixel_index':[],
133 # }
134
135 xxx = np.linspace( echo_time, echo_time*num_echoes, num_echoes)
136
137
138 yyy_exp_max = yyy_exp.max()
139 fit_plot = np.zeros(num_echoes-2)
140 if yyy_exp_max == 0.0:
141 yyy_exp_max = 1.0
142 yyy_exp_norm = yyy_exp/yyy_exp_max
143
144
145 print("fitting data")
146
147 results = model.fit(yyy_exp_norm[2:] , x=xxx[2:], params=lmparams['azzt2fitparams'])
148 #mi.plot()
149 #saved_output['name'].append('t2_m')
150 # saved_output['T2muscle_value'].append(results.params['T2muscle'].value)
151 # saved_output['T2muscle_stderr'].append(results.params['T2muscle'].stderr)
152 # saved_output['chisqr'].append(results.chisqr)
153 # saved_output['redchi'].append(results.redchi)
154 # saved_output['AIC'].append(results.aic)
155 # saved_output['BIC'].append(results.bic)
156 #
157 #
158 # saved_output['Amuscle_value'].append(results.params['Amuscle'].value)
159 # saved_output['Amuscle_stderr'].append(results.params['Amuscle'].stderr)
160
161 # saved_output['Afat_value'].append(results.params['Afat'].value)
162 # saved_output['Afat_stderr'].append(results.params['Afat'].stderr)
163
164 fit_plot = results.residual + yyy_exp_norm[2:]
165
166 return( fit_plot, yyy_exp_norm, results, xxx) | 27 - refactor: too-many-arguments
27 - refactor: too-many-positional-arguments
27 - refactor: too-many-locals
55 - refactor: no-else-return
|
1 from generated import summer_pb2, summer_pb2_grpc
2 import grpc
3 import sys
4 import typing
5
6
7 def run(values: typing.List[int]):
8 with grpc.insecure_channel('localhost:50051') as channel:
9 stub = summer_pb2_grpc.SummerStub(channel)
10
11 summed = stub.Sum(summer_pb2.ToSum(values=values))
12 print(summed)
13
14
15 if __name__ == "__main__":
16 values = [float(_) for _ in sys.argv[1:]]
17 run(values)
| 7 - warning: redefined-outer-name
|
1 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
2 import grpc
3
4 from generated import summer_pb2 as generated_dot_summer__pb2
5
6
7 class SummerStub(object):
8 """Missing associated documentation comment in .proto file"""
9
10 def __init__(self, channel):
11 """Constructor.
12
13 Args:
14 channel: A grpc.Channel.
15 """
16 self.Sum = channel.unary_unary(
17 '/Summer/Sum',
18 request_serializer=generated_dot_summer__pb2.ToSum.SerializeToString,
19 response_deserializer=generated_dot_summer__pb2.Summed.FromString,
20 )
21
22
23 class SummerServicer(object):
24 """Missing associated documentation comment in .proto file"""
25
26 def Sum(self, request, context):
27 """Missing associated documentation comment in .proto file"""
28 context.set_code(grpc.StatusCode.UNIMPLEMENTED)
29 context.set_details('Method not implemented!')
30 raise NotImplementedError('Method not implemented!')
31
32
33 def add_SummerServicer_to_server(servicer, server):
34 rpc_method_handlers = {
35 'Sum': grpc.unary_unary_rpc_method_handler(
36 servicer.Sum,
37 request_deserializer=generated_dot_summer__pb2.ToSum.FromString,
38 response_serializer=generated_dot_summer__pb2.Summed.SerializeToString,
39 ),
40 }
41 generic_handler = grpc.method_handlers_generic_handler(
42 'Summer', rpc_method_handlers)
43 server.add_generic_rpc_handlers((generic_handler,))
44
45
46 # This class is part of an EXPERIMENTAL API.
47 class Summer(object):
48 """Missing associated documentation comment in .proto file"""
49
50 @staticmethod
51 def Sum(request,
52 target,
53 options=(),
54 channel_credentials=None,
55 call_credentials=None,
56 compression=None,
57 wait_for_ready=None,
58 timeout=None,
59 metadata=None):
60 return grpc.experimental.unary_unary(request, target, '/Summer/Sum',
61 generated_dot_summer__pb2.ToSum.SerializeToString,
62 generated_dot_summer__pb2.Summed.FromString,
63 options, channel_credentials,
64 call_credentials, compression, wait_for_ready, timeout, metadata)
| 7 - refactor: useless-object-inheritance
7 - refactor: too-few-public-methods
23 - refactor: useless-object-inheritance
26 - warning: unused-argument
23 - refactor: too-few-public-methods
47 - refactor: useless-object-inheritance
51 - refactor: too-many-arguments
51 - refactor: too-many-positional-arguments
47 - refactor: too-few-public-methods
|
1 import pytest
2
3
4 @pytest.fixture(scope='module')
5 def grpc_add_to_server():
6 from generated.summer_pb2_grpc import add_SummerServicer_to_server
7 return add_SummerServicer_to_server
8
9
10 @pytest.fixture(scope='module')
11 def grpc_servicer():
12 from server import SummerServicer
13 return SummerServicer()
14
15
16 @pytest.fixture(scope='module')
17 def grpc_stub_cls(grpc_channel):
18 from generated.summer_pb2_grpc import SummerStub
19 return SummerStub
| 17 - warning: unused-argument
|
1 from concurrent import futures
2 from generated import summer_pb2, summer_pb2_grpc
3 import grpc
4 import logging
5
6
7 class SummerServicer(summer_pb2_grpc.SummerServicer):
8
9 def Sum(self, request: summer_pb2.ToSum, context):
10 logging.info("SummerServicer.Sum(%s)", request)
11 s = sum(request.values)
12 return summer_pb2.Summed(sum=s)
13
14
15 def serve():
16 server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
17 summer_pb2_grpc.add_SummerServicer_to_server(
18 SummerServicer(), server)
19 server.add_insecure_port('[::]:50051')
20 server.start()
21 server.wait_for_termination()
22
23
24 if __name__ == "__main__":
25 logging.basicConfig(level=logging.INFO)
26 serve()
| 9 - warning: unused-argument
7 - refactor: too-few-public-methods
|
1 from generated.summer_pb2 import ToSum
2 import pytest
3
4
5 @pytest.mark.parametrize(["values", "expected"], [
6 ([], 0),
7 ([1.0], 1.0),
8 ([1.0, 1.0], 2.0),
9 ])
10 def test_some(grpc_stub, values, expected):
11 response = grpc_stub.Sum(ToSum(values=values))
12 assert response.sum == pytest.approx(expected)
| Clean Code: No Issues Detected
|
1 # encoding=utf-8
2
3 import re
4 import os
5 import utils
6 import urllib2
7 from sqlhelper import SqlHelper
8 from bs4 import BeautifulSoup as bs
9
10 class Crawler(object):
11 def __init__(self):
12 super(Crawler, self).__init__()
13 self.album_prefix = 'https://mm.taobao.com/self/album/open_album_list.htm?_charset=utf-8&user_id%20={0}&page={1}'
14 self.image_prefix = 'https://mm.taobao.com/album/json/get_album_photo_list.htm?user_id={0}&album_id={1}&page={2}'
15 self.image_pattern = re.compile('''img.*290x10000.jpg''', re.U)
16 self.image_name_pattern = re.compile('''"picId":"(.*?)"''', re.U)
17 self.model_pattern = re.compile('''<a class="lady-name" href="(.*?)".*>(.*?)</a>''', re.U)
18 self.album_pattern = re.compile('''.*album_id=(.*?)&.*''', re.U)
19 self.links = []
20 self.ids= []
21 self.names= []
22 self.sql = SqlHelper()
23
24
25 def readHtml(self, html):
26 response = urllib2.urlopen(html)
27 return response.read()
28
29 def getLinkIdAndNames(self, htmlData):
30 items = re.findall(self.model_pattern, htmlData)
31 self.links = [link for link, name in items]
32 self.names = [name.decode('gbk') for link, name in items]
33 self.ids = [link[link.index('=')+1:] for link in self.links]
34
35 def getAlbums(self):
36 for i, model_id in enumerate(self.ids):
37
38 utils.log('start downloading:%s' % self.names[i])
39 # print 'start downloading', self.names[i]
40
41 # 插入用户
42 command = self.sql.insert_data_to_users()
43 msg = (model_id, self.names[i], "",)
44
45 try:
46 self.sql.insert_data(command, msg, commit = True)
47 except Exception, e:
48 utils.log('insert users data errors')
49
50
51 for page in xrange(1, 10):
52 utils.log('current page:%s' % page)
53 # print 'current page', page
54
55 model_url = self.album_prefix.format(model_id, page)
56 soup = bs(self.readHtml(model_url), 'html.parser')
57 albums = soup.find_all('div', class_ = 'mm-photo-cell-middle')
58 if not albums:
59 break
60 for album in albums:
61 album_name = album.find('h4').a.string.strip().rstrip('.')
62 album_link= album.find('h4').a['href']
63 album_id = re.findall(self.album_pattern, album_link)[0]
64 album_create_time = album.find('p', class_ = 'mm-photo-date').string.strip(u'创建时间: ').strip(u'´´½¨Ê±¼ä:')
65 album_img_count = album.find('span', class_ = 'mm-pic-number').string.strip('()').strip(u'张').strip(u'ÕÅ')
66
67 # print ">>>>>>>>>>>>>>>>>>>>>>"
68 # print album.find('p', class_ = 'mm-photo-date').string
69 # print album_create_time
70 # print ">>>>>>>>>>>>>>>>>>>>>>"
71
72 # 插入相册
73 command = self.sql.insert_data_to_albums()
74 msg = (album_id, model_id, album_name, album_create_time, "", 1, album_img_count)
75 try:
76 self.sql.insert_data(command, msg, commit = True)
77 except Exception, e:
78 utils.log('insert albums data errors')
79
80
81 utils.log('start in album:%s, total size: %s' % (album_name, album_img_count))
82
83 self.getImages(model_id, album_id, album_img_count)
84
85
86 def getImages(self, model_id, album_id, image_count):
87 # print 'start downloading album', album_id, image_count, '张'
88 for page in xrange(1, (int(image_count)-1)/16+2):
89 link = self.image_prefix.format(model_id, album_id, page)
90 body = self.readHtml(link).decode('gbk')
91 images = re.findall(self.image_pattern, body)
92 # tried to use des as names, however, it duplicates times. So i chose pic ids.
93 names = re.findall(self.image_name_pattern, body)
94 for idx, image in enumerate(images):
95 image = image.replace('290', '620')
96 try:
97 img_url = ('http://'+image).replace('jpg_620x10000.jpg','jpg')
98 except Exception as e:
99 img_url = ('http://'+image)
100
101 # id , album_id , name , url , kind
102
103 # 插入图片
104 command = self.sql.insert_data_to_photos()
105 msg = (None, album_id, "", img_url, 1)
106 try:
107 self.sql.insert_data(command, msg, commit = True)
108 except Exception, e:
109 utils.log('insert photos data errors')
110
111
112 # print 'created photos success'
113
114
115 if __name__ == '__main__':
116 test_html = 'https://mm.taobao.com/json/request_top_list.htm?page={0}'
117 for page in xrange(1, 100):
118 c = Crawler()
119 data = c.readHtml(test_html.format(page))
120 c.getLinkIdAndNames(data)
121 c.getAlbums()
| 47 - error: syntax-error
|
1 # encoding=utf-8
2
3 import logging
4 import os
5 import config
6 import traceback
7 import datetime
8
9
10
11
12 # 自定义的日志输出
13 def log(msg, level = logging.DEBUG):
14 if not os.path.exists('log'):
15 os.makedirs('log')
16
17 logging.basicConfig(
18 filename = 'log/run.log',
19 format = '%(asctime)s: %(message)s',
20 level = logging.DEBUG
21 )
22 logging.log(level, msg)
23 print('%s [%s], msg:%s' % (datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), level, msg))
24
25 if level == logging.WARNING or level == logging.ERROR:
26 for line in traceback.format_stack():
27 print(line.strip())
28
29 for line in traceback.format_stack():
30 logging.log(level, line.strip())
31
32
33 def make_dir(dir):
34 log('make dir:%s' % dir)
35 if not os.path.exists(dir):
36 os.makedirs(dir)
| 25 - refactor: consider-using-in
33 - warning: redefined-builtin
5 - warning: unused-import
|
1 # coding=utf-8
2
3 import utils
4 import logging
5 import config
6 import pymysql
7
8 class SqlHelper(object):
9 def __init__(self):
10 self.conn = pymysql.connect(**config.database_config)
11 self.cursor = self.conn.cursor()
12
13 try:
14 self.conn.select_db(config.database)
15 except:
16 self.create_database()
17
18 self.conn.select_db(config.database)
19
20 def create_database(self):
21 try:
22 command = 'CREATE DATABASE IF NOT EXISTS %s DEFAULT CHARACTER SET \'utf8\' ' % config.database
23 utils.log('create_database command:%s' % command)
24 self.cursor.execute(command)
25 self.conn.commit()
26 except Exception, e:
27 utils.log('SqlHelper create_database exception:%s' % str(e), logging.WARNING)
28
29 def create_table(self, command):
30 try:
31 utils.log('create_table command:%s' % command)
32 x = self.cursor.execute(command)
33 self.conn.commit()
34 return x
35 except Exception, e:
36 utils.log('create_table exception:%s' % str(e), logging.WARNING)
37
38 def insert_data(self, command, data, commit = False):
39 try:
40 # utils.log('insert_data command:%s, data:%s' % (command, data))
41 x = self.cursor.execute(command, data)
42 if commit:
43 self.conn.commit()
44 return x
45 except Exception, e:
46 utils.log('insert_data exception msg:%s' % str(e), logging.WARNING)
47
48 def commit(self):
49 self.conn.commit()
50
51 def execute(self, command, commit = True):
52 try:
53 utils.log('execute command:%s' % command)
54 data = self.cursor.execute(command)
55 if commit:
56 self.conn.commit()
57 return data
58 except Exception, e:
59 utils.log('execute exception msg:%s' % str(e))
60 return None
61
62 def query(self, command, commit = False):
63 try:
64 utils.log('execute command:%s' % command)
65
66 self.cursor.execute(command)
67 data = self.cursor.fetchall()
68 if commit:
69 self.conn.commit()
70 return data
71 except Exception, e:
72 utils.log('execute exception msg:%s' % str(e))
73 return None
74
75 def query_one(self, command, commit = False):
76 try:
77 utils.log('execute command:%s' % command)
78
79 self.cursor.execute(command)
80 data = self.cursor.fetchone()
81 if commit:
82 self.conn.commit()
83
84 return data
85 except Exception, e:
86 utils.log('execute exception msg:%s' % str(e))
87 return None
88
89 def insert_data_to_users(self):
90 command = ("INSERT INTO users "
91 "(id, name, remark)"
92 "VALUES(%s, %s, %s)")
93 return command
94
95 def insert_data_to_albums(self):
96 command = ("INSERT INTO albums "
97 "(id, user_id, name, created_at, remark, kind, total)"
98 "VALUES(%s, %s, %s, %s, %s, %s, %s)")
99 return command
100
101 def insert_data_to_photos(self):
102 command = ("INSERT INTO photos "
103 "(id, album_id, name, url, kind)"
104 "VALUES(%s, %s, %s, %s, %s)")
105 return command
106
107 #创建表
108 sql = SqlHelper()
109 sql.create_table("create table IF NOT EXISTS users(id bigint, name varchar(255), remark text )")
110 sql.create_table("create table IF NOT EXISTS albums(id bigint, user_id bigint, name varchar(255), created_at date, remark text, kind int, total float) ")
111 sql.create_table("create table IF NOT EXISTS photos(id bigint, album_id bigint, name varchar(255), url varchar(255), kind int) ")
| 26 - error: syntax-error
|
1 # encoding=utf-8
2 from sqlhelper import SqlHelper
3
4 sql = SqlHelper()
5
6 def insert_data_to_users():
7 command = ("INSERT IGNORE INTO users "
8 "(id, name, created_at, remark)"
9 "VALUES(%s, %s, %s, %s)")
10 return command
11
12
13 command = insert_data_to_users()
14
15 msg = (None, "112", "", "",)
16
17 sql.insert_data(command, msg, commit = True)
18
19 print 'created user success'
| 19 - error: syntax-error
|
1 """ Q->) design a calculator which will correctly solve all the problem except the following one
2 45*3 = 123, 85+2 = 546, 33-23 = 582 | your program should take operator and two numbers as input from the user and then return the result """
3 try:
4 num1 = int(input("enter first number:"))
5 num2 = int(input("enter second number:"))
6 op = input("choose operatot => | +, -, *, /| :")
7 if (num1 == 45 and num2 == 3 or num1 == 85 and num2 == 2 or num1 == 33 and num2 == 23):
8 if op == "+":
9 print(546)
10 elif op == "-":
11 print(582)
12 elif op == "*":
13 print(123)
14 else:
15 if op == "+":
16 print(num1 + num2)
17 elif op == "-":
18 print(num1 - num2)
19 elif op == "*":
20 print(num1 * num2)
21 elif op == "/":
22 print(num1 / num2)
23 except Exception as e:
24 print("Please Enter a Valid Integer Value") | 23 - warning: broad-exception-caught
7 - refactor: too-many-boolean-expressions
|
1 #!/usr/bin/env python
2 from __future__ import print_function
3 import ROOT
4 from BranchTools import Collection
5 from BranchTools import AddCollectionsModule
6 import array
7 import os
8 import math
9 import numpy as np
10
11 # MET X/Y correction
12 class METXY(AddCollectionsModule):
13
14 def __init__(self, year):
15 super(METXY, self).__init__()
16 self.debug = 'XBBDEBUG' in os.environ
17 self.year = int(year)
18 self.quickloadWarningShown = False
19
20 self.systematics = ['jer','jesAbsoluteStat','jesAbsoluteScale','jesAbsoluteFlavMap','jesAbsoluteMPFBias','jesFragmentation','jesSinglePionECAL','jesSinglePionHCAL','jesFlavorQCD','jesRelativeJEREC1','jesRelativeJEREC2','jesRelativeJERHF','jesRelativePtBB','jesRelativePtEC1','jesRelativePtEC2','jesRelativePtHF','jesRelativeBal','jesRelativeFSR','jesRelativeStatFSR','jesRelativeStatEC','jesRelativeStatHF','jesPileUpDataMC','jesPileUpPtRef','jesPileUpPtBB','jesPileUpPtEC1','jesPileUpPtEC2','jesPileUpPtHF','jesPileUpMuZero','jesPileUpEnvelope','jesTotal','unclustEn']
21
22 def customInit(self, initVars):
23 self.sampleTree = initVars['sampleTree']
24 self.sample = initVars['sample']
25 self.config = initVars['config']
26
27 # load METXYCorr_Met_MetPhi from VHbb namespace
28 VHbbNameSpace = self.config.get('VHbbNameSpace', 'library')
29 ROOT.gSystem.Load(VHbbNameSpace)
30
31 self.MET_Pt = array.array('f', [0.0])
32 self.MET_Phi = array.array('f', [0.0])
33 self.sampleTree.tree.SetBranchAddress("MET_Pt", self.MET_Pt)
34 self.sampleTree.tree.SetBranchAddress("MET_Phi", self.MET_Phi)
35
36 self.addBranch("MET_Pt_uncorrected")
37 self.addBranch("MET_Phi_uncorrected")
38
39 if self.sample.isMC():
40 self.MET_Pt_syst = {}
41 self.MET_Phi_syst = {}
42 for syst in self.systematics:
43 self.MET_Pt_syst[syst] = {}
44 self.MET_Phi_syst[syst] = {}
45 for Q in self._variations(syst):
46 self.MET_Pt_syst[syst][Q] = array.array('f', [0.0])
47 self.MET_Phi_syst[syst][Q] = array.array('f', [0.0])
48 self.sampleTree.tree.SetBranchAddress("MET_pt_"+syst+Q, self.MET_Pt_syst[syst][Q])
49 self.sampleTree.tree.SetBranchAddress("MET_phi_"+syst+Q, self.MET_Phi_syst[syst][Q])
50
51 self.addBranch("MET_pt_uncorrected_"+syst+Q)
52 self.addBranch("MET_phi_uncorrected_"+syst+Q)
53
54
55 def processEvent(self, tree):
56 if not self.hasBeenProcessed(tree):
57 self.markProcessed(tree)
58
59 # backup uncorrected branches
60 self._b('MET_Pt_uncorrected')[0] = tree.MET_Pt
61 self._b('MET_Phi_uncorrected')[0] = tree.MET_Phi
62
63 MET_Pt_corrected, MET_Phi_corrected = ROOT.VHbb.METXYCorr_Met_MetPhi(tree.MET_Pt, tree.MET_Phi, tree.run, self.year, self.sample.isMC(), tree.PV_npvs)
64
65 # overwrite MET_Pt, MET_Phi branches
66 self.MET_Pt[0] = MET_Pt_corrected
67 self.MET_Phi[0] = MET_Phi_corrected
68
69 if self.sample.isMC():
70 for syst in self.systematics:
71 for Q in self._variations(syst):
72
73 # backup uncorrected branches
74 self._b("MET_pt_uncorrected_"+syst+Q)[0] = self.MET_Pt_syst[syst][Q][0]
75 self._b("MET_phi_uncorrected_"+syst+Q)[0] = self.MET_Phi_syst[syst][Q][0]
76
77 MET_Pt_corrected, MET_Phi_corrected = ROOT.VHbb.METXYCorr_Met_MetPhi(self.MET_Pt_syst[syst][Q][0], self.MET_Phi_syst[syst][Q][0], tree.run, self.year, self.sample.isMC(), tree.PV_npvs)
78
79 # overwrite MET_Pt, MET_Phi branches
80 self.MET_Pt_syst[syst][Q][0] = MET_Pt_corrected
81 self.MET_Phi_syst[syst][Q][0] = MET_Phi_corrected
82
83 # formulas by default reload the branch content when evaluating the first instance of the object!
84 # SetQuickLoad(1) turns off this behavior
85 for formulaName, treeFormula in self.sampleTree.formulas.items():
86 if 'MET' in formulaName:
87 if not self.quickloadWarningShown:
88 self.quickloadWarningShown = True
89 print("INFO: SetQuickLoad(1) called for formula:", formulaName)
90 print("INFO: -> EvalInstance(0) on formulas will not re-load branches but will take values from memory, which might have been modified by this module.")
91 treeFormula.SetQuickLoad(1)
92
93
| 12 - refactor: too-many-instance-attributes
15 - refactor: super-with-arguments
23 - warning: attribute-defined-outside-init
24 - warning: attribute-defined-outside-init
25 - warning: attribute-defined-outside-init
31 - warning: attribute-defined-outside-init
32 - warning: attribute-defined-outside-init
40 - warning: attribute-defined-outside-init
41 - warning: attribute-defined-outside-init
4 - warning: unused-import
8 - warning: unused-import
9 - warning: unused-import
|
1 #! /usr/bin/env python
2
3 import os
4 import sys
5 import glob
6
7 from optparse import OptionParser
8
9 from myutils.BetterConfigParser import BetterConfigParser
10 from myutils.FileList import FileList
11 from myutils.FileLocator import FileLocator
12 from myutils.copytreePSI import filelist
13 from myutils.sample_parser import ParseInfo
14
15
16 def get_config(opts):
17 # From submit.py
18
19 pathconfig = BetterConfigParser()
20 pathconfig.read('%sconfig/paths.ini' % opts.tag)
21
22 try:
23 _configs = [x for x in pathconfig.get('Configuration', 'List').split(" ") if len(x.strip()) > 0]
24 if 'volatile.ini' in _configs:
25 _configs.remove('volatile.ini')
26 configs = ['%sconfig/' % (opts.tag) + c for c in _configs]
27
28 except Exception as e:
29 print("\x1b[31mERROR:" + str(e) + "\x1b[0m")
30 print("\x1b[31mERROR: configuration file not found. Check config-tag specified with -T and presence of '[Configuration] List' in .ini files.\x1b[0m")
31 raise Exception("ConfigNotFound")
32
33 # read config
34 config = BetterConfigParser()
35 config.read(configs)
36
37 return config
38
39
40 def add_to_config(condor_config, template, sample, config, locator):
41
42 if os.environ.get('XBBDEBUG'):
43 print('Adding %s:' % sample)
44
45 sampledir = os.path.join(config.get('Directories', 'CONDORout'), sample)
46 if not os.path.exists(sampledir):
47 os.makedirs(sampledir)
48
49 for part, infile in enumerate(filelist(config.get('Directories', 'samplefiles'), sample)):
50
51 job = {
52 'log': '%s_part%s' % (sample, part),
53 'part': part,
54 'sample': sample,
55 'filelist': FileList.compress(infile),
56 'outfile': locator.getFilenameAfterPrep(infile)
57 }
58
59 output_file = os.path.join(sampledir, job['outfile'])
60
61 if os.path.exists(output_file) and os.stat(output_file).st_size:
62 continue
63
64 condor_config.write(template.format(**job))
65
66
67 if __name__ == '__main__':
68
69 parser = OptionParser()
70
71 parser.add_option('-T', '--tag', dest='tag', default='default',
72 help='Tag to run the analysis with, example \'8TeV\' uses 8TeVconfig to run the analysis')
73 parser.add_option('-S','--samples',dest='samples',default='*', help='samples you want to run on')
74 parser.add_option('-o', '--output', dest='output', default='condor', help='output prefix')
75
76 parser.add_option('-c', '--config', dest='config', default=None, help='Display a config value instead of making a submit file')
77
78 (opts, args) = parser.parse_args(sys.argv)
79
80 config = get_config(opts)
81
82 if opts.config:
83 print(config.get(*opts.config.split(':')))
84 exit(0)
85
86 filelocator = FileLocator(config)
87 parseinfo = ParseInfo(samples_path=config.get('Directories', 'PREPin'), config=config)
88
89 with open('batch/condor/mit_header.sub', 'r') as header_file:
90 header = header_file.read()
91
92 logdir = os.path.join('/home/dabercro/public_html/xbb', config.get('Directories', 'Dname'))
93
94 if not os.path.exists(logdir):
95 os.makedirs(logdir)
96
97 with open('batch/condor/mit_template.sub', 'r') as template_file:
98 template = template_file.read().format(
99 logdir=logdir,
100 tag=opts.tag,
101 outdir=config.get('Directories', 'CONDORin'),
102 condorout=config.get('Directories', 'CONDORout'),
103 log='{log}', part='{part}', sample='{sample}',
104 filelist='{filelist}', outfile='{outfile}'
105 )
106
107 with open('%s_%s.cfg' % (opts.output, opts.tag), 'w') as condor_config:
108
109 condor_config.write(header)
110
111 for sample_file in glob.iglob('%s/%s.txt' % (config.get('Directories', 'samplefiles'), opts.samples)):
112
113 if sample_file.endswith('.root.txt'):
114 continue
115
116 sample = os.path.basename(sample_file).split('.')[0]
117
118 samples = parseinfo.find(sample)
119
120 if os.environ.get('XBBDEBUG'):
121 print(samples)
122
123 if len(samples) == 1:
124 add_to_config(condor_config, template, sample, config, filelocator)
| 7 - warning: deprecated-module
16 - warning: redefined-outer-name
34 - warning: redefined-outer-name
31 - warning: raise-missing-from
31 - warning: broad-exception-raised
40 - warning: redefined-outer-name
40 - warning: redefined-outer-name
40 - warning: redefined-outer-name
40 - warning: redefined-outer-name
84 - refactor: consider-using-sys-exit
89 - warning: unspecified-encoding
97 - warning: unspecified-encoding
107 - warning: unspecified-encoding
|
1 #!/usr/bin/env python
2 from __future__ import print_function
3 import ROOT
4 from BranchTools import Collection
5 from BranchTools import AddCollectionsModule
6 import array
7 import os
8 import math
9 import numpy as np
10
11 # applies the smearing to MC jet resolution and modifies the Jet_PtReg* branches of the tree
12 class JetSmearer(AddCollectionsModule):
13
14 def __init__(self, year, unsmearPreviousCorrection=True, backupPreviousCorrection=True):
15 super(JetSmearer, self).__init__()
16 self.debug = 'XBBDEBUG' in os.environ
17 self.unsmearPreviousCorrection = unsmearPreviousCorrection
18 self.backupPreviousCorrection = backupPreviousCorrection
19 self.quickloadWarningShown = False
20
21 self.year = year if type(year) == str else str(year)
22 self.smear_params = {
23 #'2016': [1.0, 0.0, 0.0, 0.0],
24 '2017': [1.0029846959, 0.0212893588055, 0.030684, 0.052497],
25 '2018': [0.98667384694, 0.0197153848807, 0.038481, 0.053924],
26 }
27 if self.year not in self.smear_params:
28 print("ERROR: smearing for year", self.year, " not available!")
29 raise Exception("SmearingError")
30
31 self.scale, self.scale_err, self.smear, self.smear_err = self.smear_params[self.year]
32
33 def customInit(self, initVars):
34 self.sampleTree = initVars['sampleTree']
35 self.isData = initVars['sample'].isData()
36 self.sample = initVars['sample']
37
38 if self.sample.isMC():
39 # resolutions used in post-processor smearing
40 self.unsmearResNom = 1.1
41 self.unsmearResUp = 1.2
42 self.unsmearResDown = 1.0
43
44 self.maxNjet = 256
45 self.PtReg = array.array('f', [0.0]*self.maxNjet)
46 self.PtRegUp = array.array('f', [0.0]*self.maxNjet)
47 self.PtRegDown = array.array('f', [0.0]*self.maxNjet)
48 self.sampleTree.tree.SetBranchAddress("Jet_PtReg", self.PtReg)
49 self.sampleTree.tree.SetBranchAddress("Jet_PtRegUp", self.PtRegUp)
50 self.sampleTree.tree.SetBranchAddress("Jet_PtRegDown", self.PtRegDown)
51
52 if self.backupPreviousCorrection:
53 self.addVectorBranch("Jet_PtRegOld", default=0.0, branchType='f', length=self.maxNjet, leaflist="Jet_PtRegOld[nJet]/F")
54 self.addVectorBranch("Jet_PtRegOldUp", default=0.0, branchType='f', length=self.maxNjet, leaflist="Jet_PtRegOldUp[nJet]/F")
55 self.addVectorBranch("Jet_PtRegOldDown", default=0.0, branchType='f', length=self.maxNjet, leaflist="Jet_PtRegOldDown[nJet]/F")
56
57 def processEvent(self, tree):
58 if not self.hasBeenProcessed(tree) and self.sample.isMC():
59 self.markProcessed(tree)
60
61 nJet = tree.nJet
62
63 # backup the Jet_PtReg branches with the old smearing
64 if self.backupPreviousCorrection:
65 for i in range(nJet):
66 self._b("Jet_PtRegOld")[i] = self.PtReg[i]
67 self._b("Jet_PtRegOldUp")[i] = self.PtRegUp[i]
68 self._b("Jet_PtRegOldDown")[i] = self.PtRegDown[i]
69
70 # original post-procesor smearing which is undone:
71 # if isMC:
72 # # until we have final post-regression smearing factors we assume a flat 10%
73 # if sysVar==0: # nominal
74 # resSmear = 1.1
75 # elif sysVar==1: # up
76 # resSmear = 1.2
77 # elif sysVar==-1: # down
78 # resSmear = 1.0
79 # smearedPt = jet.pt*jet.bRegCorr
80 # if jet.genJetIdx >=0 and jet.genJetIdx < len(self.genJetsWithNeutrinos) :
81 # genJet=self.genJetsWithNeutrinos[jet.genJetIdx]
82 # dPt = smearedPt - genJet.Pt()
83 # smearedPt=genJet.Pt()+resSmear*dPt
84 # return smearedPt
85
86 # undo old smearing
87 if self.unsmearPreviousCorrection:
88 for i in range(nJet):
89 genJetIdx = tree.Jet_genJetIdx[i]
90 if genJetIdx > -1 and genJetIdx < len(tree.GenJetWithNeutrinos_pt):
91 genJetPt = tree.GenJetWithNeutrinos_pt[genJetIdx]
92
93 self.PtReg[i] = genJetPt + (self.PtReg[i] - genJetPt)/self.unsmearResNom
94 self.PtRegUp[i] = genJetPt + (self.PtRegUp[i] - genJetPt)/self.unsmearResUp
95 self.PtRegDown[i] = genJetPt + (self.PtRegDown[i] - genJetPt)/self.unsmearResDown
96
97 # after undoing the smearing, check if up/down variations are the same
98 assert (max(abs(self.PtReg[i]-self.PtRegUp[i]),abs(self.PtRegUp[i]-self.PtRegDown[i])) < 0.001 or self.PtReg[i] < 0)
99
100 # apply new smearing
101 for i in range(nJet):
102 genJetIdx = tree.Jet_genJetIdx[i]
103 if genJetIdx > -1 and genJetIdx < len(tree.GenJetWithNeutrinos_pt):
104 gen_pt = tree.GenJetWithNeutrinos_pt[genJetIdx]
105
106 # reference: https://github.com/dabercro/hbb/blob/b86589128a6839a12efaf041f579fe88c1d1be38/nanoslimmer/applysmearing/applysmearing.py
107 regressed = self.PtReg[i]
108 no_smear = regressed * self.scale
109 gen_diff = regressed - gen_pt
110 nominal = max(0.0, (gen_pt + gen_diff * (1.0 + self.smear)) * self.scale)
111 band = math.sqrt(pow(nominal/self.scale * self.scale_err, 2) + pow(gen_diff * self.scale * self.smear_err, 2))
112
113 down, up = (max(nominal - band, no_smear), nominal + band) if regressed > gen_pt else (min(nominal + band, no_smear), nominal - band)
114
115 self.PtReg[i] = nominal
116 self.PtRegUp[i] = up
117 self.PtRegDown[i] = down
118
119
120 # formulas by default reload the branch content when evaluating the first instance of the object!
121 # SetQuickLoad(1) turns off this behavior
122 for formulaName, treeFormula in self.sampleTree.formulas.items():
123 if 'Jet_PtReg' in formulaName:
124 if not self.quickloadWarningShown:
125 self.quickloadWarningShown = True
126 print("INFO: SetQuickLoad(1) called for formula:", formulaName)
127 print("INFO: -> EvalInstance(0) on formulas will not re-load branches but will take values from memory, which might have been modified by this module.")
128 treeFormula.SetQuickLoad(1)
129 # print("\x1b[31mERROR: this module can't be used together with others which use formulas based on branches changed inside this module!\x1b[0m")
130 # raise Exception("NotImplemented")
131
132
| 12 - refactor: too-many-instance-attributes
15 - refactor: super-with-arguments
29 - warning: broad-exception-raised
57 - refactor: too-many-locals
90 - refactor: chained-comparison
103 - refactor: chained-comparison
34 - warning: attribute-defined-outside-init
35 - warning: attribute-defined-outside-init
36 - warning: attribute-defined-outside-init
40 - warning: attribute-defined-outside-init
41 - warning: attribute-defined-outside-init
42 - warning: attribute-defined-outside-init
44 - warning: attribute-defined-outside-init
45 - warning: attribute-defined-outside-init
46 - warning: attribute-defined-outside-init
47 - warning: attribute-defined-outside-init
3 - warning: unused-import
4 - warning: unused-import
9 - warning: unused-import
|
1 #!/usr/bin/env python
2 import ROOT
3 import numpy as np
4 import array
5 import os
6 from BranchTools import Collection
7 from BranchTools import AddCollectionsModule
8 from XbbTools import XbbTools
9
10 class isBoosted(AddCollectionsModule):
11
12 def __init__(self, branchName='isBoosted', cutName='all_BOOST'):
13 super(isBoosted, self).__init__()
14 self.branchName = branchName
15 self.cutName = cutName
16 self.version = 3
17 self.variations = self._variations("isBoosted")
18
19 # returns cut string with variables replaced by their systematic variations
20 def getSystVarCut(self, cut, syst, UD):
21 replacementRulesList = XbbTools.getReplacementRulesList(self.config, syst)
22 systVarCut = XbbTools.getSystematicsVariationTemplate(cut, replacementRulesList)
23 systVarCut = systVarCut.replace('{syst}', syst).replace('{UD}', UD)
24 return systVarCut
25
26 def customInit(self, initVars):
27 self.sample = initVars['sample']
28 self.sampleTree = initVars['sampleTree']
29 self.config = initVars['config']
30
31 self.boostedCut = self.config.get('Cuts', self.cutName)
32 self.systVarCuts = {}
33
34 self.systematics = sorted(list(set(sum([eval(self.config.get('LimitGeneral', x)) for x in ['sys_cr', 'sys_BDT', 'sys_Mjj']], []))))
35
36 # Nominal
37 self.addIntegerBranch(self.branchName)
38 self.sampleTree.addFormula(self.boostedCut)
39
40 # systematic variations
41 if self.sample.isMC():
42 for syst in self.systematics:
43 for UD in self.variations:
44 systVarBranchName = self._v(self.branchName, syst, UD)
45 self.addIntegerBranch(systVarBranchName)
46 self.systVarCuts[systVarBranchName] = self.getSystVarCut(self.boostedCut, syst=syst, UD=UD)
47 self.sampleTree.addFormula(self.systVarCuts[systVarBranchName])
48
49 def processEvent(self, tree):
50 # if current entry has not been processed yet
51 if not self.hasBeenProcessed(tree):
52 self.markProcessed(tree)
53
54 # Nominal
55 b = int(self.sampleTree.evaluate(self.boostedCut))
56 self._b(self._v(self.branchName))[0] = 1 if b > 0 else 0
57
58 # systematic variations
59 if self.sample.isMC():
60 for syst in self.systematics:
61 for UD in self.variations:
62 systVarBranchName = self._v(self.branchName, syst, UD)
63 b = int(self.sampleTree.evaluate(self.systVarCuts[systVarBranchName]))
64 self._b(systVarBranchName)[0] = 1 if b > 0 else 0
65
66
| 10 - refactor: too-many-instance-attributes
13 - refactor: super-with-arguments
34 - warning: eval-used
27 - warning: attribute-defined-outside-init
28 - warning: attribute-defined-outside-init
29 - warning: attribute-defined-outside-init
31 - warning: attribute-defined-outside-init
32 - warning: attribute-defined-outside-init
34 - warning: attribute-defined-outside-init
2 - warning: unused-import
3 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
|
1 """
2 ip_reader
3 ---------
4 Reads the Machine IP and emails if it has changed
5
6 Mac - Linux
7 crontab
8
9 Windows:
10
11 Command line as follows:
12
13 schtasks /Create /SC HOURLY /TN PythonTask /TR "PATH_TO_PYTHON_EXE PATH_TO_PYTHON_SCRIPT"
14
15 That will create an hourly task called 'PythonTask'. You can replace HOURLY with DAILY, WEEKLY etc.
16 PATH_TO_PYTHON_EXE will be something like: C:\python25\python.exe.
17
18 Otherwise you can open the Task Scheduler and do it through the GUI. Hope this helps.
19
20 """
21
22 import collections
23 import base64
24 import json
25
26 from httplib import HTTPSConnection
27 from urllib import urlencode
28 from urllib2 import urlopen
29
30
31 def encode_params(data):
32 """Encode parameters in a piece of data.
33
34 Will successfully encode parameters when passed as a dict or a list of
35 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
36 if parameters are supplied as a dict.
37 """
38
39 if isinstance(data, (str, bytes)):
40 return data
41 elif hasattr(data, 'read'):
42 return data
43 elif hasattr(data, '__iter__'):
44 result = []
45 for k, vs in to_key_val_list(data):
46 if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
47 vs = [vs]
48 for v in vs:
49 if v is not None:
50 result.append(
51 (k.encode('utf-8') if isinstance(k, str) else k,
52 v.encode('utf-8') if isinstance(v, str) else v))
53 return urlencode(result, doseq=True)
54 else:
55 return data
56
57
58 def to_key_val_list(value):
59 """Take an object and test to see if it can be represented as a
60 dictionary. If it can be, return a list of tuples, e.g.,
61
62 ::
63
64 >>> to_key_val_list([('key', 'val')])
65 [('key', 'val')]
66 >>> to_key_val_list({'key': 'val'})
67 [('key', 'val')]
68 >>> to_key_val_list('string')
69 ValueError: cannot encode objects that are not 2-tuples.
70
71 :rtype: list
72 """
73 if value is None:
74 return None
75
76 if isinstance(value, (str, bytes, bool, int)):
77 raise ValueError('cannot encode objects that are not 2-tuples')
78
79 if isinstance(value, collections.Mapping):
80 value = value.items()
81
82 return list(value)
83
84
85 file_path = 'ip.txt'
86 my_ip = json.load(urlopen('https://api.ipify.org/?format=json'))['ip']
87
88 try:
89 with open(file_path, 'r') as the_file:
90 file_ip = the_file.read()
91 except:
92 file_ip = u''
93
94 if my_ip != file_ip:
95
96 http = 'http://'
97 url = 'api.mailgun.net'
98 request = '/v3/sandboxee586e52376a457d8b274c437718a56e.mailgun.org/messages'
99
100 key = 'key-29caea072852af2816e0b02f6733b751'
101 base64string = base64.encodestring('api:'+key).replace('\n', '')
102
103 headers = {'Authorization': 'Basic %s' % base64string,
104 'content-type': 'application/x-www-form-urlencoded'}
105 payload = {"from": "PostMaster <postmaster@sandboxee586e52376a457d8b274c437718a56e.mailgun.org>",
106 "to": "Juan Pablo <jp.urzua.t@gmail.com>",
107 "subject": "La IP de la oficina ha cambiado!",
108 "text": "La nueva IP es: " + my_ip}
109
110 body = encode_params(payload)
111
112 http_connection = HTTPSConnection(url)
113 http_connection.request(method="POST", url=request, body=body, headers=headers)
114 response = json.loads(http_connection.getresponse().read())
115 print response
116
117 if response['message'] == 'Queued. Thank you.':
118 with open(file_path, 'w') as the_file:
119 the_file.write(my_ip)
120 print "Escrito"
121
122 else:
123 print 'Same IP'
124
125
| 115 - error: syntax-error
|
1 from aima3.search import *
2 from utils import *
3 import numpy as np
4 import cv2 as cv
5 import matplotlib.pyplot as plt
6
7 # file che contine l'implementazione del problema basata con AIMA
8
9 class BlocksWorld(Problem):
10
11 def __init__(self, initial, goal):
12 super().__init__(initial, goal)
13
14
15 # restituisce il numero di blocchi
16 def get_blocks_number(self):
17 return len(self.initial)
18
19
20 # restituisce la lista delle possibili azioni nello stato corrente
21 def actions(self, state):
22 blocks = [*state[0:-1]]
23 size = state[-1]
24 columns = {}
25 tops = []
26 for block in blocks:
27 n, i, j = block
28 if j not in columns:
29 columns[j] = (n, i, j)
30 else:
31 if i > columns[j][1]:
32 columns[j] = (n, i, j)
33 for col in columns:
34 tops.append(columns[col])
35 actions = []
36 for block in tops:
37 n, i, j = block
38 for col in range(size):
39 if col != j:
40 if col in columns:
41 actions.append((n, columns[col][1]+1, col))
42 else:
43 actions.append((n, 0, col))
44 return actions
45
46
47 #
48 def result(self, state, actions):
49 blocks = [*state[0:-1]]
50 size = state[-1]
51 to_delete = ()
52 for block in blocks:
53 if block[0] == actions[0]:
54 to_delete = block
55 blocks.remove(to_delete)
56 blocks.append((actions))
57 blocks.append(size)
58 return tuple(blocks)
59
60
61 # verifica se lo stato passato è lo stato finale
62 def goal_test(self, state):
63 op_1 = [*state[0:-1]]
64 op_2 = [*self.goal[0:-1]]
65 op_1.sort(key=lambda l: l[0])
66 op_2.sort(key=lambda l: l[0])
67 return str(op_1) == str(op_2)
68
69
70 # restituisce i blocchi che possono essere spostati nello stato che viene passato
71 def get_movable(self, state):
72 blocks = [*state[0:-1]]
73 size = state[-1]
74 columns = {}
75 tops = []
76 for block in blocks:
77 n, i, j = block
78 if j not in columns:
79 columns[j] = (n, i, j)
80 else:
81 if i > columns[j][1]:
82 columns[j] = (n, i, j)
83 for col in columns:
84 tops.append(columns[col])
85 return tops
86
87
88 # euristica che calcola il numero di blocchi in posizione errata
89 def misplaced_blocks(self, node):
90 blocks = [*node.state[0:-1]]
91 target = [*self.goal[0:-1]]
92 target.sort(key=lambda l: l[0])
93 value = 0
94 for block in blocks:
95 n, i, j = block
96 if target[n-1][1:3] != (i, j):
97 value += 1
98 # if block not in self.get_movable(node.state):
99 # value += 1
100 return value
101
102
103 # ritorna la profondità di un nodo nell'albero di ricerca
104 def depth(self, node):
105 return node.depth
106
107
108 # stampa la lista delle azioni che portano dallo stato iniziale allo stato finale
109 def solution(self, actions, output=True):
110 if len(actions) is None:
111 return
112 state = self.initial
113 successor = None
114 n = 1
115 print("Lunghezza soluzione: " + str(len(actions)))
116 for action in actions:
117 print(action)
118 successor = self.result(state, action)
119 if output:
120 figue_1 = self.draw_state(state)
121 figue_2 = self.draw_state(successor)
122 _, axarr = plt.subplots(1, 2)
123 axarr[0].imshow(figue_1, cmap=plt.cm.binary)
124 axarr[0].set_xticks([])
125 axarr[0].set_yticks([])
126 axarr[0].set_xlabel(f"\nStato {n}")
127 axarr[1].imshow(figue_2, cmap=plt.cm.binary)
128 axarr[1].set_xticks([])
129 axarr[1].set_yticks([])
130 axarr[1].set_xlabel(f"\nStato {n+1}")
131 figManager = plt.get_current_fig_manager()
132 figManager.full_screen_toggle()
133 plt.show()
134 state = successor
135 n += 1
136
137
138 # metodo che fornisce una rappresentazione grafica dello stato che gli viene passato
139 def draw_state(self, state):
140 blocks = [*state[0:-1]]
141 w = state[-1]
142 blocks.sort(key=lambda l: l[1], reverse=True)
143 h = blocks[0][1]
144 image = np.zeros(((h+1)*100, w*100), np.uint8)
145 for block in blocks:
146 n, i, j = block
147 i = h - i
148 digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
149 digit = cv.resize(digit, (100, 100))
150 image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
151 size = (len(state) - 1)*100
152 adjust = np.zeros((size, w*100), np.uint8)
153 adjust[size - (h+1)*100 : size, :] = image
154 return adjust | 1 - warning: wildcard-import
2 - warning: wildcard-import
9 - error: undefined-variable
11 - warning: useless-parent-delegation
73 - warning: unused-variable
|
1 import tkinter as tk
2 from tkinter.filedialog import askopenfilename
3 from PIL import Image, ImageTk
4 from load_state import prepare_image
5 from utils import draw_state
6 from blocks_world import BlocksWorld
7 from search_algs import *
8
9 # file che contiene l'implementazione dell'interfaccia grafica per utilizzare il programma
10
11 class Window(tk.Frame):
12
13
14 def __init__(self, master=None):
15 super().__init__(master)
16 self.master = master
17 self.pack()
18 self.initial_state = None
19 self.goal_state = None
20 self.create_widgets()
21 self.create_images("insert_image.png", "insert_image.png")
22
23
24 def create_widgets(self):
25 initial_label = tk.Label(self, text = "Seleziona stato iniziale:")
26 goal_label = tk.Label(self, text = "Seleziona stato finale:")
27 initial_label.grid(row = 0, column = 0, padx = 10, pady = 10)
28 goal_label.grid(row = 0, column = 2, padx = 10, pady = 10)
29
30 initial_button = tk.Button(self, text="Seleziona file", command=self.open_initial)
31 goal_button = tk.Button(self, text="Seleziona file", command=self.open_goal)
32 initial_button.grid(row = 1, column = 0, padx = 10, pady = 10)
33 goal_button.grid(row = 1, column = 2, padx = 10, pady = 10)
34
35 alg_label = tk.Label(self, text = "Seleziona algoritmo di ricerca:")
36 alg_label.grid(row = 0, column = 1, padx = 10, pady = 10)
37
38 frame = tk.Frame(self)
39 frame.grid(row = 1, column = 1, padx = 10, pady = 10)
40
41 self.selected = tk.StringVar(self)
42 self.selected.set("BFS")
43 select_alg_menu = tk.OptionMenu(frame, self.selected, "BFS", "DFS", "IDS", "UCS", "A*", "RBFS", command=self.read_algorithm).pack()
44
45 start_button = tk.Button(frame, text="Start search", command=self.start_search).pack()
46
47
48 def create_images(self, initial, goal):
49 self.initial_image_path = initial
50 self.initial_image = ImageTk.PhotoImage(Image.open("./images/" + initial).resize((300, 300)))
51 initial_image_label = tk.Label(self, image=self.initial_image)
52 initial_image_label.grid(row = 2, column = 0, padx = 10, pady = 10)
53
54 self.goal_image_path = goal
55 self.goal_image = ImageTk.PhotoImage(Image.open("./images/" + goal).resize((300, 300)))
56 goal_image_label = tk.Label(self, image=self.goal_image)
57 goal_image_label.grid(row = 2, column = 2, padx = 10, pady = 10)
58
59
60 def open_initial(self):
61 self.initial_file = askopenfilename()
62 if self.initial_file == ():
63 return
64 self.initial_state = prepare_image(self.initial_file, False)
65 print(self.initial_state)
66 draw_state(self.initial_state, "initial")
67 self.create_images("/temp/initial.jpg", self.goal_image_path)
68
69
70 def read_algorithm(self, alg):
71 return alg
72
73
74 def open_goal(self):
75 self.goal_file = askopenfilename()
76 if self.goal_file == ():
77 return
78 self.goal_state = prepare_image(self.goal_file, False)
79 print(self.goal_state)
80 draw_state(self.goal_state, "goal")
81 self.create_images(self.initial_image_path, "/temp/goal.jpg")
82
83
84 def start_search(self):
85 if self.goal_state is None and self.initial_state is None:
86 return
87 alg = self.selected.get()
88 problem = BlocksWorld(self.initial_state, self.goal_state)
89 print("Inizio ricerca:")
90 if alg == "BFS":
91 problem.solution(graph_bfs(problem).solution())
92 if alg == "A*":
93 problem.solution(a_star(problem, lambda n: problem.misplaced_blocks(n)).solution())
94 if alg == "DFS":
95 problem.solution(graph_dfs(problem).solution())
96 if alg == "IDS":
97 problem.solution(ids(problem).solution())
98 if alg == "RBFS":
99 problem.solution(rbfs(problem, lambda n: problem.misplaced_blocks(n)).solution())
100 if alg == "UCS":
101 problem.solution(a_star(problem, lambda n: problem.depth(n)).solution())
102
103
104
105
106 root = tk.Tk()
107 root.title("Blocks World")
108 root.resizable(0, 0)
109 app = Window(master=root)
110 app.mainloop() | 7 - warning: wildcard-import
11 - refactor: too-many-instance-attributes
43 - error: assignment-from-no-return
45 - error: assignment-from-no-return
43 - warning: unused-variable
45 - warning: unused-variable
91 - error: undefined-variable
93 - error: undefined-variable
93 - warning: unnecessary-lambda
95 - error: undefined-variable
97 - error: undefined-variable
99 - error: undefined-variable
99 - warning: unnecessary-lambda
101 - error: undefined-variable
101 - warning: unnecessary-lambda
61 - warning: attribute-defined-outside-init
75 - warning: attribute-defined-outside-init
|
1 import numpy as np
2 import matplotlib.pyplot as plt
3 from keras.datasets import mnist
4 from keras.layers import Conv2D
5 from keras.layers import MaxPool2D
6 from keras.layers import Flatten
7 from keras.layers import Dense
8 from keras.layers import Dropout
9 from keras import Sequential
10
11 # file per allenare e salvare la rete neurale che effettua il riconoscimento delle cifre
12 # il modello viene allenato sul dataset del MNIST
13
14 BATCH_SIZE = 64
15 EPOCHS = 10
16
17 # si estraggono e si
18 (x_train, y_train), (x_test, y_test) = mnist.load_data()
19
20 # si aggiunge la dimensione del canale e si normalizza il valore dei pixel tra 0 e 1
21 x_train = np.expand_dims(x_train, -1)
22 x_train = x_train / 255
23 x_test = np.expand_dims(x_test, -1)
24 x_test = x_test / 255
25
26 # definizione del modello
27 model = Sequential()
28 model.add(Conv2D(filters=24, kernel_size=(3, 3), activation="relu"))
29 model.add(MaxPool2D(pool_size=(2, 2)))
30 model.add(Dropout(0.5))
31 model.add(Conv2D(filters=36, kernel_size=(3, 3)))
32 model.add(MaxPool2D(pool_size=(2, 2)))
33 model.add(Dropout(0.5))
34 model.add(Flatten())
35 model.add(Dense(128, activation="relu"))
36 model.add(Dense(10, activation="softmax"))
37
38 model.predict(x_train[[0]])
39
40 model.summary()
41
42 model.compile(optimizer="adam",
43 loss='sparse_categorical_crossentropy',
44 metrics=['accuracy'])
45
46 # allenamento del modello
47 history = model.fit(x_train, y_train, batch_size=BATCH_SIZE, epochs=EPOCHS, validation_data=(x_test, y_test))
48
49 # calcolo della precisione e dell'errore nel validation set
50 test_loss, test_acc = model.evaluate(x_test, y_test)
51
52 print('Test loss', test_loss)
53 print('Test accuracy:', test_acc)
54
55 # plot dei grafici relativi all'andamento di accuracy e loss
56 plt.plot(history.history['accuracy'])
57 plt.plot(history.history['val_accuracy'])
58 plt.title('Model accuracy')
59 plt.ylabel('Accuracy')
60 plt.xlabel('Epoch')
61 plt.legend(['Train', 'Val'], loc='upper left')
62 plt.show()
63
64 plt.plot(history.history['loss'])
65 plt.plot(history.history['val_loss'])
66 plt.title('Model loss')
67 plt.ylabel('Loss')
68 plt.xlabel('Epoch')
69 plt.legend(['Train', 'Val'], loc='upper left')
70 plt.show()
71
72 model.save("./model/model.h5")
| Clean Code: No Issues Detected
|
1 from PIL import Image, ImageTk
2 from load_state import prepare_image
3 from utils import draw_state
4 from blocks_world import BlocksWorld
5 from search_algs import *
6 import argparse
7 from inspect import getfullargspec
8
9 # file che definisce lo script da linea di comando per utilizzare il programma
10
11
12 if __name__ == "__main__":
13
14 search_algs = {
15 "astar": a_star,
16 "ucs": ucs,
17 "rbfs": rbfs,
18 "bfs": graph_bfs,
19 "dfs": graph_dfs,
20 "ids": ids
21 }
22
23 parser = argparse.ArgumentParser(description="Blocks World")
24 parser.add_argument("--initial", "-i", type=str, default=None, required=True, help="The image representing the initial state")
25 parser.add_argument("--goal", "-g", type=str, default=None, required=True, help="The image representing the goal state")
26 parser.add_argument("--algorithm", "-a", type=str, default=None, required=True, help="The search algorithm used")
27 parser.add_argument("--debug", "-d", default=False, required=False, action='store_true', help="Shows the steps of the image processing")
28 parser.add_argument("--output", "-o", default=False, required=False, action='store_true', help="The solution is printed graphically")
29 args = vars(parser.parse_args())
30
31 initial_state_path = args["initial"]
32 goal_state_path = args["goal"]
33 search_alg = args["algorithm"]
34 debug = args["debug"]
35 output = args["output"]
36
37 initial_state = prepare_image(initial_state_path, debug)
38 goal_state = prepare_image(goal_state_path, debug)
39 print(initial_state)
40 print(goal_state)
41
42 functions = {
43 "ucs": lambda n: problem.depth(n),
44 "astar": lambda n: problem.misplaced_blocks(n),
45 "rbfs": lambda n: problem.misplaced_blocks(n)
46 }
47
48 problem = BlocksWorld(initial_state, goal_state)
49
50 if len(getfullargspec(search_algs[search_alg]).args) == 2:
51 problem.solution(search_algs[search_alg](problem, functions[search_alg]).solution(), output)
52 else:
53 problem.solution(search_algs[search_alg](problem).solution(), output) | 5 - warning: wildcard-import
15 - error: undefined-variable
16 - error: undefined-variable
17 - error: undefined-variable
18 - error: undefined-variable
19 - error: undefined-variable
20 - error: undefined-variable
43 - warning: unnecessary-lambda
44 - warning: unnecessary-lambda
45 - warning: unnecessary-lambda
1 - warning: unused-import
1 - warning: unused-import
3 - warning: unused-import
|
1 import heapq
2 import functools
3 import numpy as np
4 import cv2 as cv
5 import matplotlib.pyplot as plt
6
7 class PriorityQueue:
8 """A Queue in which the minimum (or maximum) element (as determined by f and
9 order) is returned first.
10 If order is 'min', the item with minimum f(x) is
11 returned first; if order is 'max', then it is the item with maximum f(x).
12 Also supports dict-like lookup."""
13
14 def __init__(self, order='min', f=lambda x: x):
15 self.heap = []
16 if order == 'min':
17 self.f = f
18 elif order == 'max': # now item with max f(x)
19 self.f = lambda x: -f(x) # will be popped first
20 else:
21 raise ValueError("Order must be either 'min' or 'max'.")
22
23 def append(self, item):
24 """Insert item at its correct position."""
25 heapq.heappush(self.heap, (self.f(item), item))
26
27 def extend(self, items):
28 """Insert each item in items at its correct position."""
29 for item in items:
30 self.append(item)
31
32 def pop(self):
33 """Pop and return the item (with min or max f(x) value)
34 depending on the order."""
35 if self.heap:
36 return heapq.heappop(self.heap)[1]
37 else:
38 raise Exception('Trying to pop from empty PriorityQueue.')
39
40 def __len__(self):
41 """Return current capacity of PriorityQueue."""
42 return len(self.heap)
43
44 def __contains__(self, key):
45 """Return True if the key is in PriorityQueue."""
46 return any([item == key for _, item in self.heap])
47
48 def __getitem__(self, key):
49 """Returns the first value associated with key in PriorityQueue.
50 Raises KeyError if key is not present."""
51 for value, item in self.heap:
52 if item == key:
53 return value
54 raise KeyError(str(key) + " is not in the priority queue")
55
56 def __delitem__(self, key):
57 """Delete the first occurrence of key."""
58 try:
59 del self.heap[[item == key for _, item in self.heap].index(True)]
60 except ValueError:
61 raise KeyError(str(key) + " is not in the priority queue")
62 heapq.heapify(self.heap)
63
64 def get_item(self, key):
65 """Returns the first node associated with key in PriorityQueue.
66 Raises KeyError if key is not present."""
67 for _, item in self.heap:
68 if item == key:
69 return item
70 raise KeyError(str(key) + " is not in the priority queue")
71
72
73 def is_in(elt, seq):
74 """Similar to (elt in seq), but compares with 'is', not '=='."""
75 return any(x is elt for x in seq)
76
77
78 def memoize(fn, slot=None, maxsize=32):
79 """Memoize fn: make it remember the computed value for any argument list.
80 If slot is specified, store result in that slot of first argument.
81 If slot is false, use lru_cache for caching the values."""
82 if slot:
83 def memoized_fn(obj, *args):
84 if hasattr(obj, slot):
85 return getattr(obj, slot)
86 else:
87 val = fn(obj, *args)
88 setattr(obj, slot, val)
89 return val
90 else:
91 @functools.lru_cache(maxsize=maxsize)
92 def memoized_fn(*args):
93 return fn(*args)
94
95 return memoized_fn
96
97
98 def draw_state(state, file_path):
99 blocks = [*state[0:-1]]
100 w = state[-1]
101 blocks.sort(key=lambda l: l[1], reverse=True)
102 h = blocks[0][1]
103 image = np.zeros(((h+1)*100, w*100), np.uint8)
104 for block in blocks:
105 n, i, j = block
106 i = h - i
107 digit = cv.imread("./images/digits/" + str(n) + ".jpg", 0)
108 digit = cv.resize(digit, (100, 100))
109 image[i*100:i*100 + 100, j*100:j*100 + 100] = ~digit
110 size = (len(state) - 1)*100
111 padded = np.zeros((size, w*100), np.uint8)
112 padded[size - (h+1)*100 : size, :] = image
113 h = len(state) - 1
114 bg = np.zeros((h*100 + 40, w*100 + 40), np.uint8)
115 bg[20: h*100 + 20, 20: w*100 + 20] = padded
116 bg[0:10, :] = 255
117 bg[h*100 + 30 : h*100 + 40, :] = 255
118 bg[:, 0:10] = 255
119 bg[h*100 + 30 : h*100 + 40, :] = 255
120 bg[:,w*100 + 30 : w*100 + 40] = 255
121 w, h = (w*100 + 40, h*100 + 40)
122 l = max(w, h)
123 adjust = np.zeros((l, l), np.uint8)
124 d_w = (l - w) // 2
125 d_h = (l - h) // 2
126 adjust[d_h: d_h + h, d_w: d_w + w] = bg
127 cv.imwrite("./images/temp/" + str(file_path) + ".jpg", ~adjust) | 35 - refactor: no-else-return
38 - warning: broad-exception-raised
46 - refactor: use-a-generator
61 - warning: raise-missing-from
84 - refactor: no-else-return
98 - refactor: too-many-locals
5 - warning: unused-import
|
1
2 import sys, datetime, requests
3 from bs4 import BeautifulSoup
4 from pymongo import MongoClient
5
6 # Fetch website HTML and parse jobs data out of it
7 def fetch(keyword):
8
9 SEARCH_URL = 'https://weworkremotely.com/jobs/search?term=%s'
10 CSS_QUERY = '#category-2 > article > ul > li a'
11
12 response = requests.get(SEARCH_URL % (keyword), timeout=10)
13
14 if response.status_code != requests.codes.ok:
15 return False
16
17 html = BeautifulSoup(response.text)
18 jobs = html.select(CSS_QUERY)
19
20 # If there's only one item in the list, then it's just a category
21 if len(jobs) <= 1:
22 return False
23
24 # We don't need the category...
25 del jobs[-1]
26
27 months = {
28 'Jan': '01',
29 'Feb': '02',
30 'Mar': '03',
31 'Apr': '04',
32 'May': '05',
33 'Jun': '06',
34 'Jul': '07',
35 'Aug': '08',
36 'Sep': '09',
37 'Oct': '10',
38 'Nov': '11',
39 'Dec': '12'
40 };
41 current_date = datetime.datetime.now()
42
43 result = []
44
45 for job in jobs:
46 job_id = job['href'].strip('/').split('/')[1].strip()
47 if not job_id:
48 continue
49 job_details = job.find_all('span')
50 # We should have exactly 3 "span" tags
51 if len(job_details) != 3:
52 continue
53 date_parts = ' '.join(job_details[2].string.split()).split(' ')
54 # Ugly hack, I know... but works perfectly
55 if len(date_parts[1]) == 1:
56 date_parts[1] = str('0' + date_parts[1])
57 result.append({
58 'job_id': job_id,
59 'title': job_details[1].string.strip(),
60 'company': job_details[0].string.strip(),
61 'date': '%s-%s-%s' % (current_date.year, months[date_parts[0]], date_parts[1])
62 })
63
64 return result
65
66 # Insert jobs in the database
67 def insert(jobs):
68 db = MongoClient()
69 for job in jobs:
70 db.we_work_remotely.jobs.update(
71 {
72 'job_id': job['job_id']
73 },
74 {
75 '$setOnInsert': job
76 },
77 True
78 )
79
80
81 # Helper function to terminate program execution gracefully
82 def exit_program(message='You shall not pass!'):
83 print(message)
84 sys.exit(0)
85
86 # Handle search keyword argument
87 SEARCH_TERM = 'php'
88 if len(sys.argv) == 2:
89 SEARCH_TERM = sys.argv[1].strip()
90
91 # Main script controller
92 def main():
93 try:
94 jobs = fetch(SEARCH_TERM)
95 if jobs == False:
96 exit_program()
97 insert(jobs)
98 except:
99 exit_program('Blame it on a boogie!..')
100
101 # Gimme some lovin'
102 if __name__ == '__main__':
103 main()
| 40 - warning: unnecessary-semicolon
14 - error: no-member
98 - warning: bare-except
|
1 from flask import Flask, Response, session, flash, request, redirect, render_template, g
2 import sys
3 import os
4 import base64
5 from flask_login import LoginManager, UserMixin, current_user, login_required, login_user, logout_user
6 import hashlib
7 from flask_openid import OpenID
8
9 errors = []
10
11 try:
12 from application import db
13 from application.models import Product, User, Image
14 import yaml
15
16 with open("db.yml") as db_file:
17 db_entries = yaml.safe_load(db_file)
18
19 db.create_all()
20 for user in db_entries["users"]:
21 usr = User(user["username"], user["password_hash"])
22 db.session.add(usr)
23 db.session.commit()
24 for project in db_entries["projects"]:
25 proj = Product(project["name"], project["description"], project["default_image"], 1, 0)
26 db.session.add(proj)
27 db.session.commit()
28 for i in range(0, len(project["images"])):
29 img = Image(project['name'], project["images"][i], i)
30 db.session.add(img)
31 db.session.commit()
32 db.session.close()
33 except Exception as err:
34 errors.append(err.message)
35
36 # EB looks for an 'application' callable by default.
37 application = Flask(__name__)
38
39 # config
40 application.config.update(
41 DEBUG = True,
42 SECRET_KEY = os.urandom(24)
43 )
44
45 @application.route("/login", methods=["GET", "POST"])
46 def login():
47 if str(request.method) == 'GET':
48 if not session.get('logged_in'):
49 return render_template('login.html')
50 else:
51 redirect("/")
52 username = request.form['username']
53 password = request.form['password']
54 password = hashlib.sha224(password.encode('utf-8')).hexdigest()
55 user = User.query.filter_by(username=username, password=password).first()
56 if user is not None:
57 session['logged_in'] = True
58 return redirect("/")
59 return redirect("/login")
60
61 @application.route("/logout")
62 def logout():
63 session['logged_in'] = False
64 return redirect('/')
65
66 @application.route('/')
67 def index():
68 return render_template('home.html')
69
70 @application.route('/gallery')
71 def gallery():
72 products = Product.query.order_by(Product.id.asc())
73 return render_template('products.html', products=products)
74
75 @application.route('/about')
76 def about():
77 return render_template('about.html')
78
79 @application.route('/contact')
80 def contact():
81 return render_template('contact.html')
82
83 @application.errorhandler(404)
84 def page_not_found(e):
85 return render_template('404.html'), 404
86
87 @application.route('/dir')
88 def stuff():
89 return str(dir(Product.id))
90
91 @application.route('/add', methods=['GET', 'POST'])
92 def add():
93 if not session.get('logged_in'):
94 return render_template('login.html')
95 if str(request.method) == 'POST':
96 try:
97 vals = request.form.to_dict()
98 files = request.files.getlist("image")
99 for i in range(0, len(files)):
100 file = files[i]
101 ext = file.filename.rsplit('.', 1)[1].lower()
102 if ext in ['png', 'jpg', 'jpeg']:
103 filename = "/static/images/" + base64.urlsafe_b64encode(file.filename) + "." + ext
104 file.save("." + filename)
105 if i == 0:
106 product = Product(vals['name'], vals['description'], filename, 1, 0)
107 db.session.add(product)
108 db.session.commit()
109 db.session.close()
110 img = Image(vals['name'], filename, i)
111 db.session.add(img)
112 db.session.commit()
113 db.session.close()
114 except Exception as err:
115 db.session.rollback()
116 return err.message
117 return render_template('add_product.html')
118
119 @application.route('/errors')
120 def get_errors():
121 return str(errors)
122
123 @application.route('/products')
124 def get_products():
125 products = Product.query.order_by(Product.id.desc())
126 stuff = [x.name for x in products]
127 return str(stuff)
128
129 @application.route('/pin/<pin_id>')
130 def pin_enlarge(pin_id):
131 p = Product.query.filter_by(id=pin_id).first()
132 images = Image.query.filter_by(name=p.name).order_by(Image.display_number.asc())
133 return render_template('pin_focus.html', p=p, images=images)
134
135 @application.route('/delete/<pin_id>')
136 def delete(pin_id):
137 Product.query.filter_by(id = pin_id).delete()
138 db.session.commit()
139 db.session.close()
140 return redirect("/gallery")
141
142 # run the app.
143 if __name__ == "__main__":
144 # Setting debug to True enables debug output. This line should be
145 # removed before deploying a production app.
146 application.debug = True
147 application.run()
| 25 - error: syntax-error
|
1 from application import db
2
3 class Product(db.Model):
4 id = db.Column('id', db.Integer, primary_key=True)
5 name = db.Column('name', db.String(128), nullable=False)
6 description = db.Column('description', db.TEXT, nullable=False)
7 image_path = db.Column('image_path', db.String(128), nullable=True)
8 quantity = db.Column('quantity', db.Integer, default=1)
9 price = db.Column('price', db.FLOAT, default=0.0)
10
11 def __init__(self, name, description, image_path='', quantity=1, price=0.0):
12 self.name = name
13 self.description = description
14 self.image_path = image_path
15 self.quantity = quantity
16 self.price = price
17
18 def __repr__(self):
19 return str({'name':self.name, 'description':self.description, 'image_path': self.image_path, 'quantity': self.quantity, 'price': self.price})
20
21 class User(db.Model):
22 id = db.Column(db.Integer, primary_key=True)
23 username = db.Column(db.String(128), index=True, unique=True)
24 password = db.Column(db.String(256), nullable=False)
25
26 def __init__(self, username, password):
27 self.username = username
28 self.password = password
29
30 def __repr__(self):
31 return '<User %r>' % (self.username)
32
33 class Image(db.Model):
34 id = db.Column('id', db.Integer, primary_key=True)
35 name = db.Column('name', db.String(128), nullable=False)
36 image_path = db.Column('image_path', db.String(128), nullable=False)
37 display_number = db.Column('display_number', db.Integer, nullable=False)
38
39 def __init__(self, name, image_path, display_number):
40 self.name = name
41 self.image_path = image_path
42 self.display_number = display_number
43
44 def __repr__(self):
45 return str({'name': self.name, 'image_path': self.image_path, 'display_number': self.display_number})
| 11 - refactor: too-many-arguments
11 - refactor: too-many-positional-arguments
3 - refactor: too-few-public-methods
21 - refactor: too-few-public-methods
33 - refactor: too-few-public-methods
|
1 from flask import Flask
2 from flask.ext.sqlalchemy import SQLAlchemy
3 import os
4
5 def get_config():
6 config = {}
7 if 'RDS_HOSTNAME' in os.environ:
8 env = {
9 'NAME': os.environ['RDS_DB_NAME'],
10 'USER': os.environ['RDS_USERNAME'],
11 'PASSWORD': os.environ['RDS_PASSWORD'],
12 'HOST': os.environ['RDS_HOSTNAME'],
13 'PORT': os.environ['RDS_PORT'],
14 }
15 config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://' + env['USER'] + ':' + env['PASSWORD'] + '@' + env['HOST'] + ':' + env['PORT'] + '/' + env['NAME']
16 config['SQLALCHEMY_POOL_RECYCLE'] = 3600
17 config['WTF_CSRF_ENABLED'] = True
18 else:
19 config = None
20 return config
21
22 config = get_config()
23 application = Flask(__name__)
24 db = None
25 if config is not None:
26 application.config.from_object(config)
27 try:
28 db = SQLAlchemy(application)
29 except Exception as err:
30 print(err.message)
| 6 - warning: redefined-outer-name
29 - warning: broad-exception-caught
30 - error: no-member
|
1 from argparse import ArgumentParser
2 from viaastatus.server import wsgi
3 import logging
4
5
6 def argparser():
7 """
8 Get the help and arguments specific to this module
9 """
10 parser = ArgumentParser(prog='status', description='A service that supplies status information about our platforms')
11
12 parser.add_argument('--debug', action='store_true',
13 help='run in debug mode')
14 parser.add_argument('--host',
15 help='hostname or ip to serve api')
16 parser.add_argument('--port', type=int, default=8080,
17 help='port used by the server')
18 parser.add_argument('--log-level', type=str.lower, default='warning', dest='log_level',
19 choices=list(map(str.lower, logging._nameToLevel.keys())),
20 help='set the logging output level')
21
22 return parser
23
24
25 def main():
26 args = argparser().parse_args()
27 logging.basicConfig(level=args.log_level.upper())
28 logging.getLogger().setLevel(args.log_level.upper())
29 del args.log_level
30 wsgi.create_app().run(**args)
31
32
33 if __name__ == '__main__':
34 main()
| 19 - warning: protected-access
|
1 from functools import wraps, partial
2 from flask import request, render_template
3
4
5 def cached(key='view/%s', cache=None, **extra_cache_kwargs):
6 def decorator(f):
7 @wraps(f)
8 def decorated(*args, **kwargs):
9 cache_key = key % request.path
10 rv = cache.get(cache_key)
11 if rv is not None:
12 return rv
13 rv = f(*args, **kwargs)
14 cache.set(cache_key, rv, **extra_cache_kwargs)
15 return rv
16 return decorated
17 return decorator
18
19
20 def cacher(cache, **kwargs):
21 return partial(cached, cache=cache, **kwargs)
22
23
24 def templated(template=None):
25 def decorator(f):
26 @wraps(f)
27 def decorated(*args, **kwargs):
28 template_name = template
29 if template_name is None:
30 template_name = request.endpoint \
31 .replace('.', '/') + '.html'
32 ctx = f(*args, **kwargs)
33 if ctx is None:
34 ctx = {}
35 elif not isinstance(ctx, dict):
36 return ctx
37 return render_template(template_name, **ctx)
38 return decorated
39 return decorator
| Clean Code: No Issues Detected
|
1 from setuptools import setup, find_packages
2
3 with open('README.md') as f:
4 long_description = f.read()
5
6 with open('requirements.txt') as f:
7 requirements = list(map(str.rstrip, f.readlines()))
8
9 setup(
10 name='viaastatus',
11 url='https://github.com/viaacode/status/',
12 version='0.0.3',
13 author='VIAA',
14 author_email='support@viaa.be',
15 descriptiona='Status services',
16 long_description=long_description,
17 classifiers=[
18 'Programming Language :: Python',
19 'Programming Language :: Python :: 3',
20 ],
21 python_requires='>=3.4',
22 packages=find_packages("src"),
23 package_dir={"": "src"},
24 package_data={'viaastatus': ['server/static/*']},
25 include_package_data=True,
26 install_requires=requirements,
27 extras_require={
28 'test': [
29 "pytest>=4.2.0"
30 ],
31 'loadtest': [
32 "locustio>=0.11.0"
33 ],
34 'gunicorn': [
35 'gunicorn>=19.9.0'
36 ],
37 'uwsgi': [
38 'uWSGI>=2.0.18'
39 ],
40 'waitress': [
41 'waitress>=1.2.1'
42 ]
43 },
44 platforms='any'
45 )
| 3 - warning: unspecified-encoding
6 - warning: unspecified-encoding
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.