Dataset columns:
code — string (20 to 13.2k characters)
label — string (21 to 6.26k characters)
# ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time   : 2020-03-04 15:31
# @Author : 吴林江
# @Email  : wulinjiang1@kingsoft.com
# @File   : test09.py
# ----------------------------------------------
import pymysql

# Open the database connection
db = pymysql.connect("host", "username", "pw", "db")

# Create a cursor object
cursor = db.cursor()

# Execute the query
cursor.execute("select * from db.tb")

# Fetch one row
data = cursor.fetchone()
print(data)

# Close the connection
db.close()

# The three normal forms of database design:
# 1. Atomicity: every column holds an indivisible, atomic value (apply as the requirements dictate)
# 2. Every column must depend on the whole primary key, not just part of it (relevant mainly to composite keys)
# 3. Every column must depend on the primary key directly, never transitively
Clean Code: No Issues Detected
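The connection call above passes host, user, password, and database positionally; PyMySQL 1.x accepts keyword arguments only. A minimal sketch of the keyword form (the connection values are placeholders, not values from the dataset):

    import pymysql

    # keyword arguments are mandatory in PyMySQL 1.x; these values are placeholders
    db = pymysql.connect(host="localhost", user="username",
                         password="pw", database="db")
    try:
        with db.cursor() as cursor:          # cursors are context managers
            cursor.execute("select * from db.tb")
            print(cursor.fetchone())
    finally:
        db.close()                           # always release the connection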
1   from contextlib import contextmanager
2   from collections import defaultdict
3
4
5   class Exchange:
6       def __init__(self):
7           self._subscribers = set()
8
9       def attach(self, task):
10          self._subscribers.add(task)
11
12      def detach(self, task):
13          self._subscribers.remove(task)
14
15      @contextmanager
16      def subscribe(self, *tasks):
17          for task in tasks:
18              self.attach(task)
19          try:
20              yield
21          finally:
22              for task in tasks:
23                  self.detach(task)
24
25      def send(self, msg):
26          for subscriber in self._subscribers:
27              subscriber.send(msg)
28
29
30  class Task:
31      def send(self, msg):
32          print(msg)
33
34
35  _changes = defaultdict(Exchange)
36
37
38  def get_change(name):
39      return _changes[name]
40
41
42  if __name__ == "__main__":
43      data = {'new1': 1, 'new3': 2, 'new2': 3}
44      # new = sorted(data.items())
45      print(dict(sorted(data.items())))
46      # exc = get_change('name')
47      # task_a = Task()
48      # task_b = Task()
49      # with exc.subscribe(task_a, task_b):
50      #     exc.send('msg1')
51      #     exc.send('msg2')
30 - refactor: too-few-public-methods
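Pylint flags Task (line 30) because a class with a single public method is often better as a plain function; if the class shape is deliberate, as it is for a minimal subscriber here, the conventional inline suppression looks like this (a sketch of one option, not the author's decision):

    class Task:  # pylint: disable=too-few-public-methods
        def send(self, msg):
            print(msg)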
1   from collections import deque
2
3
4   def search(lines, pattern, history):
5       pre_lines = deque(maxlen=history)
6       for line in lines:
7           if pattern in line:
8               pre_lines.append(line)
9       return pre_lines
10
11
12  if __name__ == "__main__":
13      with open('tmp/test', 'r') as f:
14          s = search(f, 'python', 5)
15          print(s)
16          s.append('python9')
17          s.appendleft('python')
18          s.pop()
19          s.popleft()
20          for line in s:
21              print(line)
22          print("end")
6 - warning: redefined-outer-name
13 - warning: unspecified-encoding
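Both warnings above have one-line fixes; a hedged sketch (utf-8 is my assumption about the file's encoding):

    if __name__ == "__main__":
        # an explicit encoding clears unspecified-encoding (W1514)
        with open('tmp/test', 'r', encoding='utf-8') as f:
            # a name other than `line` avoids shadowing the loop variable
            # inside search(), which is what redefined-outer-name points at
            matches = search(f, 'python', 5)
            for matched_line in matches:
                print(matched_line)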
1   class Structure1:
2       _fields = []
3
4       def __init__(self, *args, **kwargs):
5           if len(args) > len(self._fields):
6               raise TypeError('Expected {} arguments'.format(len(self._fields)))
7
8           for name, value in zip(self._fields, args):
9               setattr(self, name, value)
10
11          for name in self._fields[len(args):]:
12              setattr(self, name, kwargs.pop(name))
13
14          if kwargs:
15              raise TypeError('Invalid arguments {}'.format(','.join(kwargs)))
16
17
18  class Stock(Structure1):
19      _fields = ["name", "age", "career"]
20
21
22  class Structure2:
23      _fields = ["name", "age", "career"]
24
25      def __init__(self, *args, **kwargs):
26          if len(args) != len(self._fields):
27              raise TypeError('Expected {} arguments'.format(len(self._fields)))
28
29          for name, value in zip(self._fields, args):
30              setattr(self, name, value)
31
32          extra_args = kwargs.keys() - self._fields  # a dict itself does not support "-"; its key view does
33          for name in extra_args:
34              setattr(self, name, kwargs.pop(name))
35
36          if kwargs:
37              raise TypeError('Invalid arguments {}'.format(','.join(kwargs)))
38
39
40  if __name__ == "__main__":
41      data = ["test1", "test2", "name"]
42      kwargs = {"name": "wulj", "age": 23}
43      print(kwargs.keys() - data)
44      test_dict = {"name": "value", "test": "new"}
45      print(','.join(test_dict))
46      s1 = Stock("Alex", 23, "programmer")
47      print(s1.name, s1.age, s1.career)
48      s2 = Stock("lucy", age=22, career="teacher")
49      print(s2)
50      s3 = Stock("Mary", 23, "player", "like")  # four positional args: raises TypeError('Expected 3 arguments')
51      print(s3)
4 - warning: redefined-outer-name
1 - refactor: too-few-public-methods
18 - refactor: too-few-public-methods
25 - warning: redefined-outer-name
22 - refactor: too-few-public-methods
47 - error: no-member (3 occurrences)
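The three no-member errors on line 47 appear because name, age, and career are attached with setattr, which static analysis cannot follow. One hedged remedy (my suggestion, not part of the source) is to declare the attributes at class level so pylint can see them:

    class Stock(Structure1):
        _fields = ["name", "age", "career"]
        # class-level defaults make the dynamically set attributes
        # visible to pylint; setattr in __init__ still overwrites them
        name = None
        age = None
        career = None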
# ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time   : 2020-03-07 18:11
# @Author : 吴林江
# @Email  : wulinjiang1@kingsoft.com
# @File   : test16.py
# ----------------------------------------------
import re


if __name__ == "__main__":
    # Match an address with a regular expression
    s = "www.baidu.com.jkjh"
    # note: the unescaped dots below match any character, not just '.'
    if re.match(r'(.*).(.*).(.*)', s):
        print("pass")
    r = re.findall(r'(.*)\.(.*)\.(.*)', s)
    print(r)
    s = " 98 100 102 "
    s = re.sub(r' (\d+) (\d+) (\d+) ', r'\3/\2/\1', s)
    print(s)
    # (.*) vs (.*?): the former is greedy (longest match), the latter non-greedy (shortest match)
    text = 'Computer says "no." Phone says "yes."'
    t1 = re.findall(r'"(.*)"', text)
    t2 = re.findall(r'"(.*?)"', text)
    print(t1)
    print(t2)
    # A regular expression for matching email addresses
    text1 = "test@wulj.com, test2@wulinjiang.com,"
    t3 = re.findall(r'\s*(.*?)@(.*?).com,\s*', text1)
    print(t3)
Clean Code: No Issues Detected
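The email pattern above leaves the dot in ".com" unescaped, so it would also match, say, "Xcom". A hedged, slightly tightened variant (still an illustration, nowhere near a full RFC 5322 matcher; the character classes are my simplification):

    import re

    text1 = "test@wulj.com, test2@wulinjiang.com,"
    # \. forces a literal dot before "com"
    pairs = re.findall(r'([\w.]+)@([\w.]+)\.com', text1)
    print(pairs)  # [('test', 'wulj'), ('test2', 'wulinjiang')]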
# ----------------------------------------------
# -*- coding: utf-8 -*-
# @Time   : 2019-11-25 17:49
# @Author : 吴林江
# @Email  : wulinjiang1@kingsoft.com
# @File   : test-elasticsearch.py
# ----------------------------------------------
from elasticsearch import Elasticsearch
from ssl import create_default_context


if __name__ == "__main__":
    context = create_default_context(cafile="./ca.crt")
    es = Elasticsearch(
        ['10.100.51.164'],
        http_auth=('elastic', 'K6fgGGmOu359V4GY3TOw'),
        scheme="https",
        port=9200,
        ssl_context=context
    )
    print(es.info())
Clean Code: No Issues Detected
from keras.models import Sequential
from keras import layers
from keras.preprocessing.text import Tokenizer
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split

# read the file
df = pd.read_csv('train.tsv',
                 header=None,
                 delimiter='\t', low_memory=False)
# label the columns
df.columns = ['PhraseID', 'SentenceID', 'Phrase', 'Sentiment']
sentences = df['Phrase'].values
y = df['Sentiment'].values

tokenizer = Tokenizer(num_words=2000)
tokenizer.fit_on_texts(sentences)
sentences = tokenizer.texts_to_matrix(sentences)

le = preprocessing.LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(sentences, y, test_size=0.25, random_state=1000)

# Number of features
# print(input_dim)
model = Sequential()
model.add(layers.Dense(300, input_dim=2000, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=5, verbose=True,
                    validation_data=(X_test, y_test), batch_size=256)
Clean Code: No Issues Detected
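The script above ends right after fit; a hedged follow-on that reads back the held-out metrics (names follow the snippet; the numbers depend entirely on train.tsv):

    # evaluate() returns [loss, metric...] in the order given to compile()
    loss, acc = model.evaluate(X_test, y_test, verbose=False)
    print("Held-out accuracy: {:.4f}".format(acc))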
1    #!/usr/bin/env python
2
3
4    import re
5    import numpy
6    import yaml
7    import sys
8    import argparse
9    try:
10       from pymclevel import mclevel
11       from pymclevel.box import BoundingBox
12   except:
13       print ("\nERROR: pymclevel could not be imported")
14       print ("   Get it with git clone git://github.com/mcedit/pymclevel.git\n\n")
15       raise
16   import os
17
18   ############################################################################
19   ############################################################################
20   class Map2d2Minecraft():
21   ############################################################################
22   ############################################################################
23       ###############################################
24       def __init__(self):
25       ###############################################
26           self.settings = {}
27
28       ###############################################
29       def readBlockInfo(self, keyword):
30       ###############################################
31           blockID, data = map(int, keyword.split(":"))
32           blockInfo = self.level.materials.blockWithID(blockID, data)
33           return blockInfo
34
35       ###############################################
36       def read_settings(self, filename):
37       ###############################################
38
39           defaults = {
40               "level_name" : "robot_map",
41               "map_file" : "/home/jfstepha/ros_workspace/maps/map_whole_house_13_02_17_fixed.pgm",
42               "occ_thresh" : 200,
43               "empty_thresh" : 250,
44               "empty_item" : "12:0",
45               "empty_height" : 1,
46               "occupied_item" : "5:0",
47               "occupied_height" : 15,
48               "unexplored_item" : "3:0",
49               "origin_x" : 0,
50               "origin_y" : 100,
51               "origin_z" : 0,
52               "spawn_x" : 246,
53               "spawn_y" : 1,
54               "spawn_z" : 77,
55               "oversize" : 100,
56               "clear_height" : 256,
57               "do_ceiling" : True,
58               "ceiling_item" : "89:0"}
59
60           parser = argparse.ArgumentParser(description='Translate a ROS map to a minecraft world')
61           parser.add_argument("--settings", default=filename, dest="filename")
62           for setting in defaults.keys():
63               parser.add_argument("--"+setting, dest=setting)
64
65           args = parser.parse_args()
66
67           print( "reading settings from %s" % args.filename)
68           this_dir, this_file = os.path.split( os.path.realpath(__file__) )
69           stream = open( os.path.join( this_dir, args.filename ) )
70           settings_file = yaml.load(stream)
71
72           for setting in defaults.keys():
73               if vars(args)[setting] == None:
74                   if setting in settings_file:
75                       self.settings[ setting ] = settings_file[ setting ]
76                   else:
77                       self.settings[ setting ] = defaults[ setting ]
78               else:
79                   self.settings[ setting ] = vars(args)[setting]
80
81           print( "settings: %s" % (str(self.settings)))
82
83       ###############################################
84       def do_convert(self, image):
85       ###############################################
86           filename = self.settings["level_name"]
87           self.level = mclevel.fromFile(filename)
88           self.level.setPlayerGameType(1, "Player")
89           pos = [self.settings["spawn_x"], self.settings["spawn_y"], self.settings["spawn_z"]]
90
91           self.level.setPlayerPosition( pos )
92           self.level.setPlayerSpawnPosition( pos )
93
94           rows = image.shape[0]
95           cols = image.shape[1]
96
97           o_x = self.settings["origin_x"]
98           o_y = self.settings["origin_y"]
99           o_z = self.settings["origin_z"]
100          ovs = self.settings["oversize"]
101
102          box = BoundingBox( (o_x - ovs, o_y - ovs, o_z - ovs ),
103                             ( rows + ovs * 2, ovs * 2, cols + ovs * 2))
104
105          print("creating chunks")
106          chunksCreated = self.level.createChunksInBox( box )
107          print("Created %d chunks" % len( chunksCreated ) )
108
109          print("filling air")
110          self.level.fillBlocks( box, self.level.materials.blockWithID(0,0) )
111          print("filled %d blocks" % box.volume )
112
113          print("filling base layer")
114          box = BoundingBox( (o_x - ovs, o_y - 10, o_z - ovs ),
115                             ( rows + ovs * 2, 10, cols + ovs * 2))
116          item = self.readBlockInfo( self.settings["unexplored_item"] )
117          self.level.fillBlocks( box, item )
118          print("filled %d blocks" % box.volume )
119
120          print("creating map")
121
122          for r in range( rows ):
123
124
125              print(" row %d / %d" % (r, rows) );
126
127              for c in range( cols ):
128                  x = o_x + r
129                  y = o_y
130                  z = o_z + c
131
132                  if image[rows-r-1,c] > self.settings["empty_thresh"]:
133                      item = self.readBlockInfo( self.settings["empty_item"])
134                      self.level.setBlockAt(x,y,z, item.ID)
135                      if self.settings["do_ceiling"] :
136                          item = self.readBlockInfo( self.settings["ceiling_item"])
137                          y2 = y + self.settings["occupied_height"]
138                          self.level.setBlockAt(x,y2,z, item.ID)
139                  if image[rows-r-1,c] < self.settings["occ_thresh"]:
140                      h = self.settings["occupied_height"]
141                      item = self.readBlockInfo( self.settings["occupied_item"])
142                      box = BoundingBox( (x,y,z),(1,h,1) )
143
144                      self.level.fillBlocks( box, item )
145          print("saving map")
146          self.level.saveInPlace()
147
148          print("done")
149
150      ###############################################
151      def read_pgm(self, filename, byteorder='>'):
152      ###############################################
153          """Return image data from a raw PGM file as numpy array.
154
155          Format specification: http://netpbm.sourceforge.net/doc/pgm.html
156
157          """
158          with open(filename, 'rb') as f:
159              buffer = f.read()
160          try:
161              header, width, height, maxval = re.search(
162                  b"(^P5\s(?:\s*#.*[\r\n])*"
163                  b"(\d+)\s(?:\s*#.*[\r\n])*"
164                  b"(\d+)\s(?:\s*#.*[\r\n])*"
165                  b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups()
166          except AttributeError:
167              raise ValueError("Not a raw PGM file: '%s'" % filename)
168          return numpy.frombuffer(buffer,
169                                  dtype='u1' if int(maxval) < 256 else byteorder+'u2',
170                                  count=int(width)*int(height),
171                                  offset=len(header)
172                                  ).reshape((int(height), int(width)))
173
174      ###############################################
175      def create_map(self):
176      ###############################################
177          if (os.path.exists( self.settings["level_name"])) :
178              print("ERROR: %s directory already exists. Delete it or pick a new name" % self.settings["level_name"])
179              sys.exit()
180          if (os.path.exists( os.getenv("HOME") + "/.minecraft/saves/" + self.settings["level_name"])) :
181              print("ERROR: Minecraft world %s already exists. Delete it (at ~/.minecraft/saves/%s) or pick a new name" % (self.settings["level_name"], self.settings["level_name"]))
182              sys.exit()
183          print("creating map file")
184          os.system("pymclevel/mce.py " + self.settings["level_name"] + " create")
185      ###############################################
186      def move_map(self):
187      ###############################################
188          print("moving to minecraft saves")
189          os.system("mv %s ~/.minecraft/saves/" % self.settings["level_name"])
190
191
192  if __name__ == "__main__":
193      map2d2minecraft = Map2d2Minecraft()
194      map2d2minecraft.read_settings("map_2d.yaml")
195      image = map2d2minecraft.read_pgm(map2d2minecraft.settings["map_file"], byteorder='<')
196      map2d2minecraft.create_map()
197      map2d2minecraft.do_convert( image )
198      map2d2minecraft.move_map()
199
125 - warning: unnecessary-semicolon
162 - warning: anomalous-backslash-in-string (2 occurrences)
163 - warning: anomalous-backslash-in-string (3 occurrences)
164 - warning: anomalous-backslash-in-string (3 occurrences)
165 - warning: anomalous-backslash-in-string (4 occurrences)
69 - warning: unspecified-encoding
70 - error: no-value-for-parameter
69 - refactor: consider-using-with
68 - warning: unused-variable
84 - refactor: too-many-locals
84 - warning: redefined-outer-name
167 - warning: raise-missing-from
87 - warning: attribute-defined-outside-init
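All of the anomalous-backslash warnings come from the PGM header pattern on lines 162-165, where \s and \d sit in plain byte strings. Raw byte strings keep the escapes intact for the regex engine and clear the warnings without changing behavior; a sketch of that one change:

    # rb"..." is a raw byte string: backslashes reach re.search unmodified
    header_pattern = (
        rb"(^P5\s(?:\s*#.*[\r\n])*"
        rb"(\d+)\s(?:\s*#.*[\r\n])*"
        rb"(\d+)\s(?:\s*#.*[\r\n])*"
        rb"(\d+)\s(?:\s*#.*[\r\n]\s)*)"
    )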
1   import pygame
2   import random
3   from Intel import *
4
5   # Level class (wall placement)
6
7   class Level:
8
9       # Class setup
10      def __init__(self, map_pool):
11          self.map_pool = map_pool
12          self.map_structure = []
13          self.position_x = 0
14          self.position_y = 0
15          self.sprite_x = int(0 / 30)
16          self.sprite_y = int(0 / 30)
17          self.image_Macgyver = pygame.image.load(MacGyver).convert_alpha()
18          self.image_Guardian = pygame.image.load(Guardian).convert_alpha()
19          self.background = pygame.image.load(Background).convert()
20
21      # Build the level list from the map file
22      def level(self):
23          with open(self.map_pool, "r") as map_pool:
24              level_structure = []
25              for line in map_pool:
26                  line_level = []
27                  for char in line:
28                      if char != '\n':  # was '/n', which never matches a newline
29                          line_level.append(char)
30                  level_structure.append(line_level)
31              self.map_structure = level_structure
32
33      # Wall placement
34      def display_wall(self, screen):
35
36          wall = pygame.image.load(Wall).convert_alpha()
37          screen.blit(self.background, (0, 0))
38          num_line = 0
39          for ligne_horiz in self.map_structure:
40              num_col = 0
41              for ligne_verti in ligne_horiz:
42                  position_x = num_col * Sprite_Size
43                  position_y = num_line * Sprite_Size
44                  if ligne_verti == str(1):
45                      screen.blit(wall, (position_x, position_y))
46                  num_col += 1
47              num_line += 1
3 - warning: wildcard-import
7 - refactor: too-many-instance-attributes
17 - error: undefined-variable
18 - error: undefined-variable
19 - error: undefined-variable
23 - warning: unspecified-encoding
36 - error: undefined-variable
42 - error: undefined-variable
43 - error: undefined-variable
2 - warning: unused-import
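Every undefined-variable above traces back to `from Intel import *`, which hides the names from pylint. A hedged explicit form (the names are taken from the Intel module shown further down in this dump):

    # explicit imports let pylint resolve MacGyver, Guardian, Background,
    # Wall and Sprite_Size, clearing both the warning and the errors
    from Intel import MacGyver, Guardian, Background, Wall, Sprite_Size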
1   import pygame
2   import random
3   from Intel import *
4
5
6   # Item placement class
7
8   class Items:
9
10      # Class setup
11      def __init__(self, map_pool):
12          self.item_needle = pygame.image.load(Object_N).convert_alpha()
13          self.item_ether = pygame.image.load(Object_E).convert_alpha()
14          self.item_tube = pygame.image.load(Object_T).convert_alpha()
15
16      # Item spawn method
17      def items_spawn(self, screen):
18          while items:
19              rand_x = random.randint(0, 14)
20              rand_y = random.randint(0, 14)
21              if self.map_structure[rand_x][rand_y] == 0:
22                  screen.blit(self.image_(Object_N), (rand_x, rand_y))
3 - warning: wildcard-import
12 - error: undefined-variable
13 - error: undefined-variable
14 - error: undefined-variable
11 - warning: unused-argument
18 - error: undefined-variable
21 - error: no-member
22 - error: no-member
22 - error: undefined-variable
8 - refactor: too-few-public-methods
# Global variables and image paths

Sprite_Size_Level = 15
Sprite_Size = 30
Size_Level = Sprite_Size_Level * Sprite_Size

Background = 'images/Background.jpg'
Wall = 'images/Wall.png'
MacGyver = 'images/MacGyver.png'
Guardian = 'images/Guardian.png'
Object_N = 'images/Needle.png'
Object_E = 'images/Ether.png'
Object_T = 'images/Tube.png'
items = ["Object_N", "Object_E", "Object_T"]
Clean Code: No Issues Detected
1   import pygame
2   from Maze import *
3   from Intel import *
4   from Characters import *
5   from Items import *
6   from pygame import K_DOWN, K_UP, K_LEFT, K_RIGHT
7
8   # Main game class: movement handling and display
9   class Master:
10
11      def master():
12          pygame.init()
13          screen = pygame.display.set_mode((Size_Level, Size_Level))
14          maze = Level("Map.txt")
15          maze.level()
16          # Refresh loop
17          while 1:
18              for event in pygame.event.get():
19                  if event.type == pygame.KEYDOWN:
20                      if event.key == K_DOWN:
21                          Characters.move_mg(maze, 'down', screen)
22                      if event.key == K_UP:
23                          Characters.move_mg(maze, 'up', screen)
24                      if event.key == K_LEFT:
25                          Characters.move_mg(maze, 'left', screen)
26                      if event.key == K_RIGHT:
27                          Characters.move_mg(maze, 'right', screen)
28              maze.display_wall(screen)
29              Characters.blit_mg(maze, screen)
30              Characters.move_mg(maze, 'direction', screen)
31              Characters.blit_g(maze, screen)
32              Items.items_spawn(maze, screen)
33              pygame.display.flip()
34
35  if __name__ == "__main__":
36      master()
2 - warning: wildcard-import
3 - warning: wildcard-import
4 - warning: wildcard-import
5 - warning: wildcard-import
11 - error: no-method-argument
13 - error: undefined-variable (2 occurrences)
14 - error: undefined-variable
21 - error: undefined-variable
23 - error: undefined-variable
25 - error: undefined-variable
27 - error: undefined-variable
29 - error: undefined-variable
30 - error: undefined-variable
31 - error: undefined-variable
32 - error: undefined-variable
9 - refactor: too-few-public-methods
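The no-method-argument error on line 11 is because master() sits in a class but takes no self; one hedged fix, if the class is to be kept at all, is a @staticmethod (a sketch of the opening only, the loop body is unchanged):

    class Master:
        @staticmethod
        def master():
            # a @staticmethod needs no self/cls, which is exactly what
            # pylint's no-method-argument was pointing at
            pygame.init()
            # ... rest of the refresh loop as above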
1   import pygame
2   from Intel import *
3
4   class Characters:
5
6       def __init__(self, map_pool):
7           self.map_pool = map_pool
8           self.position_x = 0
9           self.position_y = 0
10          self.sprite_x = int(0 / 30)
11          self.sprite_y = int(0 / 30)
12          self.image_Macgyver = pygame.image.load(MacGyver).convert_alpha()
13          self.image_Guardian = pygame.image.load(Guardian).convert_alpha()
14
15      # MacGyver placement
16      def blit_mg(self, screen):
17          screen.blit(self.image_Macgyver, (self.position_x, self.position_y))
18      # Guardian placement
19      def blit_g(self, screen):
20          num_line = 14
21          for line in self.map_structure:
22              num_col = 14
23              for ligne_verti in line:
24                  position_x = num_col * Sprite_Size
25                  position_y = num_line * Sprite_Size
26                  if ligne_verti == str(3):
27                      screen.blit(self.image_Guardian, (position_x, position_y))
28                  else:
29                      if ligne_verti == str(3):
30                          self.available_tiles.append((num_col, num_line))
31
32      # Movement method for MacGyver (the player)
33      def move_mg(self, direction, screen):
34          if direction == 'down':
35              if self.sprite_y < (Sprite_Size_Level - 1):
36                  if self.map_structure[self.sprite_y+1][self.sprite_x] != '1':
37                      self.position_y += 30
38                      self.sprite_y += 1
39
40          elif direction == 'up':
41              if self.sprite_y > 0:
42                  if self.map_structure[self.sprite_y-1][self.sprite_x] != '1':
43                      self.position_y -= 30
44                      self.sprite_y -= 1
45
46          elif direction == 'left':
47              if self.sprite_x > 0:
48                  if self.map_structure[self.sprite_y][self.sprite_x-1] != '1':
49                      self.position_x -= 30
50                      self.sprite_x -= 1
51
52          elif direction == 'right':
53              if self.sprite_x < (Sprite_Size_Level - 1):
54                  if self.map_structure[self.sprite_y][self.sprite_x+1] != '1':
55                      self.position_x += 30
56                      self.sprite_x += 1
bad-indentation (warning) on lines 6-13, 16-17, 19-30, 33-38, 40-44, 46-50, 52-56
2 - warning: wildcard-import
12 - error: undefined-variable
13 - error: undefined-variable
21 - error: no-member
24 - error: undefined-variable
25 - error: undefined-variable
30 - error: no-member
35 - error: undefined-variable
36 - error: no-member
42 - error: no-member
48 - error: no-member
53 - error: undefined-variable
54 - error: no-member
33 - warning: unused-argument
1   #import tabledef
2   #from tabledef import User, MentoreeTopic, Topic
3   import requests
4   print requests
5   # import pdb
6
7
8   # def send_message(recipient, subject, text):
9   #     return requests.post(
10  #         "https://api.mailgun.net/v2/samples.mailgun.org/messages",
11  #         auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
12  #         data={"from": "Mentoree Match <mentoreematch@app27934969.mailgun.org>",
13  #               "to": recipient.email_address,
14  #               "subject": subject,
15  #               "text": "Testing some Mailgun awesomness!"})
16
17  def send_message():
18      # pdb.set_trace()
19      print dir(requests)
20      x = requests.post(
21          "https://api.mailgun.net/v2/samples.mailgun.org/messages",
22          auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
23          data={"from": "Mentoree Match <mentoreematch@app27934969.mailgun.org>",
24                "to": "Daphnejwang@gmail.com",
25                "subject": "testing email",
26                "text": "Testing some Mailgun awesomness!"})
27      return 'hi'
28  # key = 'YOUR API KEY HERE'
29  # sandbox = 'YOUR SANDBOX URL HERE'
30  # recipient = 'YOUR EMAIL HERE'
31
32  # request_url = 'https://api.mailgun.net/v2/{0}/messages'.format(sandbox)
33  # request = requests.post(request_url, auth=('api', key), data={
34  #     'from': 'hello@example.com',
35  #     'to': recipient,
36  #     'subject': 'Hello',
37  #     'text': 'Hello from Mailgun'
38  # })
39
40  # print 'Status: {0}'.format(request.status_code)
41  # print 'Body: {0}'.format(request.text)
42
43  send_message()
4 - error: syntax-error
# from flask import Flask, render_template, redirect, request, flash, url_for, session
# import jinja2
# import tabledef
# from tabledef import Users, MentorCareer, MentorSkills
# from xml.dom.minidom import parseString
# import os
# import urllib

# app = Flask(__name__)
# app.secret_key = "topsecretkey"
# app.jinja_env.undefined = jinja2.StrictUndefined

# @app.route("/")
# def index():
#     print "hello"
#     return "hello"

# @app.route("/login", methods=["GET"])
# def get_userlogin():
#     error = None
#     f = urllib.urlopen("http://127.0.0.1:5000/login")
#     print "!~~~~!~~~~!"
#     print f.read()
#     # url = os.environ['HTTP_HOST']
#     # xmlDoc = parseString(url)
#     # print xmlDoc
#     # linkedin_auth = {}
#     return render_template("login.html", error = error)

# @app.route("/login", methods=["POST"])
# def login_user():
#     found_user = tabledef.dbsession.query(User).filter_by(email=request.form['email']).first()
#     print "found user", found_user
#     error = None
#     if found_user:
#         print "User found"
#         session['user'] = found_user.id
#         return redirect("/")
#     else:
#         print "User not found"
#         #flash('Invalid username/password.')
#         error = "Invalid Username"
#         return render_template('login.html', error = error)
#     # return redirect("/")

# @app.route("/create_newuser", methods=["GET"])
# def get_newuser():
#     return render_template("newuser.html")

# @app.route("/create_newuser", methods=["POST"])
# def create_newuser():
#     # print "SESSION", tabledef.dbsession
#     user_exists = tabledef.dbsession.query(User).filter_by(email=request.form['email']).first()
#     print "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
#     print "USER EXISTS", user_exists
#     if user_exists != None:
#         flash(" User already exists. Please login")
#         return redirect("/create_newuser")
#     else:
#         user = User(email=request.form['email'], password= request.form['password'], age=request.form['age'], sex=request.form['sex'], occupation=request.form['occupation'], zipcode=request.form['zipcode'])
#         tabledef.dbsession.add(user)
#         tabledef.dbsession.commit()
#         flash("Successfully added new user!")
#         return redirect("/")


# if __name__ == "__main__":
#     app.run(debug = True)
Clean Code: No Issues Detected
1   from flask_oauthlib.client import OAuth
2   from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
3   import jinja2
4   import tabledef
5   from tabledef import *
6   from sqlalchemy import update
7   from xml.dom.minidom import parseString
8   import os
9   import urllib
10  import json
11  from Project import app
12  import pdb
13  from tabledef import User
14
15  oauth = OAuth(app)
16
17  linkedin = oauth.remote_app(
18      'linkedin',
19      consumer_key='75ifkmbvuebxtg',
20      consumer_secret='LAUPNTnEbsBu7axq',
21      request_token_params={
22          'scope': 'r_fullprofile,r_basicprofile,r_emailaddress',
23          'state': 'RandomString',
24      },
25      base_url='https://api.linkedin.com/v1/',
26      request_token_url=None,
27      access_token_method='POST',
28      access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',
29      authorize_url='https://www.linkedin.com/uas/oauth2/authorization',
30  )
31
32
33  def authorized(resp):
34      if resp is None:
35          return 'Access denied: reason=%s error=%s' % (
36              request.args['error_reason'],
37              request.args['error_description']
38          )
39      session['linkedin_token'] = (resp['access_token'], '')
40      linkedin_json_string = linkedin.get('people/~:(id,first-name,last-name,industry,headline,site-standard-profile-request,certifications,educations,summary,specialties,positions,picture-url,email-address)')
41      session['linkedin_id'] = linkedin_json_string.data['id']
42
43      tabledef.import_linkedin_user(linkedin_json_string.data)
44      return jsonify(linkedin_json_string.data)
45
46
47  @linkedin.tokengetter
48  def get_linkedin_oauth_token():
49      return session.get('linkedin_token')
50
51  def change_linkedin_query(uri, headers, body):
52      auth = headers.pop('Authorization')
53      headers['x-li-format'] = 'json'
54      if auth:
55          auth = auth.replace('Bearer', '').strip()
56          if '?' in uri:
57              uri += '&oauth2_access_token=' + auth
58          else:
59              uri += '?oauth2_access_token=' + auth
60      return uri, headers, body
61
62  def save_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics):
63      tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=session['linkedin_id']).update({
64          'mentor': mentoree_choice,
65          'age': age_range,
66          'gender': gender_input,
67          'description': description_input,
68          'new_user': False})
69
70      for topics in mentor_topics:
71          mentor_selected_topics = tabledef.MentoreeTopic(topic_id=topics, mentor_id=session['linkedin_id'])
72          tabledef.dbsession.add(mentor_selected_topics)
73      return tabledef.dbsession.commit()
74
75  def update_additional_user_data(mentoree_choice, age_range, gender_input, description_input, mentor_topics):
76      user = tabledef.dbsession.query(User).filter_by(linkedin_id=session['linkedin_id']).first()
77
78      user.mentor = mentoree_choice
79      user.age = age_range
80      user.gender = gender_input
81      user.description = description_input
82
83      current_selected_topics = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(mentor_id=session['linkedin_id']).all()
84      for curr_topics in current_selected_topics:
85          tabledef.dbsession.delete(curr_topics)
86      # pdb.set_trace()
87      for topics in mentor_topics:
88          mentor_selected_topics = tabledef.MentoreeTopic(topic_id=topics, mentor_id=session['linkedin_id'])
89          tabledef.dbsession.add(mentor_selected_topics)
90
91      return tabledef.dbsession.commit()
92
93  linkedin.pre_request = change_linkedin_query
5 - warning: wildcard-import
2 - warning: unused-import (5 occurrences)
3 - warning: unused-import
6 - warning: unused-import
7 - warning: unused-import
8 - warning: unused-import
9 - warning: unused-import
10 - warning: unused-import
12 - warning: unused-import
1   import tabledef
2   from tabledef import Topic
3
4   TOPICS = {1: "Arts & Crafts",
5             2: "Career & Business",
6             3: "Community & Environment",
7             4: "Education & Learning",
8             5: "Fitness",
9             6: "Food & Drinks",
10            7: "Health & Well Being",
11            8: "Language & Ethnic Identity",
12            9: "Life Experiences",
13            10: "Literature & Writing",
14            11: "Motivation",
15            12: "New Age & Spirituality",
16            13: "Outdoors & Adventure",
17            14: "Parents & Family",
18            15: "Peer Pressure",
19            16: "Pets & Animals",
20            17: "Religion & Beliefs",
21            18: "Self-improvement/Growth",
22            19: "Sports & Recreation",
23            20: "Support",
24            21: "Tech",
25            22: "Women"}
26
27  def seed_topic_table():
28      topics = []
29      for items in TOPICS:
30          topics.append(Topic(title=TOPICS[items]))
31      print "~~~~~ TOPICS ~~~~~~~"
32      print topics
33      tabledef.dbsession.add_all(topics)
34      tabledef.dbsession.commit()
35
36  seed_topic_table()
31 - error: syntax-error
from Project import app

# set the secret key before run(): run() blocks until the server stops
app.secret_key = 'development'
# app.run(debug=True)
app.run(debug=True)
Clean Code: No Issues Detected
1   import tabledef
2   from tabledef import User, MentoreeTopic, Topic, Email, Endorsement
3   import requests
4   import sqlalchemy
5   from sqlalchemy import update
6   import datetime
7   from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
8
9   # import pdb
10
11  def save_endorsement_info_to_database(sender, mentor, endorsement_title, endorsement_body):
12      today = datetime.datetime.now()
13      endorsement_info = tabledef.Endorsement(sender_id=sender, receiver_id=mentor, title=endorsement_title, endorsements_text=endorsement_body, sent_date=today)
14      print "!!~~~!!^^^ endorsement_info info"
15      print endorsement_info
16      tabledef.dbsession.add(endorsement_info)
17      return tabledef.dbsession.commit()
18
19  def get_endorsement_info_per_mentor(linkedin_id):
20      endorsement_hist = tabledef.dbsession.query(Endorsement).filter_by(receiver_id=linkedin_id).all()
21      # for endorsements in endorsement_hist:
22      #     print "!^^^^^^^^^^^^^^^^endorsement history!! ^^^^^^^^^^^^^^^^^^^^^"
23      #     print endorsements.sender.picture_url
24      return endorsement_hist
25
26  def get_endorsement_info_for_self():
27      profile_endorsement_hist = tabledef.dbsession.query(Endorsement).filter_by(receiver_id=session['linkedin_id']).all()
28      for endorsements in profile_endorsement_hist:
29          print "!^^^^^^^^^^^^^^^^endorsements_text!!^^^^^^^^^^^^^^^^"
30          print endorsements.endorsements_text
31      return profile_endorsement_hist
14 - error: syntax-error
1   import tabledef
2   from tabledef import User, MentoreeTopic, Topic, Email
3   import requests
4   import sqlalchemy
5   from sqlalchemy import update
6   import datetime
7   from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
8
9   # import pdb
10
11  def save_email_info_to_database(sender, mentor, subject, subject_body):
12      today = datetime.datetime.now()
13      email_info = tabledef.Email(sender_id=sender, receiver_id=mentor, subject=subject, text_body=subject_body, sent_date=today)
14      print "!!~~~!!^^^ email info"
15      print email_info
16      tabledef.dbsession.add(email_info)
17      return tabledef.dbsession.commit()
18
19
20  def send_email(sender_email, mentor_email, subject, subject_body):
21      return requests.post(
22          "https://api.mailgun.net/v2/app27934969.mailgun.org/messages",
23          auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
24          data={"from": sender_email,
25                "to": mentor_email,
26                "subject": subject,
27                "text": subject_body})
28
29  def get_email_history_per_mentor(linkedin_id):
30      email_hist = tabledef.dbsession.query(Email).filter_by(sender_id=session['linkedin_id']).filter_by(receiver_id=linkedin_id).all()
31      return email_hist
32
33  def get_sent_email_history_per_sender():
34      email_hist = tabledef.dbsession.query(Email).filter_by(sender_id=session['linkedin_id']).all()
35      return email_hist
36
37  def get_email_history():
38      email_hist = tabledef.dbsession.query(Email).filter_by(receiver_id=session['linkedin_id']).all()
39      for mail in email_hist:
40          print "~!@#$%^&*( email history!! !@#$%^&"
41          print mail.subject
42      return email_hist
43
44  def get_email_with_id(email_id):
45      email_id = tabledef.dbsession.query(Email).filter_by(id=email_id).all()
46      eid = email_id[0]
47      return eid
48
49  def format_json(row):
50      formatted_json_dict = {}
51      for column in row.__table__.columns:
52          formatted_json_dict[column.name] = str(getattr(row, column.name))
53      return formatted_json_dict
54
55  def delete_email(id):
56      deleted_email = tabledef.dbsession.query(Email).filter_by(id=id).first()
57      tabledef.dbsession.delete(deleted_email)
58      tabledef.dbsession.commit()
59  # return requests.post(
60  #     "https://api.mailgun.net/v2/app27934969.mailgun.org/messages",
61  #     auth=("api", "key-21q1narswc35vqr1u3f9upn3vf6ncbb9"),
62  #     data={"from": "Excited User <me@samples.mailgun.org>",
63  #           "to": "daphnejwang@gmail.com",
64  #           "subject": "Hello",
65  #           "text": "Testing some Mailgun awesomness!"})
14 - error: syntax-error
1   from flask_oauthlib.client import OAuth
2   from flask import Flask, render_template, redirect, jsonify, request, flash, url_for, session
3   import jinja2
4   import tabledef
5   from tabledef import User, MentoreeTopic, Topic
6   import linkedin
7   from xml.dom.minidom import parseString
8   import pdb
9   # from Project import app
10
11  def search(searchtopics):
12      search_results = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(topic_id=searchtopics).all()
13      return search_results
14
15  def search_topic_display(searchtopics):
16      search_results = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(topic_id=searchtopics).all()
17      search_topic = tabledef.dbsession.query(tabledef.Topic).filter_by(topic_id=search_results[0].topic_id).first()
18
19      search_topic_title = search_topic.title
20      print search_topic_title
21      return search_topic_title
22
23  def mentor_detail_display(linkedin_id):
24      # pdb.set_trace()
25      ment_data = tabledef.dbsession.query(tabledef.User).filter_by(linkedin_id=linkedin_id).first()
26      # print "!!~~~~~~~~~~~ment_data.positions[0].positions_title~~~~~~~~~~~~~~~~~~~~~~!!"
27      # print ment_data.positions[0].positions_title
28      # ment_data.positions.positions_title
29      return ment_data
30
31  def mentor_personal_topics(linkedin_id):
32      # pdb.set_trace()
33      ment_pers_topics = tabledef.dbsession.query(tabledef.MentoreeTopic).filter_by(mentor_id=linkedin_id).all()
34      # for topics in ment_pers_topics:
35      #     print "((((((~~~~~~~~~~~topics.topic_id~~~~~~~~~~~~~~~~~~~~~~))"
36      #     print topics.topic_id
37
38      return ment_pers_topics
20 - error: syntax-error
1   def quick_sort(array):
2       """
3       Recursive quicksort using divide and conquer
4       :param array:
5       :return:
6       """
7       if (len(array) < 2):
8           return array
9       else:
10          pivot = array[0]
11          less = [i for i in array[1:] if i <= pivot]
12          greater = [i for i in array[1:] if i > pivot]
13          return quick_sort(less) + [pivot] + quick_sort(greater)
14
15  exam1 = [4, 2, 1, 7, 10]
16  print(quick_sort(exam1))
7 - refactor: no-else-return
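Pylint's no-else-return asks for the else branch to be dropped once the if returns; a sketch of the flattened form (behavior unchanged):

    def quick_sort(array):
        """Recursive quicksort using divide and conquer."""
        if len(array) < 2:
            return array               # early return removes the else branch
        pivot = array[0]
        less = [i for i in array[1:] if i <= pivot]
        greater = [i for i in array[1:] if i > pivot]
        return quick_sort(less) + [pivot] + quick_sort(greater)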
1   def find_the_largest_num(arr):
2       """
3       Find the largest number in a list.
4       :param arr: list of comparable values
5       :return: the largest value in arr
6       """
1 - warning: unused-argument
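The stub above never touches arr, which is what the warning flags; a hedged recursive completion in the spirit of the neighboring sum_func (the algorithm choice is my assumption, not the author's):

    def find_the_largest_num(arr):
        """Recursively find the largest number in arr."""
        if len(arr) == 1:              # base case: a single element is the max
            return arr[0]
        rest_max = find_the_largest_num(arr[1:])
        return arr[0] if arr[0] > rest_max else rest_max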
1   def sum_func(arr):
2       """
3       Recursively sum a list of numbers.
4       :param arr: list of numbers
5       :return: the sum of arr
6       """
7       if len(arr) < 1:
8           return 0
9       else:
10          return arr[0] + sum_func(arr[1:])
11
12
13  arr1 = [1, 4, 5, 9]
14  print(sum_func(arr1))
7 - refactor: no-else-return
1   import unittest
2   from stack import STACK
3
4   class TestSTACK(unittest.TestCase):
5       @classmethod
6       def setUpClass(cls):
7           stack = STACK()
8
9       def test_isEmpty(self):
10          self.assertEqual(stack.isEmpty(), True)
11
12      def test_push_top(self):
13          self.assertEqual(stack.top(), 1)
7 - warning: unused-variable
10 - error: undefined-variable
13 - error: undefined-variable
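All three messages share one cause: setUpClass binds the STACK instance to a local, so the test methods reference an undefined name. Binding it to cls fixes both at once (a sketch of the corrected test):

    import unittest
    from stack import STACK

    class TestSTACK(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            # bound to cls (not a local), so tests can reach it as self.stack
            cls.stack = STACK()

        def test_isEmpty(self):
            self.assertEqual(self.stack.isEmpty(), True)

        def test_push_top(self):
            self.assertEqual(self.stack.top(), 1)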
class STACK():
    def isEmpty(self):
        return True
    def top(self):
        return 1
Clean Code: No Issues Detected
1   import sys
2
3   class Calc_price():
4       def calculater_price(self, values):
5           round = lambda x: (x*2+1)//2
6           sum = 0
7           for value in values:
8               sum += int(value)
9           ans = sum * 1.1
10          ans = int(round(ans))
11          return ans
12
13      def input_to_data(self, input):
14          result = []
15          lines = []
16          input = input.read()
17          input = input.split('\n')
18          for i in input:
19              i = i.split(',')
20              lines.append(i)
21          lines.pop(-1)
22          for i in lines:
23              if i == ['']:
24                  result.append([])
25                  continue
26              result.append(list(map(lambda x: int(x), i)))
27          return result
28
29      def calculater(self, input):
30          result = []
31          input = self.input_to_data(input)
32          for i in input:
33              result.append(self.calculater_price(i))
34
35          return result
36
37  if __name__ == '__main__':
38      calc_price = Calc_price()
39      print(calc_price.calculater(sys.stdin))
5 - warning: redefined-builtin
6 - warning: redefined-builtin
13 - warning: redefined-builtin
26 - warning: unnecessary-lambda
29 - warning: redefined-builtin
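Every warning above is a name choice: round, sum, and input shadow builtins, and the lambda on line 26 just wraps int. A hedged rewrite of calculater_price with fresh names (the names are my choice, the arithmetic is unchanged):

    def calculater_price(self, values):
        def round_half_up(x):          # replaces the lambda bound to `round`
            return (x * 2 + 1) // 2
        total = 0                      # replaces the name `sum`
        for value in values:
            total += int(value)
        return int(round_half_up(total * 1.1))

For line 26, `map(int, i)` already does what `map(lambda x: int(x), i)` does.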
1   import sys
2   import io
3   import unittest
4   from calc_price import Calc_price
5   from di_sample import SomeKVSUsingDynamoDB
6
7
8   class TestCalculatePrice(unittest.TestCase):
9       def test_calculater_price(self):
10          calc_price = Calc_price()
11          assert 24 == calc_price.calculater_price([10, 12])
12          assert 62 == calc_price.calculater_price([40, 16])
13          assert 160 == calc_price.calculater_price([100, 45])
14          assert 171 == calc_price.calculater_price([50, 50, 55])
15          assert 1100 == calc_price.calculater_price([1000])
16          assert 66 == calc_price.calculater_price([20, 40])
17          assert 198 == calc_price.calculater_price([30, 60, 90])
18          assert 40 == calc_price.calculater_price([11, 12, 13])
19
20      def test_input_to_data(self):
21          calc_price = Calc_price()
22
23          input = io.StringIO('10,12,3\n40,16\n100,45\n')
24          calc_price.input_to_data(input)
25
26          input = io.StringIO('1,25,3\n40,16\n\n100,45\n')
27          calc_price.input_to_data(input)
28
29      def test_calculater(self):
30          calc_price = Calc_price()
31          self.assertEqual(calc_price.calculater(io.StringIO('1,25,3\n40,16\n\n100,45\n')), [32, 62, 0, 160])
23 - warning: redefined-builtin
1 - warning: unused-import
5 - warning: unused-import
1   # Given a folder of images and a metadata.csv file, output an npz file with an imgs, mouse_x, and mouse_y columns.
2   import os
3   import numpy as np
4   import pandas as pd
5   import matplotlib.pyplot as plt
6   import glob
7   from PIL import Image
8   from scipy.misc import imread, imresize
9
10  folder_name = 'ball_dataset_classroom'
11
12  # Katya
13  data_path = '/home/ksoltan/catkin_ws/src/robot_learning/data_processing_utilities/data/'
14  # Anil
15  # data_path ='/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'
16
17  path = data_path + folder_name + '/'
18
19  metadata_name = 'metadata.csv'
20
21  os.chdir(path)
22  filenames = glob.glob("*.jpg")
23
24  # Get size of images
25  filename = filenames[0]
26  sample_img = Image.open(filename, 'r')
27  print("height: {}, width: {}, aspect: {}".format(sample_img.height, sample_img.width, 1.0 * sample_img.height/sample_img.width))
28  aspect = 1.0 * sample_img.height / sample_img.width
29  width = 200
30  height = int(width*aspect)
31  new_size = width, height
32
33
34  # Create numpy array of all x and y mouse positions
35  METADATA_CSV = data_path + folder_name + '/' + metadata_name
36  df = pd.read_csv(METADATA_CSV, ',')[['image_file_name', 'object_from_scan_x', 'object_from_scan_y']]
37  print(df.head())
38  print(df.info())
39
40  images = []
41  object_xs = []
42  object_ys = []
43  # Loop through lidar predicted object positions and save only those that do not contain 0, 0
44  for index in range(len(df.object_from_scan_x)):
45      x = df.object_from_scan_x[index]
46      y = df.object_from_scan_y[index]
47      if (x == 0.0 and y == 0.0):
48          continue
49
50      # Add image
51      img_name = filenames[index]
52      img = Image.open(img_name, 'r')
53      resize = img.resize(new_size)
54      array = np.array(resize)
55      images.append(array)
56
57      # Add object position
58      object_xs.append(x)
59      object_ys.append(y)
60
61
62  #
63  # # plt.imshow(data)
64  # # plt.show()
65  # index = 0
66  # images = []
67  # # Create numpy array of resized images
68  # for name in filenames:
69  #     img = Image.open(name, 'r')
70  #     resize = img.resize(new_size)
71  #     array = np.array(resize)
72  #     # images[:,:,:,index] = array
73  #     images.append(array)
74  #     index += 1
75
76
77  SAVE_FILENAME = data_path + folder_name + '_data' '.npz'
78  np.savez_compressed(SAVE_FILENAME, imgs=images, object_x=object_xs, object_y=object_ys)
79  test_data = np.load(SAVE_FILENAME)
80  print(test_data['object_x'].shape)
5 - warning: unused-import
8 - warning: unused-import (2 occurrences)
1    # Given a folder of images and a metadata.csv file, output an npz file with imgs, spatial x, and spatial y columns.
2    import os
3    import numpy as np
4    import pandas as pd
5    import matplotlib.pyplot as plt
6    import glob
7    import math
8    from PIL import Image
9    from scipy.misc import imread, imresize
10
11   def process_scan(ranges):
12       """
13       process a 360 point set of laser data in a certain viewing range.
14
15       inputs: list of ranges from the laser scan
16       output: lists of x and y points within viewing angle and range
17       """
18       max_r = 1.5
19       view_angle = int(70 / 2)  # only look at points in the forwardmost 70 degs
20       infront = range(-view_angle, view_angle)
21       # ranges[0:int(view_angle/2)]+ranges[int(360-view_angle/2):360]
22       xs = []
23       ys = []
24
25       # loop through and grab points in desired view range
26       for i in range(-view_angle, view_angle):
27           if ranges[i] != 0:
28               theta = math.radians(90 + i)
29               r = ranges[i]
30               xf = r * math.cos(theta)
31               yf = r * math.sin(theta)
32               xs.append(xf)
33               ys.append(yf)
34
35       return(xs, ys)
36
37   def center_of_mass(x, y):
38       """
39       compute the center of mass in a lidar scan.
40
41       inputs: x and y lists of cleaned laser data
42       output: spatial x and y coordinate of the CoM
43       """
44       if len(x) < 4:  # if below a threshold of grouped points
45           return(np.inf, np.inf)
46       else:
47           x_cord = sum(x)/len(x)
48           y_cord = sum(y)/len(y)
49
50           plt.plot(x, y, 'ro')
51           plt.plot(0, 0, 'bo', markersize=15)
52           plt.plot(x_cord, y_cord, 'go', markersize=15)
53           plt.ylim(-2, 2)
54           plt.xlim(-2, 2)  # plt.show()
55           return (x_cord, y_cord)
56
57   def resize_image(img_name):
58       """
59       load and resize images for the final numpy array.
60
61       inputs: filename of an image
62       output: resized image as a numpy array
63       """
64       # new size definition
65       width = 200
66       height = 150
67       new_size = width, height
68
69       img = Image.open(img_name, 'r')
70       resize = img.resize(new_size)
71       array = np.array(resize)
72       return array
73
74   def find_corresponding_scan(image_time, scan_times, start_idx):
75       max_tolerance = 0.015
76       while start_idx < len(scan_times):
77           diff = abs(scan_times[start_idx] - image_time)
78           # print("Idx: {}, Diff: {}".format(start_idx, abs(scan_times[start_idx] - image_time)))
79           if diff < max_tolerance:
80               return (start_idx, diff)
81           start_idx += 1
82       return None
83
84
85   if __name__ == '__main__':
86       # location definitions
87       # # Katya
88       data_path = '/home/ksoltan/catkin_ws/src/robot_learning/data_processing_utilities/data/'
89       # Anil
90       # data_path ='/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'
91       folder_name = 'anil_shining_2'
92       # folder_name = 'latest_person'
93
94       path = data_path + folder_name + '/'
95       metadata_csv = data_path + folder_name + '/' + 'metadata.csv'
96
97       # image definitions
98       os.chdir(path)
99       filenames = glob.glob("*.jpg")
100
101      # pull from metadata
102      array_form = np.genfromtxt(metadata_csv, delimiter=",")
103      lidar_all = array_form[:, 6:366]
104      pic_times = array_form[:, 0]
105      lidar_times = array_form[:, -1]
106
107      images = []
108      object_xs = []
109      object_ys = []
110
111      i_s = []
112      j_s = []
113      # loop through all images
114      for i in range(lidar_all.shape[0]-26):
115          for j in range(i, i+25):
116              delta = lidar_times[j]-pic_times[i]
117              if abs(delta) < 0.025:
118                  i_s.append(i)
119                  j_s.append(j)
120
121                  # print('pic', i)
122                  # print('lid', j)
123                  # print('delta', delta)
124                  # print('------------------')
125                  break
126
127      imgs_a = []
128      xs_a = []
129      ys_a = []
130
131      for i in range(len(i_s)):
132          img_ind = i_s[i]
133          lid_ind = j_s[i]
134
135          scan_now = lidar_all[lid_ind]  # scan data for this index
136
137          # process if scan isn't NaN (laser hasn't fired yet)
138          if not np.isnan(scan_now[10]):
139              points_x, points_y = process_scan(scan_now)
140              xp, yp = center_of_mass(points_x, points_y)
141
142              # only add if CoM is defined, AKA object is in frame
143              if xp != np.inf:
144                  # print(pic_times[img_ind]-lidar_times[lid_ind], xp, yp, round(math.degrees(math.atan2(xp, yp)),2))
145
146                  # add image
147                  img_name = filenames[img_ind]
148                  img_np = resize_image(img_name)
149                  imgs_a.append(img_np)
150
151                  # add object position
152                  xs_a.append(xp)
153                  ys_a.append(yp)
154
155                  # verify
156                  # plt.show()
157
158                  plt.imshow(img_np)
159                  # plt.show()
160
161      print(len(imgs_a))
162      # save all data
163      save_path = data_path + folder_name + '_data' '.npz'
164      np.savez_compressed(save_path, imgs=imgs_a, object_x=xs_a, object_y=ys_a)
26 - warning: redefined-outer-name
18 - warning: unused-variable
20 - warning: unused-variable
44 - refactor: no-else-return
57 - warning: redefined-outer-name
4 - warning: unused-import
9 - warning: unused-import (2 occurrences)
1   #!/usr/bin/env python
2   """quick script for trying to pull spatial x, y from metadata"""
3
4   from __future__ import print_function
5   from geometry_msgs.msg import PointStamped, PointStamped, Twist
6   from std_msgs.msg import Header
7   from neato_node.msg import Bump
8   from sensor_msgs.msg import LaserScan
9   import matplotlib.pyplot as plt
10  from datetime import datetime
11  import pandas as pd
12  import time, numpy, math, rospy, statistics
13
14  def process_scan(ranges):
15      """ process a 360 point set of laser data in a certain viewing range """
16      max_r = 1.5
17      view_angle = 80  # only look at points in the forwardmost 70 degs
18      infront = ranges[0:int(view_angle/2)]+ranges[int(360-view_angle/2):360]
19
20      xs = []
21      ys = []
22
23      # loop through and grab points in desired view range
24      for i in range(len(ranges)):
25          if i < len(infront):
26              if infront[i] != 0 and infront[i] < max_r:
27                  if i >= view_angle/2:
28                      theta = math.radians(90-(view_angle-i))
29                  else:
30                      theta = math.radians(i+90)
31                  r = infront[i]
32                  xf = math.cos(theta)*r
33                  yf = math.sin(theta)*r
34                  xs.append(xf)
35                  ys.append(yf)
36
37      return(xs, ys)
38
39  def center_of_mass(x, y):
40      """ with arguments as lists of x and y values, compute center of mass """
41      if len(x) < 4:  # if below a threshold of grouped points
42          return(0, 0)  # TODO pick a return value for poor scans
43
44      x_cord = sum(x)/len(x)
45      y_cord = sum(y)/len(y)
46      plt.plot(x_cord, y_cord, 'go', markersize=15)
47      return (x_cord, y_cord)
48
49  if __name__ == '__main__':
50      path = '/home/anil/catkin_ws/src/comprobo18/robot_learning/data_processing_utilities/data/'
51      folder = 'mydataset'
52      look_in = path+folder + '/'  # final path for metadata
53
54      filename = 'metadata.csv'
55      file_csv = look_in + filename
56
57      array_form = numpy.genfromtxt(file_csv, delimiter=",")
58      lidar_all = array_form[:, 6:366]
59      lidar_label = []
60
61      ind = 0
62      for i in range(lidar_all.shape[0]):
63          scan_now = lidar_all[i,:]
64
65          if not numpy.isnan(scan_now[10]):
66              points_x, points_y = process_scan(scan_now)
67              xp, yp = center_of_mass(points_x, points_y)
68
69              if xp != 0:
70                  # lidar_label[ind,0] = i
71                  # lidar_label[ind,1] = xp
72                  # lidar_label[ind,2] = yp
73                  # ind += 1
74                  lidar_label.append([i, xp, yp])
75                  print(ind, i, xp, yp, math.degrees(math.atan2(xp, yp)))
76
77                  # plt.plot(points_x, points_y, 'ro')
78                  # plt.plot(0,0, 'bo', markersize=15)
79                  # plt.show()
80
81      lidar_label = numpy.array(lidar_label)
82      print(lidar_label[:,0])
83      SAVE_FILENAME = path + folder + '.npz'
84      numpy.savez_compressed(SAVE_FILENAME, indices=lidar_label[:,0], xs=lidar_label[:,1], ys=lidar_label[:,2])
85      """
86      # loop through images and get spatial x and y
87      for i in range(lidar_all.shape[0]):
88          lidar_here = lidar_all[i,:]
89          xs, ys = process_scan(lidar_here)
90          xp, yp = center_of_mass(xs, ys)
91
92          lidar_label[i,0] = xp
93          lidar_label[i,1] = yp
94          print(xp, yp)
95      """
42 - warning: fixme
5 - warning: reimported
24 - warning: redefined-outer-name
85 - warning: pointless-string-statement
5 - warning: unused-import (2 occurrences)
6 - warning: unused-import
7 - warning: unused-import
8 - warning: unused-import
10 - warning: unused-import
11 - warning: unused-import
12 - warning: unused-import (3 occurrences)
1   import sys, requests, json
2   from io import BytesIO
3   from PIL import Image
4   from pycolors import *
5   from funcs import *
6
7
8   print( pycol.BOLD + pycol.HEADER + "Welcome to the pokedex, ask for a pokemon: " + pycol.ENDC, end="" )
9   pokemon = input()
10
11  while True:
12      response = getPokemon(pokemon)
13
14      if response.status_code == 404:
15          print( "This pokemon name is not valid, try again: ", end="" )
16          pokemon = input()
17          continue
18
19      data = response.json()
20
21
22      #############################################################
23      ########################### IMAGE ###########################
24      #############################################################
25      #imgburl = "https://assets.pokemon.com/assets/cms2/img/pokedex/full/" + str(data["id"]) + ".png"
26      imgburl = "https://img.pokemondb.net/artwork/" + str(data["name"]) + ".jpg"
27      imgr = requests.get(imgburl)
28      img = Image.open(BytesIO(imgr.content))
29      w, h = img.size
30      img.resize((w, h)).show()
31
32
33      #############################################################
34      ######################### BASE INFO #########################
35      #############################################################
36      print( "\n" + pycol.BOLD + pycol.UNDERLINE + data["name"].capitalize() + pycol.ENDC + " (ID: " + str(data["id"]) + ")" + "\n" +
37             "Weight: " + str(data["weight"]/10) + "kg\n" +
38             "Height: " + str(data["height"]/10) + "m\n" +
39             "Base experience: " + str(data["base_experience"]) )
40      ########################### TYPES ###########################
41      types, abilities = [], []
42      for t in data["types"]:
43          types.append(t["type"]["name"])
44      print( "Types: " + ', '.join(types) )
45      ######################### ABILITIES #########################
46      for a in data["abilities"]:
47          ab = a["ability"]["name"]
48          if a["is_hidden"]:
49              ab = ab + " (hidden ab.)"
50          abilities.append(ab)
51      print( "Abilities: " )
52      for ab in abilities:
53          print( " - " + ab.capitalize() )
54      ########################### STATS ###########################
55      print( "Stats: " )
56      for s in data["stats"]:
57          print(getStrBar((s["stat"]["name"] + ":").ljust(17), s["base_stat"]))
58      ######################## EVOL CHAIN #########################
59      print("Evolutions:\n" + "   " + getEvolChain(data["id"]))
60      print()
61      #############################################################
62
63
64
65      #############################################################
66      ######################## END OF LOOP ########################
67      #############################################################
68      print( "Do you wanna ask for another pokemon? (Y/n) ", end="" )
69      answer = input()
70      if answer == 'n':
71          break
72      else:
73          print( "Enter the pokemon name: ", end="" )
74          pokemon = input()
4 - warning: wildcard-import
5 - warning: wildcard-import
8 - error: undefined-variable (3 occurrences)
12 - error: undefined-variable
27 - warning: missing-timeout
36 - error: undefined-variable (3 occurrences)
57 - error: undefined-variable
59 - error: undefined-variable
70 - refactor: no-else-break
1 - warning: unused-import (2 occurrences)
1   import requests, math
2
3   def getPokemon(pokemon):
4       return requests.get("http://pokeapi.co/api/v2/pokemon/"+pokemon)
5
6   def getEvolChain(id):
7       url = "http://pokeapi.co/api/v2/pokemon-species/" + str(id)
8       resp = requests.get(url)
9       data = resp.json()
10      evol = requests.get(data["evolution_chain"]["url"]).json()["chain"]
11      evols = evol["species"]["name"].capitalize()
12      while evol["evolves_to"]:
13          evol = evol["evolves_to"][0]
14          evols = evols + " -> " + evol["species"]["name"].capitalize()
15      return evols
16
17
18  def getStrBar(stat, base):
19      # ▓▓▓▓▓▓▓▓░░░░░░░
20      num = math.ceil(base/20)
21      stat = stat.capitalize()
22      statStr = " - " + stat + "▓" * num + "░" * (10-num) + " " + str(base)
23      return statStr
24
25
26  if __name__ == "__main__":
27      print(getStrBar("speed", 90))
28      #print(getPokemon("pikachu"))
4 - warning: missing-timeout
6 - warning: redefined-builtin
8 - warning: missing-timeout
10 - warning: missing-timeout
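The missing-timeout warnings mean each requests.get call can hang forever if the API stalls; an explicit timeout clears W3101. A sketch for the first function (10 seconds is an arbitrary choice on my part, not a value from the source):

    import requests

    def getPokemon(pokemon):
        # timeout raises requests.Timeout instead of blocking indefinitely
        return requests.get("http://pokeapi.co/api/v2/pokemon/" + pokemon,
                            timeout=10)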
1   import sys
2   import copy
3
4   def parse_info(claim):
5       offsets = claim.strip().split("@")[1].split(":")[0].split(",")
6       inches_from_left = int(offsets[0].strip())
7       inches_from_top = int(offsets[1].strip())
8
9       dims = claim.strip().split("@")[1].split(":")[1].split("x")
10      width = int(dims[0].strip())
11      height = int(dims[1].strip())
12
13      return (inches_from_left, inches_from_top, width, height)
14
15  def part_one(square, input):
16      collision_cnt = 0
17
18      for claim in input:
19          info = parse_info(claim)
20
21          for i in range(info[1], info[1] + info[3]):
22              for j in range(info[0], info[0] + info[2]):
23                  if square[i][j] == "#":
24                      square[i][j] = "X"
25                      collision_cnt += 1
26                  elif square[i][j] == ".":
27                      square[i][j] = "#"
28
29      print("sol p1: " + str(collision_cnt))
30      return square
31
32  def part_two(filled_square, input):
33      for claim in input:
34          info = parse_info(claim)
35          overlapping = False
36
37          for i in range(info[1], info[1] + info[3]):
38              if overlapping:
39                  break
40              for j in range(info[0], info[0] + info[2]):
41                  if filled_square[i][j] == "X":
42                      overlapping = True
43                      break
44
45          if not overlapping:
46              print("sol p2: " + claim.split("#")[1].split("@")[0].strip())
47
48  if __name__ == '__main__':
49      input = sys.stdin.readlines()
50      lst = ["." for _ in range(0, 1000)]
51      square = [copy.copy(lst) for _ in range(0, 1000)]
52
53      filled_square = part_one(square, input)
54      part_two(filled_square, input)
49 - warning: redefined-builtin
15 - warning: redefined-outer-name (2 occurrences)
32 - warning: redefined-outer-name (2 occurrences)
1    import sys
2    import copy
3    from string import ascii_lowercase
4
5    def manhattan_dist(c1, c2):
6        return abs(c1[1] - c2[1]) + abs(c1[0] - c2[0])
7
8    def part_two():
9
10       total = 0
11       for i in range(0, 1000):
12           for j in range(0, 1000):
13               sum = 0
14               for c in coord_by_name.keys():
15                   sum += manhattan_dist((j, i), coord_by_name[c])
16               if sum < 10000:
17                   total += 1
18
19       print("sol p2: " + str(total))
20
21   def part_one():
22
23       for i in range(0, 1000):
24           for j in range(0, 1000):
25
26               if square[i][j] == ".":
27                   min_dist = 99999
28                   name = ""
29                   collision_dist = min_dist
30                   for coords in list_of_coords:
31                       distance = abs(i - coords[1]) + abs(j - coords[0])
32                       if distance < min_dist:
33                           min_dist = distance
34                           name = coordinate_names[coords].lower()
35                       elif distance == min_dist:
36                           collision_dist = min_dist
37
38                   if collision_dist == min_dist:
39                       square[i][j] = "."
40                   else:
41                       square[i][j] = name
42
43       area_cnt = dict()
44
45       y_min = 2000
46       x_min = 2000
47       x_max = 0
48       y_max = 0
49       x_min_remove = []
50       x_max_remove = []
51       y_min_remove = []
52       y_max_remove = []
53
54       for c in list_of_coords:
55           if c[0] <= x_min:
56               x_min = c[0]
57               x_min_remove.append(coordinate_names[c])
58               for i in x_min_remove:
59                   if coord_by_name[i][0] > x_min:
60                       x_min_remove.remove(i)
61           if c[0] >= x_max:
62               x_max = c[0]
63               x_max_remove.append(coordinate_names[c])
64               for i in x_max_remove:
65                   if coord_by_name[i][0] < x_max:
66                       x_max_remove.remove(i)
67           if c[1] <= y_min:
68               y_min = c[1]
69               y_min_remove.append(coordinate_names[c])
70               for i in y_min_remove:
71                   if coord_by_name[i][1] > y_min:
72                       y_min_remove.remove(i)
73           if c[1] >= y_max:
74               y_max = c[1]
75               y_max_remove.append(coordinate_names[c])
76               for i in y_max_remove:
77                   if coord_by_name[i][1] < y_max:
78                       y_max_remove.remove(i)
79
80       for i in coordinate_names.values():
81
82           dist = abs(coord_by_name[i][1] - x_max)
83           man_dists = []
84           for j in coordinate_names.values():
85               if coord_by_name[j][1] == x_max:
86                   man_dist = manhattan_dist((coord_by_name[i][0], x_max), coord_by_name[j])
87                   man_dists.append(man_dist)
88           if min(man_dists) > dist:
89               x_max_remove.append(i)
90
91           dist = abs(coord_by_name[i][1] - x_min)
92           man_dists = []
93           for j in coordinate_names.values():
94               if coord_by_name[j][1] == x_min:
95                   man_dist = manhattan_dist((coord_by_name[i][0], x_min), coord_by_name[j])
96                   man_dists.append(man_dist)
97           if min(man_dists) > dist:
98               x_min_remove.append(i)
99
100          dist = abs(coord_by_name[i][0] - y_max)
101          man_dists = []
102          for j in coordinate_names.values():
103              if coord_by_name[j][0] == y_max:
104                  man_dist = manhattan_dist((y_max, coord_by_name[i][1]), coord_by_name[j])
105                  man_dists.append(man_dist)
106          if min(man_dists) > dist:
107              y_max_remove.append(i)
108
109          dist = abs(coord_by_name[i][0] - y_min)
110          man_dists = []
111          for j in coordinate_names.values():
112              if coord_by_name[j][0] == y_min:
113                  man_dist = manhattan_dist((y_min, coord_by_name[i][1]), coord_by_name[j])
114                  man_dists.append(man_dist)
115          if min(man_dists) > dist:
116              y_min_remove.append(i)
117
118          area_cnt[i] = 0
119
120      for i in range(0, 1000):
121          for j in range(0, 1000):
122
123              if square[i][j].islower():
124                  if square[i][j].upper() not in x_max_remove and square[i][j].upper() not in x_min_remove and square[i][j].upper() not in y_max_remove and square[i][j].upper() not in y_min_remove:
125                      area_cnt[square[i][j].upper()] += 1
126
127      max = 0
128      caused_by = ""
129      for i in area_cnt.keys():
130          cnt = 0
131          if i != 0:
132              cnt = area_cnt[i] + 1
133
134          if cnt > max:
135              max = cnt
136              caused_by = i
137
138      print(caused_by + ": " + str(max))
139
140  if __name__ == '__main__':
141
142      input = sys.stdin.readlines()
143
144      test = dict()
145      tmp_cnt = 0
146
147      for c in ascii_lowercase:
148          test[tmp_cnt] = c.upper()
149          tmp_cnt += 1
150
151      rest = len(input) - 26
152
153      for c in ascii_lowercase:
154          if rest > 0:
155              rest -= 1
156              test[tmp_cnt] = c.upper() + c.upper()
157              tmp_cnt += 1
158
159      cnt = 0
160      lst = ["." for _ in range(0, 1000)]
161      square = [copy.copy(lst) for _ in range(0, 1000)]
162
163      list_of_coords = []
164      coordinate_names = dict()
165      coord_by_name = dict()
166
167      for i in input:
168          coords = (int(i.strip().split(",")[0]), int(i.strip().split(",")[1].strip()))
169          list_of_coords.append(coords)
170          square[coords[1]][coords[0]] = test[cnt]
171          coordinate_names[coords] = test[cnt]
172          coord_by_name[test[cnt]] = (coords[1], coords[0])
173          cnt += 1
174
175      part_one()
176      part_two()
142 - warning: redefined-builtin 11 - warning: redefined-outer-name 13 - warning: redefined-builtin 14 - warning: redefined-outer-name 14 - error: possibly-used-before-assignment 21 - refactor: too-many-locals 23 - warning: redefined-outer-name 30 - warning: redefined-outer-name 54 - warning: redefined-outer-name 127 - warning: redefined-builtin 130 - warning: redefined-outer-name 26 - error: possibly-used-before-assignment 30 - error: possibly-used-before-assignment 34 - error: possibly-used-before-assignment 43 - refactor: use-dict-literal 60 - warning: modified-iterating-list 66 - warning: modified-iterating-list 72 - warning: modified-iterating-list 78 - warning: modified-iterating-list 21 - refactor: too-many-branches 21 - refactor: too-many-statements 144 - refactor: use-dict-literal 164 - refactor: use-dict-literal 165 - refactor: use-dict-literal
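A minimal sketch of how the redefined-builtin and use-dict-literal messages above could be cleared, assuming the rest of the script stays as-is (dist_sum is an illustrative name):

import sys

lines = sys.stdin.readlines()  # non-builtin name instead of shadowing input()
dist_sum = 0                   # instead of rebinding the built-in sum
area_cnt = {}                  # dict literal instead of dict()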
1 import sys 2 import copy 3 import string 4 from string import ascii_lowercase 5 6 # 42384 too low 7 8 if __name__ == '__main__': 9 10 input = sys.stdin.read().split() 11 print(input) 12 13 stack = [] 14 tree = [] 15 tmp_input = copy.copy(input) 16 open_meta_data = 0 17 idx = 0 18 19 while len(tmp_input) > open_meta_data: 20 21 print("len: " + str(len(tmp_input))) 22 print("need: " + str(int(input[idx + 1]) + 2)) 23 print("open meta len: " + str(open_meta_data)) 24 25 need = int(input[idx + 1]) + 2 26 27 if need + open_meta_data > len(tmp_input): 28 print("DONE") 29 break 30 31 node = (input[idx], input[idx + 1], []) 32 33 print("looking at: " + str(node)) 34 35 # if len(tmp_input) <= open_meta_data: 36 # print("len of rest: " + str(len(tmp_input))) 37 # print("open meta data: " + str(open_meta_data)) 38 # print("current need: " + str(node[1])) 39 # print("DONE") 40 # break 41 42 for i in range(0, len(tmp_input) - 1): 43 if tmp_input[i] == node[0] and tmp_input[i + 1] == node[1]: 44 tmp_idx = i 45 46 if node[0] == '0': 47 print("remove: " + str(tmp_input[tmp_idx : (tmp_idx + 2 + int(node[1]))])) 48 del tmp_input[tmp_idx : (tmp_idx + 2 + int(node[1]))] 49 else: 50 print("remove::: " + str(tmp_input[tmp_idx : tmp_idx + 2])) 51 del tmp_input[tmp_idx : tmp_idx + 2] 52 53 # no childs 54 if node[0] == '0': 55 print("handle now") 56 print(node) 57 58 for i in range(idx + 2, idx + 2 + int(node[1])): 59 node[2].append(input[i]) 60 61 tree.append(node) 62 63 else: 64 open_meta_data += int(node[1]) 65 print("append to stack") 66 stack.append(node) 67 print(node) 68 69 idx += 2 70 if node[0] == '0': 71 idx += int(node[1]) 72 73 print("TODO: " + str(tmp_input)) 74 75 for i in stack: 76 node = (i[0], i[1], []) 77 78 for j in range(0, int(i[1])): 79 node[2].append(tmp_input[j]) 80 del tmp_input[0 : int(i[1])] 81 tree.append(node) 82 83 res = 0 84 for i in tree: 85 res += sum([int(x) for x in i[2]]) 86 87 print("sol p1: " + str(res)) 88 89
10 - warning: redefined-builtin 47 - error: possibly-used-before-assignment 85 - refactor: consider-using-generator 3 - warning: unused-import 4 - warning: unused-import
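Binding tmp_idx before the search loop addresses the possibly-used-before-assignment error, and a generator expression clears consider-using-generator; a sketch with stand-in data:

tmp_input = ['2', '3', '0', '3', '10', '11', '12']  # stand-in token stream
node = ('0', '3', [])

tmp_idx = None                 # bound before the loop, so always defined
for i in range(len(tmp_input) - 1):
    if tmp_input[i] == node[0] and tmp_input[i + 1] == node[1]:
        tmp_idx = i
if tmp_idx is None:
    raise ValueError('node not found in tmp_input')

res = sum(int(x) for x in node[2])  # generator expression, no temporary list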
1 import sys 2 from datetime import datetime 3 4 def calc_timespan(t1, t2): 5 fmt = '%H:%M' 6 return datetime.strptime(t2, fmt) - datetime.strptime(t1, fmt) 7 8 def parse_info(): 9 date = i.split("[")[1].split("]")[0].split(" ")[0].strip() 10 time = i.split("[")[1].split("]")[0].split(" ")[1].strip() 11 action = i.split("[")[1].split("]")[1].strip() 12 return (date, time, action) 13 14 if __name__ == '__main__': 15 16 input = sys.stdin.readlines() 17 input.sort() 18 19 current_guard_id = "" 20 start_sleeping = -1 21 sleep_times = dict() 22 sleeping_minutes = dict() 23 24 for i in input: 25 26 info = parse_info() 27 28 if current_guard_id != "": 29 if "falls" in i: 30 start_sleeping = info[1] 31 elif "wakes" in i: 32 33 if not current_guard_id in sleep_times.keys(): 34 sleep_times[current_guard_id] = 0 35 if not current_guard_id in sleeping_minutes.keys(): 36 sleeping_minutes[current_guard_id] = [] 37 38 time_to_add_in_minutes = int(str(calc_timespan(start_sleeping, info[1])).split(":")[0]) * 60 39 time_to_add_in_minutes += int(str(calc_timespan(start_sleeping, info[1])).split(":")[1]) 40 start = int(start_sleeping.split(":")[1]) 41 end = int(info[1].split(":")[1]) - 1 42 sleeping_minutes[current_guard_id].append(start) 43 sleeping_minutes[current_guard_id].append(end) 44 45 for idx in range(start + 1, start + time_to_add_in_minutes - 1): 46 sleeping_minutes[current_guard_id].append(idx % 60) 47 48 current_sleep_time = sleep_times[current_guard_id] + time_to_add_in_minutes 49 sleep_times[current_guard_id] = int(current_sleep_time) 50 51 if "#" in info[2]: 52 current_guard_id = info[2].split("#")[1].split("begins")[0].strip() 53 54 lazy_guard = max(sleep_times, key = sleep_times.get) 55 56 # min, guard 57 strategy1 = [max(sleeping_minutes[lazy_guard], key = sleeping_minutes[lazy_guard].count), int(lazy_guard)] 58 # min, count, guard 59 strategy2 = [0, 0, 0] 60 61 for i in sleep_times.keys(): 62 tmp_min = max(sleeping_minutes[i], key = sleeping_minutes[i].count) 63 64 if sleeping_minutes[i].count(tmp_min) > strategy2[1]: 65 strategy2[0] = tmp_min 66 strategy2[1] = sleeping_minutes[i].count(tmp_min) 67 strategy2[2] = i 68 69 print("sol p1: " + str(strategy1[0] * strategy1[1])) 70 print("sol p2: " + str(int(strategy2[2]) * strategy2[0]))
16 - warning: redefined-builtin 9 - error: used-before-assignment 21 - refactor: use-dict-literal 22 - refactor: use-dict-literal
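One way to remove the used-before-assignment error: have parse_info take the log line as an argument instead of reading the loop variable i from module scope. A sketch, not the original parser:

def parse_info(entry):
    stamp, action = entry.split(']', 1)
    date, time = stamp.lstrip('[').split(' ')
    return date.strip(), time.strip(), action.strip()

print(parse_info('[1518-11-01 00:00] Guard #10 begins shift'))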
1 import sys 2 3 def part_one(input): 4 exactly_two = 0 5 exactly_three = 0 6 7 for boxID in input: 8 letter_count = [boxID.count(letter) for letter in boxID] 9 10 if 2 in letter_count: 11 exactly_two += 1 12 if 3 in letter_count: 13 exactly_three += 1 14 15 return exactly_two * exactly_three 16 17 def part_two(input): 18 19 for boxID_one in input: 20 for boxID_two in input: 21 if boxID_one != boxID_two: 22 equal_letters = [l1 for l1, l2 in zip(boxID_one, boxID_two) if l1 == l2] 23 24 if len(boxID_one) - len(equal_letters) == 1: 25 return "".join(equal_letters).strip() 26 27 if __name__ == '__main__': 28 input = sys.stdin.readlines() 29 print("sol p1: " + str(part_one(input))) 30 print("sol p2: " + part_two(input))
28 - warning: redefined-builtin 3 - warning: redefined-outer-name 17 - warning: redefined-outer-name 17 - refactor: inconsistent-return-statements
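An explicit return on the fall-through path clears inconsistent-return-statements, and renaming the parameter avoids shadowing input(); a sketch with sample box IDs:

def part_two(box_ids):
    for one in box_ids:
        for two in box_ids:
            if one != two:
                common = [l1 for l1, l2 in zip(one, two) if l1 == l2]
                if len(one) - len(common) == 1:
                    return ''.join(common).strip()
    return None  # explicit result when nothing matches

print(part_two(['abcde', 'axcye', 'fghij', 'fguij']))  # -> fgij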
1 import sys 2 3 if __name__ == '__main__': 4 5 input = sys.stdin.readlines() 6 curr_freq = 0 7 reached_twice = False 8 list_of_freqs = [] 9 10 while not reached_twice: 11 12 for change in input: 13 14 sign = change[0] 15 change = int(change.replace(sign, "")) 16 17 if (sign == "+"): 18 curr_freq += change 19 else: 20 curr_freq -= change 21 22 if curr_freq in list_of_freqs: 23 reached_twice = True 24 print("sol p2: " + str(curr_freq)) 25 break 26 else: 27 list_of_freqs.append(curr_freq) 28 29 if len(list_of_freqs) == len(input): 30 print("sol p1: " + str(curr_freq))
5 - warning: redefined-builtin 22 - refactor: no-else-break
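The no-else-break note means the else branch after a break is redundant; a sketch of the inner test, using a set so the membership check is also O(1):

seen = set()
curr_freq = 0
for change in [1, -2, 3, 1, 1, -2]:  # stand-in for the parsed frequency changes
    curr_freq += change
    if curr_freq in seen:
        print('sol p2: ' + str(curr_freq))
        break
    seen.add(curr_freq)              # no else needed once the if-branch breaks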
1 import sys 2 import copy 3 import string 4 from string import ascii_lowercase 5 6 def get_names(): 7 names = dict() 8 cnt = 0 9 for i in ascii_lowercase: 10 if cnt == len(input) - 1: 11 break 12 names[i.upper()] = [] 13 cnt += 1 14 return names 15 16 def delete_item(item): 17 for i in names.keys(): 18 if i == item: 19 del names[i] 20 break 21 22 for i in names.keys(): 23 if item in names[i]: 24 names[i].remove(item) 25 26 def parse_input(): 27 for i in input: 28 before = i.strip().split("must")[0].split("Step")[1].strip() 29 after = i.strip().split("can")[0].split("step")[1].strip() 30 names[after].append(before) 31 32 if __name__ == '__main__': 33 34 input = sys.stdin.readlines() 35 names = get_names() 36 parse_input() 37 order = [] 38 39 while len(names) > 0: 40 41 deps = [] 42 for i in names.keys(): 43 deps.append(names[i]) 44 min_list = min(deps) 45 46 for j in names.keys(): 47 if names[j] == min_list: 48 order.append(j) 49 delete_item(j) 50 break 51 52 print("sol p1: " + "".join(order))
34 - warning: redefined-builtin 7 - warning: redefined-outer-name 9 - warning: redefined-outer-name 7 - refactor: use-dict-literal 17 - warning: redefined-outer-name 27 - warning: redefined-outer-name 2 - warning: unused-import 3 - warning: unused-import
1 import sys 2 import copy 3 from string import ascii_lowercase 4 5 def step_time(letter, sample): 6 if not sample: 7 return 60 + ord(letter) - 64 8 else: 9 return ord(letter) - 64 10 11 def get_names(): 12 names = dict() 13 cnt = 0 14 for i in ascii_lowercase: 15 if cnt == len(input) - 1: 16 break 17 names[i.upper()] = [] 18 cnt += 1 19 return names 20 21 def delete_item(item): 22 for i in names.keys(): 23 if i == item: 24 del names[i] 25 break 26 27 for i in names.keys(): 28 if item in names[i]: 29 names[i].remove(item) 30 31 def get_waiting_lists(names): 32 waiting_lists = [] 33 for i in names.keys(): 34 waiting_lists.append((names[i], i)) 35 return waiting_lists 36 37 def get_admissible_item(waiting_lists): 38 39 tmp = copy.copy(waiting_lists) 40 valid = False 41 42 while not valid: 43 valid = True 44 if len(tmp) == 0: 45 return None 46 tmp_best = min(tmp) 47 48 if len(tmp_best[0]) == 0: 49 for w in workers: 50 if w[2] == tmp_best[1]: 51 valid = False 52 else: 53 valid = False 54 55 if not valid: 56 tmp.remove(tmp_best) 57 58 return tmp_best[1] 59 60 if __name__ == '__main__': 61 62 input = sys.stdin.readlines() 63 num_of_workers = 5 64 sample = False 65 names = get_names() 66 workers = [] 67 68 for i in range(0, num_of_workers): 69 # (idx, available, working item, time_left) 70 workers.append((i, True, "", 0)) 71 72 for i in input: 73 before = i.strip().split("must")[0].split("Step")[1].strip() 74 after = i.strip().split("can")[0].split("step")[1].strip() 75 names[after].append(before) 76 77 time = 0 78 79 while len(names.keys()) > 0: 80 81 for w in workers: 82 # worker available 83 if w[1]: 84 waiting_lists = get_waiting_lists(names) 85 item = get_admissible_item(waiting_lists) 86 if item == None: 87 pass 88 # print("no item available for worker" + str(w[0])) 89 else: 90 workers[workers.index(w)] = (w[0], False, item, step_time(item, sample)) 91 # print("time " + str(time) + " worker" + str(w[0]) + " starts to work on item " + str(item) + " needs time: " + str(step_time(item, sample))) 92 # worker busy 93 else: 94 time_left = w[3] - 1 95 if time_left != 0: 96 workers[workers.index(w)] = (w[0], False, w[2], time_left) 97 else: 98 delete_item(str(w[2])) 99 # print("time " + str(time) + " worker" + str(w[0]) + " finished working on item " + str(w[2])) 100 101 waiting_lists = get_waiting_lists(names) 102 item = get_admissible_item(waiting_lists) 103 104 if item == None: 105 workers[workers.index(w)] = (w[0], True, "", 0) 106 # print("no item available for worker" + str(w[0])) 107 else: 108 workers[workers.index(w)] = (w[0], False, item, step_time(item, sample)) 109 # print("time " + str(time) + " worker" + str(w[0]) + " starts to work on item " + str(item) + " needs time: " + str(step_time(item, sample))) 110 111 continue 112 113 time += 1 114 115 print("sol p2: " + str(time - 1))
62 - warning: redefined-builtin 5 - warning: redefined-outer-name 6 - refactor: no-else-return 12 - warning: redefined-outer-name 14 - warning: redefined-outer-name 12 - refactor: use-dict-literal 21 - warning: redefined-outer-name 22 - warning: redefined-outer-name 22 - error: possibly-used-before-assignment 31 - warning: redefined-outer-name 32 - warning: redefined-outer-name 33 - warning: redefined-outer-name 37 - warning: redefined-outer-name 49 - warning: redefined-outer-name 49 - error: possibly-used-before-assignment
1 import sys 2 import copy 3 from string import ascii_lowercase 4 5 def remove_unit(tmp_input, idx): 6 del tmp_input[idx] 7 del tmp_input[idx] 8 9 def react_polymer(tmp_input): 10 11 modified = True 12 13 while modified: 14 modified = False 15 16 for i in range(0, len(tmp_input) - 1): 17 if tmp_input[i] != tmp_input[i + 1] and tmp_input[i].lower() == tmp_input[i + 1].lower(): 18 modified = True 19 remove_unit(tmp_input, i) 20 break 21 return tmp_input 22 23 if __name__ == '__main__': 24 input = sys.stdin.read().strip() 25 polymer_lengths = [] 26 27 print("sol p1: " + str(len(react_polymer(list(input))))) 28 29 for unit_type in ascii_lowercase: 30 31 tmp_input = list(input.replace(unit_type, "").replace(unit_type.upper(), "")) 32 tmp_input = react_polymer(tmp_input) 33 polymer_lengths.append(len(tmp_input)) 34 35 print("sol p2: " + str(min(polymer_lengths)))
24 - warning: redefined-builtin 5 - warning: redefined-outer-name 9 - warning: redefined-outer-name 2 - warning: unused-import
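Passing the polymer in as a parameter removes the redefined-outer-name warnings; a sketch of react_polymer in that style, checked against the well-known dabAcCaCBAcCcaDA example:

def react_polymer(units):
    changed = True
    while changed:
        changed = False
        for i in range(len(units) - 1):
            if units[i] != units[i + 1] and units[i].lower() == units[i + 1].lower():
                del units[i:i + 2]   # drop the reacting pair in one slice
                changed = True
                break
    return units

print(len(react_polymer(list('dabAcCaCBAcCcaDA'))))  # -> 10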
1 import tensorflow as tf 2 3 from tensorflow.python_io import TFRecordWriter 4 5 import numpy as np 6 import h5py 7 import glob 8 import os 9 from tqdm import tqdm 10 11 from IPython import embed 12 13 def _bytes_feature(value): 14 return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value])) 15 16 def _float_feature(value): 17 return tf.train.Feature(float_list=tf.train.FloatList(value=value)) 18 19 input_roots = '/data/dataTrain/val_*/' 20 output_name = '/data/dataTrain/val.tfrecords' 21 22 writer = TFRecordWriter(output_name) 23 24 h5files = glob.glob(os.path.join(input_roots, '*.h5')) 25 26 for h5file in tqdm(h5files): 27 try: 28 data = h5py.File(h5file, 'r') 29 for i in range(200): 30 img = data['CameraRGB'][i] 31 target = data['targets'][i] 32 33 feature_dict = {'image': _bytes_feature(img.tostring()), 34 'targets': _float_feature(target)} 35 36 example = tf.train.Example(features=tf.train.Features(feature=feature_dict)) 37 writer.write(example.SerializeToString()) 38 data.close() 39 except: 40 print('filename: {}'.format(h5file)) 41 42 43 writer.close()
39 - warning: bare-except 5 - warning: unused-import 11 - warning: unused-import
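Naming the exception instead of using a bare except (and dropping the unused numpy/IPython imports) would clear the warnings; a sketch, with an illustrative path:

import h5py

h5file = '/data/dataTrain/example.h5'   # illustrative path
try:
    data = h5py.File(h5file, 'r')
    data.close()
except OSError as err:                  # named exception, not a bare except
    print('filename: {} ({})'.format(h5file, err))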
1 import numpy as np 2 import tensorflow as tf 3 4 def weight_ones(shape, name): 5 initial = tf.constant(1.0, shape=shape, name=name) 6 return tf.Variable(initial) 7 8 9 def weight_xavi_init(shape, name): 10 initial = tf.get_variable(name=name, shape=shape, 11 initializer=tf.contrib.layers.xavier_initializer()) 12 return initial 13 14 15 def bias_variable(shape, name): 16 initial = tf.constant(0.1, shape=shape, name=name) 17 return tf.Variable(initial) 18 19 20 class Network(object): 21 22 def __init__(self, train_state): 23 """ We put a few counters to see how many times we called each function """ 24 self._count_conv = 0 25 self._count_pool = 0 26 self._count_bn = 0 27 self._count_dropouts = 0 28 self._count_activations = 0 29 self._count_fc = 0 30 self._count_lstm = 0 31 self._count_soft_max = 0 32 self._conv_kernels = [] 33 self._conv_strides = [] 34 self._weights = {} 35 self._features = {} 36 self._train_state = train_state 37 38 """ Our conv is currently using bias """ 39 40 def conv(self, x, kernel_size, stride, output_size, padding_in='SAME'): 41 self._count_conv += 1 42 43 filters_in = x.get_shape()[-1] 44 shape = [kernel_size, kernel_size, filters_in, output_size] 45 46 weights = weight_xavi_init(shape, 'W_c_' + str(self._count_conv)) 47 bias = bias_variable([output_size], name='B_c_' + str(self._count_conv)) 48 49 self._weights['W_conv' + str(self._count_conv)] = weights 50 self._conv_kernels.append(kernel_size) 51 self._conv_strides.append(stride) 52 53 conv_res = tf.add(tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding=padding_in, 54 name='conv2d_' + str(self._count_conv)), bias, 55 name='add_' + str(self._count_conv)) 56 57 self._features['conv_block' + str(self._count_conv - 1)] = conv_res 58 59 return conv_res 60 61 def max_pool(self, x, ksize=3, stride=2): 62 self._count_pool += 1 63 return tf.nn.max_pool(x, ksize=[1, ksize, ksize, 1], strides=[1, stride, stride, 1], 64 padding='SAME', name='max_pool' + str(self._count_pool)) 65 66 def bn(self, x): 67 self._count_bn += 1 68 return tf.contrib.layers.batch_norm(x, is_training=False, 69 updates_collections=None, scope='bn' + str(self._count_bn)) 70 71 def activation(self, x): 72 self._count_activations += 1 73 return tf.nn.relu(x, name='relu' + str(self._count_activations)) 74 75 def dropout(self, x, prob=1): 76 print ("Dropout", self._count_dropouts) 77 self._count_dropouts += 1 78 output = tf.nn.dropout(x, prob, 79 name='dropout' + str(self._count_dropouts)) 80 return output 81 82 def fc(self, x, output_size): 83 self._count_fc += 1 84 filters_in = x.get_shape()[-1] 85 shape = [filters_in, output_size] 86 87 weights = weight_xavi_init(shape, 'W_f_' + str(self._count_fc)) 88 bias = bias_variable([output_size], name='B_f_' + str(self._count_fc)) 89 90 return tf.nn.xw_plus_b(x, weights, bias, name='fc_' + str(self._count_fc)) 91 92 def conv_block(self, x, kernel_size, stride, output_size, padding_in='SAME', dropout_prob=None): 93 print (" === Conv", self._count_conv, " : ", kernel_size, stride, output_size) 94 with tf.name_scope("conv_block" + str(self._count_conv)): 95 x = self.conv(x, kernel_size, stride, output_size, padding_in=padding_in) 96 x = self.bn(x) 97 if dropout_prob is not None: 98 x = tf.cond(self._train_state, 99 true_fn=lambda: self.dropout(x, dropout_prob), 100 false_fn=lambda: x) 101 102 return self.activation(x) 103 104 def fc_block(self, x, output_size, dropout_prob=None): 105 print (" === FC", self._count_fc, " : ", output_size) 106 with tf.name_scope("fc" + str(self._count_fc + 1)): 107 x = 
self.fc(x, output_size) 108 if dropout_prob is not None: 109 x = tf.cond(self._train_state, 110 true_fn=lambda: self.dropout(x, dropout_prob), 111 false_fn=lambda: x) 112 self._features['fc_block' + str(self._count_fc + 1)] = x 113 return self.activation(x) 114 115 def get_weigths_dict(self): 116 return self._weights 117 118 def get_feat_tensors_dict(self): 119 return self._features 120 121 122 def make_network(): 123 inp_img = tf.placeholder(tf.float32, shape=[None, 88, 200, 3], name='input_image') 124 inp_speed = tf.placeholder(tf.float32, shape=[None, 1], name='input_speed') 125 126 target_control = tf.placeholder(tf.float32, shape=[None, 3], name='target_control') 127 #target_command = tf.placeholder(tf.float32, shape=[None, 4], name='target_command') 128 train_state = tf.placeholder(tf.bool, shape=[], name='train_state') 129 130 network_manager = Network(train_state) 131 132 with tf.name_scope('Network'): 133 xc = network_manager.conv_block(inp_img, 5, 2, 32, padding_in='VALID') 134 print (xc) 135 xc = network_manager.conv_block(xc, 3, 1, 32, padding_in='VALID') 136 print (xc) 137 138 xc = network_manager.conv_block(xc, 3, 2, 64, padding_in='VALID') 139 print (xc) 140 xc = network_manager.conv_block(xc, 3, 1, 64, padding_in='VALID') 141 print (xc) 142 143 xc = network_manager.conv_block(xc, 3, 2, 128, padding_in='VALID') 144 print (xc) 145 xc = network_manager.conv_block(xc, 3, 1, 128, padding_in='VALID') 146 print (xc) 147 148 xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID') 149 print (xc) 150 xc = network_manager.conv_block(xc, 3, 1, 256, padding_in='VALID') 151 print (xc) 152 153 x = tf.reshape(xc, [-1, int(np.prod(xc.get_shape()[1:]))], name='reshape') 154 print (x) 155 156 x = network_manager.fc_block(x, 512, dropout_prob=0.7) 157 print (x) 158 x = network_manager.fc_block(x, 512, dropout_prob=0.7) 159 160 with tf.name_scope("Speed"): 161 speed = network_manager.fc_block(inp_speed, 128, dropout_prob=0.5) 162 speed = network_manager.fc_block(speed, 128, dropout_prob=0.5) 163 164 j = tf.concat([x, speed], 1) 165 j = network_manager.fc_block(j, 512, dropout_prob=0.5) 166 167 control_out = network_manager.fc_block(j, 256, dropout_prob=0.5) 168 control_out = network_manager.fc_block(control_out, 256) 169 control_out = network_manager.fc(control_out, 3) 170 loss = tf.reduce_mean(tf.square(tf.subtract(control_out, target_control))) 171 tf.summary.scalar('loss', loss) 172 173 ''' 174 branch_config = [["Steer", "Gas", "Brake"], ["Steer", "Gas", "Brake"], \ 175 ["Steer", "Gas", "Brake"], ["Steer", "Gas", "Brake"]] 176 177 branches = [] 178 losses = [] 179 for i in range(0, len(branch_config)): 180 with tf.name_scope("Branch_" + str(i)): 181 branch_output = network_manager.fc_block(j, 256, dropout_prob=0.5) 182 branch_output = network_manager.fc_block(branch_output, 256) 183 branches.append(network_manager.fc(branch_output, len(branch_config[i]))) 184 losses.append(tf.square(tf.subtract(branches[i], target_control))) 185 186 print (branch_output) 187 188 losses = tf.convert_to_tensor(losses) 189 losses = tf.reduce_mean(tf.transpose(losses, [1, 2, 0]), axis=1) * target_command; 190 loss = tf.reduce_sum(losses) 191 ''' 192 193 return {'loss': loss, 194 'train_state': train_state, 195 'inputs': [inp_img, inp_speed], 196 'labels': [target_control], 197 'outputs': [control_out]} 198
20 - refactor: useless-object-inheritance 20 - refactor: too-many-instance-attributes 38 - warning: pointless-string-statement 40 - refactor: too-many-arguments 40 - refactor: too-many-positional-arguments 92 - refactor: too-many-arguments 92 - refactor: too-many-positional-arguments 173 - warning: pointless-string-statement
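In Python 3 the object base class is implicit, and the stray string after __init__ is what pylint calls a pointless-string-statement; a sketch of the class header with both addressed:

class Network:                          # no explicit object inheritance needed
    # our conv is currently using bias  (a comment, not a bare string literal)
    def __init__(self, train_state):
        self._train_state = train_state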
1 import numpy as np 2 import tensorflow as tf 3 4 from network import make_network 5 from data_provider import DataProvider 6 from tensorflow.core.protobuf import saver_pb2 7 8 import time 9 import os 10 11 log_path = './log' 12 save_path = './data' 13 14 if __name__ == '__main__': 15 16 with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess: 17 train_provider = DataProvider('/data/dataTrain/train.tfrecords', sess) 18 val_provider = DataProvider('/data/dataTrain/val.tfrecords', sess) 19 20 network = make_network() 21 22 lr = 0.0001 23 lr_placeholder = tf.placeholder(tf.float32, []) 24 optimizer = tf.train.AdamOptimizer(learning_rate=lr_placeholder, 25 beta1=0.7, beta2=0.85) 26 optimizer = optimizer.minimize(network['loss']) 27 28 sess.run(tf.global_variables_initializer()) 29 merged_summary_op = tf.summary.merge_all() 30 31 saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V2) 32 saver.restore(sess, os.path.join(save_path, 'step-7500.ckpt')) 33 34 step = 0 35 36 while True: 37 if step % 50 == 0: 38 val_batch = val_provider.get_minibatch() 39 val_loss = sess.run(network['loss'], 40 feed_dict={network['inputs'][0]: val_batch.images, 41 network['inputs'][1]: val_batch.data[0], 42 network['labels'][0]: val_batch.data[1]}) 43 print('VALIDATION--------loss: %.4f' % val_loss) 44 if step % 500 == 0: 45 model_path = os.path.join(save_path, 'step-%d.ckpt' % step) 46 saver.save(sess, model_path) 47 print("Checkpoint saved to %s" % model_path) 48 49 a = time.time() 50 batch = train_provider.get_minibatch(augment=True) 51 imgs = batch.images 52 speed, target_control, _ = batch.data 53 b = time.time() 54 _, train_loss = sess.run([optimizer, network['loss']], 55 feed_dict={network['inputs'][0]: imgs, 56 network['inputs'][1]: speed, 57 network['labels'][0]: target_control, 58 lr_placeholder: lr}) 59 c = time.time() 60 print('step: %d loss %.4f prepare: %.3fs gpu: %.3fs' % (step, train_loss, b-a, c-b)) 61 62 step += 1 63 64
1 - warning: unused-import
1 import tensorflow as tf 2 import numpy as np 3 import glob 4 import os 5 import h5py 6 from imgaug.imgaug import Batch, BatchLoader, BackgroundAugmenter 7 import imgaug.augmenters as iaa 8 import cv2 9 10 from IPython import embed 11 12 BATCHSIZE = 120 13 14 st = lambda aug: iaa.Sometimes(0.4, aug) 15 oc = lambda aug: iaa.Sometimes(0.3, aug) 16 rl = lambda aug: iaa.Sometimes(0.09, aug) 17 18 seq = iaa.Sequential([ 19 rl(iaa.GaussianBlur((0, 1.5))), 20 rl(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05), per_channel=0.5)), 21 oc(iaa.Dropout((0.0, 0.10), per_channel=0.5)), 22 oc(iaa.CoarseDropout((0.0, 0.10), size_percent=(0.08, 0.2),per_channel=0.5)), 23 oc(iaa.Add((-40, 40), per_channel=0.5)), 24 st(iaa.Multiply((0.10, 2.5), per_channel=0.2)), 25 rl(iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)), 26 ], random_order=True) 27 28 ''' 29 def augmentation(imgs): 30 return imgs 31 ''' 32 33 def parse_proto(example_proto): 34 features = tf.parse_single_example(example_proto, 35 features={'image': tf.FixedLenFeature([], tf.string), 36 'targets': tf.FixedLenSequenceFeature([], tf.float32, allow_missing=True)}) 37 image = tf.decode_raw(features['image'], tf.uint8) 38 image = tf.reshape(image, [88, 200, 3]) 39 40 speed = features['targets'][10] 41 target_control = features['targets'][0:3] 42 target_command = features['targets'][24] % 4 43 return image, speed[None], target_control, target_command 44 45 class DataProvider: 46 def __init__(self, filename, session): 47 dataset = tf.data.TFRecordDataset(filename) 48 dataset = dataset.repeat().shuffle(buffer_size=2000).map(parse_proto).batch(BATCHSIZE) 49 iterator = tf.data.Iterator.from_structure(dataset.output_types, 50 dataset.output_shapes) 51 dataset_init = iterator.make_initializer(dataset) 52 session.run(dataset_init) 53 54 self.dataset = dataset 55 self.session = session 56 self.next = iterator.get_next() 57 58 def get_minibatch(self, augment = False): 59 data = self.session.run(self.next) 60 imgs = data[0].astype('float32') 61 if augment: 62 imgs = seq.augment_images(imgs) 63 return Batch(images=imgs, data=data[1:]) 64 65 def show_imgs(self): 66 batch = self.get_minibatch(True) 67 for img in batch.images: 68 cv2.imshow('img', img) 69 cv2.waitKey(0) 70 71 # Test tf.data & imgaug backgroud loader APIs 72 if __name__ == '__main__': 73 import time 74 sess = tf.Session() 75 dp = DataProvider('/mnt/AgentHuman/train.tfrecords', sess) 76 77 while True: 78 a = time.time() 79 dp.get_minibatch() 80 b = time.time() 81 print(b-a) 82
2 - warning: unused-import 3 - warning: unused-import 4 - warning: unused-import 5 - warning: unused-import 6 - warning: unused-import 6 - warning: unused-import 10 - warning: unused-import
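Every message above is an unused import; trimming the header to what the module actually touches would clear them. A sketch, assuming the same third-party packages are installed:

import tensorflow as tf
import cv2
import imgaug.augmenters as iaa
from imgaug.imgaug import Batch         # BatchLoader/BackgroundAugmenter unused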
1 import tensorflow as tf 2 import glob 3 import h5py 4 import numpy as np 5 from network import make_network 6 # read an example h5 file 7 datasetDirTrain = '/home/eric/self-driving/AgentHuman/SeqTrain/' 8 datasetDirVal = '/home/eric/self-driving/AgentHuman/SeqVal/' 9 datasetFilesTrain = glob.glob(datasetDirTrain+'*.h5') 10 datasetFilesVal = glob.glob(datasetDirVal+'*.h5') 11 print("Len train:{0},len val{1}".format(len(datasetFilesTrain),len(datasetFilesVal))) 12 data = h5py.File(datasetFilesTrain[1], 'r') 13 image_input = data['rgb'][1] 14 input_speed =np.array([[100]]) 15 image_input = image_input.reshape( 16 (1, 88, 200, 3)) 17 18 with tf.Session() as sess: 19 network = make_network() 20 saver = tf.train.Saver() 21 ckpt = tf.train.latest_checkpoint("./data") 22 if ckpt: 23 saver.restore(sess, ckpt) 24 output=sess.run(network['outputs'], feed_dict={network['inputs'][0]:image_input, 25 network['inputs'][1]: input_speed}) 26 print(output) 27 sess.close()
Clean Code: No Issues Detected
1 from PIL import Image 2 from numpy import * 3 from pylab import * 4 import os 5 import sift 6 7 imlist = os.listdir('pages') 8 nbr_images = len(imlist) 9 imlist_dir = [str('../pages/'+imlist[n]) for n in range(nbr_images)] 10 11 imname = [imlist[n][:-4] for n in range(nbr_images)] 12 13 os.mkdir('sifts') 14 15 os.chdir('sifts') 16 17 for n in range(nbr_images): 18 sift.process_image(imlist_dir[n],str(imname[n]+'.sift')) 19 20 21
18 - warning: bad-indentation 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: redefined-builtin 2 - warning: wildcard-import 3 - warning: wildcard-import 1 - warning: unused-import 2 - warning: unused-wildcard-import
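Explicit imports avoid the wildcard warnings and the ten shadowed built-ins, and a uniform indent clears bad-indentation; a sketch of the same pipeline, assuming the local sift module and pages/ layout from above:

import os
import sift                             # the local SIFT wrapper used above

imlist = sorted(os.listdir('pages'))
os.makedirs('sifts', exist_ok=True)     # also avoids crashing on a rerun
os.chdir('sifts')
for name in imlist:
    sift.process_image('../pages/' + name, name[:-4] + '.sift')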
1 import os 2 import shutil 3 from dataclasses import dataclass, field 4 from typing import List 5 import h5py 6 7 import matplotlib.patches as patches 8 import matplotlib.pyplot as plt 9 import numpy as np 10 import pandas as pd 11 12 @dataclass 13 class Cluster: 14 # cluster object, contains detected cluster points and additional values 15 label: int 16 cardinality: int = 0 17 elements: List = field(default_factory=list) 18 dopplers: List = field(default_factory=list) 19 center_polar: np.ndarray = np.empty((2, 1)) 20 center_cartesian: np.ndarray = np.empty((2, 1)) 21 box: np.ndarray = np.empty((4, 1)) 22 23 def polar2cartesian(xp): 24 # angles in rad 25 return np.array([xp[0]*np.cos(xp[1]), xp[0]*np.sin(xp[1])], dtype=np.float64).reshape(-1, 1) 26 27 def cartesian2polar(xy): 28 # angles in rad 29 return np.array([np.sqrt(xy[0]**2 + xy[1]**2), np.arctan2(xy[1], xy[0])]).reshape(-1, 1) 30 31 def deg2rad_shift(angles): 32 a = np.copy(angles) 33 a = np.pi*a/180 34 a = -a + np.pi/2 35 return a 36 37 def shift_rad2deg(angles): 38 a = np.copy(angles) 39 a = -a + np.pi/2 40 a = 180*a/np.pi 41 return a 42 43 def get_box(cluster, c=None, h=0.5, w=0.3): 44 if cluster is not None: 45 r_ext = cluster.elements[0].max() - cluster.elements[0].min() 46 # print(cluster.elements[1]) 47 a_ext = cluster.elements[1].max() - cluster.elements[1].min() 48 out = np.array([cluster.center_polar[0].squeeze(), 49 cluster.center_polar[1].squeeze(), 50 r_ext, 51 a_ext]).reshape(4, 1) 52 return out 53 else: 54 return np.array([c[0], c[1], h, w]).reshape(4, 1) 55 56 def IOU_score(a, b): 57 # returns the IOU score of the two input boxes 58 x1 = max(a[0], b[0]) 59 y1 = max(a[1], b[1]) 60 x2 = min(a[2], b[2]) 61 y2 = min(a[3], b[3]) 62 width = x2 - x1 63 height = y2 - y1 64 if (width < 0) or (height < 0): 65 return 0.0 66 area_intersection = width*height 67 area_a = (a[2] - a[0])*(a[3] - a[1]) 68 area_b = (b[2] - b[0])*(b[3] - b[1]) 69 area_union = area_a + area_b - area_intersection 70 return area_intersection/area_union
7 - refactor: consider-using-from-import 44 - refactor: no-else-return 1 - warning: unused-import 2 - warning: unused-import 5 - warning: unused-import 7 - warning: unused-import 8 - warning: unused-import 10 - warning: unused-import
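The no-else-return refactor for get_box: return early for the default-box case so the main path needs no else. A sketch of just that function:

import numpy as np

def get_box(cluster, c=None, h=0.5, w=0.3):
    if cluster is None:                 # early return removes the else branch
        return np.array([c[0], c[1], h, w]).reshape(4, 1)
    r_ext = cluster.elements[0].max() - cluster.elements[0].min()
    a_ext = cluster.elements[1].max() - cluster.elements[1].min()
    return np.array([cluster.center_polar[0].squeeze(),
                     cluster.center_polar[1].squeeze(),
                     r_ext, a_ext]).reshape(4, 1)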
1 import gc 2 3 from .train import train 4 from .predict import predict 5 6 def main(args): 7 gc.collect() 8 if args.Action == 'train': 9 train() 10 elif args.Action == 'predict': 11 predict() 12 gc.collect() 13
3 - error: relative-beyond-top-level 4 - error: relative-beyond-top-level
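relative-beyond-top-level means the file was linted or run outside its package. One way around it is to run the entry point as a module of its package, or to switch to absolute imports; a sketch where mypackage is only a placeholder name:

# python -m mypackage.main train       # run as a module of its package, or:
from mypackage.train import train      # absolute imports; mypackage is illustrative
from mypackage.predict import predict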
1 import torch 2 # import torch.nn as nn 3 # import torch.nn.functional as F 4 # import torch.optim as optim 5 # import torchvision 6 import torchvision.transforms as transforms 7 8 import os, sys 9 # import pickle, time, random 10 11 import numpy as np 12 # from PIL import Image 13 import argparse 14 15 from .darknet import DarkNet 16 from .dataset import * 17 from .util import * 18 19 def parse_arg(): 20 parser = argparse.ArgumentParser(description='MmWaveYoLo Prediction module', add_help=True) 21 22 parser.add_argument('--cfg', type=str, default='yolov3micro', 23 help="Name of the network config (default: yolov3micro)") 24 parser.add_argument('--pathin', type=str, 25 help="Path for the input folder (default: testset)") 26 parser.add_argument('--pathout', type=str, 27 help="Path for the output folder") 28 parser.add_argument('--video', type=str, default='False', 29 help="Create video after prediction (default: False)") 30 31 parser.add_argument('--datasplit', type=float, default=0, 32 help="Dataset split percentage (default: 0 (single set))") 33 parser.add_argument('--seed', type=float, default=0, 34 help="Seed for the random shuffling (default: 0, (no shuffle))") 35 parser.add_argument('--bs', type=int, default=8, 36 help="Batch size (default: 8)") 37 parser.add_argument('--ckpt', type=str, default='10.0', 38 help="Checkpoint name <'epoch'.'iteration'>") 39 40 parser.add_argument('--nms', type=float, default=0.5, 41 help="NMS threshold (default: 0.5)") 42 parser.add_argument('--obj', type=float, default=0.5, 43 help="Objectiveness threshold (default: 0.5)") 44 parser.add_argument('--iou', type=float, default=0.5, 45 help="Intersection over Union threshold (default: 0.5)") 46 parser.add_argument('--reso', type=int, default=416, 47 help="Input image resolution (default: 416)") 48 49 parser.add_argument('--v', type=int, default=0, 50 help="Verbose (0 minimal (default), 1 normal, 2 all") 51 52 return parser.parse_args(sys.argv[2:]) 53 54 def predict(): 55 torch.cuda.empty_cache() 56 57 # CONSTANTS 58 args = parse_arg() 59 pathcfg = f"cfg/{args.cfg}.cfg" 60 pathin = f"dataset/{args.pathin}/final" 61 pathout = f"results/{args.pathout}" 62 num_workers = 2 63 64 # NETWORK 65 darknet = DarkNet(pathcfg, args.reso, args.obj, args.nms) 66 pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad) 67 print('# of params: ', pytorch_total_params) 68 if args.v > 0: 69 print(darknet.module_list) 70 71 # IMAGE PREPROCESSING!!! 72 transform = transforms.Compose([ 73 transforms.Resize(size=(args.reso, args.reso), interpolation=3), 74 transforms.ToTensor() 75 ]) 76 # ==================================================== 77 78 # Test data allocation 79 _, testloader = getDataLoaders(pathin, transform, train_split=args.datasplit, batch_size=args.bs, \ 80 num_workers=num_workers, collate_fn=collate, random_seed=args.seed) 81 # ==================================================== 82 83 start_epoch = 2 84 start_iteration = 0 85 86 # LOAD A CHECKPOINT!!! 
87 start_epoch, start_iteration = args.ckpt.split('.') 88 start_epoch, start_iteration, state_dict, _, _, _, _ = load_checkpoint( 89 f'save/checkpoints/', 90 int(start_epoch), 91 int(start_iteration) 92 ) 93 darknet.load_state_dict(state_dict) 94 # ==================================================== 95 96 # Use GPU if available 97 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") 98 darknet.to(device) # Put the network on device 99 if args.v > 0: 100 print(next(darknet.parameters()).device) 101 102 # Create the subsequent save folders 103 # if os.path.isdir(pathout): 104 # shutil.rmtree(pathout) 105 if not os.path.isdir(pathout): 106 os.makedirs(pathout) 107 108 # PREDICT 109 print(f'[LOG] PREDICT | Test set: {len(testloader.dataset)}') 110 darknet.eval() # set network to evaluation mode 111 outcomes = np.zeros(4) 112 predList = [] 113 countLabels = 0 114 with torch.no_grad(): 115 for bidx, (paths, inputs, targets) in enumerate(testloader): 116 inputs = inputs.to(device) 117 predictions = darknet(inputs) 118 119 for idx, path in enumerate(paths): 120 print(f'[LOG] PREDICT | Predicting {(bidx*args.bs)+idx+1}/{len(testloader.dataset)}', end='\r') 121 savename = path.split('/')[-1].split('_')[2] 122 123 try: 124 prediction = predictions[predictions[:, 0] == idx] 125 except Exception: 126 prediction = torch.Tensor([]) 127 print(f'[ERROR] TEST | No prediction? {prediction}') 128 129 tempL, _= correctness(prediction, targets[idx], reso=darknet.reso, iou_thresh=args.iou) 130 predList.extend(tempL) 131 countLabels += targets[idx].size(0) 132 133 # draw_prediction(path, prediction, targets[idx], darknet.reso, \ 134 # names=[''], pathout=f'{pathout}/preds', savename=f'{savename}.png') 135 136 if args.video: 137 animate_predictions(pathout, args.video) 138 139 print(countLabels) 140 predList = precision_recall(predList, countLabels) 141 plot_precision_recall(predList, pathout=f'{pathout}/map', savename='') 142 # plot_precision_recall(predList, pathout=f'{pathout}/map', savename=f'iou{args.iou}.png') 143 144 # ====================================================
6 - refactor: consider-using-from-import 15 - error: relative-beyond-top-level 16 - error: relative-beyond-top-level 16 - warning: wildcard-import 17 - error: relative-beyond-top-level 17 - warning: wildcard-import 54 - refactor: too-many-locals 79 - error: undefined-variable 80 - error: undefined-variable 88 - error: undefined-variable 89 - warning: f-string-without-interpolation 125 - warning: broad-exception-caught 129 - error: undefined-variable 137 - error: undefined-variable 140 - error: undefined-variable 141 - error: undefined-variable 111 - warning: unused-variable 121 - warning: unused-variable
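The undefined-variable errors stem from the two wildcard imports: pylint cannot see where getDataLoaders, load_checkpoint and friends come from. Importing the names explicitly would resolve them, assuming they live in dataset.py and util.py as the wildcards suggest; the f-string warning just needs the f-prefix dropped:

from .dataset import getDataLoaders, collate          # assumed locations
from .util import load_checkpoint, correctness, precision_recall

ckpt_dir = 'save/checkpoints/'          # plain string, no interpolation needed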
1 import h5py 2 import numpy as np 3 import os, shutil 4 5 def chext(args): 6 rawpath = f'raw/{args.pathin}' 7 savepath = f'dataset/{args.pathout}/chext' if args.pathout else f'dataset/{args.pathin}/chext' 8 print(f'[LOG] ChExt | Starting: {args.pathin}') 9 10 # Create the subsequent save folders 11 # if os.path.isdir(savepath): 12 # shutil.rmtree(savepath) 13 if not os.path.isdir(savepath): 14 os.makedirs(savepath) 15 16 for i, fname in enumerate(os.listdir(rawpath)): 17 logprefix = f'[LOG] ChExt | {i+1} / {len(os.listdir(rawpath))}' 18 savename = f'{args.saveprefix}_seq_{i}' if args.saveprefix else f'{fname.split("_")[0]}_seq_{fname.split("_")[1].split(".")[0]}' 19 print(f'{logprefix} fname', end='\r') 20 channel_extraction( 21 f'{rawpath}/{fname}', 22 savepath, 23 savename, 24 action='SAVE', 25 logprefix=logprefix) 26 print('\n') 27 28 def channel_extraction(loadpath, savepath, savename, action, logprefix='', nr_chn=16): 29 with h5py.File(loadpath, 'r+') as h5data: 30 print(f'{logprefix} Initializing: {loadpath}', end='\r') 31 Data = np.zeros((h5data['Chn1'].shape[1], nr_chn, h5data['Chn1'].shape[0]), dtype=np.float32) 32 for i in range(nr_chn): 33 print(f'{logprefix} Extracting channel {i+1} \t\t\t', end='\r') 34 channel = np.asarray(h5data['Chn{}'.format(i+1)]) 35 Data[:, i, :] = channel.T 36 print(f'{logprefix} Finalizing {savepath}', end='\r') 37 if action == 'SAVE': 38 print(f'{logprefix} Saving', end='\r') 39 np.save(f'{savepath}/{savename}', Data) 40 print(f'{logprefix} Saved: {savepath}/{savename} Data shape: {Data.shape}') 41 elif action == 'RETURN': 42 return Data 43 else: 44 print(f'[ERR] ChExt | Invalid action, please select SAVE or RETURN')
28 - refactor: too-many-arguments 28 - refactor: too-many-positional-arguments 44 - warning: f-string-without-interpolation 28 - refactor: inconsistent-return-statements 3 - warning: unused-import
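Making every branch return explicitly clears inconsistent-return-statements; a sketch of a hypothetical tail helper for channel_extraction with consistent returns (the final print also needs no f-prefix):

import numpy as np

def finish(Data, savepath, savename, action):
    if action == 'RETURN':
        return Data
    if action == 'SAVE':
        np.save(f'{savepath}/{savename}', Data)
    else:
        print('[ERR] ChExt | Invalid action, please select SAVE or RETURN')
    return None                         # every path now returns something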
1 import matplotlib.animation as animation 2 import numpy as np 3 import scipy as sp 4 from matplotlib import pyplot as plt 5 6 class KalmanTracker: 7 8 def __init__(self, id_, s0=None, disable_rejection_check=False): 9 # Filter-related parameters 10 self.dt = 66.667e-3 # T_int of the radar TX 11 # state transition matrix 12 self.F = np.kron(np.eye(2), np.array([[1, self.dt], [0, 1]])) 13 # # state-acceleration matrix 14 self.G = np.array([0.5*(self.dt**2), self.dt]).reshape(2, 1) 15 # # observation matrix 16 self.H = np.array([[1, 0, 0, 0], 17 [0, 0, 1, 0]]) 18 # measurement covariance matrix 19 self.R = np.array([[0.5, 0], [0, 0.5]]) # [wagner2017radar] 20 # initial state covariance 21 self.P = 0.2*np.eye(4) 22 # state noise variance 23 self.sigma_a = 8 # [wagner2017radar] 24 # state noise covariance 25 self.Q = np.kron(np.eye(2), np.matmul(self.G, self.G.T)*self.sigma_a**2) 26 self.n = self.F.shape[1] 27 self.m = self.H.shape[1] 28 # initial state 29 self.s = np.zeros((self.n, 1)) if s0 is None else s0 30 self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1) 31 self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1) 32 self.REJECT_THR = 4.605 33 self.disable_rejection_check = disable_rejection_check 34 ######################################################### 35 # Tracker-related parameters 36 self.misses_number = 0 37 self.hits = 0 38 self.id = id_ 39 self.box = np.array([]) 40 self.state_memory = [] 41 self.identity_label = 'UNK' # initialize as unknown cluster 42 self.id_dict = {-1: 'UNK', 0: 'S1', 1: 'S2', 2:'S3', 3:'S4'} 43 # self.id_dict = {-1: 'UNK', 0: 'JP', 1: 'FM', 2:'GP', 3:'RF'} 44 45 def transform_obs(self, z): 46 z_prime = np.array([z[0]*np.cos(z[1]), z[0]*np.sin(z[1])]).reshape(-1, 1) 47 return z_prime 48 49 def reject_obs(self, i, S): 50 chi_squared = np.matmul(np.matmul(i.T, np.linalg.inv(S)), i)[0, 0] 51 return chi_squared >= self.REJECT_THR 52 53 def predict(self): 54 # a_x = np.random.normal(0, self.sigma_a) 55 # a_y = np.random.normal(0, self.sigma_a) 56 self.s = np.matmul(self.F, self.s) 57 # check that x has the correct shape 58 assert self.s.shape == (self.n, 1) 59 self.P = np.matmul(np.matmul(self.F, self.P), self.F.T) + self.Q 60 self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1) 61 self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1) 62 return self.s, self.xy 63 64 def update(self, z): 65 z = self.transform_obs(z) 66 # innovation 67 y = z - np.matmul(self.H, self.s) 68 S = np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R 69 if (not self.reject_obs(y, S)) or self.disable_rejection_check: 70 K = np.matmul(np.matmul(self.P, self.H.T), np.linalg.inv(S)) 71 self.s = self.s + np.matmul(K, y) 72 assert self.s.shape == (self.n, 1) 73 self.P = np.matmul(np.eye(self.n) - np.matmul(K, self.H), self.P) 74 self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1) 75 self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1) 76 self.state_memory.append(self.xy) 77 return self.s, self.xy 78 else: 79 self.state_memory.append(self.xy) 80 return self.s, self.xy 81 82 def get_S(self): 83 return np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R 84 85 @staticmethod 86 def get_mahalanobis_distance(x, C): 87 # returns Mahalanobis distance given the differece vector x and covariance C 88 return np.matmul(np.matmul(x.T, np.linalg.inv(C)), x)[0, 0] 89 90 @staticmethod 91 def 
hungarian_assignment(score_matrix): 92 # call the scipy implementation of Hungarian alg. 93 det_idx, tr_idx = sp.optimize.linear_sum_assignment(score_matrix) 94 unmatched, undetected = [], [] 95 for t in range(score_matrix.shape[1]): 96 if t not in tr_idx: 97 undetected.append(t) 98 for d in range(score_matrix.shape[0]): 99 if d not in det_idx: 100 unmatched.append(d) 101 matches = [] 102 for d, t in zip(det_idx, tr_idx): 103 matches.append(np.array([d, t]).reshape(1, 2)) 104 if len(matches) == 0: 105 matches = np.empty((0, 2), dtype=int) 106 else: 107 matches = np.concatenate(matches, axis=0) 108 return matches, np.array(undetected), np.array(unmatched) 109
1 - refactor: consider-using-from-import 6 - refactor: too-many-instance-attributes 69 - refactor: no-else-return 1 - warning: unused-import 4 - warning: unused-import
1 import torch 2 import torch.utils.data 3 from torch.utils.data.dataloader import default_collate 4 # from torchvision import transforms 5 6 import os 7 # import random 8 import numpy as np 9 from PIL import Image 10 11 # anchors_wh = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], 12 # [59, 119], [116, 90], [156, 198], [373, 326]], 13 # np.float32) / 416 14 15 class MmwaveDataset(torch.utils.data.Dataset): 16 def __init__(self, data_dir, data_size = 0, transforms = None): 17 files = sorted(os.listdir(data_dir)) 18 self.files = [f"{data_dir}/{x}" for x in files] 19 20 if data_size < 0 or data_size > len(files): 21 assert("Data size should be between 0 to number of files in the dataset") 22 23 if data_size == 0: 24 data_size = len(files) 25 26 self.data_size = data_size 27 self.transforms = transforms 28 29 def __len__(self): 30 return self.data_size 31 32 def __getitem__(self, idx): 33 image_path = self.files[idx] 34 image = Image.open(image_path) 35 img_w, img_h = image.size 36 37 image = self.preProcessImage(image) 38 39 labels = [] # to make it array of bbs (for multiple bbs in the future) 40 labels_str = image_path.split("_")[-1] 41 42 if "[[" in labels_str: 43 labels_str = labels_str.split('[[')[1].split(']]')[0].split('],[') 44 labels = np.zeros((4, 5)) 45 for i, l in enumerate(labels_str): 46 label = np.zeros(5) 47 label[:4] = np.array([int(a) for a in l.split(',')]) # [xc, yc, w, h] 48 49 # Normalizing labels 50 label[0] /= img_w #Xcenter 51 label[1] /= img_h #Ycenter 52 label[2] /= img_w #Width 53 label[3] /= img_h #Height 54 55 labels[i, :] = label 56 else: 57 labels_str = labels_str.split('[')[1].split(']')[0].split(',') # get the bb info from the filename 58 labels = np.zeros((1, 5)) 59 labels[0, :4] = np.array([int(a) for a in labels_str]) # [xc, yc, w, h] 60 61 if np.any(labels[0, :4] == 0): 62 return image, None 63 64 # Normalizing labels 65 labels[0, 0] /= img_w #Xcenter 66 labels[0, 1] /= img_h #Ycenter 67 labels[0, 2] /= img_w #Width 68 labels[0, 3] /= img_h #Height 69 # labels[0, 4] = 0 # class label (0 = person) 70 # print(torch.any(torch.isfinite(image) == False), labels) 71 72 return image_path, image, labels 73 74 #Image custom preprocessing if required 75 def preProcessImage(self, image): 76 image = image.convert('RGB') 77 if self.transforms: 78 return self.transforms(image) 79 else: 80 image = np.array(image) 81 image = image.transpose(2,1,0) 82 return image.astype(np.float32) 83 84 def collate(batch): 85 batch = list(filter(lambda x:x[1] is not None, batch)) 86 return default_collate(batch) # Use the default method to splice the filtered batch data 87 88 def getDataLoaders(data_dir, transforms, train_split=0, batch_size=8, \ 89 num_workers=2, collate_fn=collate, random_seed=0): 90 91 if train_split < 0 or train_split > 1: 92 raise Exception(f"data_loader | Split ({train_split}) coefficient should be 0 < x < 1") 93 94 dataset = MmwaveDataset(data_dir=data_dir, transforms=transforms) 95 shuffle = True if random_seed != 0 else False 96 97 # Single Set 98 if train_split == 0 or train_split == 1: 99 return None, torch.utils.data.DataLoader(dataset, batch_size=batch_size, 100 shuffle=shuffle, num_workers=num_workers, collate_fn = collate_fn) 101 102 # Generate a fixed seed 103 generator = torch.Generator() 104 if random_seed != 0: 105 generator.manual_seed(random_seed) 106 107 train_size = int(train_split * len(dataset)) 108 test_size = len(dataset) - train_size 109 110 trainset, testset = torch.utils.data.random_split(dataset, [train_size, test_size], 
generator=generator) 111 112 # Train and Validation sets 113 return torch.utils.data.DataLoader(trainset, batch_size=batch_size, \ 114 shuffle=shuffle, num_workers=2, collate_fn = collate_fn), \ 115 torch.utils.data.DataLoader(testset, batch_size=batch_size, \ 116 shuffle=shuffle, num_workers=2, collate_fn = collate_fn)
21 - warning: assert-on-string-literal 77 - refactor: no-else-return 88 - refactor: too-many-arguments 88 - refactor: too-many-positional-arguments 92 - warning: broad-exception-raised 95 - refactor: simplifiable-if-expression 98 - refactor: consider-using-in
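Three of the messages above have one-line fixes: raise instead of asserting a string literal (which is always truthy), drop the True if ... else False expression, and use a membership test. A sketch with stand-in values:

files = ['a.png', 'b.png']              # stand-in values
data_size, train_split, random_seed = 2, 0.0, 0

if not 0 <= data_size <= len(files):    # raise, instead of assert on a string
    raise ValueError('data_size must be between 0 and the number of files')
shuffle = random_seed != 0              # simpler than True if ... else False
single_set = train_split in (0, 1)      # one membership test, not two ==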
1 import os, shutil, gc 2 from argparse import ArgumentParser 3 from time import sleep 4 5 import h5py 6 import numpy as np 7 import scipy as sp 8 from matplotlib import pyplot as plt 9 from mpl_toolkits.mplot3d import Axes3D 10 from scipy import io, signal 11 from scipy.signal.windows import nuttall, taylor 12 13 from .util import * 14 15 def proc(args): 16 rawpath = f'dataset/{args.pathin}/chext' 17 savepath = f'dataset/{args.pathout}/proc' if args.pathout else f'dataset/{args.pathin}/proc' 18 print(f'[LOG] Proc | Starting: {args.pathin}') 19 20 # Create the subsequent save folders 21 # if os.path.isdir(savepath): 22 # shutil.rmtree(savepath) 23 if not os.path.isdir(savepath): 24 os.makedirs(savepath + '/raw/') 25 os.mkdir(savepath + '/denoised/') 26 27 # # # PARAMETERS INIT # # # 28 29 c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12) # speed of light 30 f_start = 76e9 31 f_stop = 78e9 32 # Tramp_up = 180e-6 33 # Tramp_down = 32e-6 34 Tp = 250e-6 35 # T_int = 66.667e-3 36 N = 512 37 # N_frames = 1250 38 N_loop = 256 39 # Tx_power = 100 40 kf = 1.1106e13 41 BrdFuSca = 4.8828e-5 42 fs = 2.8571e6 43 fc = (f_start + f_stop)/2 44 45 # # # CONFIGURE SIGNAL PROCESSING # # # 46 47 # # Range dimension 48 NFFT = 2**10 # number of fft points in range dim 49 nr_chn = 16 # number of channels 50 # fft will be computed using a hannng window to lower border effects 51 win_range = np.broadcast_to(np.hanning(N-1), (N_loop, nr_chn, N-1)).T # integral of the window for normalization 52 # print(win_range.shape) 53 sca_win = np.sum(win_range[:, 0, 0]) 54 55 v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf) # vector of range values for each range bin 56 57 r_min = 0 # min range considered 58 r_max = 10 # max range considered 59 60 arg_rmin = np.argmin(np.abs(v_range - r_min)) # index of the min range considered value 61 arg_rmax = np.argmin(np.abs(v_range - r_max)) # index of the max range considered value 62 vrange_ext = v_range[arg_rmin:arg_rmax+1] # vector of range values from rmin to rmax 63 64 # # Doppler dimension 65 NFFT_vel = 256 # number of fft points in angle dim 66 win_vel = np.broadcast_to(np.hanning(N_loop).reshape(1, 1, -1), (vrange_ext.shape[0], nr_chn, N_loop)) 67 scawin_vel = np.sum(win_vel[0, 0, :]) 68 vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp) # vector of considered frequencies in Doppler dim 69 v_vel = vfreq_vel*c0/(2*fc) # transform freqs into velocities 70 v_vel = np.delete(v_vel, np.arange(124, 132)) # delete velocities close to 0 71 72 # # Angle dimension 73 NFFT_ant = 64 # number of fft points in angle dim 74 win_ant = np.broadcast_to(taylor(nr_chn, nbar=20, sll=20).reshape(1,-1,1), (vrange_ext.shape[0], nr_chn, NFFT_vel)) 75 scawin_ant = np.sum(win_ant[0, :, 0]) 76 # win_ant = np.tile(win_ant, (len(vrange_ext), 1)) 77 # vang_deg = np.arcsin(2*np.arange(-NFFT_ant/2, NFFT_ant/2)/NFFT_ant)/np.pi*180 # vector of considered angles [-90, 90-dtheta] 78 # print(vang_deg) 79 # print(deg2rad_shift(vang_deg)) 80 81 # ant_idx = np.concatenate([np.arange(nr_chn), np.arange(nr_chn+1, 2*nr_chn)]) # indices of virtual antenna elements 82 # ant_idx = np.arange(nr_chn) 83 cal_data = io.loadmat('dataprep/calibration.mat')['CalData'] # load complex calibration weights for each antenna element 84 cal_data = cal_data[:16] # keep weights for TX1 only 85 mcal_data = np.broadcast_to(cal_data, (N-1, cal_data.shape[0], N_loop)) 86 87 # # # PROCESS THE RDA SLICES FOR EACH FRAME # # # 88 # sequences = [1, 2, 3, 4, 5, 6] # this is just as an example, you should put here the ids of the sequences you want to 
process 89 # sequences = range(0, len(os.listdir(rawpath))) # this is just as an example, you should put here the ids of the sequences you want to process 90 for i, fname in enumerate(os.listdir(rawpath)): 91 frawname = fname.split('.')[0] 92 logprefix = f'[LOG] Proc | {i+1} / {len(os.listdir(rawpath))} {frawname}' 93 print(f'{logprefix} {fname}', end='\r') 94 95 Data_orig = np.load(f'{rawpath}/{fname}') 96 # print(f'{logprefix} Original data shape: {Data_orig.shape}', end='\r') 97 98 parts = [0, 1, 2, 3] 99 SIDELOBE_LEVEL = 3 100 LINTHR_HIGH = -97 101 LINTHR_LOW = -107 102 103 for part in parts: # split processing in parts for memory, each track is split in 4 104 savename = f'{args.saveprefix}_seq_{frawname.split("_")[2]}_sub_{part}' \ 105 if args.saveprefix else f'{frawname}_sub_{part}' 106 logprefix = f'[LOG] Proc | {i*len(parts)+part+1} / {len(os.listdir(rawpath))*len(parts)} {frawname}' 107 print(f'{logprefix} {savename}', end='\r') 108 109 Data = Data_orig[:, :, part*32000:(part+1)*32000] # each part has 32k blocks (128k/4) 110 split_locs = np.arange(Data.shape[2], step=N_loop, dtype=np.int)[1:] 111 Data = np.stack(np.split(Data, split_locs, axis=2)[:-1], axis=-1) # split data into a sequence of radar cubes 112 print(f'{logprefix} Time-split \t\t\t', end='\r') 113 114 nsteps = Data.shape[-1] # last dim is time 115 rda_data = np.zeros((len(vrange_ext), NFFT_ant, NFFT_vel, nsteps), dtype=np.float32) 116 raw_ra = np.zeros((len(vrange_ext), NFFT_ant, nsteps), dtype=np.float32) 117 for j in range(nsteps): # loop on the timesteps 118 print(f'{logprefix} Timestep: {j+1} \t\t\t', end='\r') 119 RawRadarCube = Data[1:, :, :, j] 120 # print(RawRadarCube.shape) 121 # Range fft: window, calibration and scaling are applied 122 range_profile = np.fft.fft(RawRadarCube*win_range*mcal_data, NFFT, axis=0)*BrdFuSca/sca_win 123 rp_ext = range_profile[arg_rmin:arg_rmax+1] # extract only ranges of interest (0 to 10 m) 124 # background subtraction for MTI 125 rp_ext -= np.mean(rp_ext, axis=2, keepdims=True) 126 # Doppler fft 127 range_doppler = np.fft.fftshift(np.fft.fft(rp_ext*win_vel, NFFT_vel, axis=2)/scawin_vel, axes=2) 128 # Angle fft 129 range_angle_doppler = np.fft.fftshift(np.fft.fft(range_doppler*win_ant, NFFT_ant, axis=1)/scawin_ant, axes=1) 130 131 # absolute value + 20log10 to compute power 132 range_angle_doppler = 20*np.log10(np.abs(range_angle_doppler)) 133 134 # fig, ax = plt.subplots(1, 2) 135 # ax[0].imshow(range_angle_doppler.max(2)) 136 # ax[1].imshow(range_angle_doppler.max(1)) 137 # plt.show() 138 139 raw_ra[..., j] = range_angle_doppler.max(2) # store raw range-angle image 140 141 # at this point you have the RDA representation and you can apply further denoising 142 rdep_thr = np.linspace(LINTHR_HIGH, LINTHR_LOW, range_angle_doppler.shape[0]).reshape((-1, 1, 1)) 143 144 range_angle_doppler -= rdep_thr 145 range_angle_doppler[range_angle_doppler < 0] = 0 146 147 maxs = np.max(range_angle_doppler, axis=1).reshape(range_angle_doppler.shape[0], 1, range_angle_doppler.shape[2]) 148 # maxs = np.max(range_angle_doppler, axis=(0, 2)).reshape(1, range_angle_doppler.shape[1], 1) 149 threshold = maxs - SIDELOBE_LEVEL 150 range_angle_doppler[range_angle_doppler < threshold] = 0 151 152 rda_data[..., j] = range_angle_doppler 153 154 # fig, ax = plt.subplots(1, 2) 155 # ax[0].imshow(range_angle_doppler.max(2)) 156 # ax[1].imshow(range_angle_doppler.max(1)) 157 # plt.show() 158 159 print(f'{logprefix} Saving: {savename} \t\t\t') 160 np.save(f'{savepath}/denoised/{savename}.npy', rda_data) 161 
np.save(f'{savepath}/raw/{savename}.npy', raw_ra) 162 163 del Data, rda_data, split_locs, raw_ra 164 gc.collect() 165 del Data_orig 166 gc.collect() 167 print('\n')
13 - error: relative-beyond-top-level 13 - warning: wildcard-import 15 - refactor: too-many-locals 15 - refactor: too-many-statements 1 - warning: unused-import 2 - warning: unused-import 3 - warning: unused-import 5 - warning: unused-import 7 - warning: unused-import 8 - warning: unused-import 9 - warning: unused-import 10 - warning: unused-import 11 - warning: unused-import
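Most of the import warnings go away by keeping only what proc() actually calls; a sketch of a trimmed header for this module:

import gc
import os
import numpy as np
from scipy import io
from scipy.signal.windows import taylor   # the only window function used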
1 import torch 2 import torch.nn as nn 3 # import torch.nn.functional as F 4 import torch.optim as optim 5 # import torchvision 6 import torchvision.transforms as transforms 7 8 # import os, pickle, random 9 import time, sys 10 11 import numpy as np 12 # from PIL import Image 13 import argparse 14 15 from .darknet import DarkNet 16 from .dataset import * 17 from .util import * 18 19 def parse_arg(): 20 parser = argparse.ArgumentParser(description='mmWaveYoLov3 Training module', add_help=True) 21 22 parser.add_argument('--cfg', type=str, default='yolov3micro', 23 help="Name of the network config") 24 parser.add_argument('--pathin', type=str, default='trainset', 25 help="Input dataset name") 26 27 parser.add_argument('--datasplit', type=float, default=0.8, 28 help="Dataset split percentage (def: 0.8 (80 (train):20 (validation))") 29 parser.add_argument('--seed', type=float, default=42, 30 help="Seed for the random shuffle (default: 42, 0 for no shuffling)") 31 parser.add_argument('--bs', type=int, default=8, 32 help="Batch size (default: 8, 0 for single batch)") 33 parser.add_argument('--ckpt', type=str, default='0.0', 34 help="Checkpoint name as <'epoch'.'iteration'>") 35 parser.add_argument('--ep', type=int, default=5, 36 help="Total epoch number (default: 5)") 37 38 parser.add_argument('--lr', type=float, default=1e-5, 39 help="Learning rate (default: 1e-5)") 40 parser.add_argument('--reso', type=int, default=416, 41 help="Input image resolution (default: 416)") 42 43 parser.add_argument('--v', type=int, default=0, 44 help="Verbose (0 minimal (default), 1 normal, 2 all") 45 46 return parser.parse_args(sys.argv[2:]) 47 48 def train(): 49 torch.cuda.empty_cache() 50 51 # CONSTANTS 52 args = parse_arg() 53 pathcfg = f"cfg/{args.cfg}.cfg" 54 pathin = f"dataset/{args.pathin}/final" 55 num_workers = 2 56 57 # NETWORK 58 darknet = DarkNet(pathcfg, args.reso) 59 pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad) 60 print('# of params: ', pytorch_total_params) 61 if args.v > 0: 62 print(darknet.module_list) 63 64 # LOAD A CHECKPOINT!!! 65 start_epoch, start_iteration = [0, 0] 66 tlosses, vlosses = [], [] 67 optimizer, scheduler = None, None 68 start_epoch, start_iteration = [int(x) for x in args.ckpt.split('.')] 69 if start_epoch != 0 and start_epoch != 0: 70 start_epoch, start_iteration, state_dict, \ 71 tlosses, vlosses, \ 72 optimizer, scheduler = load_checkpoint( 73 f'save/checkpoints/', 74 int(start_epoch), 75 int(start_iteration) 76 ) 77 darknet.load_state_dict(state_dict) 78 # ==================================================== 79 80 # OPTIMIZER & HYPERPARAMETERS 81 if optimizer == None: 82 # optimizer = optim.SGD(filter(lambda p: p.requires_grad, darknet.parameters()), \ 83 # lr=args.lr, momentum=0.9, weight_decay=5e-4, nesterov=True) 84 optimizer = optim.Adam(filter(lambda p: p.requires_grad, darknet.parameters()), \ 85 lr=args.lr, betas=[0.9,0.999], eps=1e-8, weight_decay=0, amsgrad=False) 86 if scheduler == None: 87 scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1) 88 89 # IMAGE PREPROCESSING!!! 
90     transform = transforms.Compose([
91         # transforms.RandomResizedCrop(size=args.reso, interpolation=3),
92         transforms.Resize(size=(args.reso, args.reso), interpolation=3),
93         transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),
94         transforms.RandomVerticalFlip(),
95         transforms.ToTensor()
96     ])
97     # ====================================================
98 
99     # Train and Validation data allocation
100     trainloader, validloader = getDataLoaders(pathin, transform, \
101         train_split=args.datasplit, batch_size=args.bs, \
102         num_workers=num_workers, collate_fn=collate, random_seed=args.seed)
103     # ====================================================
104 
105     # Use GPU if available
106     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
107     if torch.cuda.device_count() > 1:  # Use Multi GPU if available
108         darknet = nn.DataParallel(darknet)
109     darknet.to(device)  # Put the network on device
110     if args.v > 0:
111         print(next(darknet.parameters()).device)
112 
113     # TRAIN
114     print(f'[LOG] TRAIN | Training set: {len(trainloader.dataset)}')
115     print(f'[LOG] TRAIN | Validation set: {len(validloader.dataset)}')
116     print(f'[LOG] TRAIN | Starting to train from epoch {start_epoch} iteration {start_iteration}')
117     if start_epoch > args.ep:
118         print(f'[ERR] TRAIN | Total epochs ({args.ep}) is less than the current epoch ({start_epoch})')
119         return
120 
121     for epoch in range(start_epoch, args.ep):
122         print(f'[LOG] TRAIN | Starting Epoch #{epoch+1}')
123         darknet.train()  # set network to training mode
124         tloss, vloss = [], []
125         start = time.time()
126 
127         for batch_idx, (_, inputs, targets) in enumerate(trainloader):
128             optimizer.zero_grad()  # clear the grads from prev passes
129             inputs, targets = inputs.to(device), targets.to(device)  # Images, Labels
130 
131             outputs = darknet(inputs, targets, device)  # Loss
132             outputs['total'].backward()  # Gradient calculations
133 
134             tloss.append(outputs['total'].item())
135             optimizer.step()
136 
137             end = time.time()
138 
139             # Latest iteration!
140             if args.v == 1:
141                 print(f'x: {outputs["x"].item():.2f} y: {outputs["y"].item():.2f} ')
142             elif args.v == 2:
143                 print(f'x: {outputs["x"].item():.2f} y: {outputs["y"].item():.2f} ' \
144                     f'w: {outputs["w"].item():.2f} h: {outputs["h"].item():.2f} ' \
145                     f'cls: {outputs["cls"].item():.2f} ' \
146                     f'conf: {outputs["conf"].item()}')
147 
148             if (batch_idx % 100) == 99:
149                 print(f'[LOG] TRAIN | Batch #{batch_idx+1}\
150                     Loss: {np.mean(tloss)}\
151                     Time: {end - start}s')
152                 start = time.time()
153 
154         # Save train loss for the epoch
155         tlosses.append(np.mean(tloss))
156 
157         scheduler.step()
158 
159         # VALIDATION
160         darknet.eval()  # turn off training-only behaviour (dropout, batch-norm updates)
161         with torch.no_grad():
162             for batch_idx, (_, inputs, targets) in enumerate(validloader):
163                 inputs, targets = inputs.to(device), targets.to(device)
164 
165                 voutputs = darknet(inputs, targets, device)
166                 vloss.append(voutputs['total'].item())
167 
168         # Validation loss!
169         print(f'[LOG] VALID | Epoch #{epoch+1} \
170             Loss: {np.mean(vloss)}')
171 
172         # Save valid loss for the epoch
173         vlosses.append(np.mean(vloss))
174         # ====================================================
175 
176         if (epoch % 10) == 9:
177             save_checkpoint('save/checkpoints/', epoch+1, 0, {
178                 'epoch': epoch+1,
179                 'iteration': 0,
180                 'state_dict': darknet.state_dict(),
181                 'tlosses': tlosses,
182                 'vlosses': vlosses,
183                 'optimizer': optimizer,
184                 'scheduler': scheduler
185             })
186             plot_losses(tlosses, vlosses, 'save/losses')
187 
188     save_checkpoint('save/checkpoints/', epoch+1, 0, {
189         'epoch': epoch+1,
190         'iteration': 0,
191         'state_dict': darknet.state_dict(),
192         'tlosses': tlosses,
193         'vlosses': vlosses,
194         'optimizer': optimizer,
195         'scheduler': scheduler
196     })
197     plot_losses(tlosses, vlosses, 'save/losses')
198 
2 - refactor: consider-using-from-import 4 - refactor: consider-using-from-import 6 - refactor: consider-using-from-import 15 - error: relative-beyond-top-level 16 - error: relative-beyond-top-level 16 - warning: wildcard-import 17 - error: relative-beyond-top-level 17 - warning: wildcard-import 48 - refactor: too-many-locals 69 - refactor: consider-using-in 72 - error: undefined-variable 73 - warning: f-string-without-interpolation 100 - error: undefined-variable 102 - error: undefined-variable 176 - error: undefined-variable 176 - warning: f-string-without-interpolation 185 - error: undefined-variable 185 - warning: f-string-without-interpolation 187 - error: undefined-variable 187 - warning: f-string-without-interpolation 196 - error: undefined-variable 196 - warning: f-string-without-interpolation 48 - refactor: too-many-branches 48 - refactor: too-many-statements
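train() above unpacks load_checkpoint and calls save_checkpoint/plot_losses, but all three arrive through the wildcard imports, which is why pylint reports them as undefined. A minimal sketch of what the two checkpoint helpers could look like, assuming plain torch.save persistence and the <epoch>.<iteration> naming implied by --ckpt (the field names mirror the payload dict above; the .ckpt suffix is an assumption):

import os
import torch

def save_checkpoint(path, epoch, iteration, payload):
    # persist the whole training state as e.g. save/checkpoints/5.0.ckpt
    os.makedirs(path, exist_ok=True)
    torch.save(payload, os.path.join(path, f'{epoch}.{iteration}.ckpt'))

def load_checkpoint(path, epoch, iteration):
    # return the fields in the same order train() unpacks them
    ckpt = torch.load(os.path.join(path, f'{epoch}.{iteration}.ckpt'))
    return (ckpt['epoch'], ckpt['iteration'], ckpt['state_dict'],
            ckpt['tlosses'], ckpt['vlosses'],
            ckpt['optimizer'], ckpt['scheduler'])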
1 records = [{'test': 0, 'test2': 1}, {'test': 3, 'test2': 4}]
2 
3 print(records)
4 
5 for i, _ in enumerate(records):
6     print(i)
7 
8 
9 print(records)
1 - warning: unused-import 2 - warning: unused-import 3 - warning: unused-import
1 import argparse
2 import sys
3 
4 import yolo
5 import dataprep
6 
7 def parse_arg():
8     parser = argparse.ArgumentParser(description='mmWave YOLOv3', add_help=True,
9         usage='''python . <action> [<args>]
10 
11 Actions:
12     train       Network training module
13     predict     Object detection module
14     dataprep    Data preprocessing module
15 '''
16     )
17     parser.add_argument('Action', type=str, help='Action to run')
18 
19     return parser.parse_args(sys.argv[1:2])
20 
21 args = parse_arg()
22 
23 if args.Action in ('train', 'predict'):
24     yolo.main(args)
25 elif args.Action == 'dataprep':
26     dataprep.main()
27 else:
28     print('Unknown action. Check "python . --help"')
23 - refactor: consider-using-in
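The dispatcher consumes only sys.argv[1:2], and each module re-parses sys.argv[2:] for its own flags, so invocations take the form below (dataset and config names are illustrative):

# python . train --cfg yolov3micro --pathin trainset --bs 8 --ep 5
# python . predict --cfg yolov3micro
# python . dataprep --pathin capture01 --chext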
1 import argparse
2 import sys, gc
3 
4 from .channel_extraction import chext
5 from .processing import proc
6 from .truth import truth
7 
8 def parse_arg():
9     parser = argparse.ArgumentParser(description='Data preprocessing module', add_help=True)
10 
11     parser.add_argument('--pathin', type=str, required=True,
12                         help="Path for the input folder")
13     parser.add_argument('--pathout', type=str,
14                         help="Path for the output folder")
15     parser.add_argument('--saveprefix', type=str,
16                         help="Prefix for the save file")
17 
18     parser.add_argument('--chext', action='store_true',
19                         help="Perform channel extraction")
20     parser.add_argument('--proc', action='store_true',
21                         help="Perform signal processing (FFT and denoising)")
22     parser.add_argument('--truth', action='store_true',
23                         help="Perform ground truth (clustering, tracking) bounding box calculations")
24 
25 
26     parser.add_argument('--objcount', type=int, default=1,
27                         help="Number of objects per image (default: 1)")
28     parser.add_argument('--reso', type=int, default=416,
29                         help="Input image resolution (default: 416)")
30 
31     parser.add_argument('--v', type=int, default=0,
32                         help="Verbose (0 minimal (default), 1 normal, 2 all)")
33 
34     return parser.parse_args(sys.argv[2:])
35 
36 def main():
37     args = parse_arg()
38 
39     if args.chext:
40         chext(args)
41         gc.collect()
42     if args.proc:
43         proc(args)
44         gc.collect()
45     if args.truth:
46         truth(args)
47         gc.collect()
4 - error: relative-beyond-top-level 5 - error: relative-beyond-top-level 6 - error: relative-beyond-top-level
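Each preprocessing stage is an independent store_true flag, so a capture can go through the whole pipeline in one call or one stage at a time (folder names are illustrative):

# python . dataprep --pathin capture01 --chext
# python . dataprep --pathin capture01 --pathout capture01_out --chext --proc --truth --objcount 2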
1 files = [ "utilities.vhd", 2 "arp_types.vhd", 3 "axi_types.vhd", 4 "ipv4_types.vhd", 5 "xUDP_Common_pkg.vhdl", 6 "axi_tx_crossbar.vhd", 7 "arp_REQ.vhd", 8 "arp_RX.vhd", 9 "arp_STORE_br.vhd", 10 "arp_SYNC.vhd", 11 "arp_TX.vhd", 12 "arp.vhd", 13 "IPv4_RX.vhd", 14 "IPv4_TX.vhd", 15 "IPv4.vhd", 16 "IPv4_Complete_nomac.vhd", 17 "UDP_RX.vhd", 18 "UDP_TX.vhd", 19 "UDP_Complete_nomac.vhd", 20 "xge_mac_axi.vhd"]
Clean Code: No Issues Detected
1 action = "simulation" 2 3 include_dirs = ["./include"] 4 5 #vlog_opt = '+incdir+' + \ 6 #"../../../../../rtl/verilog/ipcores/xge_mac/include" 7 #__import__('os').path.dirname(__import__('os').path.abspath(__import__('inspect').getfile(__import__('inspect').currentframe()))) 8 #os.path.abspath(__import__('inspect').getfile(inspect.currentframe()))) 9 10 11 files = [ "./include/utils.v", 12 "./include/CRC32_D64.v", 13 "./include/CRC32_D8.v", 14 "./verilog/tx_dequeue.v", 15 "./verilog/sync_clk_core.v", 16 "./verilog/generic_fifo.v", 17 "./verilog/stats.v", 18 "./verilog/rx_hold_fifo.v", 19 "./verilog/tx_enqueue.v", 20 "./verilog/rx_dequeue.v", 21 "./verilog/sync_clk_wb.v", 22 "./verilog/tx_data_fifo.v", 23 "./verilog/fault_sm.v", 24 "./verilog/generic_mem_small.v", 25 "./verilog/wishbone_if.v", 26 "./verilog/generic_mem_medium.v", 27 "./verilog/meta_sync_single.v", 28 "./verilog/stats_sm.v", 29 "./verilog/rx_stats_fifo.v", 30 "./verilog/tx_hold_fifo.v", 31 "./verilog/rx_data_fifo.v", 32 "./verilog/xge_mac.v", 33 "./verilog/rx_enqueue.v", 34 "./verilog/generic_fifo_ctrl.v", 35 "./verilog/sync_clk_xgmii_tx.v", 36 "./verilog/tx_stats_fifo.v", 37 "./verilog/meta_sync.v" ] 38
Clean Code: No Issues Detected
1 files = [ "xaui_init.vhd", 2 "mdio/mdio.v", 3 "mdio/mdio_ctrl.vhd", 4 "vsc8486_init.vhd", 5 "clk_wiz_v3_3_0.vhd", 6 "xUDP_top.vhd", 7 __import__('os').path.relpath( __import__('os').environ.get('XILINX') ) + "/verilog/src/glbl.v" ] 8 9 modules = { "local" : [ "../../../rtl/vhdl/ipcores/xilinx/xaui"]} 10 # "../../../rtl/verilog/ipcores/xge_mac" ]} 11
Clean Code: No Issues Detected
1 action = "simulation" 2 include_dirs = [ "../../environment", "../../sequences/"] 3 4 vlog_opt = '+incdir+' + \ 5 __import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv+' + \ 6 __import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv/mvc_base+' + \ 7 __import__('os').environ.get('QUESTA_MVC_HOME') + '/include+' + \ 8 __import__('os').environ.get('QUESTA_MVC_HOME') + '/examples/ethernet/common+' + \ 9 __import__('os').environ.get('QUESTA_MVC_HOME') + '/questa_mvc_src/sv/ethernet/ ' 10 11 top_module = "top" 12 sim_tool = "modelsim" 13 14 files = ["src/genericTest.sv"] 15 16 modules = { "local" : [ "../../../../../syn/xilinx/src", 17 "../../../../../rtl/verilog/ipcores/xge_mac/" ] }
Clean Code: No Issues Detected
1 files = [ "./xaui_v10_4.vhd", 2 "./xaui_v10_4/simulation/demo_tb.vhd", 3 "./xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper_gtx.vhd", 4 "./xaui_v10_4/example_design/xaui_v10_4_example_design.vhd", 5 "./xaui_v10_4/example_design/xaui_v10_4_tx_sync.vhd", 6 "./xaui_v10_4/example_design/xaui_v10_4_gtx_wrapper.vhd", 7 "./xaui_v10_4/example_design/xaui_v10_4_block.vhd", 8 "./xaui_v10_4/example_design/xaui_v10_4_chanbond_monitor.vhd" ]
Clean Code: No Issues Detected
1 import configparser
2 import pymongo as pm
3 from datetime import datetime
4 import numpy as np
5 import importlib
6 import sys
7 sys.path.insert(0, '/var/www/Backend/Backend/')
8 
9 def readConfigFile():
10     """
11     Reading the setting file to use.
12     Different setting files are used on Production and Test robo brain
13     """
14 
15     global setfile
16     config = configparser.ConfigParser()
17     config.read('/tmp/backend_uwsgi_setting')
18     env = config.get('uwsgi', 'env')
19     setting_file_name = env.strip().split('.')[1]
20     setfile = importlib.import_module(setting_file_name)
21 
22 def establishConnection():
23     """
24     Establishes connection to remote db
25     """
26 
27     global brain_feeds, viewer_feeds
28     client = pm.MongoClient(host, port)
29     db = client[dbname]
30     brain_feeds = db['brain_feeds']
31     viewer_feeds = db['viewer_feeds']
32 
33 def viewerFeedsUpdate():
34     """
35     Sorts Brain Feeds on Basis of score and pushes them to ViewerFeeds table
36     """
37 
38     feeds_ordered = brain_feeds.find().sort('score', pm.DESCENDING)
39     overall_counter = 0
40     feeds_to_push = []
41     first_time = True
42 
43     for feeds in feeds_ordered:
44         try:
45             new_feed = {}
46             new_feed['_id'] = overall_counter
47             new_feed['feedid'] = feeds['_id'].__str__()
48             feeds_to_push.append(new_feed)
49             overall_counter += 1
50             print("{0} {1} {2}".format(overall_counter, feeds['score'], feeds['source_url']))
51             if overall_counter % 100 == 0:
52                 if first_time:
53                     viewer_feeds.drop()
54                     first_time = False
55                 viewer_feeds.insert(feeds_to_push)
56                 feeds_to_push = []
57         except Exception:
58             print("**************skipping*************")
59 
60 def viewerFeedsUpdate_deprecated():
61     """
62     DEPRECATED
63     Equally represent each project
64     """
65     different_projects = brain_feeds.distinct('source_url')
66     different_projects = sorted(different_projects, key=len)
67     feeds_each_project = {}
68     feeds_count = {}
69     for url in different_projects:
70         feeds_each_project[url] = brain_feeds.find({'source_url': url}, {'created_at': 1}).sort('created_at', pm.DESCENDING)
71         feeds_count[url] = feeds_each_project[url].count()
72 
73     feeds_to_push = []
74     overall_counter = 0
75     level = 0
76     first_time = True
77     while True:
78         toBreak = True
79         remaining_projects = []
80         for url in different_projects:
81             if feeds_count[url] > level:
82                 print(url)
83                 new_feed = {}
84                 new_feed['_id'] = overall_counter
85                 new_feed['feedid'] = feeds_each_project[url][level]['_id'].__str__()
86                 feeds_to_push.append(new_feed)
87                 overall_counter += 1
88                 remaining_projects.append(url)
89                 toBreak = False
90                 if overall_counter % 100 == 0:
91                     if first_time:
92                         viewer_feeds.drop()
93                         first_time = False
94                     viewer_feeds.insert(feeds_to_push)
95                     feeds_to_push = []
96         different_projects = remaining_projects
97 
98         if toBreak:
99             break
100 
101         level += 1
102 
103 if __name__ == "__main__":
104     # Reading the setting file for db address
105     readConfigFile()
106     host = setfile.DATABASES['default']['HOST']
107     dbname = setfile.DATABASES['default']['NAME']
108     port = int(setfile.DATABASES['default']['PORT'])
109 
110     # Establishing connection to remote db
111     establishConnection()
112 
113     viewerFeedsUpdate()
50 - error: syntax-error
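readConfigFile() expects an INI file at /tmp/backend_uwsgi_setting whose [uwsgi] env value is a dotted name; only the second dotted component is imported as the settings module. A hypothetical example of the file's contents (names are illustrative):

# [uwsgi]
# env = Backend.settings_production    -> importlib.import_module('settings_production')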
1 from django.conf.urls import patterns, url 2 from feed import views 3 4 urlpatterns = patterns('', 5 url(r'most_recent/', views.return_top_k_feeds, name='most_recent'), 6 url(r'infinite_scroll/', views.infinite_scrolling, name='infinite_scrolling'), 7 url(r'filter/', views.filter_feeds_with_hashtags, name='filter'), 8 url(r'filter_type/', views.filter_feeds_with_type, name='filter_type'), 9 url(r'since/', views.return_feeds_since, name='since'), 10 url(r'upvotes/', views.upvotes_recorder, name='upvotes'), 11 url(r'graph_feedback/', views.save_graph_feedback, name='graph_feedback'), 12 )
Clean Code: No Issues Detected
1 from django.db import models
2 from djangotoolbox.fields import ListField
3 from datetime import datetime
4 from django.db.models.signals import post_save
5 from queue_util import add_feed_to_queue
6 #from feed.models import BrainFeeds
7 
8 class GraphFeedback(models.Model):
9     id_node = models.TextField()
10     feedback_type = models.TextField()
11     node_handle = models.TextField()
12     action_type = models.TextField()
13 
14     def to_json(self):
15         return {"_id": self.id,
16                 "id_node": self.id_node,
17                 "feedback_type": self.feedback_type,
18                 "node_handle": self.node_handle,
19                 "action_type": self.action_type
20                 }
21 
22     class Meta:
23         db_table = "graph_feedback"
24 
25 class BrainFeeds(models.Model):
26     toshow = models.BooleanField(default=True)
27     feedtype = models.TextField() #originally feedtype -> type
28     text = models.TextField()
29     source_text = models.TextField()
30     source_url = models.TextField(db_index=True)
31     meta = {'indexes': ['source_url']}
32     media = ListField()
33     mediatype = ListField()
34     created_at = models.DateTimeField(default=datetime.now)  # pass the callable, evaluated per save
35     hashtags = models.TextField(db_index=True)
36     meta = {'indexes': ['hashtags']}
37     upvotes = models.IntegerField(default=0)
38     downvotes = models.IntegerField(default=0)
39     jsonfeed_id = models.TextField()
40     username = models.TextField()
41     score = models.FloatField(default=0.0, db_index=True)
42     meta = {'indexes': ['score']}
43     update_score = models.BooleanField(default=True, db_index=True)
44     meta = {'indexes': ['update_score']}
45     log_normalized_feed_show = models.FloatField(default=1.0)
46 
47 
48     def to_json(self):
49         return {"_id": self.id,
50                 "toshow": self.toshow,
51                 "feedtype": self.feedtype,
52                 "text": self.text,
53                 "source_text": self.source_text,
54                 "source_url": self.source_url,
55                 "media": self.media,
56                 "mediatype": self.mediatype,
57                 "created_at": self.created_at.isoformat(),
58                 "hashtags": self.hashtags,
59                 "upvotes": self.upvotes,
60                 "downvotes": self.downvotes,
61                 "jsonfeed_id": self.jsonfeed_id,
62                 "username": self.username,
63                 "score": self.score,
64                 "log_normalized_feed_show": self.log_normalized_feed_show,
65                 "update_score": self.update_score
66                 }
67 
68     class Meta:
69         db_table = 'brain_feeds'
70         get_latest_by = 'created_at'
71 
72 
73 class JsonFeeds(models.Model):
74     feedtype = models.TextField() #originally feedtype -> type
75     text = models.TextField()
76     source_text = models.TextField()
77     source_url = models.TextField()
78     mediashow = ListField()
79     media = ListField()
80     mediatype = ListField()
81     mediamap = ListField()
82     keywords = ListField()
83     graphStructure = ListField()
84 
85     created_at = models.DateTimeField(default=datetime.now)  # the datetime default belongs here, not on hashtags
86     hashtags = models.TextField(default='', blank=True)
87     meta = {'indexes': ['hashtags']}
88     upvotes = models.IntegerField(default=0)
89     downvotes = models.IntegerField(default=0)
90     username = models.TextField()
91 
92     def to_json(self):
93         return {"_id": self.id,
94                 "feedtype": self.feedtype,
95                 "text": self.text,
96                 "source_text": self.source_text,
97                 "source_url": self.source_url,
98                 "mediashow": self.mediashow,
99                 "media": self.media,
100                 "mediatype": self.mediatype,
101                 "mediamap": self.mediamap,
102                 "keywords": self.keywords,
103                 "graphStructure": self.graphStructure,
104                 "created_at": self.created_at.isoformat(),
105                 "hashtags": self.hashtags,
106                 "upvotes": self.upvotes,
107                 "downvotes": self.downvotes,
108                 "username": self.username
109                 }
110 
111     class Meta:
112         db_table = 'json_feeds'
113 
114 def postSaveJson(**kwargs):
115     instance = kwargs.get('instance')
116     print("Post Saving JsonFeed: ", instance.to_json())
add_feed_to_queue(instance.to_json()) 118 119 #Saving JsonFeed to BrainFeed 120 brain_feed = BrainFeeds( 121 feedtype=instance.feedtype, 122 text=instance.text, 123 source_text=instance.source_text, 124 source_url=instance.source_url, 125 hashtags=instance.hashtags, 126 jsonfeed_id=instance.id, 127 username=instance.username 128 ) 129 130 media = [] 131 mediatype = [] 132 133 for mediashow,_media,_mediatype in zip(instance.mediashow,instance.media,instance.mediatype): 134 if mediashow.lower() == 'true': 135 media.append(_media) 136 mediatype.append(_mediatype) 137 brain_feed.media = media 138 brain_feed.mediatype = mediatype 139 brain_feed.save() 140 141 142 #Saving viewer feed 143 """ 144 numitem = ViewerFeed.objects.all().count() 145 viewer_feed = ViewerFeed( 146 id = numitem, 147 feedid = brain_feed.id 148 ) 149 viewer_feed.save() 150 """ 151 #Saving JsonFeed to GraphDB 152 153 post_save.connect(postSaveJson, JsonFeeds) 154 155 class ViewerFeed(models.Model): 156 feedid = models.TextField() 157 id = models.IntegerField(db_index=True,primary_key=True) 158 meta = {'indexes':['id']} 159 160 def to_json(self): 161 return {"_id":self.id,"id":self.id,"feedid":self.feedid} 162 163 class Meta: 164 db_table = 'viewer_feeds'
116 - error: syntax-error
1 #!/usr/bin/python
2 
3 import boto.sqs
4 import json
5 import traceback
6 from boto.sqs.message import RawMessage
7 from bson import json_util
8 
9 conn = boto.sqs.connect_to_region(
10     "us-west-2",
11     aws_access_key_id='AKIAIDKZIEN24AUR7CJA',
12     aws_secret_access_key='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')
13 
14 feed_queue = conn.create_queue('feed_queue')
15 
16 def add_feed_to_queue(json_feed):
17     m = RawMessage()
18     try:
19         m.set_body(json.dumps(json_feed, default=json_util.default))
20         feed_queue.write(m)
21     except Exception:
22         print(traceback.format_exc())
23         print(json_feed)
24 
25 if __name__ == '__main__':
26     add_feed_to_queue({
27         "username" : "arzav",
28         "_id": "546e6a2f5caae434656bbc36",
29         "feedtype" : "",
30         "mediashow" : [ ],
31         "text" : "#Simhat_Torah is a synonym of #Rejoicing_in_the_Law",
32         "hashtags" : " simhat_torah rejoicing_in_the_law",
33         "mediatype" : [ ],
34         "source_url" : "http://wordnet.princeton.edu/",
35         "source_text" : "WordNet",
36         "mediamap" : [ ],
37         "media" : [ ],
38         "keywords": ["Simhat_Torah","Rejoicing_in_the_Law","synonym","wordnet"],
39         "upvotes" : 0,
40         "graphStructure": ["#same_synset: #0 -> #1", "#same_synset: #1 -> #0"]})
21 - error: syntax-error
1 from django.conf.urls import patterns, url 2 from rest_framework.urlpatterns import format_suffix_patterns 3 4 urlpatterns = patterns('rest_api.views', 5 url(r'^feeds/$', 'feed_list'), 6 #url(r'^snippets/(?P<pk>[0-9]+)$', 'snippet_detail'), 7 ) 8 9 urlpatterns = format_suffix_patterns(urlpatterns)
Clean Code: No Issues Detected
1 from django.conf.urls import patterns, url 2 import auth 3 4 urlpatterns = patterns('', 5 url(r'create_user/', auth.create_user_rb, name='create_user'), 6 url(r'login/', auth.login_rb, name='login'), 7 url(r'logout/', auth.logout_rb, name='logout') 8 )
Clean Code: No Issues Detected
1 from __future__ import with_statement
2 from fabric.api import cd, env, local, settings, run, sudo
3 from fabric.colors import green, red
4 from fabric.contrib.console import confirm
5 
6 def prod_deploy(user='ubuntu'):
7     print(red('Deploying to production at robobrain.me...'))
8     if not confirm('Are you sure you want to deploy to production?'):
9         print(red('Aborting deploy.'))
10         return  # without this, the deploy would proceed after the abort message
11     env.host_string = '54.149.21.165'
12     env.key_filename = 'conf/www.pem'
13     env.user = user
14     env.shell = '/bin/zsh -l -c'
15     with cd('/var/www/Backend'):
16         # sudo('su - ubuntu')
17         print(green('Checking out test...'))
18         run('git checkout test')
19         print(green('Pulling latest version of test...'))
20         run('git pull origin test')
21         print(green('Checking out production...'))
22         run('git checkout production')
23         print(green('Rebasing onto test...'))
24         run('git rebase test')
25         print(green('Pushing production upstream...'))
26         run('git push origin production')
27         print(green('Reloading server...'))
28         sudo('uwsgi --reload /tmp/robobrain-master.pid')
29     print(red('Done!'))
30 
31 def test_deploy(user='ubuntu'):
32     env.host_string = '54.148.225.192'
33     env.key_filename = 'conf/www.pem'
34     env.user = user
35     env.shell = '/bin/zsh -l -c'
36     print(red('Deploying to test at test.robobrain.me...'))
37     with cd('/var/www/Backend'):
38         print(green('Checking out master...'))
39         run('git checkout master')
40         print(green('Pulling latest version of master...'))
41         run('git pull origin master')
42         print(green('Checking out test...'))
43         run('git checkout test')
44         print(green('Rebasing onto master...'))
45         run('git rebase master')
46         print(green('Pulling latest version of test...'))
47         run('git pull origin test')
48         print(green('Pushing the latest version of test...'))
49         run('git push origin test')
50         print(green('Reloading server...'))
51         sudo('uwsgi --reload /tmp/robobrain-master.pid')
52     print(red('Done!'))
7 - warning: bad-indentation 8 - warning: bad-indentation 9 - warning: bad-indentation 10 - warning: bad-indentation 11 - warning: bad-indentation 12 - warning: bad-indentation 13 - warning: bad-indentation 14 - warning: bad-indentation 16 - warning: bad-indentation 17 - warning: bad-indentation 18 - warning: bad-indentation 19 - warning: bad-indentation 20 - warning: bad-indentation 21 - warning: bad-indentation 22 - warning: bad-indentation 23 - warning: bad-indentation 24 - warning: bad-indentation 25 - warning: bad-indentation 26 - warning: bad-indentation 27 - warning: bad-indentation 28 - warning: bad-indentation 31 - warning: bad-indentation 32 - warning: bad-indentation 33 - warning: bad-indentation 34 - warning: bad-indentation 35 - warning: bad-indentation 36 - warning: bad-indentation 37 - warning: bad-indentation 38 - warning: bad-indentation 39 - warning: bad-indentation 40 - warning: bad-indentation 41 - warning: bad-indentation 42 - warning: bad-indentation 43 - warning: bad-indentation 44 - warning: bad-indentation 45 - warning: bad-indentation 46 - warning: bad-indentation 47 - warning: bad-indentation 48 - warning: bad-indentation 49 - warning: bad-indentation 50 - warning: bad-indentation 51 - warning: bad-indentation 2 - warning: unused-import 2 - warning: unused-import
1 # Create your views here.
2 from rest_framework import status
3 from rest_framework.decorators import api_view
4 from rest_framework.response import Response
5 from feed.models import JsonFeeds
6 from rest_api.serializer import FeedSerializer
7 from datetime import datetime
8 from rest_framework import permissions
9 
10 
11 @api_view(['GET', 'POST'])
12 def feed_list(request):
13     # List all feeds, or create a new feed.
14     if request.method == 'GET':
15         feeds = JsonFeeds.objects.all()[:25]
16         serializer = FeedSerializer(feeds, many=True)
17         return Response(serializer.data)
18 
19     elif request.method == 'POST':
20         serializer = FeedSerializer(data=request.DATA)
21         if serializer.is_valid():
22             serializer.save()
23             return Response(serializer.data, status=status.HTTP_201_CREATED)
24         return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
14 - refactor: no-else-return 12 - refactor: inconsistent-return-statements 7 - warning: unused-import 8 - warning: unused-import
1 from django.http import HttpResponse
2 import json
3 from django.contrib.auth.models import User
4 from django.views.decorators.csrf import ensure_csrf_cookie
5 from django import forms
6 from django.contrib.auth import login, logout
7 from django.contrib.auth import authenticate
8 from base64 import b64decode
9 
10 
11 @ensure_csrf_cookie
12 def create_user_rb(request):
13     if request.method == 'GET':
14         return HttpResponse('Ok')
15     elif request.method == 'POST':
16         payload = json.loads(request.body)
17         username = payload['username']
18         email = payload['email']
19         password = payload['password']
20         if email and User.objects.filter(email=email).exclude(username=username).count():
21             return HttpResponse('This email address is already in use! Try logging in.', status=401)
22         if email and User.objects.filter(email=email, username=username).count():
23             return HttpResponse('This account already exists! Try logging in.', status=401)
24         user = User.objects.create_user(username, email, password)
25         user.save()
26         return HttpResponse('Ok')
27 
28 @ensure_csrf_cookie
29 def login_rb(request):
30     if request.user.is_authenticated():
31         user = request.user
32         user_data = {
33             'id': user.id,
34             'username': user.username,
35             'email': user.email,
36             'loggedin': 'True'
37         }
38         return HttpResponse(json.dumps(user_data), content_type='application/json')
39     if request.method == 'GET':
40         return HttpResponse('Ok')
41     elif request.method == 'POST':
42         decodedCredentials = b64decode(request.body)
43         if ':' not in decodedCredentials:
44             return HttpResponse('Not logged in', status=401)
45         email, password = decodedCredentials.split(':')
46         user = authenticateEmail(email, password)
47         if not user:
48             return HttpResponse('Invalid Credentials', status=401)
49         user = authenticate(username=user.username, password=password)
50         if not user:
51             return HttpResponse('Invalid Credentials', status=401)
52         login(request, user)
53 
54         user_data = {
55             'id': user.id,
56             'username': user.username,
57             'email': user.email
58         }
59         return HttpResponse(json.dumps(user_data), content_type='application/json')
60 
61 def authenticateEmail(email=None, password=None):
62     try:
63         user = User.objects.get(email=email)
64         if user.check_password(password):
65             return user
66     except User.DoesNotExist:
67         return None
68 
69 def logout_rb(request):
70     logout(request)
71     return HttpResponse('Logged Out')
13 - warning: bad-indentation 14 - warning: bad-indentation 15 - warning: bad-indentation 16 - warning: bad-indentation 17 - warning: bad-indentation 18 - warning: bad-indentation 19 - warning: bad-indentation 20 - warning: bad-indentation 21 - warning: bad-indentation 22 - warning: bad-indentation 23 - warning: bad-indentation 24 - warning: bad-indentation 25 - warning: bad-indentation 26 - warning: bad-indentation 30 - warning: bad-indentation 31 - warning: bad-indentation 32 - warning: bad-indentation 37 - warning: unnecessary-semicolon 38 - warning: bad-indentation 39 - warning: bad-indentation 40 - warning: bad-indentation 41 - warning: bad-indentation 42 - warning: bad-indentation 43 - warning: bad-indentation 44 - warning: bad-indentation 45 - warning: bad-indentation 46 - warning: bad-indentation 47 - warning: bad-indentation 48 - warning: bad-indentation 49 - warning: bad-indentation 50 - warning: bad-indentation 51 - warning: bad-indentation 52 - warning: bad-indentation 54 - warning: bad-indentation 58 - warning: unnecessary-semicolon 59 - warning: bad-indentation 62 - warning: bad-indentation 63 - warning: bad-indentation 64 - warning: bad-indentation 65 - warning: bad-indentation 66 - warning: bad-indentation 67 - warning: bad-indentation 70 - warning: bad-indentation 71 - warning: bad-indentation 13 - refactor: no-else-return 12 - refactor: inconsistent-return-statements 39 - refactor: no-else-return 29 - refactor: inconsistent-return-statements 61 - refactor: inconsistent-return-statements 5 - warning: unused-import
1 from django.forms import widgets 2 from rest_framework import serializers 3 from feed.models import JsonFeeds 4 from djangotoolbox.fields import ListField 5 6 import drf_compound_fields.fields as drf 7 from datetime import datetime 8 9 class TagFieldS(serializers.Serializer): 10 media = serializers.CharField(required=False) 11 12 13 class FeedSerializer(serializers.Serializer): 14 pk = serializers.Field() # Note: `Field` is an untyped read-only field. 15 feedtype = serializers.CharField(required=False) 16 text = serializers.CharField(required=False) 17 source_text = serializers.CharField(required=False) 18 source_url = serializers.CharField(required=False) 19 hashtags = serializers.CharField(required=False) 20 created_at = serializers.DateTimeField(required=False) 21 upvotes = serializers.IntegerField(required=False) 22 media = drf.ListField(serializers.CharField(),required=False)# serializers.CharField(required=False,many=True) 23 mediamap = drf.ListField(serializers.CharField(),required=False) 24 mediatype = drf.ListField(serializers.CharField(),required=False) 25 keywords = drf.ListField(serializers.CharField(),required=False) 26 graphStructure = drf.ListField(serializers.CharField(),required=False) 27 mediashow = drf.ListField(serializers.CharField(),required=False) 28 username = serializers.CharField(required=False) 29 30 def restore_object(self, attrs, instance=None): 31 """ 32 Create or update a new snippet instance, given a dictionary 33 of deserialized field values. 34 35 Note that if we don't define this method, then deserializing 36 data will simply return a dictionary of items. 37 """ 38 if instance: 39 # Update existing instance 40 #instance.feedtype = attrs.get('feedtype', instance.feedtype) 41 #instance.code = attrs.get('code', instance.code) 42 #instance.linenos = attrs.get('linenos', instance.linenos) 43 #instance.language = attrs.get('language', instance.language) 44 #instance.style = attrs.get('style', instance.style) 45 return instance 46 47 # Create new instance 48 attrs['created_at']=datetime.now() 49 return JsonFeeds(**attrs)
9 - refactor: too-few-public-methods 13 - refactor: too-few-public-methods 1 - warning: unused-import 4 - warning: unused-import
1 from django.db import models 2 from django_sputnik_maps.fields import AddressField 3 4 # all fields must be present in the model 5 class SampleModel(models.Model): 6 region = models.CharField(max_length=100) 7 place = models.CharField(max_length=100) 8 street = models.CharField(max_length=100) 9 house = models.IntegerField() 10 lat = models.FloatField() 11 lon = models.FloatField() 12 address = AddressField(max_length=200) 13
5 - refactor: too-few-public-methods
1 from django.conf import settings 2 from django.forms import widgets 3 4 5 class AddressWidget(widgets.TextInput): 6 '''a map will be drawn after the address field''' 7 template_name = 'django_sputnik_maps/widgets/mapwidget.html' 8 9 class Media: 10 css = { 11 'all': ('https://unpkg.com/leaflet@1.0.1/dist/leaflet.css', 12 settings.STATIC_URL + 'django_sputnik_maps/css/jquery-ui.min.css', 13 settings.STATIC_URL + 'django_sputnik_maps/css/base.css',) 14 15 } 16 js=( 17 "https://unpkg.com/leaflet@1.0.1/dist/leaflet.js", 18 settings.STATIC_URL + 'django_sputnik_maps/js/base.js', 19 settings.STATIC_URL + 'django_sputnik_maps/js/jquery-3.5.1.js', 20 settings.STATIC_URL + 'django_sputnik_maps/js/jquery-ui.min.js', 21 ) 22
9 - refactor: too-few-public-methods 5 - refactor: too-few-public-methods
1 # from django.db import models 2 from django.contrib import admin 3 from django_sputnik_maps.fields import AddressField 4 from django_sputnik_maps.widgets import AddressWidget 5 6 from .models import SampleModel 7 8 9 @admin.register(SampleModel) 10 class SampleModelAdmin(admin.ModelAdmin): 11 formfield_overrides = { 12 AddressField: { 13 'widget': AddressWidget 14 } 15 } 16
6 - error: relative-beyond-top-level 10 - refactor: too-few-public-methods
1 from django.db import models 2 3 4 class AddressField(models.CharField): 5 pass
4 - refactor: too-few-public-methods
1 from django.apps import AppConfig 2 3 4 class DjangoSputnikMapsConfig(AppConfig): 5 name = 'django_sputnik_maps'
4 - refactor: too-few-public-methods
1 from .widgets import AddressWidget
1 - error: relative-beyond-top-level 1 - warning: unused-import
1 import os 2 from flask import Flask, render_template, redirect, request, url_for 3 from flask_pymongo import PyMongo 4 from bson.objectid import ObjectId 5 6 from os import path 7 if path.exists("env.py"): 8 import env 9 10 MONGO_URI = os.environ.get("MONGO_URI") 11 12 app = Flask(__name__) 13 app.config["MONGO_DBNAME"] = 'quiz_questions' 14 app.config["MONGO_URI"] = MONGO_URI 15 16 17 mongo = PyMongo(app) 18 19 # Route for Home Page 20 21 22 @app.route('/') 23 @app.route('/get_questions') 24 def get_questions(): 25 return render_template("question_and_answer.html", 26 question_and_answer=mongo.db.question_and_answer.find()) 27 28 # Route to Add a Question 29 30 31 @app.route('/add_question') 32 def add_question(): 33 return render_template('addquestion.html', 34 categories=mongo.db.categories.find()) 35 36 # Route to Insert Question 37 38 39 @app.route('/insert_question', methods=['POST']) 40 def insert_question(): 41 question_and_answer = mongo.db.question_and_answer 42 question_and_answer.insert_one(request.form.to_dict()) 43 return redirect(url_for('get_questions')) 44 45 # Route to Edit Question 46 47 48 @app.route('/edit_question/<question_and_answer_id>') 49 def edit_question(question_and_answer_id): 50 the_question = mongo.db.question_and_answer.find_one( 51 {"_id": ObjectId(question_and_answer_id)}) 52 all_categories = mongo.db.categories.find() 53 return render_template('editquestion.html', 54 question_and_answer=the_question, 55 categories=all_categories) 56 57 # Route to Update Question 58 59 60 @app.route('/update_question/<question_and_answer_id>', methods=['POST']) 61 def update_question(question_and_answer_id): 62 question_and_answer = mongo.db.question_and_answer 63 question_and_answer.update({'_id': ObjectId(question_and_answer_id)}, 64 { 65 'category_name': request.form.get('category_name'), 66 'question': request.form.get('question'), 67 'answer': request.form.get('answer') 68 }) 69 return redirect(url_for('get_questions')) 70 71 # Route to Delete Question 72 73 74 @app.route('/delete_question/<question_and_answer_id>') 75 def delete_question(question_and_answer_id): 76 mongo.db.question_and_answer.remove( 77 {'_id': ObjectId(question_and_answer_id)}) 78 return redirect(url_for('get_questions')) 79 80 # Route for Shop Link 81 82 83 @app.route('/shop') 84 def get_shop(): 85 return render_template("shop.html") 86 87 # Route for Under Construction Link 88 89 90 @app.route('/under_construction') 91 def get_under_construction(): 92 return render_template("under_construction.html") 93 94 # Route for General Knowledge category 95 96 97 @app.route('/get_general_knowledge') 98 def get_general_knowledge(): 99 question_and_answer = list(mongo.db.question_and_answer.find( 100 {'category_name': 'General Knowledge'})) 101 return render_template("categories.html", 102 question_and_answer=question_and_answer) 103 104 # Route for Geography category 105 106 107 @app.route('/get_geography') 108 def get_geography(): 109 question_and_answer = list( 110 mongo.db.question_and_answer.find({'category_name': 'Geography'})) 111 return render_template("categories.html", 112 question_and_answer=question_and_answer) 113 114 # Route for History category 115 116 117 @app.route('/get_history') 118 def get_history(): 119 question_and_answer = list( 120 mongo.db.question_and_answer.find({'category_name': 'History'})) 121 return render_template("categories.html", 122 question_and_answer=question_and_answer) 123 124 # Route for Music category 125 126 127 @app.route('/get_music') 128 def get_music(): 129 
question_and_answer = list( 130 mongo.db.question_and_answer.find({'category_name': 'Music'})) 131 return render_template("categories.html", 132 question_and_answer=question_and_answer) 133 134 # Route for Politics category 135 136 137 @app.route('/get_politics') 138 def get_politics(): 139 question_and_answer = list( 140 mongo.db.question_and_answer.find({'category_name': 'Politics'})) 141 return render_template("categories.html", 142 question_and_answer=question_and_answer) 143 144 # Route for Sports category 145 146 147 @app.route('/get_sport') 148 def get_sport(): 149 question_and_answer = list( 150 mongo.db.question_and_answer.find({'category_name': 'Sport'})) 151 return render_template("categories.html", 152 question_and_answer=question_and_answer) 153 154 # Route for TV and Film category 155 156 157 @app.route('/get_tv_and_film') 158 def get_tv_and_film(): 159 question_and_answer = list(mongo.db.question_and_answer.find({ 160 'category_name': 'TV and Film'})) 161 return render_template("categories.html", 162 question_and_answer=question_and_answer) 163 164 165 if __name__ == '__main__': 166 app.run(host=os.environ.get('IP'), 167 port=int(os.environ.get('PORT')), 168 debug=True)
8 - warning: unused-import
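The seven category routes above differ only in the hard-coded category name. A possible consolidation into one parameterized route (a sketch, not part of the original app; it changes the public URLs, so the template links would need updating too):

@app.route('/category/<category_name>')
def get_category(category_name):
    question_and_answer = list(mongo.db.question_and_answer.find(
        {'category_name': category_name}))
    return render_template("categories.html",
                           question_and_answer=question_and_answer)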
1 import unittest 2 from src import footer_pagination 3 4 5 class SimpleTests(unittest.TestCase): 6 7 def test_beginning_pages(self): 8 """Test the initial status of the set of pages in the beginning 9 10 """ 11 self.assertSequenceEqual((1, 1), footer_pagination.init_beginning_pages(5, 1)) 12 13 def test_end_pages(self): 14 """Test the initial status of the set of pages in the end pages 15 16 """ 17 self.assertSequenceEqual((5, 5), footer_pagination.init_end_pages(5, 1)) 18 19 def test_around_pages(self): 20 """Test the initial status of the set of around pages 21 22 """ 23 self.assertSequenceEqual((4, 4), footer_pagination.init_around_pages(4, 0, 5)) 24 25 def test_overlapping_pages(self): 26 """Test overlapping sets of pages 27 28 """ 29 self.assertTrue(footer_pagination.are_overlapping_pages((1, 3), (2, 4))) 30 31 def test_not_overlapping_pages(self): 32 """Test not overlapping sets of pages 33 34 """ 35 self.assertFalse(footer_pagination.are_overlapping_pages((1, 3), (6, 7))) 36 37 def test_merge_pages(self): 38 """Tests merging of two overlapping sets of pages 39 40 """ 41 self.assertSequenceEqual((1, 4), footer_pagination.merge_pages((1, 3), (2, 4))) 42 43 def test_update_overlap_pages(self): 44 """Test the update of two sets of pages that overlap 45 46 """ 47 self.assertSequenceEqual(((1, 4), None), footer_pagination.update_pages((1, 3), (2, 4))) 48 49 def test_update_not_overlap_pages(self): 50 """Test the update of two sets of pages that do not overlap 51 52 """ 53 self.assertSequenceEqual(((1, 3), (6, 7)), footer_pagination.update_pages((1, 3), (6, 7))) 54 55 def test_find_first_page(self): 56 """Test if the first page is contained in the sets of pages 57 58 """ 59 self.assertTrue(footer_pagination.find_page([(1, 2), (3, 5), None], 1)) 60 61 def test_not_find_first_page(self): 62 """Test if the first page is contained in the sets of pages 63 64 """ 65 self.assertFalse(footer_pagination.find_page([(2, 3), (4, 5), None], 1)) 66 67 def test_exist_remaining_pages(self): 68 """Test when two sets of pages have remaining pages between them 69 70 """ 71 self.assertTrue(footer_pagination.exist_remaining_pages((1, 3), (6, 7))) 72 73 def test_not_exist_remaining_pages(self): 74 """Test when two sets of pages do not have remaining pages between them 75 76 """ 77 self.assertFalse(footer_pagination.exist_remaining_pages((1, 7), (8, 9))) 78 79 80 81 def main(): 82 unittest.main() 83 84 85 if __name__ == '__main__': 86 main()
Clean Code: No Issues Detected
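The suite above exercises src/footer_pagination without showing it. A minimal implementation sketch that satisfies every assertion in these tests (argument names are guesses; only the tested behaviour is grounded):

def init_beginning_pages(total_pages, boundaries):
    # first `boundaries` pages, clipped to the page count: (5, 1) -> (1, 1)
    return (1, min(boundaries, total_pages))

def init_end_pages(total_pages, boundaries):
    # last `boundaries` pages: (5, 1) -> (5, 5)
    return (max(1, total_pages - boundaries + 1), total_pages)

def init_around_pages(current_page, around, total_pages):
    # `around` pages on each side of the current page: (4, 0, 5) -> (4, 4)
    return (max(1, current_page - around), min(total_pages, current_page + around))

def are_overlapping_pages(pages_a, pages_b):
    # (1, 3) and (2, 4) overlap; (1, 3) and (6, 7) do not
    return pages_a[0] <= pages_b[1] and pages_b[0] <= pages_a[1]

def merge_pages(pages_a, pages_b):
    # union of two overlapping ranges: (1, 3) + (2, 4) -> (1, 4)
    return (min(pages_a[0], pages_b[0]), max(pages_a[1], pages_b[1]))

def update_pages(pages_a, pages_b):
    # collapse the pair into one range when they overlap
    if are_overlapping_pages(pages_a, pages_b):
        return (merge_pages(pages_a, pages_b), None)
    return (pages_a, pages_b)

def find_page(page_sets, page):
    # True if any non-empty range contains `page`
    return any(p and p[0] <= page <= p[1] for p in page_sets)

def exist_remaining_pages(pages_a, pages_b):
    # True when at least one page lies strictly between the two ranges
    return pages_b[0] - pages_a[1] > 1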
1 from django.contrib import admin 2 from .models import Result 3 admin.site.register(Result) 4 # Register your models here.
2 - error: relative-beyond-top-level
1 from django.urls import path
2 from .views import (
3     QuizListView,
4     quiz_view,
5     quiz_data_view,
6     save_quiz_view
7 )
8 
9 app_name = 'quizes'
10 
11 urlpatterns = [
12     path('', QuizListView.as_view(), name='main-view'),
13     path('<pk>/', quiz_view, name='quiz-view'),
14     path('<pk>/save/', save_quiz_view, name='save-view'),
15     path('<pk>/data/', quiz_data_view, name='quiz-data-view'),
16 ]
2 - error: relative-beyond-top-level
1 class Solution(object):
2     def max_area(self, heights):
3         """
4         :type heights: List(int)
5         :rtype: int
6         """
7         if not heights:
8             return 0
9 
10         left = 0
11         right = len(heights) - 1
12         # calculate the area of the outer container
13         max_area = (right - left) * min(heights[left], heights[right])
14 
15         # start moving inward.
16         # to get a bigger area, the shorter of the two borders needs to get taller
17         while left < right:
18             if heights[left] < heights[right]:  # advance left: the shorter border limits the area
19                 left += 1
20             else:
21                 right -= 1
22             max_area = max(max_area, (right - left) * min(heights[left], heights[right]))
23 
24         return max_area
1 - refactor: useless-object-inheritance 1 - refactor: too-few-public-methods
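A quick sanity check of the two-pointer scan above, using the classic LeetCode 11 input; the optimum pairs heights[1] = 8 with heights[8] = 7, for (8 - 1) * min(8, 7) = 49:

solution = Solution()
print(solution.max_area([1, 8, 6, 2, 5, 4, 8, 3, 7]))  # 49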
1 # 769
2 
3 class Solution:
4     # a new chunk is formed only when the current element is the max so far
5     # and it sits exactly at its sorted position (index == max so far)
6     def max_chunks(self, nums):
7         result = max_so_far = 0
8 
9         for i, num in enumerate(nums):
10             max_so_far = max(max_so_far, num)
11             if max_so_far == i:
12                 result += 1
13 
14         return result
3 - refactor: too-few-public-methods
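Two quick checks of the chunk rule above: [1, 0, 2, 3, 4] splits maximally because the running max keeps landing on its own index, while a reversed array only closes a single chunk at the very end:

solution = Solution()
print(solution.max_chunks([1, 0, 2, 3, 4]))  # 4
print(solution.max_chunks([4, 3, 2, 1, 0]))  # 1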
1 class Solution(object): 2 def max_consecutive_ones(self, numbers): 3 if not numbers: 4 return 0 5 6 longest, count = 0, 0 7 for num in numbers: 8 if num > 0: 9 count += 1 10 longest = max(longest, count) 11 else: 12 count = 0 13 14 return longest 15 16 solution = Solution() 17 numbers = [1, 1, 0, 1, 1, 1] 18 print(solution.max_consecutive_ones(numbers))
1 - refactor: useless-object-inheritance 2 - warning: redefined-outer-name 1 - refactor: too-few-public-methods
1 # 445 2 from utils.listNode import ListNode 3 4 class Solution: 5 def add(self, head1, head2): 6 num1 = self.listToInt(head1) 7 num2 = self.listToInt(head2) 8 9 return self.intToList(num1 + num2) 10 11 def listToInt(self, head): 12 result = 0 13 if not head: 14 return result 15 16 while head: 17 result = (result * 10) + head.value 18 head = head.next 19 20 return result 21 22 def intToList(self, num): 23 dummy = prev = ListNode(None) 24 25 for c in str(num): 26 prev.next = ListNode(int(c)) 27 prev = prev.next 28 29 return dummy.next 30 31 class Solution2: 32 def add(self, head1, head2): 33 rev1, rev2 = self.reverse(head1), self.reverse(head2) 34 35 carry = 0 36 37 total_head = total_tail = ListNode(None) 38 39 while rev1 or rev2: 40 total = carry 41 42 if rev1: 43 total += rev1.value 44 rev1 = rev1.next 45 46 if rev2: 47 total += rev2.value 48 rev2 = rev2.next 49 50 total_tail.next = ListNode(total % 10) 51 carry = total // 10 52 53 total_tail = total_tail.next 54 55 if carry: 56 total_tail.next = ListNode(carry) 57 58 return self.reverse(total_head.next) 59 60 61 def reverse(self, head): 62 if not head: 63 return head 64 65 rev = None 66 67 while head: 68 rev, rev.next, head = head, rev, head.next 69 70 return rev 71 72 one = ListNode(1) 73 two = ListNode(2) 74 three = ListNode(3) 75 four = ListNode(4) 76 five = ListNode(5) 77 six = ListNode(6) 78 seven = ListNode(7) 79 80 one.next = two 81 two.next = three 82 three.next = four 83 84 five.next = six 85 six.next = seven 86 87 print(one) 88 print(five) 89 90 solution = Solution2() 91 print(solution.add(one, five)) 92
Clean Code: No Issues Detected
1 # 729
2 
3 from bisect import bisect
4 
5 class Node:
6     def __init__(self, start, end):
7         self.start = start
8         self.end = end
9         self.left = self.right = None
10 
11     def insert(self, node):
12         if node.start >= self.end:
13             if not self.right:
14                 self.right = node
15                 return True
16 
17             return self.right.insert(node)
18 
19         if node.end <= self.start:
20             if not self.left:
21                 self.left = node
22                 return True
23 
24             return self.left.insert(node)
25 
26         return False
27 
28 # time: O(N * log N) in average cases. time needed to insert N events in the tree. Worst case: O(N**2)
29 # space: O(N) for the tree structure
30 class MyCalendar:
31     def __init__(self):
32         self.root = None
33 
34     def book(self, start, end):
35         node = Node(start, end)
36 
37         if not self.root:
38             self.root = node
39             return True
40 
41         return self.root.insert(node)
42 
43 class MyCalendar2:
44     def __init__(self):
45         self.events = []
46 
47     def book(self, start, end):
48         if start >= end:
49             raise ValueError('Start should be smaller than End')
50 
51         if not self.events:
52             self.events.append((start, end))
53             return True
54 
55         start_list = list(map(lambda event: event[0], self.events))
56         index = bisect(start_list, start)
57 
58         if index == len(self.events) and self.events[-1][1] > start:
59             return False
60 
61         if index == 0 and self.events[0][0] < end:
62             return False
63 
64         if 0 < index < len(self.events):  # strictly inside the list: check both neighbours
65             prev, after = self.events[index - 1], self.events[index]
66 
67             if prev[1] > start or after[0] < end:
68                 return False
69 
70         self.events.insert(index, (start, end))
71         return True
72 
73     def print_events(self):
74         print(self.events)
75 
76 calendar = MyCalendar()
77 
78 print(calendar.book(10, 20))
79 print(calendar.book(15, 25))
80 print(calendar.book(20, 30))
81 print(calendar.book(30, 40))
82 
83 # calendar.print_events()
5 - refactor: too-few-public-methods 30 - refactor: too-few-public-methods
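The list-based calendar should agree with the tree-based one on the same bookings; the last request exercises the middle-of-list neighbour check:

calendar2 = MyCalendar2()
for start, end in [(10, 20), (15, 25), (20, 30), (30, 40), (25, 28)]:
    print(calendar2.book(start, end))  # True, False, True, True, False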
1 from utils.treeNode import TreeNode
2 from collections import defaultdict
3 
4 class Solution:
5     def get_paths_count(self, node, target):
6         if not node:
7             return 0
8         sum_mapping = defaultdict(int)
9         sum_mapping[0] = 1  # the empty prefix, so paths starting at the root are counted
10         return self.helper(node, 0, target, sum_mapping)
11 
12     def helper(self, node, curr_sum, target, sum_mapping):
13         if not node:
14             return 0
15 
16         curr_sum += node.value
17 
18         result = sum_mapping[curr_sum - target]  # earlier prefix sums that close a path of sum `target` here
19         sum_mapping[curr_sum] += 1  # register this path's running sum
20 
21         result += self.helper(node.left, curr_sum, target, sum_mapping)
22         result += self.helper(node.right, curr_sum, target, sum_mapping)
23 
24         sum_mapping[curr_sum] -= 1  # backtrack before returning to the parent
25         return result
26 
27 
28 node1 = TreeNode(10)
29 node2 = TreeNode(5)
30 node3 = TreeNode(-3)
31 node4 = TreeNode(3)
32 node5 = TreeNode(2)
33 node6 = TreeNode(6)
34 node7 = TreeNode(11)
35 node8 = TreeNode(3)
36 node9 = TreeNode(-2)
37 node10 = TreeNode(1)
38 
39 node1.left = node2
40 node1.right = node3
41 
42 node2.left = node4
43 node2.right = node5
44 
45 node4.left = node8
46 node4.right = node9
47 
48 node5.left = node10
49 
50 node3.right = node7
51 # node6.left = node7
52 
53 print(node1)
54 solution = Solution()
55 print(solution.get_paths_count(node1, 8))
Clean Code: No Issues Detected
1 from collections import defaultdict 2 3 class Solution: 4 WHITE, GRAY, BLACK = 0, 1, 2 5 6 def eventually_safe_nodes(self, graph): 7 """ 8 :type graph: List[List[int]] 9 :rtype: List[int] 10 """ 11 colors = defaultdict(int) 12 13 result_set = set() 14 15 for node in range(len(graph)): 16 self.dfs(node, graph, colors, result_set) 17 18 return sorted(list(result_set)) 19 20 def dfs(self, node, graph, colors, result_set): 21 if colors[node] != self.WHITE: 22 return colors[node] == self.BLACK 23 24 colors[node] = self.GRAY 25 26 for nbr in graph[node]: 27 if colors[nbr] == self.BLACK: 28 continue 29 30 if colors[nbr] == self.GRAY or not self.dfs(nbr, graph, colors, result_set): 31 return False 32 33 colors[node] = self.BLACK 34 result_set.add(node) 35 return True 36 37 def eventually_safe_nodes2(self, graph): 38 n = len(graph) 39 out_degree = [0] * n 40 in_nodes = defaultdict(list) 41 terminales = [] 42 43 for i in range(n): 44 out_degree[i] = len(graph[i]) 45 if out_degree[i] == 0: 46 terminales.append(i) 47 48 for j in graph[i]: 49 in_nodes[j].append(i) 50 51 for term in terminales: 52 for in_node in in_nodes[term]: 53 out_degree[in_node] -= 1 54 if out_degree[in_node] == 0: 55 terminales.append(in_node) 56 57 return sorted(terminales) 58 59 60 61 62 solution = Solution() 63 graph = [[1,2],[2,3],[5],[0],[5],[],[]] 64 65 print(solution.eventually_safe_nodes2(graph))
6 - warning: redefined-outer-name 20 - warning: redefined-outer-name 37 - warning: redefined-outer-name 55 - warning: modified-iterating-list
1 from utils.treeNode import TreeNode
2 
3 class Solution:
4     #binary search tree
5     def get_lca_bst(self, root, node1, node2):
6         if not node1 or not node2 or not root:
7             return None
8 
9         if root == node1 or root == node2:
10             return root
11 
12         if (root.value - node1.value) * (root.value - node2.value) < 0:
13             return root
14 
15         if root.value > node1.value:
16             return self.get_lca_bst(root.left, node1, node2)
17 
18         return self.get_lca_bst(root.right, node1, node2)
19 
20     #O(N) time and space
21     def get_lca(self, root, node1, node2):
22         if not root or root == node1 or root == node2:
23             return root
24 
25         left_lca = self.get_lca(root.left, node1, node2)
26         right_lca = self.get_lca(root.right, node1, node2)
27 
28         if left_lca and right_lca:
29             return root
30 
31         return left_lca or right_lca
1 - warning: unused-import
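A small BST check for both helpers above, reusing the TreeNode import (utils.treeNode, as in the neighbouring solutions):

root = TreeNode(6)
root.left, root.right = TreeNode(2), TreeNode(8)
root.left.left, root.left.right = TreeNode(0), TreeNode(4)

solution = Solution()
print(solution.get_lca_bst(root, root.left.left, root.left.right).value)  # 2
print(solution.get_lca(root, root.left.left, root.right).value)           # 6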
1 class RangeSum:
2     def __init__(self, nums):
3         self.sums = [0 for _ in range(len(nums) + 1)]  # sums[i] = sum of nums[0..i-1]
4         for i, num in enumerate(nums):
5             self.sums[i + 1] = num + self.sums[i]
6 
7     def get_range_sum(self, start, end):
8         return self.sums[end + 1] - self.sums[start]  # inclusive range [start, end]
9 
10 nums = [1, 2, 3, 4, 5, 6, 7]
11 range_sum = RangeSum(nums)
12 print(range_sum.get_range_sum(1, 3))  # 2 + 3 + 4 = 9
2 - warning: redefined-outer-name 1 - refactor: too-few-public-methods
1 import unittest 2 3 class Solution(object): 4 def three_sum(self, numbers): 5 """ 6 :type numbers: List[int] 7 :rtype : List[List[int]] 8 """ 9 result = [] 10 if not numbers: 11 return result 12 numbers.sort() 13 i = 0 14 while i < len(numbers) - 2: 15 left = i + 1 16 right = len(numbers) - 1 17 while left < right: 18 triple_sum = numbers[i] + numbers[left] + numbers[right] 19 20 if triple_sum == 0: 21 result.append([numbers[i], numbers[left], numbers[right]]) 22 # move left to the next possible value 23 left += 1 24 while left < right and numbers[left] == numbers[left - 1]: 25 left += 1 26 # move right to the next possible value 27 right -= 1 28 while left < right and numbers[right] == numbers[right + 1]: 29 right -= 1 30 elif triple_sum < 0: 31 # move left to the next possible value 32 left += 1 33 while left < right and numbers[left] == numbers[left - 1]: 34 left += 1 35 else: 36 # move right to the next possible value 37 right -= 1 38 while left < right and numbers[right - 1] == numbers[right]: 39 right -= 1 40 41 # move i to the next possible value 42 i += 1 43 while i < len(numbers) - 2 and numbers[i] == numbers[i - 1]: 44 i += 1 45 46 return result 47 48 class Test(unittest.TestCase): 49 test_data = [([-1, 0, 1, 2, -1, 4], [[-1, -1, 2], [-1, 0, 1]])] 50 51 def test_three_way(self): 52 solution = Solution() 53 for data in self.test_data: 54 self.assertEqual(solution.three_sum(data[0]), data[1]) 55 56 if __name__ == '__main__': 57 unittest.main()
3 - refactor: useless-object-inheritance 3 - refactor: too-few-public-methods
1 class Solution:
2     def get_max_words(self, words, zeroes_count, ones_count):
3         memo = [[0 for _ in range(ones_count + 1)] for _ in range(zeroes_count + 1)]
4 
5         for word in words:
6             zeroes = word.count('0')
7             ones = len(word) - zeroes
8 
9             # 0/1 knapsack: walk capacities downward so each word is counted at most once
10             for i in range(zeroes_count, zeroes - 1, -1):
11                 for j in range(ones_count, ones - 1, -1):
12                     memo[i][j] = max(memo[i][j], 1 + memo[i - zeroes][j - ones])
13 
14         return memo[-1][-1]
15 
16 words = ["10", "0001", "111001", "1", "0"]
17 solution = Solution()
18 print(solution.get_max_words(words, 5, 3))  # 4 ("10", "0001", "1", "0")
2 - warning: redefined-outer-name 6 - refactor: consider-using-generator 1 - refactor: too-few-public-methods
1 class Solution: 2 def get_min(self, nums): 3 if not nums: 4 return None 5 6 left, right = 0, len(nums) - 1 7 while left < right: 8 if nums[left] <= nums[right]: # not rotated 9 break 10 mid = (left + right) // 2 11 if nums[mid] < nums[left]: # min must be on the left of mid or mid 12 right = mid 13 else: # min must be on the right of mid 14 left = mid + 1 15 16 return nums[left] 17 18 solution = Solution() 19 nums = [7,0,1,2,3,4,5,6] 20 # nums = [4,5,6, 7, 8, 1, 2, 3] 21 print(solution.get_min(nums))
2 - warning: redefined-outer-name 1 - refactor: too-few-public-methods
1 # 495
2 
3 # time: O(n)
4 # space: O(1)
5 class Solution:
6     def find_poisoned_duration(self, timeSeries, duration):
7         result = 0
8         if not timeSeries:
9             return result
10 
11         timeSeries = timeSeries + [float('inf')]  # sentinel; avoids mutating the caller's list
12 
13         for i in range(1, len(timeSeries)):
14             result += min(timeSeries[i] - timeSeries[i - 1], duration)
15 
16         return result
17 
18 # time: O(width of window * number of attacks)
19 # space: O(1)
20 class Solution2:
21     def find_poisoned_duration(self, timeSeries, duration):
22         result = 0
23         if not timeSeries:
24             return result
25 
26         temp_poison = 0
27 
28         for i in range(timeSeries[0], timeSeries[-1] + 1):
29             if i in timeSeries:
30                 temp_poison = duration
31 
32             if temp_poison:
33                 result += 1
34                 temp_poison -= 1
35 
36         result += temp_poison
37 
38         return result
39 
40 solution = Solution()
41 print(solution.find_poisoned_duration([1], 2))  # 2
42 print(solution.find_poisoned_duration([1, 2], 2))  # 3 (the two windows overlap by one second)
5 - refactor: too-few-public-methods 20 - refactor: too-few-public-methods
1 # This bidirectional technique is useful when we want to get cumulative information
2 # from the left and right of each index
3 class Solution:
4     def get_product_array(self, nums):
5         if not nums:
6             return None
7         result = [1]
8         for i in range(1, len(nums)):
9             result.append(result[-1] * nums[i - 1])  # product of everything left of i
10 
11         product_right = 1
12         for i in range(len(nums) - 1, -1, -1):
13             result[i] *= product_right  # fold in the product of everything right of i
14             product_right *= nums[i]
15 
16         return result
17 
18 solution = Solution()
19 nums = [2, 3, 4, 5]
20 print(solution.get_product_array(nums))  # expected: [60, 40, 30, 24]
4 - warning: redefined-outer-name 3 - refactor: too-few-public-methods 20 - warning: pointless-statement 21 - warning: pointless-statement
1 class Solution: 2 def is_valid_bst(self, node): 3 return self.helper(node, float('-inf'), float('inf')) 4 5 def helper(self, node, min_value, max_value): 6 if not node: 7 return True 8 9 if node.value < min_value or node.value > max_value: 10 return False 11 12 return ( 13 self.helper(node.left, min_value, node.value) 14 and self.helper(node.right, node.value, max_value) 15 ) 16 17 def is_valid_bst2(self, node): 18 self.is_valid = True 19 self.prev_value = float('-inf') 20 self.in_order(node) 21 return self.is_valid 22 23 def in_order(self, node): 24 if not node or not self.is_valid: 25 return 26 27 self.in_order(node.left) 28 29 if node.value <= self.prev_value: 30 self.is_valid = False 31 return 32 self.prev_value = node.value 33 34 self.in_order(node.right)
18 - warning: attribute-defined-outside-init 30 - warning: attribute-defined-outside-init 19 - warning: attribute-defined-outside-init 32 - warning: attribute-defined-outside-init
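A quick check of both traversal strategies above, building the tree with the same TreeNode helper the neighbouring solutions use (an assumption; any object with value/left/right attributes works):

from utils.treeNode import TreeNode

root = TreeNode(5)
root.left, root.right = TreeNode(1), TreeNode(4)
root.right.left, root.right.right = TreeNode(3), TreeNode(6)

solution = Solution()
print(solution.is_valid_bst(root))   # False: 4 sits in the right subtree of 5
print(solution.is_valid_bst2(root))  # False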
1 import unittest
2 
3 # Time: O(N), Space: O(1)
4 
5 def is_palindrome(s):
6     if not s: return True
7     head = 0
8     tail = len(s) - 1
9 
10     while head < tail:
11         # only compare digits and alphabetical values
12         if not s[head].isdigit() and not s[head].isalpha():
13             head += 1
14         elif not s[tail].isdigit() and not s[tail].isalpha():
15             tail -= 1
16         else:
17             if s[head].lower() != s[tail].lower():
18                 return False
19 
20             head += 1
21             tail -= 1
22 
23     return True
24 
25 class Test(unittest.TestCase):
26 
27     data = [('A man, A plan, a canal: Panama', True), ('abab', False)]
28 
29     def test_is_palindrome(self):
30         for test_data in self.data:
31             actual = is_palindrome(test_data[0])
32             self.assertIs(actual, test_data[1])
33 
34 if __name__ == '__main__':
35     unittest.main()
5 - warning: redefined-builtin