code | label
---|---|
1 def func(a, b, c=3, d=4):
2 print(a, b, c, d)
3 func(1, *(5,6))
| 2 - warning: bad-indentation
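Note: the bad-indentation warning comes from the single-space body indent; a cleaned-up version of the sample (a sketch, behavior unchanged):

def func(a, b, c=3, d=4):
    print(a, b, c, d)

func(1, *(5, 6))  # the tuple unpacks into b=5, c=6, so this prints: 1 5 6 4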
|
1 S = 'spam'
2 offset=0
3 for item in S:
4 print(item, 'appears at offset', offset)
5 offset+=1
6 print('***************************')
7 for (offset, item) in enumerate(S):
8 print(item+'appears at offset %d' % offset)
9 for item in enumerate(S):
10 print(item)
11 E = enumerate(S)
12 print(next(E))
13 #while E:
14 # next(E)
15 print([c * i for (i, c) in enumerate(S)])
| 4 - warning: bad-indentation
5 - warning: bad-indentation
8 - warning: bad-indentation
10 - warning: bad-indentation
|
1 while True:
2 reply=raw_input('Enter text:')
3 if reply == 'stop':
4 break
5 print(int(reply)**2)
6 print('Bye')
| 2 - warning: bad-indentation
3 - warning: bad-indentation
4 - warning: bad-indentation
5 - warning: bad-indentation
2 - error: undefined-variable
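Note: the undefined-variable on line 2 is raw_input, which was removed in Python 3; a minimal Python 3 port with consistent four-space indentation:

while True:
    reply = input('Enter text:')  # input() replaces raw_input()
    if reply == 'stop':
        break
    print(int(reply) ** 2)
print('Bye')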
|
1 from vpython import *
2 canvas(width=1500, height=720, center=vector(00,70,0), background=color.white, range=150)
3 sphere( pos=vector(0,0,0), radius=2, color=color.red) # Origin of the orthonormal coordinate system
4
5 for i in range(-150,150,10): # Drawing floor
6 for j in range(-150,150,10): #
7 sphere( pos=vector(i,0,j), radius=0.3, color=color.black) #
8
9 H =curve() # Diamond diagonal
10 CL=curve() # Diamond left top side
11 CR=curve() # Diamond right top side
12 AL=curve() # Diamond left bottom side
13 AR=curve() # Diamond right bottom side
14
15 def IK(x,y,z):
16
17 global H
18 global CL
19 global CR
20 global AL
21 global AR
22
23 H.clear()
24 CL.clear()
25 CR.clear()
26 AL.clear()
27 AR.clear()
28
29 d=Ay-y # X Y diagonal calculations
30 e=x #
31 h=sqrt((e*e)+(d*d)) #
32 E=acos(d/h) #
33 if(e<0): #
34 E=(-E) #
35 X=sin(E)*h #
36 Y=cos(E)*h #
37
38 G=acos(h/(2*c)) # diamond sides calculations
39 Clx=sin(E-G)*c #
40 Cly=cos(E-G)*c #
41 Crx=sin(E+G)*c #
42 Cry=cos(E+G)*c #
43
44 dz=h # Z diagonal calculations
45 ez=z #
46 hz=sqrt((ez*ez)+(dz*dz)) #
47 D=acos(dz/hz) #
48 if(ez<0): #
49 D=(-D) #
50 Z=sin(D)*hz #
51
52 H =curve( A.pos, vector(X,Ay-Y,Z), radius=0.1, color=color.magenta, retain=30 ) # diagonal line
53 CL=curve( A.pos, vector(Clx,Ay-Cly,Z/2), radius=2, color=color.yellow ) # top left side line of the diamond
54 CR=curve( A.pos, vector(Crx,Ay-Cry,Z/2), radius=2, color=color.green ) # top right side line of the diamond
55 AL=curve( vector(X,Ay-Y,Z), vector(Clx,Ay-Cly,Z/2), radius=2, color=color.yellow ) # bottom left side line of the diamond
56 AR=curve( vector(X,Ay-Y,Z), vector(Crx,Ay-Cry,Z/2), radius=2, color=color.green ) # bottom right side line of the diamond
57
58 ################ Start Zigzag ################
59 c=112 # length of diamond side
60 Ay=200 # coordinates of the main axis
61 Ax=0 #
62 Az=0 #
63 A=sphere( pos=vector(Ax,Ay,0), radius=4, color=color.red) # main paw axis
64
65 Pz=[-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-70,-60,-50,-40,-30,-20,-10, 0, 10, 20, 30, 40, 50, 60, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70]
66 Py=[ 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10]
67 Px=[-70,-60,-50,-40,-30,-20,-10, 0, 10, 20, 30, 40, 50, 60, 70, 60, 50, 40, 30, 20, 10, 0,-10,-20,-30,-40,-50,-60,-70,-60,-50,-40,-30,-20,-10, 0, 10, 20, 30, 40, 50, 60, 70]
68
69 for i in range(0, 43,1):
70 rate(20)
71 sphere( pos=vector(Px[i],Py[i],Pz[i]), radius=1.5, color=color.red) # Path drawing with ball targets
72
73 while True:
74 for i in range(0, 43, 1):
75 rate(20)
76 IK(Px[i],Py[i],Pz[i])
77 for i in range(42,-1, -1):
78 rate(20)
79 IK(Px[i],Py[i],Pz[i])
80 ################ End Zigzag ################
| 1 - warning: wildcard-import
2 - error: undefined-variable
2 - error: undefined-variable
2 - error: undefined-variable
3 - error: undefined-variable
3 - error: undefined-variable
3 - error: undefined-variable
7 - error: undefined-variable
7 - error: undefined-variable
7 - error: undefined-variable
9 - error: undefined-variable
10 - error: undefined-variable
11 - error: undefined-variable
12 - error: undefined-variable
13 - error: undefined-variable
15 - refactor: too-many-locals
17 - warning: global-statement
18 - warning: global-statement
19 - warning: global-statement
20 - warning: global-statement
21 - warning: global-statement
31 - error: undefined-variable
32 - error: undefined-variable
35 - error: undefined-variable
36 - error: undefined-variable
38 - error: undefined-variable
39 - error: undefined-variable
40 - error: undefined-variable
41 - error: undefined-variable
42 - error: undefined-variable
46 - error: undefined-variable
47 - error: undefined-variable
50 - error: undefined-variable
52 - error: undefined-variable
52 - error: undefined-variable
52 - error: undefined-variable
53 - error: undefined-variable
53 - error: undefined-variable
53 - error: undefined-variable
54 - error: undefined-variable
54 - error: undefined-variable
54 - error: undefined-variable
55 - error: undefined-variable
55 - error: undefined-variable
55 - error: undefined-variable
55 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
63 - error: undefined-variable
63 - error: undefined-variable
63 - error: undefined-variable
70 - error: undefined-variable
71 - error: undefined-variable
71 - error: undefined-variable
71 - error: undefined-variable
75 - error: undefined-variable
78 - error: undefined-variable
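Note: nearly all of the undefined-variable errors above are fallout from the wildcard import: pylint cannot resolve vpython's names, and sqrt/sin/cos/acos are never imported explicitly at all. A hedged fix, assuming the standard vpython and math modules (the same applies to the near-identical Screw sample in the next row):

from vpython import canvas, sphere, curve, vector, color, rate
from math import sqrt, sin, cos, acos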
|
1 from vpython import *
2 canvas(width=1500, height=720, center=vector(00,70,0), background=color.white, range=150)
3 sphere( pos=vector(0,0,0), radius=2, color=color.red) # Origin of the orthonormal coordinate system
4
5 for i in range(-150,150,10): # Drawing floor
6 for j in range(-150,150,10): #
7 sphere( pos=vector(i,0,j), radius=0.3, color=color.black) #
8
9 H =curve() # Diamond diagonal
10 CL=curve() # Diamond left top side
11 CR=curve() # Diamond right top side
12 AL=curve() # Diamond left bottom side
13 AR=curve() # Diamond right bottom side
14
15 def IK(x,y,z):
16
17 global H
18 global CL
19 global CR
20 global AL
21 global AR
22
23 H.clear()
24 CL.clear()
25 CR.clear()
26 AL.clear()
27 AR.clear()
28
29 d=Ay-y # X Y diagonal calculations
30 e=x #
31 h=sqrt((e*e)+(d*d)) #
32 E=acos(d/h) #
33 if(e<0): #
34 E=(-E) #
35 X=sin(E)*h #
36 Y=cos(E)*h #
37
38 G=acos(h/(2*c)) # diamond sides calculations
39 Clx=sin(E-G)*c #
40 Cly=cos(E-G)*c #
41 Crx=sin(E+G)*c #
42 Cry=cos(E+G)*c #
43
44 dz=h # Z diagonal calculations
45 ez=z #
46 hz=sqrt((ez*ez)+(dz*dz)) #
47 D=acos(dz/hz) #
48 if(ez<0): #
49 D=(-D) #
50 Z=sin(D)*hz #
51
52 H =curve( A.pos, vector(X,Ay-Y,Z), radius=0.1, color=color.magenta, retain=30 ) # diagonal line
53 CL=curve( A.pos, vector(Clx,Ay-Cly,Z/2), radius=2, color=color.yellow ) # top left side line of the diamond
54 CR=curve( A.pos, vector(Crx,Ay-Cry,Z/2), radius=2, color=color.green ) # top right side line of the diamond
55 AL=curve( vector(X,Ay-Y,Z), vector(Clx,Ay-Cly,Z/2), radius=2, color=color.yellow ) # bottom left side line of the diamond
56 AR=curve( vector(X,Ay-Y,Z), vector(Crx,Ay-Cry,Z/2), radius=2, color=color.green ) # bottom right side line of the diamond
57
58 ################ Start Screw ################
59 c=112 # length of diamond side
60 Ay=190 # coordinates of the main axis
61 Ax=0 #
62 Az=0 #
63 A=sphere( pos=vector(Ax,Ay,0), radius=4, color=color.red) # main paw axis
64
65 r=120
66 for i in range(0,1080,3):
67 rate(80)
68 sphere( pos=vector(sin(i/57.296)*r, i*0.06, cos(i/57.296)*r ), radius=1, color=color.red)
69 while True:
70 for i in range(0,1080,3):
71 rate(30)
72 IK(sin(i/57.296)*r, i*0.06, cos(i/57.296)*r )
73 for i in range(1080,0,-3):
74 rate(30)
75 IK(sin(i/57.296)*r, i*0.06, cos(i/57.296)*r )
76 ################ End Screw ################
| 1 - warning: wildcard-import
2 - error: undefined-variable
2 - error: undefined-variable
2 - error: undefined-variable
3 - error: undefined-variable
3 - error: undefined-variable
3 - error: undefined-variable
7 - error: undefined-variable
7 - error: undefined-variable
7 - error: undefined-variable
9 - error: undefined-variable
10 - error: undefined-variable
11 - error: undefined-variable
12 - error: undefined-variable
13 - error: undefined-variable
15 - refactor: too-many-locals
17 - warning: global-statement
18 - warning: global-statement
19 - warning: global-statement
20 - warning: global-statement
21 - warning: global-statement
31 - error: undefined-variable
32 - error: undefined-variable
35 - error: undefined-variable
36 - error: undefined-variable
38 - error: undefined-variable
39 - error: undefined-variable
40 - error: undefined-variable
41 - error: undefined-variable
42 - error: undefined-variable
46 - error: undefined-variable
47 - error: undefined-variable
50 - error: undefined-variable
52 - error: undefined-variable
52 - error: undefined-variable
52 - error: undefined-variable
53 - error: undefined-variable
53 - error: undefined-variable
53 - error: undefined-variable
54 - error: undefined-variable
54 - error: undefined-variable
54 - error: undefined-variable
55 - error: undefined-variable
55 - error: undefined-variable
55 - error: undefined-variable
55 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
63 - error: undefined-variable
63 - error: undefined-variable
63 - error: undefined-variable
67 - error: undefined-variable
68 - error: undefined-variable
68 - error: undefined-variable
68 - error: undefined-variable
68 - error: undefined-variable
68 - error: undefined-variable
71 - error: undefined-variable
72 - error: undefined-variable
72 - error: undefined-variable
74 - error: undefined-variable
75 - error: undefined-variable
75 - error: undefined-variable
|
1 #!/usr/bin/python
2
3 import sys
4 sys.path.append('./limelightCDN')
5 import limelightCDN
6 import ConfigParser
7 import os
8 import urllib2
9
10 profile='default'
11
12 urL = "https://api.lldns.net/cfapi/v1/svcinst/delivery/manual/shortname/shutterfly"
13 query = ""
14
15 def read_conf(profile):
16 config = ConfigParser.RawConfigParser()
17 config.read([os.path.expanduser('~/.llnw/credentials')])
18 username = config.get(profile, 'username')
19 apikey = config.get(profile, 'apikey')
20 return username,apikey
21
22 userName,apiKey = read_conf(profile)
23
24 #make request
25 usageReport = limelightCDN.Auth(apiKey)
26 response = usageReport.GET(urL,userName,queryParameters=query)
27 print response.read()
| 27 - error: syntax-error
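Note: the syntax error is the Python 2 print statement on line 27, and ConfigParser/urllib2 are Python 2-only modules. A sketch of the Python 3 equivalent of the config-reading part (the limelightCDN API itself is left untouched):

import os
import configparser  # Python 3 rename of ConfigParser

def read_conf(profile):
    config = configparser.RawConfigParser()
    config.read([os.path.expanduser('~/.llnw/credentials')])
    return config.get(profile, 'username'), config.get(profile, 'apikey')

# and the final line becomes a function call: print(response.read())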
|
1 #!/usr/bin/python
2 import hashlib
3 import hmac
4 import time
5 import os
6 import urllib
7 import urllib2
8
9 try:
10 import simplejson as json
11 except ImportError:
12 import json
13
14 class Auth:
15 def __init__(self,apiKey):
16 self.apiKey = apiKey
17 return None
18 def hmac(
19 self,
20 url,
21 httpMethod="GET",
22 queryParameters=None,
23 postData=None):
24 timestamp = str(int(round(time.time()*1000)))
25 datastring = httpMethod + url
26 if queryParameters != None :
27 datastring += queryParameters
28 datastring += timestamp
29 if postData != None :
30 datastring += postData
31 self.postData = postData
32 self.token = hmac.new(self.apiKey.decode('hex'), msg=datastring,digestmod=hashlib.sha256).hexdigest()
33 #return token,timestamp
34 return self.token,timestamp
35 #built-in GET request for REST-API
36 def GET(
37 self,
38 url,
39 username,
40 httpMethod="GET",
41 queryParameters=None,
42 postData=None):
43
44 token,timestamp = self.hmac(url,httpMethod,queryParameters,postData)
45 if queryParameters != None :
46 url = url + "?" + queryParameters
47 if postData != None :
48 req = urllib2.Request(url, postData)
49 else:
50 req = urllib2.Request(url)
51 req.add_header('Content-Type','application/json')
52 req.add_header('Accept','application/json')
53 req.add_header('X-LLNW-Security-Principal', username)
54 req.add_header('X-LLNW-Security-Timestamp', timestamp)
55 req.add_header('X-LLNW-Security-Token', token)
56 response = urllib2.urlopen(req)
57 return response
| 16 - error: syntax-error
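Note: this module targets Python 2 (urllib2, str.decode('hex')), which is presumably why pylint, parsing it as Python 3, reports a syntax error. The token computation ports roughly as below; make_token is a hypothetical standalone helper, not the library's official API:

import hashlib
import hmac
import time

def make_token(api_key_hex, url, http_method='GET', query=None, post_data=None):
    # mirrors Auth.hmac above; bytes.fromhex replaces str.decode('hex'),
    # and the HMAC message must be bytes in Python 3
    timestamp = str(int(round(time.time() * 1000)))
    datastring = http_method + url + (query or '') + timestamp + (post_data or '')
    token = hmac.new(bytes.fromhex(api_key_hex), msg=datastring.encode(),
                     digestmod=hashlib.sha256).hexdigest()
    return token, timestamp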
|
1 import os, glob, sys, subprocess, zipfile, shutil, time
2
3 #read MSBUILD_PATH, OUT_DIR and ANDROID_* variables for signing APK files from external file 'build-all.cfg' (not checked into version control)
4 MSBUILD_PATH = 'C:/Program Files (x86)/MSBuild/12.0/Bin/MSBuild.exe'
5 OUT_DIR = 'Builds'
6 WEB_GZ = False
7 exec (file('build-all.cfg').read() if os.path.exists('build-all.cfg') else '')
8 WEB_GZ = ('.gz' if WEB_GZ else '')
9
10 #check if directories for unused assets already exist, abort if so
11 assert not os.path.exists('Data-Unused'), 'Temporary asset directory "' + 'Data-Unused' + '" still exists, please check (crashed when executed last time?)'
12
13 #build list of assets with path names in Data and in Data-Unused
14 assets = []
15 for root, dirs, filenames in os.walk('Data'):
16 for filename in filenames:
17 assets += [[root.replace('\\','/') + '/' + filename,root.replace('Data','Data-Unused',1).replace('\\','/') + '/' + filename]]
18
19 # platform specific setup
20 zl_dir = os.path.realpath(__file__+'/../../ZillaLib').replace('\\', '/')
21 if sys.platform == 'win32': os.environ['PATH'] += os.pathsep+zl_dir.replace('/', os.sep)+os.sep+'Tools'
22 linux_cpu_type = 'x86_64' if sys.maxsize > 2**32 else 'x86_32'
23
24 #options
25 is_rebuild = 'rebuild' in sys.argv
26 select_targets = [k for k in sys.argv if k in ['wasm','emscripten','nacl','android','win32','win64','linux','osx']]
27 if select_targets == []: select_targets = ['wasm','android','win32','win64','linux','osx']
28
29 #create directories for unused assets while building samples that don't need them, and at first move all assets over
30 for asset in assets:
31 if not os.path.exists(os.path.dirname(asset[1])): os.makedirs(os.path.dirname(asset[1]))
32 os.rename(asset[0], asset[1])
33
34 #create output dir
35 if not os.path.exists(OUT_DIR): os.makedirs(OUT_DIR)
36
37 #loop through all samples
38 BuildLastRun = 0
39 for num in range(1, 99):
40 try:
41 snum = str(num).zfill(2);
42 inl = (glob.glob(snum + "-*") or [''])[0]
43 if not inl: continue
44 inlcode = file(inl).read()
45 oneasset = ''
46 print '---------------------------------------------------------------------------------------------------------------------------------------------------------------------'
47 print '[ASSETS] Building Sample',num,'("' + inl + '"):'
48 for asset in assets:
49 if (asset[0] in inlcode):
50 os.rename(asset[1], asset[0])
51 print ' Used Asset:',asset[0]
52 oneasset = asset[0]
53 if oneasset: os.utime(oneasset, None) #touch asset file so assets get rebuilt
54
55 while BuildLastRun >= int(time.time()):pass #must be at least the next second since last build ended, otherwise make can get confused
56 def buildheader(typ):
57 print '---------------------------------------------------------------------------------------------------------------------------------------------------------------------'
58 print '[' + typ + '] Building Sample',num,'("' + inl + '"):'
59 def buildfooter():
60 print '---------------------------------------------------------------------------------------------------------------------------------------------------------------------'
61 print ''
62 sys.stdout.flush()
63 def building(pargs):
64 print ' **** Executing:',pargs,'...'
65 p = subprocess.Popen(pargs, bufsize=1, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
66 while True:
67 l = p.stdout.readline()
68 if not l: break
69 if not l.strip(): continue
70 sys.stderr.write(' ' + l.rstrip()[0:180] + "\n")
71 sys.stderr.flush()
72 pret = p.wait()
73 assert pret == 0, ' BUILD RETURNED ERROR, ABORTING'
74 global BuildLastRun
75 BuildLastRun = int(time.time())
76 def buildcopy(src, trg):
77 print ' **** Copying',src,'to',OUT_DIR+'/'+trg,'...'
78 shutil.copy2(src, OUT_DIR+'/'+trg)
79 def buildzip(trgzip, src, trg):
80 print ' **** Zipping',src,'into',OUT_DIR+'/'+trgzip,'as',trg,'...'
81 z = zipfile.ZipFile(OUT_DIR+'/'+trgzip,'w',zipfile.ZIP_DEFLATED);
82 z.write(src, trg);[z.write(r+os.sep+f, r.replace(src, trg, 1)+os.sep+f) for r,d,fs in os.walk(src) for f in fs]
83 z.close()
84 def buildcheck(name, trg):
85 if select_targets and name not in select_targets: return False
86 return is_rebuild or not os.path.exists(OUT_DIR+'/'+trg) or os.path.getmtime(OUT_DIR+'/'+trg) < os.path.getmtime(inl)
87
88 if sys.platform == 'win32':
89 if buildcheck('wasm', 'ZillaLibSample-' + snum + '.js'+WEB_GZ):
90 buildheader('WEBASSEMBLY')
91 building(['make', '-j', '4', 'wasm-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num), 'W=ZillaLibSampleMain.cpp' + (' ' + oneasset if oneasset else '')])
92 buildcopy('Release-wasm/ZillaLibSamples.js'+WEB_GZ, 'ZillaLibSample-' + snum + '.js'+WEB_GZ)
93 buildfooter()
94
95 if buildcheck('emscripten', 'ZillaLibSample-' + snum + '.js'+WEB_GZ):
96 buildheader('EMSCRIPTEN')
97 building(['make', '-j', '4', 'emscripten-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num), 'W=ZillaLibSampleMain.cpp' + (' ' + oneasset if oneasset else '')])
98 buildcopy('Release-emscripten/ZillaLibSamples' + ('_WithData' if oneasset else '') + '.js'+WEB_GZ, 'ZillaLibSample-' + snum + '.js'+WEB_GZ)
99 buildfooter()
100
101 if buildcheck('nacl', 'ZillaLibSample-' + snum + '.pexe'+WEB_GZ):
102 buildheader('NACL')
103 building(['make', '-j', '4', 'nacl-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num), 'W=ZillaLibSampleMain.cpp' + (' '+oneasset if oneasset else '')])
104 buildcopy('Release-nacl/ZillaLibSamples' + ('_WithData' if oneasset else '') + '.pexe'+WEB_GZ, 'ZillaLibSample-' + snum + '.pexe'+WEB_GZ)
105 buildfooter()
106
107 if buildcheck('android', 'ZillaLibSample-' + snum + '.apk'):
108 buildheader('ANDROID')
109 building(['make', '-j', '4', 'android-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num), 'W=ZillaLibSampleMain.cpp'])
110 building(['make', 'android-sign', 'SIGN_OUTAPK='+OUT_DIR+'/ZillaLibSample-' + snum + '.apk', 'SIGN_KEYSTORE='+ANDROID_SIGN_KEYSTORE, 'SIGN_STOREPASS='+ANDROID_SIGN_STOREPASS, 'SIGN_KEYALIAS='+ANDROID_SIGN_KEYALIAS, 'SIGN_KEYPASS='+ANDROID_SIGN_KEYPASS])
111 buildfooter()
112
113 if buildcheck('win32', 'ZillaLibSample-' + snum + '_Win32.zip'):
114 buildheader('WIN32')
115 if os.path.exists('Release-vs2013\ZillaLibSampleMain.obj'): os.remove('Release-vs2013\ZillaLibSampleMain.obj')
116 building('"'+MSBUILD_PATH+'" /p:Configuration=Release;Platform=Win32;CmdLinePreprocessorDefinitions="ZILLALIBSAMPLES_NUMBER=' + str(num) + (';ZILLALIBSAMPLES_HASDATA"' if oneasset else '";SkipDataAssets=1') + ' ZillaLibSamples-vs.vcxproj')
117 buildzip('ZillaLibSample-' + snum + '_Win32.zip', 'Release-vs2013/ZillaLibSamples' + ('_WithData' if oneasset else '') + '.exe', 'ZillaLibSamples-' + snum + '.exe')
118 buildfooter()
119
120 if buildcheck('win64', 'ZillaLibSample-' + snum + '_Win64.zip'):
121 buildheader('WIN64')
122 if os.path.exists('Release-vs2013x64\ZillaLibSampleMain.obj'): os.remove('Release-vs2013x64\ZillaLibSampleMain.obj')
123 building('"'+MSBUILD_PATH+'" /p:Configuration=Release;Platform=x64;CmdLinePreprocessorDefinitions="ZILLALIBSAMPLES_NUMBER=' + str(num) + (';ZILLALIBSAMPLES_HASDATA"' if oneasset else '";SkipDataAssets=1') + ' ZillaLibSamples-vs.vcxproj')
124 buildzip('ZillaLibSample-' + snum + '_Win64.zip', 'Release-vs2013x64/ZillaLibSamples' + ('_WithData' if oneasset else '') + '.exe', 'ZillaLibSamples-' + snum + '.exe')
125 buildfooter()
126
127 if sys.platform == 'linux2':
128 if buildcheck('linux', 'ZillaLibSample-' + snum + '_linux_' + linux_cpu_type + '.zip'):
129 buildheader('LINUX')
130 building(['make', '-j', '4', 'linux-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num) + (' ZILLALIBSAMPLES_HASDATA' if oneasset else ''), 'W=ZillaLibSampleMain.cpp' + (' ' + oneasset if oneasset else '')])
131 buildzip('ZillaLibSample-' + snum + '_linux_' + linux_cpu_type + '.zip', 'Release-linux/ZillaLibSamples_' + linux_cpu_type + ('_WithData' if oneasset else ''), 'ZillaLibSample-' + snum)
132 buildfooter()
133
134 if sys.platform == 'darwin':
135 if buildcheck('osx', 'ZillaLibSample-' + snum + '_osx.zip'):
136 buildheader('OSX')
137 building(['make', '-j', '4', 'osx-release', 'D=ZILLALIBSAMPLES_NUMBER=' + str(num) + (' ZILLALIBSAMPLES_HASDATA' if oneasset else '')])
138 buildzip('ZillaLibSample-' + snum + '_osx.zip', 'ZillaLibSamples-OSX.xcodeproj/Release/ZillaLibSamples.app', 'ZillaLibSample-' + snum + '.app')
139 buildfooter()
140
141 except: import traceback; traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]); break;
142 finally:
143 #move all assets back to unused for building the next sample
144 for asset in assets:
145 if os.path.exists(asset[0]): os.rename(asset[0], asset[1])
146
147 #removing temporary directories
148 for asset in assets:
149 os.rename(asset[1], asset[0])
150 try: os.rmdir(os.path.dirname(asset[1]))
151 except: pass
| 46 - error: syntax-error
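Note: the syntax error is the first Python 2 print statement (line 46); the script also relies on the removed file() builtin, including for its exec-based config loading. Under Python 3 those spots would look roughly like this (a sketch; inl and num are the script's own variables):

import os

if os.path.exists('build-all.cfg'):
    with open('build-all.cfg') as cfg:  # file() no longer exists in Python 3
        exec(cfg.read())

# print is a function: print('[ASSETS] Building Sample', num, '("' + inl + '"):')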
|
1 for i in[501,24,25,77,388,22,0,324,297,376,296]:print format(i,'09b')
| 1 - error: syntax-error
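Note: a Python 2 print statement again; in Python 3:

for i in [501, 24, 25, 77, 388, 22, 0, 324, 297, 376, 296]:
    print(format(i, '09b'))

The raw_input/print one-liners in the following rows fail the same way and need input() and print(...) respectively.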
|
1 while 1:
2 s=raw_input()
3 if s:print s
| 3 - error: syntax-error
|
1 from itertools import permutations as p
2 for i in p(raw_input()):print''.join(i)
| 2 - error: syntax-error
|
1 i=99;s=', %s.\n'
2 f=lambda i:'%d shinichiro%s of hamaji on the wall'%(i,'es'[:i*2-2])
3 while i:print f(i)+s%f(i)[:-12]+{1:'Go to the store and buy some more'+s%f(99)}.get(i,'Take one down and pass it around'+s%f(i-1));i-=1
| 3 - error: syntax-error
|
1 while 1:print raw_input()
| 1 - error: syntax-error
|
1 import argparse
2 import os
3 import sys
4 import json
5 import copy
6 #import ConfigParser
7 import pandas as pd
8 import time
9 import csv
10 import glob
11 import shutil
12 import re
13 #import path
14 from collections import namedtuple
15
16
17 def read_metrics_file(metrics):
18
19 if (len(metrics) == 1): #and path.exists(metrics[0])):
20 metrics_file= metrics[0]
21 with open(metrics_file, 'r') as f:
22 metrics= f.readline().split()
23 print(metrics)
24 f.close()
25 return metrics
26
27 else:
28 print("Error: Too many arguments or path does not exist")
29
30 def read_cmdline_metrics(metrics):
31 return metrics
32
33
34 parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
35 parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')
36 parser.add_argument('metrics', type=str, nargs='*', help='list of metrics or file for metrics')
37 parser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')
38
39
40 args= parser.parse_args()
41 file_path = args.file_path
42 metrics = args.read_metrics(args.metrics)
43
44
45
46 for i in range(0, len(metrics)):
47 if os.path.exists('{}/{}'.format(file_path, metrics[i])):
48 shutil.rmtree('{}/{}'.format(file_path, metrics[i]))
49 if not os.path.exists('{}/{}'.format(file_path, metrics[i])):
50 os.makedirs('{}/{}'.format(file_path, metrics[i]))
51
52
53 dirs= [i for i in os.listdir( file_path ) if i.endswith(".csv")]
54 dirs.sort()
55
56 used_count = []
57 for file_name in dirs:
58 with open(file_path + '/' + file_name) as csv_file:
59 data_frame = pd.read_csv(csv_file)
60 data_frame.head()
61
62
63 for i in range(0, len(metrics)):
64 contains_metric = data_frame['pCmdLine'].astype(str).str.contains(metrics[i], na=False, flags=re.IGNORECASE)
65 filtered = data_frame[contains_metric]
66 filtered.head()
67 if (len(filtered.index) > 1) :
68 filtered = filtered.loc[:, ~filtered.columns.str.contains('^Unnamed')]
69 filtered.to_csv('{}/{}/{}'.format(file_path, metrics[i], file_name))
70
71
72
73 for i in range(0, len(metrics)):
74 #path = "{}/{}".format(file_path, metrics[i])
75 path = file_path
76 all_files = glob.glob(path+ "/*.csv")
77 li = []
78 print(path)
79 for filtered_file in all_files:
80 df = pd.read_csv(filtered_file, index_col=None, header=0)
81 li.append(df)
82 print(filtered_file)
83
84 frame = pd.concat(li, axis=0, ignore_index=True)
85 frame = frame.sort_values(by='currentTime', ascending=True)
86 frame = frame.loc[:, ~frame.columns.str.contains('^Unnamed: 0')]
87 frame.drop(frame.columns[0], axis=1)
88 #frame= frame.groupby(['currentTime']).agg({
89 # 'filename':'first', 'pBlockIODelays':'sum','pChildrenKernelMode':'sum', 'pChildrenUserMode':'sum','pCmdLine':'first', 'pCpuTimeUserMode':'sum', 'pId':'sum', 'pName':'first', 'pNonvoluntaryContextSwitches':'sum', 'pNumThreads':'sum', 'pResidentSetSize':'sum','pVirtualMemoryBytes': 'sum', 'pVoluntaryContextSwitches':'sum'})
90
91 #frame = frame.groupby(['currentTime']).sum()
92
93 #frame = frame.diff(axis=1, periods=1)
94 frame.drop(frame.index[0])
95 frame['pCpuTime'] = frame['pCpuTimeUserMode'] + frame['pCpuTimeKernelMode']
96 #print frame
97 frame.to_csv('{}/{}/{}'.format(file_path, metrics[i], "agg_sum.csv"))
98
99
100
101
102
| 19 - warning: bad-indentation
20 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
24 - warning: bad-indentation
25 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
31 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
50 - warning: bad-indentation
58 - warning: bad-indentation
59 - warning: bad-indentation
60 - warning: bad-indentation
63 - warning: bad-indentation
64 - warning: bad-indentation
65 - warning: bad-indentation
66 - warning: bad-indentation
67 - warning: bad-indentation
68 - warning: bad-indentation
69 - warning: bad-indentation
75 - warning: bad-indentation
76 - warning: bad-indentation
77 - warning: bad-indentation
78 - warning: bad-indentation
79 - warning: bad-indentation
80 - warning: bad-indentation
81 - warning: bad-indentation
82 - warning: bad-indentation
84 - warning: bad-indentation
85 - warning: bad-indentation
86 - warning: bad-indentation
87 - warning: bad-indentation
94 - warning: bad-indentation
95 - warning: bad-indentation
97 - warning: bad-indentation
17 - warning: redefined-outer-name
19 - refactor: no-else-return
21 - warning: unspecified-encoding
17 - refactor: inconsistent-return-statements
30 - warning: redefined-outer-name
58 - warning: unspecified-encoding
3 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
8 - warning: unused-import
9 - warning: unused-import
14 - warning: unused-import
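Note: several of these warnings concentrate in read_metrics_file; a cleaned-up sketch that pins the file encoding, drops the redundant else and f.close() (the with block already closes the file), and returns a value on every path:

def read_metrics_file(metrics):
    if len(metrics) == 1:
        with open(metrics[0], 'r', encoding='utf-8') as f:
            return f.readline().split()
    print("Error: Too many arguments or path does not exist")
    return []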
|
1 import argparse
2 import os
3 import shutil
4 import sys
5 import json
6 import copy
7 import configparser
8 from collections import namedtuple
9
10 parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
11 parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')
12 parser.add_argument('delta_interval_time', action='store', help='stores time interval of when to take delta sample')
13 args= parser.parse_args()
14 file_path = args.file_path
15 if os.path.exists(file_path + '/delta_json'):
16 shutil.rmtree(file_path + '/delta_json')
17 if not os.path.exists(file_path + '/delta_json'):
18 os.makedirs(file_path + '/delta_json')
19
20 json_array = []
21 delta_name_array = []
22 dirs= sorted([i for i in os.listdir( file_path ) if i.endswith(".json")])
23 for file_name in dirs:
24 with open(file_path + '/' + file_name) as json_file:
25 print ('JSON FILES TANMAY:')
26 print(json_file)
27 try:
28 new_json_object = json.load(json_file)
29 json_array.append(new_json_object)
30 new_name= ((file_path+'/delta_json/'+file_name).split('.json')[0] + '_delta.json')
31 delta_name_array.append(new_name)
32
33 except Exception as e:
34 print ("{} invalid file".format(json_file))
35 pass
36 def file_subtraction(the_json_one, the_json_two):
37 json_three = copy.deepcopy(the_json_two)
38 if ('cCpuTime' in the_json_one.keys()):
39 json_three['cCpuTime']=the_json_two['cCpuTime']-the_json_one['cCpuTime']
40 if ('cCpuTimeKernelMode' in the_json_one.keys()):
41 json_three['cCpuTimeKernelMode']=the_json_two['cCpuTimeKernelMode']-the_json_one['cCpuTimeKernelMode']
42 if ('cCpuTimeUserMode' in the_json_one.keys()):
43 json_three['cCpuTimeUserMode']=the_json_two['cCpuTimeUserMode']-the_json_one['cCpuTimeUserMode']
44 if ('cDiskReadBytes' in the_json_one.keys()):
45 json_three['cDiskReadBytes']=the_json_two['cDiskReadBytes']-the_json_one['cDiskReadBytes']
46 if ('cDiskSectorIO' in the_json_one.keys()):
47 json_three['cDiskSectorIO']=the_json_two['cDiskSectorIO']-the_json_one['cDiskSectorIO']
48 if ('cDiskWriteBytes' in the_json_one.keys()):
49 json_three['cDiskWriteBytes']=the_json_two['cDiskWriteBytes']-the_json_one['cDiskWriteBytes']
50 if ('cNetworkBytesRecvd' in the_json_one.keys()):
51 json_three['cNetworkBytesRecvd']=the_json_two['cNetworkBytesRecvd']-the_json_one['cNetworkBytesRecvd']
52 if ('cNetworkBytesSent' in the_json_one.keys()):
53 json_three['cNetworkBytesSent']=the_json_two['cNetworkBytesSent']-the_json_one['cNetworkBytesSent']
54 if ('vCpuContextSwitches' in the_json_one.keys()):
55 json_three['vCpuContextSwitches']=the_json_two['vCpuContextSwitches']-the_json_one['vCpuContextSwitches']
56 if ('vCpuIdleTime' in the_json_one.keys()):
57 json_three['vCpuIdleTime']=the_json_two['vCpuIdleTime']-the_json_one['vCpuIdleTime']
58 if ('vCpuNice' in the_json_one.keys()):
59 json_three['vCpuNice']=the_json_two['vCpuNice']-the_json_one['vCpuNice']
60 if ('vCpuSteal' in the_json_one.keys()):
61 json_three['vCpuSteal']=the_json_two['vCpuSteal']-the_json_one['vCpuSteal']
62 if ('vCpuTime' in the_json_one.keys()):
63 json_three['vCpuTime']=the_json_two['vCpuTime']-the_json_one['vCpuTime']
64 if ('vCpuTimeIOWait' in the_json_one.keys()):
65 json_three['vCpuTimeIOWait']=the_json_two['vCpuTimeIOWait']-the_json_one['vCpuTimeIOWait']
66 if ('vCpuTimeKernelMode' in the_json_one.keys()):
67 json_three['vCpuTimeKernelMode']=the_json_two['vCpuTimeKernelMode']-the_json_one['vCpuTimeKernelMode']
68 if ('vCpuTimeSoftIntSrvc' in the_json_one.keys()):
69 json_three['vCpuTimeSoftIntSrvc']=the_json_two['vCpuTimeSoftIntSrvc']-the_json_one['vCpuTimeSoftIntSrvc']
70 if ('vCpuTimeUserMode' in the_json_one.keys()):
71 json_three['vCpuTimeUserMode']=the_json_two['vCpuTimeUserMode']-the_json_one['vCpuTimeUserMode']
72 if ('vDiskMergedReads' in the_json_one.keys()):
73 json_three['vDiskMergedReads']=the_json_two['vDiskMergedReads']-the_json_one['vDiskMergedReads']
74 if ('vDiskMergedWrites' in the_json_one.keys()):
75 json_three['vDiskMergedWrites']=the_json_two['vDiskMergedWrites']-the_json_one['vDiskMergedWrites']
76 if ('vDiskReadTime' in the_json_one.keys()):
77 json_three['vDiskReadTime']=the_json_two['vDiskReadTime']-the_json_one['vDiskReadTime']
78 if ('vDiskSectorWrites' in the_json_one.keys()):
79 json_three['vDiskSectorWrites']=the_json_two['vDiskSectorWrites']-the_json_one['vDiskSectorWrites']
80 if ('vDiskSuccessfulReads' in the_json_one.keys()):
81 json_three['vDiskSuccessfulReads']=the_json_two['vDiskSuccessfulReads']-the_json_one['vDiskSuccessfulReads']
82 if ('vDiskSuccessfulWrites' in the_json_one.keys()):
83 json_three['vDiskSuccessfulWrites']=the_json_two['vDiskSuccessfulWrites']-the_json_one['vDiskSuccessfulWrites']
84 if ('vDiskWriteTime' in the_json_one.keys()):
85 json_three['vDiskWriteTime']=the_json_two['vDiskWriteTime']-the_json_one['vDiskWriteTime']
86 if ('vNetworkBytesRecvd' in the_json_one.keys()):
87 json_three['vNetworkBytesRecvd']=the_json_two['vNetworkBytesRecvd']-the_json_one['vNetworkBytesRecvd']
88 if ('vNetworkBytesSent' in the_json_one.keys()):
89 json_three['vNetworkBytesSent']=the_json_two['vNetworkBytesSent']-the_json_one['vNetworkBytesSent']
90 if ('cProcessorStats' in the_json_one.keys()):
91 for (each_key) in the_json_two['cProcessorStats']:
92 if ('cCpu' in each_key and 'TIME' in each_key):
93 json_three['cProcessorStats'][each_key] = the_json_two['cProcessorStats'][each_key] - the_json_one['cProcessorStats'][each_key]
94 return json_three
95
96 delta_json_array=[]
97 count = 0
98 first = json_array[0]
99 for i in range(1, len(json_array)):
100 count += (json_array[i]["currentTime"] - json_array[i-1]["currentTime"])
101 if count >= int(args.delta_interval_time):
102 delta_json_array.append(file_subtraction(first, json_array[i]))
103 count = 0
104 first = json_array[i]
105
106 for i in range(len(delta_json_array)):
107 with open(delta_name_array[i], 'w') as fp:
108 json.dump(delta_json_array[i], fp, sort_keys=True, indent=2)
| 16 - warning: bad-indentation
18 - warning: bad-indentation
24 - warning: bad-indentation
25 - warning: bad-indentation
26 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
31 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
35 - warning: bad-indentation
37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
41 - warning: bad-indentation
42 - warning: bad-indentation
43 - warning: bad-indentation
44 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
50 - warning: bad-indentation
51 - warning: bad-indentation
52 - warning: bad-indentation
53 - warning: bad-indentation
54 - warning: bad-indentation
55 - warning: bad-indentation
56 - warning: bad-indentation
57 - warning: bad-indentation
58 - warning: bad-indentation
59 - warning: bad-indentation
60 - warning: bad-indentation
61 - warning: bad-indentation
62 - warning: bad-indentation
63 - warning: bad-indentation
64 - warning: bad-indentation
65 - warning: bad-indentation
66 - warning: bad-indentation
67 - warning: bad-indentation
68 - warning: bad-indentation
69 - warning: bad-indentation
70 - warning: bad-indentation
71 - warning: bad-indentation
72 - warning: bad-indentation
73 - warning: bad-indentation
74 - warning: bad-indentation
75 - warning: bad-indentation
76 - warning: bad-indentation
77 - warning: bad-indentation
78 - warning: bad-indentation
79 - warning: bad-indentation
80 - warning: bad-indentation
81 - warning: bad-indentation
82 - warning: bad-indentation
83 - warning: bad-indentation
84 - warning: bad-indentation
85 - warning: bad-indentation
86 - warning: bad-indentation
87 - warning: bad-indentation
88 - warning: bad-indentation
89 - warning: bad-indentation
90 - warning: bad-indentation
91 - warning: bad-indentation
92 - warning: bad-indentation
93 - warning: bad-indentation
94 - warning: bad-indentation
100 - warning: bad-indentation
101 - warning: bad-indentation
102 - warning: bad-indentation
103 - warning: bad-indentation
104 - warning: bad-indentation
107 - warning: bad-indentation
108 - warning: bad-indentation
24 - warning: unspecified-encoding
33 - warning: broad-exception-caught
35 - warning: unnecessary-pass
36 - refactor: too-many-branches
36 - refactor: too-many-statements
107 - warning: unspecified-encoding
4 - warning: unused-import
7 - warning: unused-import
8 - warning: unused-import
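Note: the too-many-branches/too-many-statements refactors point at file_subtraction's long if-chain; the same logic fits in a loop over a key list (a sketch with most keys elided):

import copy

DELTA_KEYS = ('cCpuTime', 'cCpuTimeKernelMode', 'cCpuTimeUserMode',
              'vNetworkBytesRecvd', 'vNetworkBytesSent')  # ...remaining keys elided

def file_subtraction(the_json_one, the_json_two):
    json_three = copy.deepcopy(the_json_two)
    for key in DELTA_KEYS:
        if key in the_json_one:
            json_three[key] = the_json_two[key] - the_json_one[key]
    if 'cProcessorStats' in the_json_one:
        for each_key in the_json_two['cProcessorStats']:
            if 'cCpu' in each_key and 'TIME' in each_key:
                json_three['cProcessorStats'][each_key] = (
                    the_json_two['cProcessorStats'][each_key]
                    - the_json_one['cProcessorStats'][each_key])
    return json_three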
|
1 import argparse
2 import os
3 import sys
4 import json
5 import copy
6 import ConfigParser
7 import pandas as pd
8 import time
9
10 import os
11 import glob
12 import pandas as pd
13
14
15 from collections import namedtuple
16
17 parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
18 parser.add_argument('file_path', action='store', help='')
19 args= parser.parse_args()
20 file_path = args.file_path
21
22
23 dirs= [i for i in os.listdir( file_path ) if i.endswith(".csv")]
24 dirs.sort()
25 dfObj = pd.DataFrame()
26
27
28 used_count = []
29 pcmd_list =[]
30 for file_name in dirs:
31 with open(file_path + '/' + file_name) as csv_file:
32 data_frame = pd.read_csv(csv_file)
33 data_frame.head()
34 value_counts= data_frame['pCmdLine'].value_counts()
35 #df = value_counts.rename_axis('unique_values').reset_index(name='counts')
36 df = pd.DataFrame(value_counts)
37 pcmd_list.append(df)
38
39 series=data_frame.median()
40 series = series.rename(file_name)
41
42 dfObj = dfObj.append(series)
43 used_count.append(len(data_frame.index))
44
45 total = pcmd_list[0]
46 for i in pcmd_list[1:]:
47 total = total.add(i, fill_value=0)
48
49
50 total = total.sort_values(by="pCmdLine", ascending=False)
51 total.to_csv("processes_used.csv", sep=',')
52
53
54 dfObj.insert(len(dfObj.columns) ,"Times Used", used_count)
55 dfObj= dfObj.sort_values(by="Times Used", ascending=False)
56
57 dfObj.index=dfObj["pId"]
58 dfObj = dfObj.loc[:, ~dfObj.columns.str.contains('^Unnamed')]
59
60 dfObj.to_csv("process_info.csv", sep=',')
61
62
63
| 31 - warning: bad-indentation
32 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
36 - warning: bad-indentation
37 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
42 - warning: bad-indentation
43 - warning: bad-indentation
47 - warning: bad-indentation
10 - warning: reimported
12 - warning: reimported
31 - warning: unspecified-encoding
3 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
8 - warning: unused-import
11 - warning: unused-import
15 - warning: unused-import
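Note: besides the duplicated imports flagged as reimported, DataFrame.append (line 42) was removed in pandas 2.0; collecting the per-file medians and concatenating once is the current pattern (a sketch using the script's dirs and file_path):

medians = []
for file_name in dirs:
    data_frame = pd.read_csv(file_path + '/' + file_name)
    medians.append(data_frame.median(numeric_only=True).rename(file_name))
dfObj = pd.concat(medians, axis=1).T  # one concat instead of repeated .append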
|
1 #Authors: David Perez and Tanmay Shah
2
3 import json
4 import os
5 import pandas as pd
6 import argparse
7
8
9 #usage: python csv_generation_2.py path_of_folder_with_json sampling_delta metrics(file or space delimited list, if file include --infile, leave blank for all metrics found in the json files.)
10
11 def read_metrics_file(metrics):
12 if (len(metrics) == 1 and path.exists(metrics[0])):
13 metrics_file= metrics[0]
14 with open(metrics_file, 'r') as f:
15 metrics= f.readline().split()
16 # print(metrics)
17 f.close()
18 return metrics
19 else:
20 print("Error: Too many arguments or path does not exist")
21
22 def read_cmdline_metrics(metrics):
23 return metrics
24
25 # vm_container dictionary to store the virtual machine and container data. Key is the filename and value is the virtual machine and container data.
26 vm_container = {}
27 #Parse for folder path, and metrics to add.
28 parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')
29 parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')
30 parser.add_argument('sampling_delta', type=int, nargs='?', default=1, help='determines sampling size')
31 parser.add_argument('metrics', type=str, nargs='*', help='list of metrics or file for metrics')
32 parser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')
33
34 args= parser.parse_args()
35 file_path = args.file_path
36 metrics = args.read_metrics(args.metrics)
37 #currentTime is necessary to be included in metrics as it is used to create time series. We add it here incase its not already included
38 metrics.append('currentTime')
39 metrics = set(metrics)
40 dirs = os.listdir( file_path )
41
42 # processes dictionary to store process level data
43 processes = dict()
44 dirs= sorted([i for i in os.listdir( file_path ) if i.endswith(".json")])
45
46 for file in dirs:
47 with open(file_path+'/'+file) as f:
48 # Deserialize into python object
49 y = json.load(f)
50 # A dictionary which contains the value of vm_container dictionary
51 r = {}
52
53
54 # Check for any list or dictionary in y
55 # determines what is chosen out of the metrics.
56 #print metrics
57 for k in y:
58 if not (k == "pProcesses" or k == "cProcessorStats"):
59 if k in metrics or len(metrics) == 1:
60 r[k] = y[k]
61
62
63 if ("cProcessorStats" in y and "cNumProcessors" in y):
64 for k in y["cProcessorStats"]:
65 if (k in metrics or len(metrics) == 0):
66 r[k] = y["cProcessorStats"][k]
67
68 if ("pProcesses" in y):
69 totalProcesses = len(y["pProcesses"]) - 1
70 #print y["pProcesses"][len(y["pProcesses"]) - 1]
71
72
73 for k in y["pProcesses"][totalProcesses]:
74 if k == "pTime":
75 r["pTime"] = y["pProcesses"][totalProcesses]["pTime"]
76
77
78 # Loop through the process level data
79 for i in range(totalProcesses):
80 # A dictinary containing process level data
81 s = {"filename": file}
82
83 for k in y["pProcesses"][i]:
84 s[k] = y["pProcesses"][i][k]
85
86 s["currentTime"] = r["currentTime"]
87
88 # If the process id is already in the processes, append to the list of processes
89 pids = []
90 if y["pProcesses"][i]["pId"] in processes:
91 pids = processes[y["pProcesses"][i]["pId"]]
92 pids.append( s )
93 processes[y["pProcesses"][i]["pId"]] = pids
94
95 #write all metrics to csv file
96 vm_container[file] = r
97
98
99 #creates empty folder for process info
100 if not os.path.exists('./process_info/{}'.format(os.path.basename(os.path.normpath(file_path)))):
101 os.makedirs('./process_info/{}'.format(os.path.basename(os.path.normpath(file_path))))
102
103 for key, value in processes.items():
104 df1 = pd.DataFrame(value)
105 df1 = df1.sort_values(by='currentTime', ascending=True)
106 df1.to_csv("./process_info/{}/Pid, {}.csv".format(os.path.basename(os.path.normpath(file_path)),str(key)))
107
108 # Create a separate CSV files for each of the processes
109 # Dump dictionary to a JSON file
110 with open("vm_container.json","w") as f:
111 f.write(json.dumps(vm_container))
112
113 # Convert JSON to dataframe and convert it to CSV
114 df = pd.read_json("vm_container.json").T
115 df=df.iloc[::args.sampling_delta]
116 df.to_csv("vm_container.csv", sep=',')
117
118 # Convert JSON to dataframe and convert it to CSV
119 df = pd.read_json("vm_container.json").T
120 df=df.iloc[::args.sampling_delta]
121 df.to_csv("vm_container.tsv", sep='\t')
122
| 12 - warning: bad-indentation
13 - warning: bad-indentation
14 - warning: bad-indentation
15 - warning: bad-indentation
17 - warning: bad-indentation
18 - warning: bad-indentation
19 - warning: bad-indentation
20 - warning: bad-indentation
23 - warning: bad-indentation
101 - warning: bad-indentation
11 - warning: redefined-outer-name
14 - warning: redefined-outer-name
12 - refactor: no-else-return
12 - error: undefined-variable
14 - warning: unspecified-encoding
11 - refactor: inconsistent-return-statements
22 - warning: redefined-outer-name
43 - refactor: use-dict-literal
47 - warning: unspecified-encoding
58 - refactor: consider-using-in
110 - warning: unspecified-encoding
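Note: the undefined-variable on line 12 is path, called via path.exists without an import; two small fixes for this row's labels:

from os import path  # resolves path.exists(metrics[0]) on line 12

processes = {}       # dict literal, per the use-dict-literal hint on line 43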
|
1 #author: David Perez
2 from plotly.subplots import make_subplots
3 import random
4 import json
5 import os, sys
6 import pandas as pd
7 import subprocess
8 import numpy as np
9
10 import plotly.express as px
11 import plotly.graph_objects as go
12 import argparse
13 from os import path
14 import math
15 import shutil
16 from os.path import abspath
17 from subprocess import call
18
19
20 from distutils.dir_util import copy_tree
21
22
23 def read_metrics_file(metrics):
24
25 if (len(metrics) == 1 and path.exists(metrics[0])):
26 metrics_file= metrics[0]
27 with open(metrics_file, 'r') as f:
28 metrics= f.readline().split()
29 f.close()
30 return ' '.join(metrics)
31
32
33 else:
34 print("Error: Too many arguments or path does not exist")
35
36 def read_cmdline_metrics(metrics):
37 return ' '.join(metrics)
38
39
40
41 #give, x folders, give metrics, give smoothening delta,
42
43 parser = argparse.ArgumentParser(description="generates plotly graphs by giving folders, metrics, and delta smoothening value")
44 parser.add_argument('-f', "--folders", action="store", nargs='*', help='determines sampling size')
45 parser.add_argument("-s", "--sampling_interval", type=str, nargs='?', default=1, action="store", help='determines sampling size')
46 parser.add_argument("-m", "--metrics", action="store", nargs='*', default=[], help='list of metrics to graph over')
47 parser.add_argument("-d", "--dynamic_creation", action="store_true", default=False, help='list of metrics to graph over')
48 parser.add_argument('--infile', dest='read_metrics', action='store_const', const=read_metrics_file, default=read_cmdline_metrics, help='reads metrics from a file or from command line')
49
50
51
52 args= parser.parse_args()
53 metrics = args.read_metrics(args.metrics)
54
55 #print(args.folders);
56 #print(args.sampling_interval);
57
58 print("making delta_json_gen script")
59 os.system("python delta_json_generation.py")
60 print("finished delta_json_gen script")
61
62 current_directory = os.getcwd()
63 final_directory = os.path.join(current_directory, r'graph_all_json')
64
65 if os.path.exists(final_directory):
66 shutil.rmtree(final_directory)
67 if not os.path.exists(final_directory):
68 os.makedirs(final_directory)
69
70 print("running delta_json_gen on each path given")
71 for path in args.folders:
72 path = os.path.expanduser(path)
73 os.system("python auto_generated_delta_script.py {} {}".format(path, args.sampling_interval))
74 copy_tree(path+"/delta_json", final_directory)
75
76 print("Finished running delta_json_gen on each path given")
77
78 print("Creating a csv file based on dela information created")
79 os.system("python csv_generation_2.py {} {} {}".format(final_directory, "1", metrics))
80 print("Finished Creating a csv file based on dela information created")
81
82 print("Starting Graphing process")
83 if (args.dynamic_creation) :
84 #print ("Tanmay METRICS HERE:")
85 #print (metrics)
86 os.system("python plotly_graph_generation.py {} {} -d".format("vm_container.csv", metrics))
87 else :
88 print ("Tanmay METRICS HERE:")
89 print (metrics)
90 os.system("python plotly_graph_generation.py {} {}".format("vm_container.csv", metrics))
91
92 print("Finished Graphing process")
93
| 25 - warning: bad-indentation
26 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
37 - warning: bad-indentation
66 - warning: bad-indentation
68 - warning: bad-indentation
72 - warning: bad-indentation
73 - warning: bad-indentation
74 - warning: bad-indentation
86 - warning: bad-indentation
88 - warning: bad-indentation
89 - warning: bad-indentation
90 - warning: bad-indentation
20 - warning: deprecated-module
23 - warning: redefined-outer-name
25 - refactor: no-else-return
27 - warning: unspecified-encoding
23 - refactor: inconsistent-return-statements
36 - warning: redefined-outer-name
2 - warning: unused-import
3 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
7 - warning: unused-import
8 - warning: unused-import
10 - warning: unused-import
11 - warning: unused-import
14 - warning: unused-import
16 - warning: unused-import
17 - warning: unused-import
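Note: the deprecated-module warning targets distutils.dir_util, which was removed with distutils in Python 3.12; shutil covers the same use, assuming Python 3.8+ (path and final_directory are the script's own variables):

import shutil

shutil.copytree(path + "/delta_json", final_directory, dirs_exist_ok=True)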
|
1 #Creates a script based on graph_generation_config.ini to create a delta script to delta certain metrics, and avoids others.
2 #authors: David Perez and Tanmay Shah
3
4 import argparse
5 import os
6 import json
7 import configparser
8
9
10 from collections import namedtuple
11
12 generated_script= open("auto_generated_delta_script.py","w")
13 generated_script.write("import argparse\nimport os\nimport shutil\nimport sys\nimport json\nimport copy\nimport configparser\nfrom collections import namedtuple\n\n")
14
15 generated_script.write("parser = argparse.ArgumentParser(description='process path and file /or string of metrics.')\n")
16 generated_script.write("parser.add_argument('file_path', action='store', help='stores the filepath to the folder holding all the JSON files')\n")
17 generated_script.write("parser.add_argument('delta_interval_time', action='store', help='stores time interval of when to take delta sample')\n")
18 generated_script.write("args= parser.parse_args()\n")
19 generated_script.write("file_path = args.file_path\n")
20
21 generated_script.write("if os.path.exists(file_path + \'/delta_json\'):\n")
22 generated_script.write("\tshutil.rmtree(file_path + \'/delta_json\')\n")
23
24
25 generated_script.write("if not os.path.exists(file_path + '/delta_json'):\n")
26 generated_script.write("\tos.makedirs(file_path + '/delta_json')\n\n")
27
28 generated_script.write("json_array = []\n")
29 generated_script.write("delta_name_array = []\n")
30 generated_script.write("dirs= sorted([i for i in os.listdir( file_path ) if i.endswith(\".json\")])\n")
31 #generated_script.write("dirs.sort()\n")
32
33 generated_script.write("for file_name in dirs:\n")
34 generated_script.write("\twith open(file_path + '/' + file_name) as json_file: \n")
35 #generated_script.write("\t\tprint ('JSON FILES TANMAY:')\n")
36 generated_script.write("\t\tprint(json_file)\n")
37 generated_script.write("\t\ttry:\n")
38
39 generated_script.write("\t\t\tnew_json_object = json.load(json_file)\n")#, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n")
40 generated_script.write("\t\t\tjson_array.append(new_json_object)\n")
41 generated_script.write("\t\t\tnew_name= ((file_path+'/delta_json/'+file_name).split('.json')[0] + '_delta.json')\n")
42
43 generated_script.write("\t\t\tdelta_name_array.append(new_name)\n\n")
44 generated_script.write("\t\texcept Exception as e:\n")
45
46 generated_script.write("\t\t\tprint (\"{} invalid file\".format(json_file))\n")
47 generated_script.write("\t\t\tpass\n")
48 config = configparser.ConfigParser()
49 config.optionxform = str
50 config.read('graph_generation_config.ini')
51
52
53 #script generation
54 generated_script.write("def file_subtraction(the_json_one, the_json_two):\n")
55 generated_script.write("\tjson_three = copy.deepcopy(the_json_two)\n")
56
57 #all common attributes across all verbos
58 for (each_key, each_val) in config.items('all'):
59 if ( each_val == 'numeric_delta'): #and each_key.isdigit()):
60 json_one = "the_json_one['" +each_key+"']"
61 json_two = "the_json_two['" +each_key+"']"
62 json_three = "json_three['" +each_key+"']"
63 generated_script.write("\t" + json_three +"=" + json_two +'-' + json_one+"\n")
64
65
66 #check and process attributes only for CPU or VM
67 verbos = ['cpu_level','vm_level']
68 for vKey in verbos:
69 for (each_key, each_val) in config.items(vKey):
70 if ( each_val == 'numeric_delta'): #and each_key.isdigit()):
71 generated_script.write("\tif ('" + each_key + "' in the_json_one.keys()):\n")
72 json_one = "the_json_one['" +each_key+"']"
73 json_two = "the_json_two['" +each_key+"']"
74 json_three = "json_three['" +each_key+"']"
75 generated_script.write("\t\t" + json_three +"=" + json_two +'-' + json_one+"\n")
76
77 if (config.get('cprocessorstats','cCpu#TIME')):
78 generated_script.write("\tif ('cProcessorStats' in the_json_one.keys()):\n")
79 generated_script.write("\t\tfor (each_key) in the_json_two['cProcessorStats']:\n")
80 generated_script.write("\t\t\tif ('cCpu' in each_key and 'TIME' in each_key):\n")
81 generated_script.write("\t\t\t\tjson_three['cProcessorStats'][each_key] = the_json_two['cProcessorStats'][each_key] - the_json_one['cProcessorStats'][each_key]\n")
82 generated_script.write("\treturn json_three\n\n")
83
84 generated_script.write("delta_json_array=[]\n")
85 generated_script.write("count = 0\n")
86 generated_script.write("first = json_array[0]\n")
87
88 generated_script.write("for i in range(1, len(json_array)):\n")
89
90 generated_script.write("\tcount += (json_array[i][\"currentTime\"] - json_array[i-1][\"currentTime\"])\n")
91 generated_script.write("\tif count >= int(args.delta_interval_time):\n")
92 generated_script.write("\t\tdelta_json_array.append(file_subtraction(first, json_array[i]))\n")
93 generated_script.write("\t\tcount = 0\n")
94 generated_script.write("\t\tfirst = json_array[i]\n")
95
96
97 generated_script.write("\n")
98 generated_script.write("for i in range(len(delta_json_array)):\n")
99
100
101 generated_script.write("\twith open(delta_name_array[i], 'w') as fp:\n")
102 generated_script.write("\t\tjson.dump(delta_json_array[i], fp, sort_keys=True, indent=2)\n")
| 59 - warning: bad-indentation
60 - warning: bad-indentation
61 - warning: bad-indentation
62 - warning: bad-indentation
63 - warning: bad-indentation
69 - warning: bad-indentation
70 - warning: bad-indentation
71 - warning: bad-indentation
72 - warning: bad-indentation
73 - warning: bad-indentation
74 - warning: bad-indentation
75 - warning: bad-indentation
12 - warning: unspecified-encoding
12 - refactor: consider-using-with
4 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
10 - warning: unused-import
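Note: consider-using-with and unspecified-encoding both attach to the open() on line 12; a with block fixes both and guarantees the generated script is flushed and closed:

with open("auto_generated_delta_script.py", "w", encoding="utf-8") as generated_script:
    generated_script.write("import argparse\nimport os\n")  # remaining writes move under the with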
|
1 from simulator.event_queue import EventQueue
2 from simulator.resource import *
3 from simulator.dag import Dag
4 from simulator.system import System
5 from workloads.toy.linear_dag import linear_dag_clockwork_data, linear_instance_list, linear_instance_placements
6
7 class SimpleSystem(System):
8 pools: Dict[str, ResourcePool]
9
10 def __init__(self,_events: EventQueue, _pools: Dict[str, ResourcePool]):
11 super().__init__(_events)
12 self.pools = _pools
13 self.dag_maps = {}
14
15 def schedule(self, curr_time, events, *args, **kwargs):
16 # First check for any completed functions
17 for name, pool in self.pools.items():
18 for resource in pool.get_all_resources():
19 completed = resource.remove_at_time(curr_time)
20 for (fid, tag) in completed:
21 assert tag in self.outstanding_requests, "Tag needs to map to an outstanding request"
22 self.outstanding_requests[tag] = (True, self.outstanding_requests[tag][1])
23 # Now process any new events
24 for (dag, input) in events:
25 # for linear_instance in linear_instance_list:
26 # print(linear_instance.id_res_map)
27 # print(linear_instance.running_time)
28 # print(linear_instance.running_cost)
29 # for price_instance in linear_instance_placements.price_list:
30 # print(price_instance.running_cost)
31 # for time_instance in linear_instance_placements.time_list:
32 # print(time_instance.running_time)
33 # sample_placement = (linear_instance_placements.get_sample_list(10000, 10000))[0]
34 # self.dag_maps = sample_placement.id_res_map
35 print(linear_dag_clockwork_data)
36 if linear_dag_clockwork_data[1][0] < 20 and linear_dag_clockwork_data[1][1] < 85:
37 self.dag_maps[dag.name] = 'STD_GPU'
38 elif linear_dag_clockwork_data[0][0] < 20 and linear_dag_clockwork_data[0][1] < 85:
39 self.dag_maps[dag.name] = 'STD_CPU'
40 else:
41 continue
42 # print(dag_maps)
43 # for sample_instance in linear_instance_placements.get_sample_list(10000, 10000):
44 # print(sample_instance.running_time)
45 # print(sample_instance.running_cost)
46 # print("Done")
47 # print("Hello")
48 dag.execute() # Need to do this to seal the DAG
49 self.outstanding_requests[self.__generate_tag(dag, curr_time)] = (True, dag)
50 # Now schedule functions
51 for tag, (flag, dag) in self.outstanding_requests.copy().items():
52 if flag:
53 if dag.has_next_function():
54 # Find which resource is faster
55 nxt = dag.peek_next_function()
56 # std_cpu = nxt.resources['STD_CPU']
57 # std_gpu = nxt.resources['STD_GPU']
58 # cpu_time = std_cpu['pre'].get_runtime() + std_cpu['exec'].get_runtime() + std_cpu['post'].get_runtime()
59 # gpu_time = std_gpu['pre'].get_runtime() + std_gpu['exec'].get_runtime() + std_gpu['post'].get_runtime()
60 # if cpu_time < gpu_time:
61 # pool = self.pools['STD_CPU_POOL']
62 # else:
63 # pool = self.pools['STD_GPU_POOL']
64 # print(self.dag_maps)
65 # print(nxt.unique_id)
66 if self.dag_maps[dag.name] == 'STD_GPU':
67 pool = self.pools['STD_GPU_POOL']
68 # print("GPU")
69 else:
70 pool = self.pools['STD_CPU_POOL']
71 # print("CPU")
72 # If there is a resource available, schedule it
73 result : Optional[Tuple[str, Resource]] = pool.find_first_available_resource(nxt, tag)
74 if result:
75 (name, rsrc) = result
76 rsrc.add_function(dag.next_function(), tag, curr_time)
77 self.outstanding_requests[tag] = (False, self.outstanding_requests[tag][1])
78 else:
79 # Remove if there is no next function
80 self.outstanding_requests.pop(tag)
81
82 def __generate_tag(self, dag: Dag, time: int):
83 return f"{dag.name}:{time}:{id(dag)}"
84
85 def __decode_tag(self, tag: str) -> Dag:
86 return self.outstanding_requests[tag] | 8 - warning: bad-indentation
10 - warning: bad-indentation
11 - warning: bad-indentation
12 - warning: bad-indentation
13 - warning: bad-indentation
15 - warning: bad-indentation
17 - warning: bad-indentation
18 - warning: bad-indentation
19 - warning: bad-indentation
20 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
24 - warning: bad-indentation
35 - warning: bad-indentation
36 - warning: bad-indentation
37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
41 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
51 - warning: bad-indentation
52 - warning: bad-indentation
53 - warning: bad-indentation
55 - warning: bad-indentation
66 - warning: bad-indentation
67 - warning: bad-indentation
69 - warning: bad-indentation
70 - warning: bad-indentation
73 - warning: bad-indentation
74 - warning: bad-indentation
75 - warning: bad-indentation
76 - warning: bad-indentation
77 - warning: bad-indentation
78 - warning: bad-indentation
80 - warning: bad-indentation
82 - warning: bad-indentation
83 - warning: bad-indentation
85 - warning: bad-indentation
86 - warning: bad-indentation
2 - warning: wildcard-import
8 - error: undefined-variable
8 - error: undefined-variable
10 - error: undefined-variable
10 - error: undefined-variable
15 - refactor: too-many-locals
24 - warning: redefined-builtin
73 - error: undefined-variable
73 - error: undefined-variable
73 - error: undefined-variable
15 - refactor: too-many-branches
15 - warning: unused-argument
15 - warning: unused-argument
17 - warning: unused-variable
20 - warning: unused-variable
24 - warning: unused-variable
85 - warning: unused-private-member
7 - refactor: too-few-public-methods
5 - warning: unused-import
5 - warning: unused-import
|
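In the scheduler sample above, __decode_tag is annotated as returning a Dag but actually returns the stored (flag, dag) pair, which is part of why pylint only sees an unused private member. A minimal sketch of a lookup consistent with the annotation, assuming the (flag, dag) storage layout visible at lines 22, 49 and 77 (illustrative, not the author's code):

    def __decode_tag(self, tag: str) -> Dag:
        _scheduled, dag = self.outstanding_requests[tag]  # entries are (flag, dag) pairs
        return dag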
1 import json
2 import numpy
3 from azureml.core.model import Model
4 import joblib
5
6
7 def init():
8 global LGBM_MODEL
9 # Load the model from file into a global object
10 model_path = Model.get_model_path(
11 model_name="driver_model")
12 LGBM_MODEL = joblib.load(model_path)
13
14
15 def run(raw_data, request_headers):
16 data = json.loads(raw_data)["data"]
17 data = numpy.array(data)
18 result = LGBM_MODEL.predict(data)
19
20 # Demonstrate how we can log custom data into the Application Insights
21 # traces collection.
22 # The 'X-Ms-Request-id' value is generated internally and can be used to
23 # correlate a log entry with the Application Insights requests collection.
24 # The HTTP 'traceparent' header may be set by the caller to implement
25 # distributed tracing (per the W3C Trace Context proposed specification)
26 # and can be used to correlate the request to external systems.
27 print(('{{"RequestId":"{0}", '
28 '"TraceParent":"{1}", '
29 '"NumberOfPredictions":{2}}}'
30 ).format(
31 request_headers.get("X-Ms-Request-Id", ""),
32 request_headers.get("Traceparent", ""),
33 len(result)
34 ))
35
36 return {"result": result.tolist()}
37
38
39 if __name__ == "__main__":
40 # Test scoring
41 init()
42 TEST_ROW = '{"data":[[0,1,8,1,0,0,1,0,0,0,0,0,0,0,12,1,0,0,0.5,0.3,0.610327781,7,1,-1,0,-1,1,1,1,2,1,65,1,0.316227766,0.669556409,0.352136337,3.464101615,0.1,0.8,0.6,1,1,6,3,6,2,9,1,1,1,12,0,1,1,0,0,1],[4,2,5,1,0,0,0,0,1,0,0,0,0,0,5,1,0,0,0.9,0.5,0.771362431,4,1,-1,0,0,11,1,1,0,1,103,1,0.316227766,0.60632002,0.358329457,2.828427125,0.4,0.5,0.4,3,3,8,4,10,2,7,2,0,3,10,0,0,1,1,0,1]]}' # NOQA: E501
43 PREDICTION = run(TEST_ROW, {})
44 print("Test result: ", PREDICTION)
| 8 - warning: global-variable-undefined
|
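The comment block in run() above explains that the 'X-Ms-Request-Id' and 'traceparent' headers drive the custom Application Insights log line. A quick local exercise of that path, with made-up header values (the header names themselves come from the sample):

    init()
    headers = {"X-Ms-Request-Id": "11111111-2222-3333-4444-555555555555",
               "Traceparent": "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01"}
    print(run(TEST_ROW, headers))  # the RequestId/TraceParent log line prints before the result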
1 class Node :
2 def __init__(self, data) :
3 self.data = data
4 self.next = None
5 self.prev = None
6
7 class doublelinkedlist(object) :
8 def __init__(self) :
9 self.head = None
10 self.tail = None
11
12 def tambahbelakang(self, data) :
13 if self.head is None :
14 new_node = Node(data)
15 new_node.prev = None
16 self.head = new_node
17 else :
18 new_node = Node(data)
19 current_node = self.head
20 while current_node.next is not None :
21 current_node = current_node.next
22 current_node.next = new_node
23 new_node.prev = current_node
24 new_node.next = None
25 self.tail = new_node
26
27         print("Data added.")
28 print("")
29
30 def tambahdepan(self, data) :
31 if self.head is None :
32 new_node = Node(data)
33 new_node.prev = None
34 self.head = new_node
35 else :
36 new_node = Node(data)
37 self.head.prev = new_node
38 new_node.next = self.head
39 self.head = new_node
40 new_node.prev = None
41
42         print("Data added.")
43 print("")
44
45 def tambahsetelah(self, key, data) :
46 current_node = self.head
47 while current_node is not None :
48 if current_node.next is None and current_node.data == key :
49 self.tambahbelakang(data)
50 return
51 elif current_node.data == key :
52 new_node = Node(data)
53 nxt = current_node.next
54 current_node.next = new_node
55 new_node.next = nxt
56 new_node.prev = current_node
57 nxt.prev = new_node
58 current_node = current_node.next
59
60         print("Data added.")
61 print("")
62
63 def tambahsebelum(self, key, data) :
64 current_node = self.head
65 while current_node is not None :
66 if current_node.prev is None and current_node.data == key :
67 self.tambahdepan(data)
68 return
69 elif current_node.data == key :
70 new_node = Node(data)
71 prev = current_node.prev
72 prev.next = new_node
73 current_node.prev = new_node
74 new_node.next = current_node
75 new_node.prev = prev
76 current_node = current_node.next
77
78         print("Data added.")
79 print("")
80
81 def hapusdepan(self) :
82 if self.head is None :
83             print ("Data is still empty.")
84 else :
85 if self.head.next is not None :
86 self.head.next.prev = None
87 self.head = self.head.next
88
89         print("Data deleted.")
90 print("")
91
92 def hapusbelakang(self) :
93 if self.tail is None :
94             print ("Data is still empty.")
95 else :
96 if self.tail.prev is not None :
97 self.tail.prev.next = None
98 self.tail = self.tail.prev
99 return
100
101         print("Data deleted.")
102 print("")
103
104 def hapustarget (self, data) :
105 if self.head is None :
106             print ("Data is still empty.")
107 return
108 current_node = self.head
109 while current_node.data is not data and current_node.next is not None :
110 current_node = current_node.next
111 if current_node.data is not data :
112             print ("Data not found.")
113 return
114 if current_node.prev is not None :
115 current_node.prev.next = current_node.next
116 else :
117 self.head = current_node.next
118
119 if current_node.next is not None :
120 current_node.next.prev = current_node.prev
121 else :
122 self.tail = current_node.prev
123
124         print("Data deleted.")
125 print("")
126
127 def tampil(self) :
128         print("Data:")
129 print("")
130
131 current_node = self.head
132 while current_node is not None :
133 print (current_node.data, end=" -> ")
134 current_node = current_node.next
135
136 def tampilreverse(self) :
137 current_node = self.tail
138 while current_node is not None :
139 print (current_node.data, end=", ")
140 current_node = current_node.prev
141
142 def menuUmum(self):
143 pilih = "y"
144 while ((pilih == "y") or (pilih == "Y")):
145 # os.system('clear')
146             print('Choose the menu option you want')
147             print('==============================')
148             print('1. Add data at the back')
149             print('2. Add data at the front')
150             print('3. Add data after an item')
151             print('4. Add data before an item')
152             print('5. Delete data at the front')
153             print('6. Delete data at the back')
154             print('7. Delete a chosen item')
155             print('8. Display the data')
156             pilihan = str(input("Enter your menu choice: "))
157 if(pilihan == "1"):
158                 node = str(input("Enter data: "))
159 self.tambahbelakang(node)
160 elif(pilihan == "2"):
161                 node = str(input("Enter data: "))
162 self.tambahdepan(node)
163 elif(pilihan == "3"):
164                 node = str(input("Enter data: "))
165                 node2 = str(input("Insert after: "))
166 self.tambahsetelah(node2, node)
167 elif(pilihan == "4"):
168                 node = str(input("Enter data: "))
169                 node2 = str(input("Insert before: "))
170 self.tambahsebelum(node2, node)
171 elif(pilihan == "5"):
172 self.hapusdepan()
173 elif(pilihan == "6"):
174 self.hapusbelakang()
175 elif(pilihan == "7"):
176                 node = str(input("Enter the data to delete: "))
177 self.hapustarget(node)
178 elif(pilihan == "8"):
179 self.tampil()
180 x = input("")
181 else :
182 pilih ="n"
183
184 if __name__ == "__main__" :
185 d = doublelinkedlist()
186 d.menuUmum()
| 1 - refactor: too-few-public-methods
7 - refactor: useless-object-inheritance
48 - refactor: no-else-return
66 - refactor: no-else-return
144 - refactor: consider-using-in
180 - warning: unused-variable
|
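The no-else-return refactors pylint reports at lines 48 and 66 above point at the elif-after-return pattern in tambahsetelah and tambahsebelum. A sketch of the shape pylint suggests for the first one (same behavior; once a branch returns, the elif can become a plain if):

    if current_node.next is None and current_node.data == key:
        self.tambahbelakang(data)
        return
    if current_node.data == key:
        ...  # splice in the new node as before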
1 # A module is basically a file containing a set of functions to include in your application. There are core Python modules, modules you can install with the pip package manager (such as Django), and custom modules you write yourself.
2 import datetime
3 import time
4 import camelcase
5
6 import validator
7
8 today = datetime.date.today()
9 print(today)
10 print(time.time())
11
12 camel = camelcase.CamelCase()
13 print(camel.hump("camelCASE"))
14
15 email = "testtest.com"  # deliberately missing '@' so the validator should reject it
16 if validator.validate_email(email):
17 print("email is good")
18 else:
19     print("email is invalid") | Clean Code: No Issues Detected
|
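The snippet above imports a custom `validator` module without showing it. A minimal sketch of what such a module could contain so the example runs end to end (illustrative only; the real validator may be more thorough):

    # validator.py
    def validate_email(email):
        # crude check: exactly one '@' with a dot somewhere after it
        return email.count("@") == 1 and "." in email.split("@")[-1]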
1 import time
2 import torch
3 import numpy as np
4 import matplotlib.pyplot as plt
5 import torch.optim as optim
6 import torch.nn as nn
7 from collections import OrderedDict
8 from PIL import Image
9 import seaborn as sns
10 import numpy as np
11 import pandas as pd
12 import json
13
14
15 # %%
16 import torch.nn as nn
17
18 class SentimentRNN(nn.Module):
19
20 def __init__(self, vocab_size, output_size, embedding_dim, hidden_dim, n_layers, drop_prob=0.5):
21 super(SentimentRNN, self).__init__()
22
23 self.output_size = output_size
24 self.n_layers = n_layers
25 self.hidden_dim = hidden_dim
26
27 # embedding and LSTM layers
28 self.embedding = nn.Embedding(vocab_size, embedding_dim)
29 self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers,
30 dropout=drop_prob, batch_first=True)
31
32 # dropout layer
33 self.dropout = nn.Dropout(0.3)
34
35 # linear and sigmoid layers
36 self.fc = nn.Linear(hidden_dim, output_size)
37 self.sig = nn.Sigmoid()
38
39
40 def forward(self, x, hidden):
41 """
42 Perform a forward pass of our model on some input and hidden state.
43 """
44 batch_size = x.size(0)
45 # embeddings and lstm_out
46 x = x.long()
47 embeds = self.embedding(x)
48 lstm_out, hidden = self.lstm(embeds, hidden)
49
50 # stack up lstm outputs
51 lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)
52
53 # dropout and fully-connected layer
54 out = self.dropout(lstm_out)
55 out = self.fc(out)
56 # sigmoid function
57 sig_out = self.sig(out)
58
59 # reshape to be batch_size first
60 sig_out = sig_out.view(batch_size, -1)
61 sig_out = sig_out[:, -1] # get last batch of labels
62
63 # return last sigmoid output and hidden state
64 return sig_out, hidden
65
66
67 def init_hidden(self, batch_size):
68 ''' Initializes hidden state '''
69 # Create two new tensors with sizes n_layers x batch_size x hidden_dim,
70 # initialized to zero, for hidden state and cell state of LSTM
71 weight = next(self.parameters()).data
72 hidden = (weight.new(self.n_layers, batch_size, self.hidden_dim).zero_(),
73 weight.new(self.n_layers, batch_size, self.hidden_dim).zero_())
74
75 return hidden
76
77
78
79 # %%
80 checkpoint = torch.load('model_devfest_2019.json', map_location=lambda storage, loc: storage)
81 vocab_to_int = json.load( open( "vocab_to_int.json" ) )
82
83
84 # %%
85 net = SentimentRNN(7366, 1, 800, 300, 2)
86 net.load_state_dict(checkpoint)
87 net.eval()
88
89
90 # %%
91 from string import punctuation
92
93 def pad_features(reviews_ints, seq_length):
94 features = np.zeros((len(reviews_ints), seq_length), dtype=int)
95
96 for i, row in enumerate(reviews_ints):
97 features[i, -len(row):] = np.array(row)[:seq_length]
98
99 return features
100
101 def tokenize_review(test_review):
102 test_review = test_review.lower() # lowercase
103 # get rid of punctuation
104 test_text = ''.join([c for c in test_review if c not in punctuation])
105
106 # splitting by spaces
107 test_words = test_text.split()
108
109 # tokens
110 test_ints = []
111 test_ints.append([vocab_to_int[word] for word in test_words])
112 return test_ints
113
114 def predict(net, test_review, sequence_length=200):
115 net.eval()
116 test_ints = tokenize_review(test_review)
117 seq_length=sequence_length
118 features = pad_features(test_ints, seq_length)
119 feature_tensor = torch.from_numpy(features)
120 batch_size = feature_tensor.size(0)
121 h = net.init_hidden(batch_size)
122 output, h = net(feature_tensor, h)
123 pred = torch.round(output.squeeze())
124     if(pred.item()==1):
125         return ("no hate detected!", output.squeeze().item())  # tuple rather than a set, so the unpack order at the call site is stable
126     else:
127         return ("Hate speech detected.", output.squeeze().item())
128
129 def getOutput(model,speech,seq_length):
130 test_ints = tokenize_review(speech)
131 features = pad_features(test_ints, seq_length)
132 feature_tensor = torch.from_numpy(features)
133 return predict(model,speech,seq_length)
134
135
136 # %%
137 speech = "please kill your self"
138 cls, probToNoHate =getOutput(net,speech,200)
139 print(cls)
140 print(probToNoHate)
141
| 5 - refactor: consider-using-from-import
6 - refactor: consider-using-from-import
10 - warning: reimported
16 - warning: reimported
16 - refactor: consider-using-from-import
18 - refactor: too-many-instance-attributes
20 - refactor: too-many-arguments
20 - refactor: too-many-positional-arguments
21 - refactor: super-with-arguments
81 - refactor: consider-using-with
81 - warning: unspecified-encoding
114 - warning: redefined-outer-name
124 - refactor: no-else-return
129 - warning: redefined-outer-name
132 - warning: unused-variable
1 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
7 - warning: unused-import
8 - warning: unused-import
9 - warning: unused-import
11 - warning: unused-import
|
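pad_features above left-pads each token list with zeros up to seq_length, so the LSTM sees the real tokens last. A quick check of that behavior on a toy input:

    print(pad_features([[7, 8, 9]], 5))  # -> [[0 0 7 8 9]]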
1 # -*- coding: utf-8 -*-
2 """
3 Created on Mon May 28 10:59:55 2018
4
5 @author: j.dixit
6 """
7
8 import numpy as np
9 import matplotlib.pyplot as plt
10
11 N = 100
12 D = 2
13
14 X = np.random.randn(N, D)
15 X[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2
16 X[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2
17
18 T = np.array([0]*50 + [1]*50) #setting first 50 elements of array to 0 and next 50 to 1
19
20 ones = np.array([[1]*N]).T
21 Xb = np.concatenate((ones, X), axis = 1)
22
23 w = np.random.randn(D + 1)
24 Z = Xb.dot(w)
25
26 def sigmoid(a):
27 return 1/(1 + np.exp(-a))
28 #def forward(X, w, b):
29 # return sigmoid(X.dot(w) + b)
30 Y = sigmoid(Z)
31
32 def crossEntropyErrorFunction(T, Y):
33 E = 0
34 for i in range(N):
35 if T[i] == 1:
36 E -= np.log(Y[i])
37 else:
38 E -= np.log(1 - Y[i])
39 return E
40
41 crossEntropyError = crossEntropyErrorFunction(T, Y)
42 print("With random/normally distributed weights: ",crossEntropyError)
43
44 learning_rate = 0.1
45 L2 = 0.1
46
47 for i in range(100):
48 if i % 10 == 0:
49 print(crossEntropyErrorFunction(T, Y))
50
51 w += learning_rate*(np.dot((T-Y).T, Xb) - L2*w)
52 Y = sigmoid(Xb.dot(w))
53
54 print("Final w: ", w)
55
| 32 - warning: redefined-outer-name
32 - warning: redefined-outer-name
34 - warning: redefined-outer-name
9 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Tue May 29 21:54:38 2018
4
5 @author: jyoti
6 """
7 from __future__ import print_function, division
8 from builtins import range
9
10 import numpy as np # importing numpy with alias np
11 import matplotlib.pyplot as plt # importing matplotlib.pyplot with alias plt
12
13 No_of_observations = 50
14 No_of_Dimensions = 50
15
16 X_input = (np.random.random((No_of_observations, No_of_Dimensions))-0.5)*10 #Generating a 50x50 matrix for X with uniform values in [-5, 5), centered at 0
17 w_dash = np.array([1, 0.5, -0.5] + [0]*(No_of_Dimensions-3)) # Making first 3 features significant by setting w for them as non-zero and others zero
18 Y_output = X_input.dot(w_dash) + np.random.randn(No_of_observations)*0.5 #Setting Y = X.w + some random noise
19
20 costs = [] #Setting empty list for costs
21 w = np.random.randn(No_of_Dimensions)/np.sqrt(No_of_Dimensions) #Setting w to random values
22 L1_coeff = 5
23 learning_rate = 0.001
24
25 for t in range(500):
26 Yhat = X_input.dot(w)
27 delta = Yhat - Y_output #the error between predicted output and actual output
28 w = w - learning_rate*(X_input.T.dot(delta) + L1_coeff*np.sign(w)) #performing gradient descent for w
29 meanSquareError = delta.dot(delta)/No_of_observations #Finding mean square error
30 costs.append(meanSquareError) #Appending mse for each iteration in costs list
31
32 plt.plot(costs)
33 plt.title("Plot of costs of L1 Regularization")
34 plt.ylabel("Costs")
35 plt.show()
36
37 print("final w:", w) #The final w output. As you can see, the first 3 w's are significant; the rest are very small
38
39 # plot our w vs true w
40 plt.plot(w_dash, label='true w')
41 plt.plot(w, label='w_map')
42 plt.legend()
43 plt.show() | Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Sun May 27 15:21:54 2018
4
5 @author: jyoti
6 """
7
8 # -*- coding: utf-8 -*-
9 """
10 Created on Sat May 26 19:13:44 2018
11
12 @author: jyoti
13 """
14
15 import numpy as np
16 import pandas as pd
17 import matplotlib.pyplot as plt
18 from sklearn.utils import shuffle
19
20
21 def get_data():
22 df = pd.read_csv("ecommerce_data.csv")
23     data = df.to_numpy()  # df.as_matrix() was removed in pandas 1.0
24 X = data[:, :-1]
25 Y = data[:, -1]
26 X = np.array(X)
27 Y = np.array(Y)
28 X[:, 1] = (X[:, 1]-X[:, 1].mean())/X[:, 1].std()
29 X[:, 2] = (X[:, 2]-X[:, 2].mean())/X[:, 2].std()
30 N, D = X.shape
31
32 X2 = np.zeros((N, D+3))
33 X2[:, 0: D-2] = X[:, 0: D-2]
34
35 for n in range(N):
36 t = int(X[n, D-1])
37 X2[n, t+(D-1)] = 1
38
39 Z = np.zeros((N, 4))
40 Z[np.arange(N), X[:, D-1].astype(np.int32)] = 1
41 #X2[:, -4:] = Z
42 assert(np.abs(X2[:, -4:]- Z).sum() < 10e-10)
43 return X2, Y
44
45 def get_binary_data():
46 X, Y = get_data()
47 X2 = X[Y <= 1]
48 Y2 = Y[Y <= 1]
49 return X2, Y2
50
51 X, Y = get_binary_data()
52 X, Y = shuffle(X, Y)
53 X_train = X[:-100]
54 Y_train = Y[:-100]
55 X_test = X[-100:]
56 Y_test = Y[-100:]
57
58 D = X.shape[1]
59 N = X.shape[0]
60 w = np.random.randn(D)
61 b = 0
62
63 def sigmoid(a):
64 return 1/(1 + np.exp(-a))
65
66
67 def forward(x, w, b):
68 return sigmoid(x.dot(w) + b)
69
70 def classification_rate(Y, P):
71 return np.mean(Y == P)
72
73 def crossEntropyErrorFunction(T, Y):
74 return -np.mean(T*np.log(Y) + (1 - T)*np.log(1 - Y))
75
76 train_costs = []
77 test_costs = []
78 learning_rate = 0.001
79
80 for i in range(10000):
81 pY_train = forward(X_train, w, b)
82 pY_test = forward(X_test, w, b)
83
84 ctrain = crossEntropyErrorFunction(Y_train, pY_train)
85 ctest = crossEntropyErrorFunction(Y_test, pY_test)
86 train_costs.append(ctrain)
87 test_costs.append(ctest)
88
89 w -= learning_rate*X_train.T.dot(pY_train - Y_train)
90 b -= learning_rate*(pY_train - Y_train).sum()
91 if i % 1000 == 0:
92 print(i, ctrain, ctest)
93
94 print("Final training classification rate: ", classification_rate(Y_train, np.round(pY_train)))
95 print("Final test classification rate: ", classification_rate(Y_test, np.round(pY_test)))
96
97 legend1, = plt.plot(train_costs, label="train cost")
98 legend2, = plt.plot(test_costs, label="test cost")
99
100 plt.legend([legend1, legend2])
101 plt.show() | 9 - warning: pointless-string-statement
24 - warning: redefined-outer-name
25 - warning: redefined-outer-name
30 - warning: redefined-outer-name
30 - warning: redefined-outer-name
46 - warning: redefined-outer-name
46 - warning: redefined-outer-name
67 - warning: redefined-outer-name
67 - warning: redefined-outer-name
70 - warning: redefined-outer-name
73 - warning: redefined-outer-name
|
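get_data() above builds its one-hot block Z with a fancy-indexing trick that is worth isolating. On a toy categorical column (sketch):

    import numpy as np
    cat = np.array([2, 0, 3, 1])   # category index per row
    Z = np.zeros((4, 4))
    Z[np.arange(4), cat] = 1       # row i gets a 1 in column cat[i]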
1 # -*- coding: utf-8 -*-
2 """
3 Created on Sun May 27 13:33:29 2018
4
5 @author: jyoti
6 """
7
8 import numpy as np
9 import matplotlib.pyplot as plt
10
11 N = 100
12 D = 2
13
14 X = np.random.randn(N, D)
15 X[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2
16 X[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2
17
18 T = np.array([0]*50 + [1]*50) #setting first 50 elements of array to 0 and next 50 to 1
19
20 ones = np.array([[1]*N]).T
21 Xb = np.concatenate((ones, X), axis = 1)
22
23 w = np.random.randn(D + 1)
24 Z = Xb.dot(w)
25
26 def sigmoid(a):
27 return 1/(1 + np.exp(-a))
28
29 #def forward(X, w, b):
30 # return sigmoid(X.dot(w) + b)
31 Y = sigmoid(Z)
32
33 def crossEntropyErrorFunction(T, Y):
34 E = 0
35 for i in range(N):
36 if T[i] == 1:
37 E -= np.log(Y[i])
38 else:
39 E -= np.log(1 - Y[i])
40 return E
41
42 crossEntropyError = crossEntropyErrorFunction(T, Y)
43 print("With random/normally distributed weights: ",crossEntropyError)
44
45 w = np.array([0, 4, 4])
46 Z = Xb.dot(w)
47
48 Y = sigmoid(Z)
49
50 crossEntropyError = crossEntropyErrorFunction(T, Y)
51 print("With calculated weights/closed form solution: ",crossEntropyError)
52
53 plt.scatter(X[:, 0], X[:, 1], c = T, s = 100, alpha = 0.5)
54 plt.title("Two Gaussian clouds and the discriminating line")
55 x_axis = np.linspace(-6, 6, 100)
56 y_axis = -x_axis
57 plt.plot(x_axis, y_axis)
58 plt.show() | 33 - warning: redefined-outer-name
33 - warning: redefined-outer-name
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Sun May 27 15:06:16 2018
4
5 @author: jyoti
6 """
7
8 import numpy as np
9 import matplotlib.pyplot as plt
10
11 N = 100
12 D = 2
13
14 X = np.random.randn(N, D)
15 X[:50, :] = X[:50, :] - 2*np.ones((50, D)) #centered at -2
16 X[50:, :] = X[50:, :] + 2*np.ones((50, D)) #centered at +2
17
18 T = np.array([0]*50 + [1]*50) #setting first 50 elements of array to 0 and next 50 to 1
19
20 ones = np.array([[1]*N]).T
21 Xb = np.concatenate((ones, X), axis = 1)
22 w = np.random.randn(D + 1)
23
24
25 def sigmoid(a):
26 return 1/(1 + np.exp(-a))
27
28 Y = sigmoid(Xb.dot(w))
29
30 def crossEntropyErrorFunction(T, Y):
31 E = 0
32 for i in range(N):
33 if T[i] == 1:
34 E -= np.log(Y[i])
35 else:
36 E -= np.log(1 - Y[i])
37 return E
38
39 learning_rate = 0.1
40 for i in range(100):
41 if i % 10 == 0:
42 print(crossEntropyErrorFunction(T, Y))
43
44 w += learning_rate*Xb.T.dot(T - Y)
45 Y = sigmoid(Xb.dot(w))
46
47 print("Final weight, w: ", w) | 30 - warning: redefined-outer-name
30 - warning: redefined-outer-name
32 - warning: redefined-outer-name
9 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Tue May 29 22:07:08 2018
4
5 @author: jyoti
6 """
7
8 import numpy as np #importing the numpy package with alias np
9 import matplotlib.pyplot as plt #importing the matplotlib.pyplot as plt
10
11 N = 50
12 D = 50
13
14 X = (np.random.random((N, D))-0.5)*10
15 w_dash = np.array([1, 0.5, -0.5] + [0]*(D-3))
16 Y = X.dot(w_dash) + np.random.randn(N)*0.5
17
18 Y[-1]+=30 # add 30 to the last element of Y (outlier)
19 Y[-2]+=30 # add 30 to the second-to-last element of Y (outlier)
20
21 plt.scatter(X[:, 1], Y)  # plot against a single feature; X has 50 columns, so scatter(X, Y) would fail on mismatched sizes
22 plt.title('Relationship between Y and X[:, 1]')
23 plt.xlabel('X[:, 1]')
24 plt.ylabel('Y')
25 plt.show()
26
27 X = np.vstack([np.ones(N), X]).T #appending bias data points colummn to X
28
29 w_ml = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, Y)) #finding weights for maximum likelihood estimation
30 Y_ml = np.dot(X, w_ml)
31
32 plt.scatter(X[:,1], Y)
33 plt.plot(X[:,1],Y_ml, color='red')
34 plt.title('Graph of maximum likelihood method (Red line: predictions)')
35 plt.xlabel('X[:, 1]')
36 plt.ylabel('Y')
37 plt.show()
38
39 costs = []
40 w = np.random.randn(D)/np.sqrt(D)
41 L1_coeff = 5
42 learning_rate = 0.001
43 for t in range(500):
44 Yhat = X.dot(w)
45 delta = Yhat - Y
46 w = w - learning_rate*(X.T.dot(delta) + L1_coeff*np.sign(w))
47 meanSquareError = delta.dot(delta)/N
48 costs.append(meanSquareError)
49
50 w_map = w
51 Y_map = X.dot(w_map)
52
53 plt.scatter(X[:,1], Y)
54 plt.plot(X[:,1],Y_ml, color='red',label="maximum likelihood")
55 plt.plot(X[:,1],Y_map, color='green', label="map")
56 plt.title('Graph of MAP v/s ML method')
57 plt.legend()
58 plt.xlabel('X[:, 1]')
59 plt.ylabel('Y')
60 plt.show() | Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Mon May 28 16:22:16 2018
4
5 @author: j.dixit
6 """
7
8 import numpy as np
9 import matplotlib.pyplot as plt
10
11 N = 4
12 D = 2
13
14 X = np.array([
15 [0, 0],
16 [0, 1],
17 [1, 0],
18 [1, 1]
19 ])
20
21 T = np.array([0, 1, 1, 0])
22
23 ones = np.array([[1]*N]).T
24 #plt.scatter(X[:, 0], X[:, 1], c=T)
25 #plt.show()
26
27 xy = np.matrix(X[:, 0]*X[:, 1]).T
28 Xb = np.array(np.concatenate((ones, xy, X), axis = 1))
29
30 w = np.random.rand(D + 2)
31
32 z = Xb.dot(w)
33
34 def sigmoid(z):
35 return 1/(1 + np.exp(-z))
36
37 Y = sigmoid(z)
38
39 def cross_entropy(T, Y):
40 E = 0
41 for i in range(N):
42 if T[i] == 1:
43 E -= np.log(Y[i])
44 else:
45             E -= np.log(1 - Y[i])  # cross-entropy term for T[i]==0; the extra inner log here was a bug
46 return E
47
48 learning_rate = 0.0001
49 error = []
50
51 for i in range(5000):
52 e = cross_entropy(T, Y)
53 error.append(e)
54 if i % 100 == 0:
55 print(e)
56
57 w += learning_rate*(np.dot((T-Y).T, Xb) - 0.01*w)
58
59 Y = sigmoid(Xb.dot(w))
60
61 plt.plot(error)
62 plt.title("Cross-entropy")
63 print("Final w: ", w)
64 print("Final classification rate", 1-np.abs(T-np.round(Y)).sum()/N)
| 34 - warning: redefined-outer-name
39 - warning: redefined-outer-name
39 - warning: redefined-outer-name
41 - warning: redefined-outer-name
|
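The per-sample loop in cross_entropy above is the element-wise form of the vectorized expression used in the earlier ecommerce sample, minus the averaging. For reference (sketch):

    def cross_entropy_vec(T, Y):
        return -(T * np.log(Y) + (1 - T) * np.log(1 - Y)).sum()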
1 # -*- coding: utf-8 -*-
2 """
3 Created on Sun Jun 10 17:55:24 2018
4
5 @author: jyoti
6 """
7 from __future__ import division, print_function
8 from builtins import range
9
10 import numpy as np
11 import matplotlib.pyplot as plt
12
13 class LinearRegression(object):
14 def __init__(self):
15 pass
16
17 def fit(self, X, Y, eta=10, epochs=2000):
18 N, D = X.shape
19 self.w = np.random.randn(D)
20 #self.b = 0
21
22
23 for i in range(epochs):
24 Yhat = self.predict(X)
25 delta = Yhat - Y #the error between predicted output and actual output
26 self.w = self.w - eta*(X.T.dot(delta)) #performing gradient descent for w
27
28 print("Final weights are ", self.w)
29 #print("Final bias point is ", self.b)
30         print("Final cost is ", self.costs(X, Y))  # call costs(); a bare self.costs would print the bound method object
31
32
33
34 def predict(self, X):
35 Y_cap = X.dot(self.w)
36 return Y_cap
37
38 def costs(self, X, Y):
39 Yhat = self.predict(X)
40 cost = (Yhat-Y).dot(Yhat-Y)
41 return cost
42
43 def main():
44 X = []
45 Y = []
46
47 for line in open("data_2d.csv"):
48 x1, x2, y = line.split(",")
49 X.append([float(x1), float(x2)])
50 Y.append(float(y))
51 X = np.array(X)
52 Y = np.array(Y)
53
54
55 model = LinearRegression()
56 model.fit(X, Y)
57 #prediction = model.predict()
58
59
60
61
62 if __name__ == '__main__':
63 main()
64
65
66 | 13 - refactor: useless-object-inheritance
18 - warning: unused-variable
23 - warning: unused-variable
19 - warning: attribute-defined-outside-init
26 - warning: attribute-defined-outside-init
47 - refactor: consider-using-with
47 - warning: unspecified-encoding
11 - warning: unused-import
|
1
2 # -*- coding: utf-8 -*-
3 """
4 Created on Sat Jun 9 13:01:51 2018
5
6 @author: jyoti
7 """
8
9 import numpy as np
10 import matplotlib.pyplot as plt
11
12 from util import getData
13
14 labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Sad', 'Surprise', 'Neutral']
15
16 def main():
17 X, Y = getData(balance_ones = False)
18
19 while(True):
20 for i in range(7):
21 x, y = X[Y == i], Y[Y == i]
22 N = len(y)
23 j = np.random.choice(N)
24 plt.imshow(x[j].reshape(48, 48), cmap = 'gray')
25 plt.title(labels[y[j]])
26 plt.show()
27 prompt = input("Quit the program? Y/N\n")
28 if prompt == 'Y':
29 break
30
31 if __name__ == '__main__':
32 main()
33
34
35 | Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Sat May 26 19:13:44 2018
4
5 @author: jyoti
6 """
7
8 import numpy as np
9 import pandas as pd
10 import matplotlib.pyplot as plt
11 from sklearn.utils import shuffle
12
13
14 def get_data():
15 df = pd.read_csv("ecommerce_data.csv")
16     data = df.to_numpy()  # df.as_matrix() was removed in pandas 1.0
17 X = data[:, :-1]
18 Y = data[:, -1]
19 X = np.array(X)
20 Y = np.array(Y)
21 X[:, 1] = (X[:, 1]-X[:, 1].mean())/X[:, 1].std()
22 X[:, 2] = (X[:, 2]-X[:, 2].mean())/X[:, 2].std()
23 N, D = X.shape
24
25 X2 = np.zeros((N, D+3))
26 X2[:, 0: D-2] = X[:, 0: D-2]
27
28 for n in range(N):
29 t = int(X[n, D-1])
30 X2[n, t+(D-1)] = 1
31
32 Z = np.zeros((N, 4))
33 Z[np.arange(N), X[:, D-1].astype(np.int32)] = 1
34 #X2[:, -4:] = Z
35 assert(np.abs(X2[:, -4:]- Z).sum() < 10e-10)
36 return X2, Y
37
38 def get_binary_data():
39 X, Y = get_data()
40 X2 = X[Y <= 1]
41 Y2 = Y[Y <= 1]
42 return X2, Y2
43
44 X, Y = get_binary_data()
45 D = X.shape[1]
46 W = np.random.randn(D)
47 b = 0
48
49 def sigmoid(a):
50 return 1/(1 + np.exp(-a))
51
52 def forward(x, w, b):
53 return sigmoid(x.dot(w) + b)
54
55 P_Y_Given_X = forward(X, W, b)
56 predictions = np.round(P_Y_Given_X)
57
58 def classification_rate(Y, P):
59 return np.mean(Y == P)
60
61 print("Score: ", classification_rate(Y, predictions))
62
| 17 - warning: redefined-outer-name
18 - warning: redefined-outer-name
23 - warning: redefined-outer-name
39 - warning: redefined-outer-name
39 - warning: redefined-outer-name
52 - warning: redefined-outer-name
58 - warning: redefined-outer-name
10 - warning: unused-import
11 - warning: unused-import
|
1 def output_lable(n):
2 if n == 1:
3 return "Offensive "
4 elif n ==0:
5 return "Not Offensive "
6
7 def manual_testing(news):
8 testing_news = {"text":[news]}
9 new_def_test = pd.DataFrame(testing_news)
10 new_def_test["text"] = new_def_test["text"]
11 new_x_test = new_def_test["text"]
12 new_xv_test = tfidf_vect.transform(new_x_test)
13 pred_sgdc = model.predict(new_xv_test)
14 return pred_sgdc
15
16 words=news.split()  # note: lines 16-26 sit stranded at module level and 'news' is undefined here
17 words2 =[]
18 for x in words:
19 res=manual_testing(x)
20 if res == 1:
21 words2.append('****')
22 else:
23 words2.append(x)
24
25 s=' '.join(words2)
26 return s | 2 - warning: bad-indentation
3 - warning: bad-indentation
4 - warning: bad-indentation
5 - warning: bad-indentation
8 - warning: bad-indentation
9 - warning: bad-indentation
10 - warning: bad-indentation
11 - warning: bad-indentation
12 - warning: bad-indentation
13 - warning: bad-indentation
14 - warning: bad-indentation
19 - warning: bad-indentation
20 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
2 - refactor: no-else-return
1 - refactor: inconsistent-return-statements
9 - error: undefined-variable
12 - error: undefined-variable
13 - error: undefined-variable
16 - error: undefined-variable
26 - error: return-outside-function
|
1 from getpass import getpass
2 import os
3 import sqlite3
4 from werkzeug.security import generate_password_hash
5 from flask import g
6 import traceback
7 import logging
8
9 path = os.getcwd()
10 DATABASE = os.path.join(path, 'ansible.db')
11
12 def init_db():
13 with app.app_context():
14 db = sqlite3.connect(DATABASE)
15 with app.open_resource('schema.sql', mode='r') as f:
16 db.cursor().executescript(f.read())
17 db.commit()
18
19 def get_db(app):
20 with app.app_context():
21 if 'db' not in g:
22 g.db = sqlite3.connect(
23 DATABASE,
24 detect_types=sqlite3.PARSE_DECLTYPES
25 )
26 g.db.row_factory = sqlite3.Row
27
28 return g.db
29
30 def insert(table,columnTuple,valueTuple):
31 try:
32 dbConnection = sqlite3.connect(DATABASE)
33 columnTupleString = ', '.join(columnTuple)
34 dbConnection.execute(
35 'INSERT INTO ' + table + ' (' + columnTupleString + ') VALUES (?, ?)',
36 (valueTuple)
37 )
38 dbConnection.commit()
39 except Exception as e:
40 logging.error(traceback.format_exc())
41
42 def select_one(table, return_columns, query_column, value):
43 try:
44 dbConnection = sqlite3.connect(DATABASE)
45 result = (dbConnection.execute(
46 'SELECT ' + ', '.join(return_columns) + ' FROM ' + table + ' WHERE ' + query_column + '= (?) Limit 1',
47 (value,)
48 ).fetchone())
49 return result
50 except Exception as e:
51 logging.error(traceback.format_exc())
52         print("select_one query failed")
53
54 def exists(table,column,value):
55 try:
56 dbConnection = sqlite3.connect(DATABASE)
57 result = dbConnection.execute(
58 'SELECT CASE WHEN EXISTS( SELECT 1 FROM ' + table + ' WHERE ' + column + '= (?)) THEN 1 ELSE 0 END',
59 (value,)
60 ).fetchone()
61 if result[0] == 1:
62 return True
63 else:
64 return False
65 except Exception as e:
66 logging.error(traceback.format_exc())
67
68
69 def update(table, update_dict, query_column, query_value):
70 try:
71 dbConnection = sqlite3.connect(DATABASE)
72 result = (dbConnection.execute(
73 'UPDATE ' + table + ' SET ' + build_set_statement(update_dict) + ' WHERE ' + query_column + '= (?)',
74 (query_value,)
75 ).fetchone())
76 dbConnection.commit()
77 return result
78 except Exception as e:
79 logging.error(traceback.format_exc())
80
81 def build_set_statement(updated_field_dict):
82 setItems = []
83 for field in updated_field_dict:
84 setItems.append(field + ' = \'' + updated_field_dict[field] + '\'')
85 setFields = ', '.join(setItems)
86 return setFields
87
| 13 - error: undefined-variable
15 - error: undefined-variable
39 - warning: broad-exception-caught
39 - warning: unused-variable
50 - warning: broad-exception-caught
42 - refactor: inconsistent-return-statements
50 - warning: unused-variable
65 - warning: broad-exception-caught
61 - refactor: simplifiable-if-statement
61 - refactor: no-else-return
54 - refactor: inconsistent-return-statements
65 - warning: unused-variable
78 - warning: broad-exception-caught
69 - refactor: inconsistent-return-statements
78 - warning: unused-variable
1 - warning: unused-import
4 - warning: unused-import
|
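insert() above hard-codes two '?' placeholders, so it only works when columnTuple has exactly two entries. A sketch that derives the placeholders from the value tuple instead (illustrative; the values stay bound as parameters, though table and column names are still interpolated and must be trusted):

    placeholders = ', '.join(['?'] * len(valueTuple))
    sql = ('INSERT INTO ' + table + ' (' + ', '.join(columnTuple) +
           ') VALUES (' + placeholders + ')')
    dbConnection.execute(sql, valueTuple)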
1 from cryptography.fernet import Fernet
2 import datetime
3 from flask import (flash, Flask, g, Markup, redirect, render_template, request,
4 send_from_directory, session, url_for)
5 import functools
6 import logging
7 import os
8 from secrets import token_urlsafe
9 import sqlite3
10 import sys
11 from werkzeug.utils import secure_filename
12 from werkzeug.security import check_password_hash, generate_password_hash
13 from build_dir import build_dir
14 import sanitize_path
15 from db import get_db
16 from user import create_user, user_exists, gen_default_user, get_user, update_user
17 import html
18
19
20 logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
21
22 app = Flask(__name__)
23 app.config["SECRET_KEY"] = os.urandom(256) # TODO: change to an environment variable
24 app.config["CRYPTO_KEY"] = Fernet.generate_key() # TODO: store this somewhere it won't change often (possibly an environment variable); analyze the impact of rotating it
25
26 path = os.getcwd()
27 database = os.path.join(path, 'ansible.db')
28
29 db = get_db(app)
30
31 def login_required(view):
32 @functools.wraps(view)
33 def wrapped_view(**kwargs):
34 if 'authenticated' not in session:
35 return redirect(url_for('login'))
36 return view(**kwargs)
37 return wrapped_view
38
39 @app.route('/', defaults={'loc': ""}, methods=('GET',))
40 @app.route('/<path:loc>', methods=('GET',))
41 @login_required
42 def ansible(loc):
43 logging.debug('made it here')
44 sanitize_path.sanitize(loc)
45
46 # TODO: if loc is empty return the home directory for the node
47 # possible security concern - could ask for a higher level node
48 # TODO: for future addition of link sending - store encrypted version
49 # of top level directory in session can possibly use a werkzeug module
50 # TODO: check if input is an encrypted link (use a /share/ or something to indicate)
51 # TODO: process encrypted link
52 # TODO: process a normal link
53 # TODO: get the the home directory
54
55 # TODO: authenticate the requested directory
56
57 logging.debug(loc)
58
59     currentDir = os.path.join('cloud-drive', loc) #update to be malleable for sharing
60
61 currentPath = os.path.join(path, currentDir)
62
63 logging.debug(os.path.splitext(currentPath)[1])
64 logging.debug(currentDir)
65 logging.debug(path)
66 logging.debug(currentPath)
67 logging.debug(loc)
68
69 fileExtension = os.path.splitext(currentPath)[1]
70 if fileExtension:
71 splitUrl = currentPath.rsplit('/', 1)
72 localDir = splitUrl[0]
73 filename = splitUrl[1]
74 absPath = os.path.join(path, 'cloud-drive', localDir)
75 return send_from_directory(directory=absPath, filename=filename)
76
77 directoryDict = build_dir(currentPath)
78
79 return render_template('index-alt.html', directory=directoryDict, curDir=loc)
80
81 @app.route("/login", methods=('GET', 'POST'))
82 def login():
83 if request.method == 'POST':
84 username = request.form['username']
85 password = request.form['password']
86 error = None
87
88 user = get_user(username)
89 if user is not None:
90 user_password = user[1]
91 if not check_password_hash(user_password, password):
92 error = 'Incorrect password, please try again.'
93 else:
94 error = 'User not found'
95
96 if error is None:
97 session.clear()
98 session['authenticated'] = 'true'
99 session['user_id'] = token_urlsafe()
100 return redirect(url_for('ansible'))
101
102 flash(error)
103
104 return render_template('login.html')
105
106 @app.route("/signup", methods=('GET','POST'))
107 def signup():
108 if request.method == 'POST':
109 username = request.form['name']
110 password = request.form['password']
111 error = None
112
113 if not user_exists(username):
114 create_user(username,password)
115 else:
116 error = 'Username already exists.'
117
118 if error is None:
119 return redirect(url_for('login'))
120
121 flash(error)
122
123
124 return render_template('signup.html')
125
126 @app.route("/updatepassword", methods=('GET','POST'))
127 def update_password():
128 if request.method == 'POST':
129
130 username = request.form['username']
131 prev_password = request.form['password']
132 new_password = request.form['new_password']
133 verified_new_password = request.form['verify_new_password']
134
135 error = None
136 if(new_password == verified_new_password):
137 if user_exists(username):
138 update_user(username,prev_password,new_password)
139 else:
140                 error = "User doesn't exist."
141 else:
142 error = 'Passwords do not match'
143
144 if error is None:
145 return redirect(url_for('login'))
146
147 flash(error)
148
149
150 return render_template('update-password.html')
151
152 @app.route("/logout", methods=('GET',))
153 def logout():
154 del session['authenticated']
155 return redirect(url_for('login'))
156
157
158
| 23 - warning: fixme
24 - warning: fixme
46 - warning: fixme
48 - warning: fixme
50 - warning: fixme
51 - warning: fixme
52 - warning: fixme
53 - warning: fixme
55 - warning: fixme
2 - warning: unused-import
3 - warning: unused-import
3 - warning: unused-import
9 - warning: unused-import
11 - warning: unused-import
12 - warning: unused-import
16 - warning: unused-import
17 - warning: unused-import
|
1 from bs4 import BeautifulSoup
2 import getpass
3 import requests
4 import os
5
6 def pTest(attack_string, attack_url, password):
7 payload = {'password': password}
8 with requests.Session() as s:
9 p = s.post(attack_url + 'login', data=payload)
10 r = requests.Request('GET', attack_url)
11 prepared = s.prepare_request(r)
12 prepared.url += attack_string
13 response = s.send(prepared)
14 print('Sending request with url:', prepared.url)
15 #print('Request successful:', response.ok)
16
17 if response.ok:
18 soup = BeautifulSoup(response.text, 'html.parser')
19 safeResponse = s.get(attack_url)
20 soup2 = BeautifulSoup(safeResponse.text, 'html.parser')
21
22 if (response.text == safeResponse.text):
23 print("Attack Failed - Attack Led to Top Directory")
24 else:
25                 print("Attack may have succeeded")
26 print("Attack response tags:")
27 for link in soup.find_all('a'):
28 print(link.get('href'))
29 print('')
30 print('Safe Output')
31 print('')
32 for link in soup2.find_all('a'):
33 print(link.get('href'))
34 else:
35 print('Attack Failed - No Such Directory')
36
37
38
39 def pWrap(attack_string):
40 pTest(attack_string=attack_string, attack_url=ATTACK_URL, password=PASSWORD)
41
42 PASSWORD = os.getenv('PWRD')
43 ATTACK_URL ='http://127.0.0.1:5050/'
44 ATTACK_STRINGS = [
45 '../../../..',
46 'test/../.././.../',
47 '..',
48 'level1/../..',
49 'level1/../../',
50 'pwd'
51 ]
52
53 if __name__ == '__main__':
54 if not PASSWORD:
55 PASSWORD = print('First set environment variable PWRD. (export PWRD=YOUR_PASSWORD)')
56 else:
57 for attack in ATTACK_STRINGS:
58 pWrap(attack)
| 9 - warning: unused-variable
2 - warning: unused-import
|
1 from db import insert, exists, select_one, update
2 from werkzeug.security import check_password_hash, generate_password_hash
3 import logging
4 import traceback
5
6 def create_user(username,password):
7 try:
8 formattedUsername = format_username(username)
9 hashedPassword = generate_password_hash(password)
10 insert( 'user', ('username', 'password'), (formattedUsername, hashedPassword))
11 except Exception as e:
12 logging.error(traceback.format_exc())
13
14 def user_exists(username):
15 try:
16 formattedUsername = format_username(username)
17 return exists('user','username',formattedUsername)
18 except Exception as e:
19 logging.error(traceback.format_exc())
20 print("User existence check failed")
21
22 def get_user(username):
23 try:
24 formattedUsername = format_username(username)
25 return select_one('user',('username','password'), 'username',formattedUsername)
26 except Exception as e:
27 logging.error(traceback.format_exc())
28 print("Failed to get user")
29
30
31 def update_user(username,password,new_password):
32 try:
33 formattedUsername = format_username(username)
34 user = get_user(formattedUsername)
35 user_password = user[1]
36 if(user is not None):
37 if(check_password_hash(user_password,password)):
38 newHashedPassword = generate_password_hash(new_password)
39 update('user',{'password':newHashedPassword},'username',formattedUsername)
40 except:
41 logging.error(traceback.format_exc())
42
43
44 def gen_default_user():
45
46 while(True):
47 password = getpass(prompt='Create a password, at least 8 characters: ')
48 password2 = getpass(prompt='Confirm password: ')
49 if password == password2:
50 if len(password) < 8:
51 print('Password must be at least 8 characters.')
52 else:
53 break
54 else:
55 print('Passwords do not match')
56 try:
57 create_user('default',password)
58 except:
59 logging.error(traceback.format_exc())
60
61 def format_username(username):
62 return username.lower() | 37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
11 - warning: broad-exception-caught
11 - warning: unused-variable
18 - warning: broad-exception-caught
14 - refactor: inconsistent-return-statements
18 - warning: unused-variable
26 - warning: broad-exception-caught
22 - refactor: inconsistent-return-statements
26 - warning: unused-variable
40 - warning: bare-except
47 - error: undefined-variable
48 - error: undefined-variable
58 - warning: bare-except
|
1 # build_dir.py
2 import os
3
4
5 def build_dir(curPath):
6 directoryDict = {}
7 with os.scandir(curPath) as directory:
8 for entry in directory:
9             #don't include shortcuts and hidden files
10 if not entry.name.startswith('.'):
11 #stat dict reference:
12 #https://docs.python.org/2/library/stat.html
13 fileStats = entry.stat()
14 directoryDict[entry.name] = {"is_dir" : entry.is_dir(),
15 "size" : fileStats.st_size}
16 return directoryDict | Clean Code: No Issues Detected
|
1 import re
2
3 def sanitize(path):
4 # escape nasty double-dots
5 path = re.sub(r'\.\.', '', path)
6 # then remove any duplicate slashes
7 path = re.sub(r'(/)\1+', r'\1', path)
8 # then remove any leading slashes and dots
9 while(path and (path[0] == '/' or path[0] == '.')):
10 path = path[1:]
11 return path | Clean Code: No Issues Detected
|
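A few concrete inputs make sanitize()'s three passes easier to follow; the first two are attack strings exercised by the pentest script earlier in this collection:

    print(sanitize('../../../..'))   # '..' removal leaves only slashes -> ''
    print(sanitize('../../secret'))  # -> 'secret'
    print(sanitize('a//b'))          # duplicate slashes collapse -> 'a/b'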
1 import qi2
2
3 def fbn(option, array, mod, k, j):
4 if option == 0:
5 result = (array[j-1] + array[k-1]) % mod
6 elif option == 1:
7 result = (array[j-1] - array[k-1]) % mod
8 elif option == 2:
9 result = (array[j-1] * array[k-1]) % mod
10 else:
11 result = (array[j-1] ^ array[k-1]) % mod
12
13 return result
14
15 seed = '123456789'
16 #j = int(input("J:"))
17 j = 1
18 #k = int(input("K:"))
19 k = 8
20 #mod = int(input("MOD:"))
21 mod = 1000
22 n = int(input("Number of iterations: "))
23 categories = int(input("Number of categories: "))
24 results = []
25
26 array = []
27 for i in range(len(seed)):
28     array.append(int(seed[i]))  # one lag slot per seed digit (int(seed) would repeat the whole seed)
29
30 print("0: '+' \n1: '-' \n2: '*' \n3: '^'")
31 option = int(input("Choose the operation: "))
32 for i in range(n):
33 result = fbn(option, array, mod, k, j)
34     print("Result: ", result)
35 array.remove(array[0])
36 array.append(result)
37 results.append(result)
38
39 x2 = qi2.qi2Test(categories, n, results)
40
41
42 print("================= RESULTS =================")
43 print("X^2: ", x2)
44 print("Degrees of freedom (DF) =", categories - 1)
45 print("Probability = 0.05") | 3 - warning: redefined-outer-name
3 - warning: redefined-outer-name
3 - warning: redefined-outer-name
3 - warning: redefined-outer-name
3 - warning: redefined-outer-name
5 - warning: redefined-outer-name
|
1 import matplotlib.pyplot as plt
2 import time
3 import qi2
4
5 # XOR of the left cell with (center OR right)
6 def rule(array):
7 return array[0] ^ (array[1] or array[2])
8
9
10 # first row of the mosaic
11 def init(largura):
12     array = [0] * largura # the mosaic's first row starts as all zeros
13     # if the width is odd, put a 1 in the middle
14     if largura % 2:
15         array[largura // 2] = 1
16     else: # if even, insert a 1 at the (inexact) midpoint
17         array.insert(largura//2, 1)
18
19 return array
20
21 def rule30(linhaAntiga):
22 largura = len(linhaAntiga)
23     linhaAntiga = [0] + linhaAntiga + [0] # pad the row with a zero on each side
24 novaLinha = []
25
26 for i in range(largura):
27         novaLinha.append( rule(linhaAntiga[i:i+3]) ) # append one cell (1 or 0)
28
29 return novaLinha
30
31 # takes the width and how many bits of the center column to keep
32 def applyRule(largura, bits):
33 matriz = [init(largura)]
34
35 colunaCentro = []
36 colunaCentro.append(matriz[0][largura // 2])
37
38 while not matriz[-1][0]:
39         matriz.append(rule30(matriz[-1])) # apply the rule to the last row
40         colunaCentro.append(matriz[-1][largura // 2]) # record the center cell of the new row
41
42 return [matriz, colunaCentro[-bits:]]
43
44 def listToString(s):
45 # initialize an empty string
46 str1 = ""
47 # traverse in the string
48 for ele in s:
49 str1 += str(ele)
50 # return string
51 return str1
52
53 if __name__ == "__main__":
54 seed = int(str(time.time_ns())[14:17])
55 bits = 8
56
57 #start = time.time()
58     n = int(input("Number of iterations (n): "))
59     k = int(input("Number of categories (k): "))
60 results = []
61 for i in range(n):
62 time.sleep(1)
63 result = applyRule((seed+bits)*2, bits)
64 rng = listToString(result[1])
65 rng = int(listToString(rng), 2)
66 print(rng)
67 results.append(rng)
68
69 #end = time.time()
70 '''
71 x2 = qi2.qi2Test(k, n, results)
72
73     print("================= RESULTS =================")
74     #print("Simulation time: ", end - start)
75     print("X²: ", x2)
76     print("Degrees of freedom (DF):", k - 1)
77     print("Significance: 0.05")
78 ''' | 26 - warning: redefined-outer-name
32 - warning: redefined-outer-name
70 - warning: pointless-string-statement
1 - warning: unused-import
3 - warning: unused-import
|
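The update in rule() above, left XOR (center OR right), is Wolfram's Rule 30, whose center column is the classic cellular-automaton source of pseudo-random bits. A quick truth-table check:

    for left in (0, 1):
        for center in (0, 1):
            for right in (0, 1):
                print((left, center, right), '->', rule([left, center, right]))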
1 import time
2 import numpy as np
3 import math
4 import matplotlib.pyplot as plt
5 from matplotlib.colors import NoNorm
6
7 import qi2
8
9 def squares(ctr, key):
10 y = x = ctr * key
11 z = y + key
12 two5 = np.uint64(32)
13 x = x * x + y; x = (x >> two5) | (x << two5)
14 x = x * x + z; x = (x >> two5) | (x << two5)
15 x = x * x + y; x = (x >> two5) | (x << two5)
16
17 return (x*x + z) >> two5
18
19
20 def draw(i):
21 nx = int(math.sqrt(i))
22     #print("image size", nx)
23     imagem = np.zeros((nx,nx), dtype=np.uint8)
24     #print("size: ", i)
25 p = 0
26 ny = nx
27 for i in range(nx):
28 for j in range(ny):
29
30 imagem[i,j] = pixelvet[p]
31 #print(i, j, pixelvet[p])
32 p += 1
33
34
35 return imagem
36
37 if __name__ == "__main__":
38     np.seterr(all='ignore') # ignore overflow, divide-by-zero, underflow errors, etc.
39 key = np.uint64(0xf6235eca95b2c1e7)
40 #sum = np.uint64(0)
41 #pixelvet = []
42 #vetVal = []
43
44     n = np.uint64(input("Number of iterations (n): "))
45     k = int(input("Number of categories (k): "))
46     gl = k - 1; print("Degrees of freedom (DF): ", gl)
47     #p = float(input("Probability of success: "))
48
49 results = []
50
51 #start = time.time()
52 for i in range(n):
53 result = squares(np.uint64(i), key)
54         result = result / (2**32) # normalize the 32-bit result to [0, 1)
55 #print("[", i, "]:", result)
56 results.append(result)
57 #pixelvet.append(result)
58 #vetVal.append(result)
59
60 x2, intervals = qi2.qi2Test(k, n, results)
61
62 #end = time.time()
63     print("================= RESULTS =================")
64     #print("Mean: ", hex(sum//n))
65     #print("Simulation time: ", end - start)
66
67 #pIndex = qi2.getProbabilityIndex(p)
68 #x2Max = qi2.table[gl-1][pIndex]
69 #print("x2Max: ", x2Max)
70 print("x2:" , x2)
71
72 qi2.histGraph(results, intervals)
73 '''
74     plt.figure("Graphs",figsize=(15,12))
75 plt.subplot(211)
76 imagem = draw(n)
77 plt.imshow(imagem, aspect="auto", cmap='gray', vmin=0, vmax=255,norm=NoNorm())
78 plt.axis("off")
79 plt.subplot(212)
80 plt.plot(vetVal, 'ro')
81 plt.grid(1)
82 plt.show()
83 ''' | 9 - warning: redefined-outer-name
20 - warning: redefined-outer-name
27 - refactor: redefined-argument-from-local
30 - error: undefined-variable
73 - warning: pointless-string-statement
1 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
|
1 import numpy as np
2 from random import randrange
3
4
5 def draw(value, probability):
6 return int(np.random.choice(value, 1, replace=False, p=probability))
7
8 if __name__ == "__main__":
9     # Create the value arrays and their probabilities
10 bearingLifeExpect = np.arange(1000, 2000, 100)
11 probabilityLifeExpect = np.array([0.1, 0.13, 0.25, 0.13, 0.09, 0.12, 0.02, 0.06, 0.05, 0.05])
12 waitingTimeArray = np.arange(5, 20, 5)
13 probabilityWaitingTime = [0.6, 0.3, 0.1]
14
15     simluationTime = 10000 # 10,000 h
16     bearing = [0,0,0] # the three bearings
17     changingTime = [20, 30, 40] # replacement time: 1 bearing: 20, 2: 30, 3: 40
18
19     # Draw a lifetime for each bearing
20 for i in range(len(bearing)):
21 bearing[i] = draw(bearingLifeExpect, probabilityLifeExpect)
22
23     t = 0 # simulation-time counter
24     brokenBearings = 0 # number of broken bearings
25     totalCost = 0 # total cost of the simulation
26
27 commingEvent = []
28 exitEvent = []
29
30
31     print("--------------------------------\nSet how many bearings to replace: ")
32     print("[1]: Replace ONE bearing when a bearing breaks.")
33     print("[2]: Replace THREE bearings when a bearing breaks.")
34     option = int(input("> "))
35     print("--------------------------------")
36
37 if option == 1:
38         print("Simulation 1: replace ONE bearing at a time\n")
39         print("--------------------------------")
40 while t <= simluationTime:
41
42 for i in range(len(bearing)):
43                 if bearing[i] == t: # when a bearing reaches its expected lifetime
44                     newTime = draw(bearingLifeExpect, probabilityLifeExpect) # draw a new lifetime for the bearing
45                     print("---------------")
46                     print("Bearing[", i, "]")
47                     print("Broke at: ", t, "h\tLife expectancy: ", bearing[i], "h")
48                     print("New life expectancy: ", newTime, "h")
49                     bearing[i] += newTime # add the new lifetime onto the old one
50                     brokenBearings += 1 # increment the broken-bearing count
51
52             if brokenBearings > 0: # if any bearing broke this step
53                 waitingTime = draw(waitingTimeArray, probabilityWaitingTime) # draw the waiting time for parts
54                 spentTime = changingTime[brokenBearings-1] # time spent repairing the bearings
55                 cost = 5 * (waitingTime + spentTime) + spentTime + brokenBearings * 20 # compute the repair cost
56 totalCost += cost
57
58
59                 print("Repair time: ", spentTime,"\tWait time: ", waitingTime)
60                 print("Repair cost: ", cost, "R$\tTotal cost: ", totalCost, "R$")
61
62 brokenBearings = 0
63
64 t += 100
65
66 elif option == 2:
67         print("Simulation 2: replace THREE bearings at a time\n")
68 print("--------------------------------")
69 while t <= simluationTime:
70
71 for i in range(len(bearing)):
72 if bearing[i] == t:
73 newTime1 = draw(bearingLifeExpect, probabilityLifeExpect)
74 newTime2 = draw(bearingLifeExpect, probabilityLifeExpect)
75 newTime3 = draw(bearingLifeExpect, probabilityLifeExpect)
76 print("---------------")
77                     print("Bearing[1]:")
78                     print("Broke at: ", t, "h\tLife expectancy: ", bearing[0], "h")
79                     print("New life expectancy: ", newTime1, "h")
80                     print("---------------")
81                     print("Bearing[2]:")
82                     print("Broke at: ", t, "h\tLife expectancy: ", bearing[1], "h")
83                     print("New life expectancy: ", newTime2, "h")
84                     print("---------------")
85                     print("Bearing[3]:")
86                     print("Broke at: ", t, "h\tLife expectancy: ", bearing[2], "h")
87                     print("New life expectancy: ", newTime3, "h")
88                     print("---------------")
89 bearing[0] += newTime1
90 bearing[1] += newTime2
91 bearing[2] += newTime3
92
93 waitingTime = draw(waitingTimeArray, probabilityWaitingTime)
94 spentTime = changingTime[2]
95 cost = 5 * (waitingTime +spentTime) + spentTime + 3 * 20
96 totalCost += cost
97
98             print("Repair time: ", spentTime,"\tWait time: ", waitingTime)
99             print("Repair cost: ", cost, "R$\tTotal cost: ", totalCost, "R$")
100
101 t += 100 | 2 - warning: unused-import
|
1 import numpy as np
2 from random import randrange
3
4 # generate the number of customers from the drawn probability value
5 def numberCustomers(value):
6 if value > 65:
7 return 8
8 elif value > 35 and value < 65:
9 return 10
10 elif value > 10 and value < 35:
11 return 12
12 else:
13 return 14
14
15 # generate the number of dozens per customer from the drawn probability value
16 def numberBagelsPerCustomer(value):
17 if value > 60:
18 return 1
19 elif value > 30 and value < 60:
20 return 2
21 elif value > 10 and value < 30:
22 return 3
23 else:
24 return 4
25
26 if __name__ == "__main__":
27     days = 15 # number of iterations
28     bagelCost = 3.8 # production cost per dozen bagels
29     bagelPrice = 5.4 # sale price per dozen bagels
30 bagelsAverage = 0
31 for day in range(days):
32         print("\nDay ", day)
33         # Customers
34         value = randrange(100)
35         customers = numberCustomers(value)
36         print("No. of customers: ", customers)
37         # Bagels per customer
38         value = randrange(100)
39         bagelsPerCustomer = numberBagelsPerCustomer(value)
40         print("Bagels/customer: ", bagelsPerCustomer)
41         # Bagels to bake
42         bagelsToCook = customers * bagelsPerCustomer
43         print("Bagels to bake: ", bagelsToCook)
44
45 bagelsAverage += bagelsToCook
46
47     print("\n\nAverage bagels: ", bagelsAverage/days) | 5 - warning: redefined-outer-name
6 - refactor: no-else-return
8 - refactor: chained-comparison
10 - refactor: chained-comparison
16 - warning: redefined-outer-name
17 - refactor: no-else-return
19 - refactor: chained-comparison
21 - refactor: chained-comparison
1 - warning: unused-import
|
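The threshold chains in numberCustomers and numberBagelsPerCustomer above use strict comparisons, so randrange(100) values that hit a boundary exactly (35 or 65, for instance) fall through to the last branch. Counting the integers 0-99 per branch of numberCustomers gives an equivalent single draw (sketch):

    import numpy as np
    # >65: 34 values; 36-64: 29; 11-34: 24; remainder incl. 35 and 65: 13
    customers = int(np.random.choice([8, 10, 12, 14], p=[0.34, 0.29, 0.24, 0.13]))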
1 import numpy as np
2 import time
3 import qi2
4
5 def wichmann(x, y, z):
6 x = 171 * (x % 177) - 2 * (x / 177)
7 y = 172 * (y % 177) - 35 * (y / 176)
8 z = 170 * (z % 178) - 63 * (z / 178)
9
10 if x < 0:
11 x = x + 30269
12 elif y < 0:
13 y = y + 30307
14 elif z < 0:
15         z + z + 30323  # bug: presumably meant z = z + 30323 (hence the pointless-statement warning below)
16
17 result = x/30269 + y/30307 + z/30323
18 result = result - int(result)
19
20 return result
21
22
23 if __name__ == "__main__":
24 np.seterr(all='ignore')
25 x = 1234
26 y = x + 1
27 z = y + 1
28 #iteracoes = 1000
29
30     n = np.uint64(input("Number of iterations (n): "))
31     k = int(input("Number of categories (k): "))
32     gl = k - 1; print("Degrees of freedom (DF): ", gl)
33     p = float(input("Probability of success: "))
34 results = []
35 #start = time.time()
36 for i in range(n):
37 w = wichmann(x, y, z)
38 y += 1
39 z += 2
40         print("w(", i, ") = ", w)  # show the draw itself; y here is just the incremented seed
41 results.append(w)
42
43 #end = time.time()
44 x2, intervals = qi2.qi2Test(k, n, results)
45
46     print("================= RESULTS =================")
47     #print("Simulation time: ", end - start)
48 pIndex = qi2.getProbabilityIndex(p)
49 x2Max = qi2.table[gl-1][pIndex]
50 print("x2Max: ", x2Max)
51 print("x2:" , x2) | 5 - warning: redefined-outer-name
5 - warning: redefined-outer-name
5 - warning: redefined-outer-name
15 - warning: pointless-statement
2 - warning: unused-import
|
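The update in wichmann() above departs from the textbook Wichmann-Hill generator, which works in pure modular integer arithmetic and reduces x, y and z independently rather than via elif. For reference, the standard 1982 form (sketch):

    def wichmann_hill(x, y, z):
        x = (171 * x) % 30269
        y = (172 * y) % 30307
        z = (170 * z) % 30323
        return x, y, z, (x / 30269 + y / 30307 + z / 30323) % 1.0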
1 import numpy as np
2 from random import randrange, uniform
3
4 class Material():
5 Type = 0
6 Time = 0
7 Weight = 0
8 TimeStamp = 0
9
10 def __init__(self, Type):
11 self.Type = Type
12
13 def materialValues(self):
14 if self.Type == 0: # Material A
15 self.Weight = 200 # 200kg
16             self.Time = int(uniform(3,8)) # 5 +- 2 (uniform)
17 elif self.Type == 1: # Material B
18 self.Weight = 100 # 100kg
19             self.Time = 6 # 6 (constant)
20 else: # Material C
21 self.Weight = 50 # 50kg
22 if randrange(100) <= 33:
23 self.Time = 2 # P(2) = 0.33
24 else:
25 self.Time = 3 # P(3) = 0.67
26
27
28 if __name__ == "__main__":
29     simulationTime = 60 # simulation time (min)
30     totalWeight = 0 # current load in the lift
31     i = 0 # minute counter
32     averageTimeA = [] # to compute the average time for material A
33     averageTimeB = [] # to compute the average time for material B
34     movedMaterialC = 0 # count of material C moved
35     materialsLift = [] # materials inside the lift
36     materialsQueue = [] # materials queued for the lift
37
38 while i < simulationTime:
39         print("\nTime: ", int(i),"min")
40         mat = Material(randrange(3)) # create a material (0~2)=(A~C)
41         mat.materialValues() # set its time and weight
42         mat.TimeStamp = i # record the material's arrival time
43         materialsQueue.append(mat) # add the material to the queue
44
45 print("MAT[",mat.Type,"]")
46 for m in materialsQueue: # scan the material queue
47 if m.Weight + totalWeight <= 400: # check whether it fits in the lift
48 if m.Type == 1:
49 averageTimeB.append(i - m.TimeStamp) # track Material B wait time
50 materialsQueue.remove(m)
51 materialsLift.append(m)
52 totalWeight += m.Weight
53 i = i + m.Time
54 if m.Type == 0: # track Material A
55 m.TimeStamp = i
56 elif m.Type == 2: # track Material C
57 movedMaterialC += 1
58
59 print("-----------------------------------")
60 waiting = []
61
62 queue = []
63 for m in materialsQueue:
64 queue.append(m.Type)
65
66 print("Queue:", queue)
67 lift = []
68 for m in materialsLift:
69 lift.append(m.Type)
70 print("Lift:", lift)
71
72 print("Lift weight:", totalWeight,"kg")
73 print("Time:", i,"min")
74 print("-----------------------------------")
75
76 if totalWeight == 400: # reached the maximum weight
77 i = i + 4 # time to go up, unload, and come back down
78 totalWeight = 0
79
80 for m in materialsLift:
81 if m.Type == 0:
82 averageTimeA.append((i - 1) - m.TimeStamp) # track total transit time of Material A
83
84 materialsLift.clear() # remove every item from the lift
85
86 i += 1
87
88 print("\nAverage transit time for Material A: ", sum(averageTimeA)/len(averageTimeA), "min")
89 print("Average wait time for Material B: ", sum(averageTimeB)/len(averageTimeB), "min")
90 print("Number of Material C boxes: ", movedMaterialC) | 4 - refactor: too-few-public-methods
50 - warning: modified-iterating-list
1 - warning: unused-import
|
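One fragility worth noting: the closing report divides by `len(averageTimeA)` and `len(averageTimeB)`, so a short run in which no Material A or B ever enters the lift raises ZeroDivisionError. A small guard (a sketch of mine, not part of the original file) keeps the report robust:

def safe_mean(values):
    # Arithmetic mean, or 0.0 for an empty sample.
    return sum(values) / len(values) if values else 0.0

print("Average transit time for Material A:", safe_mean([4, 7, 5]), "min")
print("Average transit time for Material A:", safe_mean([]), "min")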
1 import time
2 import numpy as np
3 import qi2
4
5 def xorShift(y):
6 y ^= np.uint32(y << 13)
7 y ^= np.uint32(y >> 17)
8 y ^= np.uint32(y << 15)
9
10 return y
11
12 if __name__ == "__main__":
13 np.seterr(all='ignore')
14 seed = 2463534242
15 y = np.uint32(seed)
16 #a, b, c = 13, 17, 15
17 #iterations = 1000
18
19 n = np.uint64(input("Number of iterations (n): "))
20 k = int(input("Number of categories (k): "))
21 gl = k - 1; print("Degrees of freedom (GL): ", gl)
22 p = float(input("Probability of success: "))
23 results = []
24 #start = time.time()
25 for i in range(n):
26 y = (xorShift(y))
27 aux = y / 4294967295 # normalize the result
28 #print("Valor: ", aux)
29 #print("y(", i, ") = ", aux)
30 results.append(aux)
31
32 #end = time.time()
33 x2, intervals = qi2.qi2Test(k, n, results)
34
35 print("================= RESULTS =================")
36 #print("Simulation time: ", end - start)
37 pIndex = qi2.getProbabilityIndex(p)
38 x2Max = qi2.table[gl-1][pIndex]
39 print("x2Max: ", x2Max)
40 print("x2:" , x2) | 5 - warning: redefined-outer-name
1 - warning: unused-import
|
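Here `np.uint32` keeps the state inside 32 bits; with plain Python integers the left shifts would grow without bound, so an explicit mask is needed instead. A sketch using masked Python ints, keeping the row's shift triple unchanged (4294967295 is 2**32 - 1, so the division maps the state into [0, 1]):

MASK32 = 0xFFFFFFFF  # 2**32 - 1

def xorshift32(y):
    # Same shifts as above, with masking standing in for uint32 wraparound.
    y ^= (y << 13) & MASK32
    y ^= y >> 17
    y ^= (y << 15) & MASK32
    return y

y = 2463534242
for _ in range(3):
    y = xorshift32(y)
    print(y / MASK32)  # normalized to [0, 1]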
1 import time
2
3 # John von Neumann's Generator
4 def JVN(x):
5 x = x ** 2
6 x = x / 100
7 x = x % 10000
8 return int(x)
9
10 # Linear Congruential Generator
11 def LCG(x):
12 return (a * x + c) % m
13
14 if __name__ == "__main__":
15 # seed = 322
16 simulationTime = 20
17 # x = int(input("Initial value [X0]: "))
18 x = 3
19 # m = int(input("Modulus [M], M>0: "))
20 m = 10
21 # a = int(input("Multiplier [A], M>A>0: "))
22 a = 2
23 # c = int(input("Increment [C], M>=C>=0: "))
24 c = 0
25 start = time.time()
26 print(start)
27 for i in range(simulationTime):
28 # seed = JVN(seed)
29 # print("Seed: ", seed)
30 x = LCG(x)
31 print('X[', i, ']: ', x)
32 end = time.time()
33
34 print("Tempo para o cálculo:", end - start) | 4 - warning: redefined-outer-name
11 - warning: redefined-outer-name
12 - error: possibly-used-before-assignment
12 - error: possibly-used-before-assignment
12 - error: possibly-used-before-assignment
|
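The possibly-used-before-assignment errors in the label point at LCG reading `a`, `c`, and `m` from module scope: if the function were imported elsewhere, those names would not exist. Passing the parameters explicitly removes the coupling; a minimal sketch:

def lcg(x, a, c, m):
    # Linear congruential step: x_{n+1} = (a*x_n + c) mod m
    return (a * x + c) % m

x = 3
for i in range(5):
    x = lcg(x, a=2, c=0, m=10)
    print('X[', i, ']: ', x)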
1 from django.urls import path
2 from login import views
3
4 app_name = 'login'
5
6 urlpatterns = [
7 path('', views.pagina_login, name='pagina_login'),
8 ] | Clean Code: No Issues Detected
|
1 from django import forms
2
3 from conteudo.models import Video, Categoria
4
5
6 class VideoForm(forms.ModelForm):
7 error_messages = {
8 'campo invalido' : "Campo inválido"
9 }
10
11 class Meta:
12 model = Video
13 fields = ('video_id','categoria', 'nome', 'url', 'capa', 'visualizacao', 'nota', 'sinopse')
14
15 video_id = forms.CharField(widget=forms.HiddenInput(), required=False)
16
17 categoria = forms.ModelChoiceField(
18 error_messages={'required': 'Campo obrigatório', },
19 queryset=Categoria.objects.all().order_by('id'),
20 empty_label='--- Selecionar a Categoria ---',
21 widget=forms.Select(attrs={'class': 'form-control form-control-sm'}),
22 required=True
23 )
24
25 nome = forms.CharField(
26 error_messages={'required': 'Campo obrigatório',},
27 widget=forms.TextInput(attrs={'class': 'form-control form-control-sm', 'maxlength': '120'}),
28 required=True
29 )
30
| 11 - refactor: too-few-public-methods
6 - refactor: too-few-public-methods
|
1 from django.shortcuts import render
2
3 def pagina_inicial(request):
4 return render(request, 'index.html') | Clean Code: No Issues Detected
|
1 from django.db import models
2
3 class Categoria(models.Model):
4 nome = models.CharField(max_length=255, db_index=True)
5 slug = models.SlugField(max_length=200)
6
7 class Meta:
8 ordering = ('nome',)
9 verbose_name = 'categoria'
10 verbose_name_plural = 'categorias'
11
12 def __str__(self):
13 return self.nome
14
15 def videosCategoria(self):
16 return Video.objects.all().filter(categoria_id=self.id).order_by('-id')[:4]
17
18 class Video(models.Model):
19 categoria = models.ForeignKey(Categoria, on_delete=models.DO_NOTHING)
20 nome = models.CharField(max_length=255)
21 url = models.FileField(upload_to='conteudo/videos/')
22 capa = models.FileField(upload_to='conteudo/images/')
23 visualizacao = models.DecimalField(max_digits=10, decimal_places=1, default=0)
24 nota = models.FloatField(max_length=20)
25 sinopse = models.CharField(max_length=500)
26
27 class Meta:
28 ordering = ('nome',)
29 verbose_name = 'video'
30 verbose_name_plural = 'videos'
31
32 def __str__(self):
33 return self.nome | 28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
7 - refactor: too-few-public-methods
27 - refactor: too-few-public-methods
18 - refactor: too-few-public-methods
|
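Since `Video.categoria` is a ForeignKey to Categoria, Django also exposes the reverse accessor (named `video_set` by default), so `videosCategoria` can run the same query without importing or naming Video at all. A drop-in sketch of the method as it might read inside Categoria:

def videosCategoria(self):
    # Equivalent query via the ForeignKey's default reverse accessor.
    return self.video_set.order_by('-id')[:4]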
1 from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
2 from django.shortcuts import render, redirect, get_object_or_404
3
4 from conteudo.models import Video, Categoria
5
6 def exibir_catalogo(request):
7 categorias = Categoria.objects.all()
8 return render(request, 'conteudo/catalogo_videos.html', {'categorias': categorias})
9
10 def cadastro_video(request):
11 return render(request, 'conteudo/cadastro_video.html')
12
13 def editar_video(request):
14 return render(request, 'conteudo/editar_video.html')
15
16 def lista_categoria(request, id=None):
17 categorias = Categoria.objects.all()
18 if id is not None:
19 videos_lista = Video.objects.all().filter(categoria_id=id)
20 else:
21 videos_lista = Video.objects.all()
22
23 paginator = Paginator(videos_lista, 3)
24 page = request.GET.get('page',1)
25
26 try:
27 videos = paginator.page(page)
28 except PageNotAnInteger:
29 videos = paginator.page(1)
30 except EmptyPage:
31 videos = paginator.page(paginator.num_pages)
32
33 return render(request, 'conteudo/lista_categoria.html', {'categorias': categorias, 'videos' : videos})
34
35 def exibir_video(request, id):
36 video = get_object_or_404(Video, id= id)
37 categorias = Categoria.objects.all()
38 return render(request, 'conteudo/player_video.html', {'video':video, 'categorias':categorias})
| 16 - warning: redefined-builtin
35 - warning: redefined-builtin
2 - warning: unused-import
|
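Since Django 2.0, `Paginator.get_page()` folds the two except branches above into one call: it returns page 1 for a non-integer and the last page for an out-of-range number. A small sketch of the same fallback behavior as a helper:

from django.core.paginator import Paginator

def paginate(object_list, page, per_page=3):
    # get_page() absorbs PageNotAnInteger and EmptyPage internally.
    return Paginator(object_list, per_page).get_page(page)

print(list(paginate(list(range(10)), 'abc')))  # falls back to page 1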
1 from django.urls import path
2 from conteudo import views
3
4 app_name = 'conteudo'
5
6 urlpatterns = [
7 path('', views.exibir_catalogo, name='catalogo'),
8 path('cadastro_video/', views.cadastro_video, name='cadastro_video'),
9 path('editar_video/<int:id>/', views.editar_video, name='editar_video'),
10 path('<int:id>/', views.exibir_video, name='exibir_video'),
11
12 path('categoria/', views.lista_categoria, name='listar_todas_categorias'),
13 path('categoria/<int:id>/', views.lista_categoria, name='lista_categoria'),
14 ] | Clean Code: No Issues Detected
|
1 from django.shortcuts import render
2
3 def pagina_login(request):
4 return render(request, 'login/pagina_login.html')
| Clean Code: No Issues Detected
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Tue Mar 31 19:57:28 2020
4
5 @author: uni tech
6 """
7
8
9 import pandas as pd
10 import numpy as np
11 from sklearn import preprocessing
12 from sklearn.metrics import r2_score
13 from sklearn.impute import SimpleImputer
14 from sklearn.model_selection import train_test_split
15 from sklearn.linear_model import LinearRegression
16 from sklearn.tree import DecisionTreeRegressor
17 from sklearn.ensemble import RandomForestRegressor
18 from sklearn.svm import SVR
19
20
21
22 # Initializing datasets
23 train=pd.read_csv('train.csv')
24 test=pd.read_csv('test_walmart.csv')
25 features=pd.read_csv('features.csv')
26 stores=pd.read_csv('stores.csv')
27
28 # Merging the train and features datasets
29 df= pd.merge(features, train, on=['Store', 'Date', 'IsHoliday'], how='inner')
30
31 # One Hot Encoding categorical data
32 one_hot=pd.get_dummies(stores['Type'])
33 stores=stores.drop('Type', axis=1)
34 stores = stores.join(one_hot)
35
36
37
38
39 df = pd.merge(df, stores, on=['Store'], how='inner')
40
41 # Separating date, month, and year from Date
42 df['Date']=pd.to_datetime(df['Date'])
43 df['year']=df['Date'].dt.year
44 df['month']=df['Date'].dt.month
45 del df['Date']
46
47
48 holiday= pd.get_dummies(df['IsHoliday'])
49 df= df.drop('IsHoliday', axis=1)
50 df= df.join(holiday)
51
52
53 # Fixing null values in markdown with the help of imputer class
54 se= SimpleImputer()
55 markdown= pd.DataFrame(se.fit_transform(df[['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5']]),columns=['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5'])
56 df= df.drop(['MarkDown1','MarkDown2', 'MarkDown3', 'MarkDown4', 'MarkDown5'], axis=1)
57
58 df = pd.concat([df,markdown], axis=1)
59
60
61 X = np.array(df.drop(columns='Weekly_Sales'))
62 y= np.array(df['Weekly_Sales']).reshape(-1,1)
63
64
65 # Normalizing inputs and outputs
66 scalar= preprocessing.MinMaxScaler()
67 X= scalar.fit_transform(X)
68 y= scalar.fit_transform(y)
69
70
71 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
72
73
74 # Defining functions for regression
75 def linear_reg():
76 clf= LinearRegression()
77 return clf
78
79
80 def svm_reg():
81 clf= SVR(kernel='rbf', degree=3, gamma='scale')
82 return clf
83
84 def decision_tree():
85 clf=DecisionTreeRegressor(criterion='mse',splitter='best')
86 return clf
87
88 def random_forest():
89 clf= RandomForestRegressor(n_estimators=5, criterion='mse')
90 return clf
91
92 lr_ = linear_reg()
93 svm_ = svm_reg()
94 dt_ = decision_tree()
95 rf_ = random_forest()
96
97 models = [lr_ , dt_, svm_ , rf_]
98 for model in models:
99 y_train = y_train.ravel()
100 model.fit(X_train, y_train)
101 y_pred = model.predict(X_test)
102 score = r2_score(y_test, y_pred)
103 print(score)
104
105
106
107
108
109
| Clean Code: No Issues Detected
|
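One caveat worth flagging in the script above: the MinMaxScaler is fit on the full dataset before train_test_split, so the test rows influence the scaling (mild data leakage), and the single scaler object is refit on y, overwriting the parameters learned for X. A hedged sketch of the conventional order, with random stand-in data and separate scalers fit on the training split only:

import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split

X = np.random.rand(100, 4)   # stand-in features
y = np.random.rand(100, 1)   # stand-in target

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

x_scaler, y_scaler = MinMaxScaler(), MinMaxScaler()
X_train = x_scaler.fit_transform(X_train)  # fit on train only
X_test = x_scaler.transform(X_test)        # reuse train statistics
y_train = y_scaler.fit_transform(y_train)
y_test = y_scaler.transform(y_test)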
1 # analysis.py
2 # -----------
3 # Licensing Information: You are free to use or extend these projects for
4 # educational purposes provided that (1) you do not distribute or publish
5 # solutions, (2) you retain this notice, and (3) you provide clear
6 # attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
7 #
8 # Attribution Information: The Pacman AI projects were developed at UC Berkeley.
9 # The core projects and autograders were primarily created by John DeNero
10 # (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
11 # Student side autograding was added by Brad Miller, Nick Hay, and
12 # Pieter Abbeel (pabbeel@cs.berkeley.edu).
13
14
15 ######################
16 # ANALYSIS QUESTIONS #
17 ######################
18
19 # Set the given parameters to obtain the specified policies through
20 # value iteration.
21
22 def question2():
23 #the noise affects the prob of jumping into the pits and V values.
24 #the current settings of discount=0.9 noise=0.2 cannot lead the agent
25 #to crossing the bridge.If the noise decreases to be 0 or close to 0,
26 #then the search of agent is treated as a deterministic problem.
27 #the V values from left to right will be:[5.9, 6.56, 7.29, 8.1, 9].
28 #theoretically, the agent will cross the bridge from left to right.
29 answerDiscount = 0.9
30 answerNoise = 0
31 return answerDiscount, answerNoise
32
33 def question3a():
34 #if the living reward is a big penalty, the agent tends to end the game quickly
35 #small noise means the agent is more willing to take risks
36 answerDiscount = 1#0.9
37 answerNoise = 0.01#0.01
38 answerLivingReward = -5
39 return answerDiscount, answerNoise, answerLivingReward
40 # If not possible, return 'NOT POSSIBLE'
41
42 def question3b():
43 #low discount encourages the agent to take the earlier reward (+1) over the later one (+10)
44 #positive living reward makes the agent want to live longer
45 #we don't want to risk jumping into the pits
46 answerDiscount = 0.2#0.2
47 answerNoise = 0.01
48 answerLivingReward =0 #0.5
49 return answerDiscount, answerNoise, answerLivingReward
50 # If not possible, return 'NOT POSSIBLE'
51
52 def question3c():
53 #if there's no living penalty, then the agent would prefer (+10)
54 #small noise means the agent need not worry about the pits
55 #reasonable discount will make the agent find a shortcut
56 answerDiscount = 0.5#0.7,0.5 works
57 answerNoise = 0.01
58 answerLivingReward = 0
59 return answerDiscount, answerNoise, answerLivingReward
60 # If not possible, return 'NOT POSSIBLE'
61
62 def question3d():
63 #no discount and low living penalty make the agent prefer (+10)
64 #large noise increases the risk of jumping into pits
65 answerDiscount = 1
66 answerNoise = 0.3
67 answerLivingReward = -0.2
68 return answerDiscount, answerNoise, answerLivingReward
69 # If not possible, return 'NOT POSSIBLE'
70
71 def question3e():
72 #since living reward is very large, living longer brings more rewards
73 answerDiscount = 1
74 answerNoise = 0
75 answerLivingReward = 100
76 return answerDiscount, answerNoise, answerLivingReward
77 # If not possible, return 'NOT POSSIBLE'
78
79 def question6():
80 answerEpsilon = None
81 answerLearningRate = None
82 return answerEpsilon, answerLearningRate
83 # If not possible, return 'NOT POSSIBLE'
84
85 if __name__ == '__main__':
86 print('Answers to analysis questions:')
87 import analysis
88 for q in [q for q in dir(analysis) if q.startswith('question')]:
89 response = getattr(analysis, q)()
90 print('  Question %s:\t%s' % (q, str(response)))
| 86 - error: syntax-error
|
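The comment in question2 claims the deterministic values along the bridge are [5.9, 6.56, 7.29, 8.1, 9]; with noise 0 each state's value is just the terminal reward of 10 discounted once per remaining step, which a one-liner confirms (rounded to two places):

discount, reward = 0.9, 10
print([round(reward * discount ** k, 2) for k in range(5, 0, -1)])
# [5.9, 6.56, 7.29, 8.1, 9.0]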
1
2 # coding: utf-8
3
4 # In[1]:
5
6
7 from sklearn.datasets import load_iris
8
9
10 # In[2]:
11
12
13 iris = load_iris()
14
15
16 # In[4]:
17
18
19 print(iris.feature_names)
20
21
22 # In[5]:
23
24
25 print(iris.target_names)
26
27
28 # In[7]:
29
30
31 print(iris.data[0])
32
33
34 # In[8]:
35
36
37 print(iris.target[0])
38
39
40 # In[13]:
41
42
43 for i in range(len(iris.target)):
44 print("Example %d: label %s, features %s" % (i, iris.target[i], iris.data[i]))
45
46
47 # In[17]:
48
49
50 import numpy as np
51
52
53 # In[15]:
54
55
56 iris = load_iris()
57
58
59 # In[18]:
60
61
62 test_idx = [0,50,100]
63
64
65 # In[19]:
66
67
68 train_target =np.delete(iris.target, test_idx)
69
70
71 # In[20]:
72
73
74 train_data = np.delete(iris.data, test_idx, axis=0)
75
76
77 # In[21]:
78
79
80 test_target = iris.target[test_idx]
81
82
83 # In[23]:
84
85
86 test_data = iris.data[test_idx]
87
88
89 # In[24]:
90
91
92 from sklearn import tree
93
94
95 # In[25]:
96
97
98 clf = tree.DecisionTreeClassifier()
99
100
101 # In[26]:
102
103
104 clf.fit(train_data, train_target)
105
106
107 # In[28]:
108
109
110 print(test_target)
111
112
113 # In[29]:
114
115
116 print(clf.predict(test_data))
117
118
119 # In[39]:
120
121
122 import pydotplus
123
124
125 # In[30]:
126
127
128 from sklearn.externals.six import StringIO
129
130
131 # In[32]:
132
133
134 import pydot
135
136
137 # In[49]:
138
139
140 import graphviz
141
142
143 # In[33]:
144
145
146 dot_data = StringIO()
147
148
149 # In[34]:
150
151
152 tree.export_graphviz(clf,
153 out_file=dot_data,
154 feature_names=iris.feature_names,
155 class_names=iris.target_names,
156 filled=True, rounded=True,
157 impurity=False)
158
159
160 # In[52]:
161
162
163 graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
164
165
166 # In[ ]:
167
168
169 graph.write_pdf("iris.pdf")
170
| 134 - warning: unused-import
140 - warning: unused-import
|
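A compatibility note: `sklearn.externals.six` was removed in scikit-learn 0.23, so on recent versions the Graphviz/pydot route above fails at import. Since 0.21 the fitted tree can be rendered without Graphviz via `sklearn.tree.plot_tree`; a sketch of that alternative, assuming matplotlib is installed:

import matplotlib.pyplot as plt
from sklearn import tree
from sklearn.datasets import load_iris

iris = load_iris()
clf = tree.DecisionTreeClassifier().fit(iris.data, iris.target)

plt.figure(figsize=(12, 8))
tree.plot_tree(clf, feature_names=iris.feature_names,
               class_names=list(iris.target_names),
               filled=True, rounded=True)
plt.savefig("iris.pdf")  # same output artifact, no Graphviz needed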
1 import scrapy
2 from mzitu.items import MzituItem
3
4 class MziSpider(scrapy.Spider):
5 name = 'mzi'
6 # allowed_domains = ['www.xxx.com']
7 start_urls = ['https://www.mzitu.com/']
8 # listing page number
9 def parse(self, response):
10 page_num=response.xpath('/html/body/div[2]/div[1]/div[3]/div/a[4]/text()').extract_first()
11 for i in range(0,4):
12 if i+1==1:
13 url='https://www.mzitu.com/'
14 else:
15 url='https://www.mzitu.com/page/%s/'%(i+1)
16 # print('page %s --'%i,url)
17 yield scrapy.Request(url=url,callback=self.page_parse,meta={'ref':url})
18 # get the URL of each gallery
19 def page_parse(self,response):
20
21 fef=response.meta['ref']
22 li_list=response.xpath('//div[@class="postlist"]/ul/li')
23 for li in li_list[0:10]:
24 tuji_url=li.xpath('./a/@href').extract_first()
25 tuji_title=li.xpath('./span[1]/a/text()').extract_first()
26 yield scrapy.Request(url=tuji_url,headers={'referer':fef},callback=self.tuji_parse,meta={'tuji_url':tuji_url,'ref':tuji_url})
27 # get the page count of each gallery
28 def tuji_parse(self,response):
29 item=MzituItem()
30 ref=response.meta['ref']
31 tuji_url=response.meta['tuji_url']
32 tuji_page_num=response.xpath('/html/body/div[2]/div[1]/div[4]/a[5]/span/text()').extract_first()
33 for i in range(int(tuji_page_num)):
34 if i+1==1:
35 url=tuji_url
36 else:
37 url=tuji_url+'/%s'%(i+1)
38 item['img_referer']=url
39 # print('gallery page %s -url--'%i,url)
40 yield scrapy.Request(url=url,headers={'referer':ref},callback=self.img_parse,meta={'item':item})
41 # download the images of the gallery
42 def img_parse(self,response):
43 item=response.meta['item']
44 img_url=response.xpath('/html/body/div[2]/div[1]/div[3]/p/a/img/@src').extract_first()
45 img_path=response.xpath('/html/body/div[2]/div[1]/div[3]/p/a/img/@alt').extract_first()
46 item['img_path']=img_path
47 # print(img_url)
48 item['img_url']=img_url
49 # print(item['img_url'])
50 # print(item['img_path'])
51 yield item
52
53
54
| 10 - warning: unused-variable
25 - warning: unused-variable
|
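The spider threads data between callbacks through `meta`; since Scrapy 1.7 the dedicated `cb_kwargs` argument does the same job and delivers the values as plain callback parameters, which reads more clearly. A sketch of the first hop rewritten that way (structure only, selectors omitted):

import scrapy

class MziSpider(scrapy.Spider):
    name = 'mzi'
    start_urls = ['https://www.mzitu.com/']

    def parse(self, response):
        url = 'https://www.mzitu.com/'
        # cb_kwargs entries arrive as named arguments of the callback
        yield scrapy.Request(url=url, callback=self.page_parse,
                             cb_kwargs={'ref': url})

    def page_parse(self, response, ref):
        self.logger.info('referer for this page: %s', ref)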
1 import scrapy
2
3 from qiubaipro.items import QiubaiproItem
4 class Test2Spider(scrapy.Spider):
5 name = 'test2'
6 # allowed_domains = ['https://www.qiushibaike.com/']
7 start_urls = ['https://www.qiushibaike.com/']
8
9 def parse(self, response):
10 li_list = response.xpath('//*[@id="content"]/div/div[2]/div/ul/li')
11 all_data = []
12 for li in li_list:
13 name = li.xpath('./div/div/a/span/text()')[0].extract()
14 text = li.xpath('./div/a/text()')[0].extract()
15 # print(name + ":" + text)
16 # dict = {
17 # "name": name,
18 # "text": text
19 # }
20 # all_data.append(dict)
21 item=QiubaiproItem()
22 item['name']= name
23 item['text']=text
24 yield item
25
26
| 11 - warning: unused-variable
4 - refactor: too-few-public-methods
|
1 import scrapy
2
3 from pian.items import PianItem
4 class BizhiSpider(scrapy.Spider):
5 name = 'bizhi'
6 # allowed_domains = ['www.xxx.com']
7 start_urls = ['http://www.netbian.com/meinv/']
8
9 def parse(self,response):
10 page_num=response.xpath('//*[@id="main"]/div[4]/a[8]/text()').extract_first()
11 # get the URL of each listing page
12 for i in range(5):
13 if i+1==1:
14 url='http://www.netbian.com/meinv/'
15 else:
16 url='http://www.netbian.com/meinv/index_%s.htm'%(i+1)
17 yield scrapy.Request(url=url,callback=self.parse_page)
18 def parse_page(self, response):
19 item = PianItem()
20 li_list=response.xpath('//div[@class="list"]/ul/li')
21 # get the number of the current page
22 page=response.xpath('//*[@id="main"]/div[4]/b/text()').extract_first()
23 item['mulu']='第%s页'%(page)
24 # get the full-size wallpaper URL
25 for li in li_list:
26 try:
27 geren_url='http://www.netbian.com'+li.xpath('./a/@href').extract_first()
28 except:
29 continue
30 yield scrapy.Request(url=geren_url, callback=self.parse_detail,meta={'item':item})
31
32 def parse_detail(self,response):
33 item = response.meta['item']
34 # get the image URL
35 img_url=response.xpath('//div[@class="pic"]/p/a/img/@src').extract_first()
36 item['url']=img_url
37 yield item
| 10 - warning: unused-variable
28 - warning: bare-except
|
1 # Define your item pipelines here
2 #
3 # Don't forget to add your pipeline to the ITEM_PIPELINES setting
4 # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
5
6
7 # useful for handling different item types with a single interface
8 from itemadapter import ItemAdapter
9 # import the required modules
10 from scrapy.pipelines.images import ImagesPipeline
11 import scrapy
12 from . import settings
13 import os
14 # class PianPipeline:
15 # def process_item(self, item, spider):
16 # return item
17
18 class PianImgPipeline(ImagesPipeline):
19 # this method issues the download request for the image URL
20 def get_media_requests(self, item, info):
21 print('download started')
22 # the item must be passed to file_path via meta for processing; if images are not stored in per-page folders, the meta argument can be omitted
23 return scrapy.Request(item['url'],meta={'item':item})
24 # this method sets the download path and file name of the image
25 def file_path(self, request, response=None, info=None):
26 item=request.meta['item']
27 # per-category folder
28 wenjianjia=item['mulu']
29 '''
30 root directory, i.e. the image-storage root defined in the settings file
31 note: when setting the root directory, do not add "./", otherwise a folder named after the root directory is created automatically when the subfolders are made
32 '''
33 img_source=settings.IMAGES_STORE
34 # folder path where the images are stored
35 img_path = os.path.join(img_source, wenjianjia)
36 # check whether the storage folder exists; create it if not
37 if not os.path.exists(img_path):
38 os.makedirs(img_path)
39 # rename the image
40 url=request.url
41 url=url.split('/')[-1]
42 file_name=url
43 # image storage path
44 image_path=os.path.join(wenjianjia,file_name)
45 # return the image storage path
46
47 return image_path
48 def item_completed(self, results, item, info):
49 print('download finished')
50 return item | 12 - error: no-name-in-module
20 - warning: unused-argument
29 - warning: pointless-string-statement
25 - warning: unused-argument
25 - warning: unused-argument
48 - warning: unused-argument
48 - warning: unused-argument
8 - warning: unused-import
|
1 # Define your item pipelines here
2 #
3 # Don't forget to add your pipeline to the ITEM_PIPELINES setting
4 # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
5
6
7 # useful for handling different item types with a single interface
8 from itemadapter import ItemAdapter
9 from scrapy.pipelines.images import ImagesPipeline
10 import scrapy
11 from . import settings
12 import os
13 # class MzituPipeline:
14 # def process_item(self, item, spider):
15 # return item
16 class myPipeline(ImagesPipeline):
17 def get_media_requests(self, item, info):
18 print('download started')
19 return scrapy.Request(item['img_url'],headers={'referer':item['img_referer']},meta={'item':item})
20 def file_path(self, request, response=None, info=None):
21 item=request.meta['item']
22 # get the target folder
23 floder=item['img_path']
24 source_path=settings.IMAGES_STORE
25 # full path
26 img_path=os.path.join(source_path,floder)
27 if not os.path.exists(img_path):
28 os.makedirs(img_path)
29
30 url = request.url
31 url = url.split('/')[-1]
32 img_name=url
33 img_file_path=os.path.join(floder,img_name)
34 print(img_file_path)
35
36 return img_file_path
37 def item_completed(self, results, item, info):
38 print('download finished')
39 return item | 11 - error: no-name-in-module
17 - warning: unused-argument
20 - warning: unused-argument
20 - warning: unused-argument
37 - warning: unused-argument
37 - warning: unused-argument
8 - warning: unused-import
|
1 import json
2 import uuid
3 from django.core.serializers import serialize
4
5 from django.db import IntegrityError
6 from django.test import TestCase
7 import pytest
8 from django_uuid_pk.fields import StringUUID
9
10 from django_uuid_pk.tests.models import (AutoUUIDFieldModel, ManualUUIDFieldModel, NamespaceUUIDFieldModel,
11 BrokenNamespaceUUIDFieldModel, PrimaryKeyUUIDFieldModel,
12 BrokenPrimaryKeyUUIDFieldModel, ModelUUIDField)
13
14
15
16 def assertJSON(data):
17 try:
18 json.loads(data)
19 except ValueError:
20 raise
21
22
23
24 @pytest.mark.django_db
25 class UUIDFieldTestCase(TestCase):
26 def test_protocols(self):
27 obj = ModelUUIDField.objects.create()
28 self.assertTrue(isinstance(obj.uuid1, uuid.UUID))
29 self.assertTrue(isinstance(obj.uuid3, uuid.UUID))
30 self.assertTrue(isinstance(obj.uuid4, uuid.UUID))
31 self.assertTrue(isinstance(obj.uuid5, uuid.UUID))
32
33 def test_auto_uuid4(self):
34 obj = AutoUUIDFieldModel.objects.create()
35 self.assertTrue(obj.uuid)
36 self.assertEquals(len(obj.uuid), 32)
37 #self.assertTrue(isinstance(obj.uuid, uuid.UUID))
38 self.assertEquals(obj.uuid.version, 4)
39
40 def test_raises_exception(self):
41 self.assertRaises(IntegrityError, ManualUUIDFieldModel.objects.create)
42
43 def test_manual(self):
44 obj = ManualUUIDFieldModel.objects.create(uuid=uuid.uuid4())
45 self.assertTrue(obj)
46 self.assertEquals(len(obj.uuid), 32)
47 #self.assertTrue(isinstance(obj.uuid, uuid.UUID))
48 self.assertEquals(obj.uuid.version, 4)
49
50 def test_namespace(self):
51 obj = NamespaceUUIDFieldModel.objects.create()
52 self.assertTrue(obj)
53 self.assertEquals(len(obj.uuid), 32)
54 #self.assertTrue(isinstance(obj.uuid, uuid.UUID))
55 self.assertEquals(obj.uuid.version, 5)
56
57 def test_broken_namespace(self):
58 self.assertRaises(ValueError, BrokenNamespaceUUIDFieldModel.objects.create)
59
60 def test_wrongvalue(self):
61 obj = PrimaryKeyUUIDFieldModel.objects.create()
62 with pytest.raises(ValueError):
63 obj.uuid = 1
64
65 def test_assign1(self):
66 obj = PrimaryKeyUUIDFieldModel.objects.create()
67 obj.uuid = uuid.UUID('5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4')
68 obj.save()
69 assert str(obj.uuid) == '5b27d1bde7c346f3aaf211e4d32f60d4'
70 #assert obj.uuid == '5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4'
71 assert obj.uuid == uuid.UUID('5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4')
72
73 def test_assign2(self):
74 obj = PrimaryKeyUUIDFieldModel.objects.create()
75 obj.uuid = '5b27d1bd-e7c3-46f3-aaf2-11e4d32f60d4'
76 obj.save()
77 assert str(obj.uuid) == '5b27d1bde7c346f3aaf211e4d32f60d4'
78
79
80 def test_primary_key(self):
81 obj = PrimaryKeyUUIDFieldModel.objects.create()
82 assert obj.pk
83
84 obj = PrimaryKeyUUIDFieldModel()
85 assert not obj.pk
86
87 # reset primary key if save() fails
88 BrokenPrimaryKeyUUIDFieldModel.objects.create(unique=1)
89 obj = BrokenPrimaryKeyUUIDFieldModel(unique=1)
90 with pytest.raises(IntegrityError):
91 obj.save()
92 assert not obj.pk
93
94 def test_serialize(self):
95 obj = PrimaryKeyUUIDFieldModel.objects.create()
96 obj.uuid = uuid.UUID("2e9280cfdc8e42bdbf0afa3043acaa7e")
97 obj.save()
98 serialized = serialize('json', PrimaryKeyUUIDFieldModel.objects.all())
99 assertJSON(serialized)
100
101 #def test_json(self):
102 # obj = PrimaryKeyUUIDFieldModel.objects.create()
103 # obj.save()
104 # serialized = json.dumps(obj)
105 # assertJSON(serialized)
106
107 #deserialized = json.loads(serialized, object_hook=registry.object_hook)
108 #
109 #print 111, deserialized
110 #
111 #assert PrimaryKeyUUIDField(**deserialized).uuid == obj.uuid
| 19 - warning: try-except-raise
8 - warning: unused-import
|
1 import os
2 import sys
3 from django.conf import settings
4
5
6 def pytest_configure(config):
7 if not settings.configured:
8 os.environ['DJANGO_SETTINGS_MODULE'] = 'django_uuid_pk.tests.settings'
9
10
11
12 def runtests(args=None):
13 import pytest
14
15 if not args:
16 args = []
17
18 if not any(a for a in args[1:] if not a.startswith('-')):
19 args.append('django_uuid_pk/tests')
20
21 sys.exit(pytest.main(args))
22
23
24 if __name__ == '__main__':
25 runtests(sys.argv)
| 6 - warning: unused-argument
|
1 # from __future__ import absolute_import
2 # from .tests import *
3 # from .models import *
| Clean Code: No Issues Detected
|
1 import uuid
2 from django.db import models
3 from django_uuid_pk.fields import UUIDField
4
5
6 class ModelUUIDField(models.Model):
7 uuid1 = UUIDField(version=1, auto=True)
8 uuid3 = UUIDField(namespace=uuid.NAMESPACE_URL, version=3, auto=True)
9 uuid4 = UUIDField(version=4, auto=True)
10 uuid5 = UUIDField(namespace=uuid.NAMESPACE_URL, version=5, auto=True)
11
12 class AutoUUIDFieldModel(models.Model):
13 uuid = UUIDField(auto=True)
14
15
16 class ManualUUIDFieldModel(models.Model):
17 uuid = UUIDField(auto=False)
18
19
20 class NamespaceUUIDFieldModel(models.Model):
21 uuid = UUIDField(auto=True, namespace=uuid.NAMESPACE_URL, version=5)
22
23
24 class BrokenNamespaceUUIDFieldModel(models.Model):
25 uuid = UUIDField(auto=True, namespace='lala', version=5)
26
27
28 class PrimaryKeyUUIDFieldModel(models.Model):
29 uuid = UUIDField(primary_key=True)
30 #char = models.CharField(max_length=10, null=True)
31
32 class BrokenPrimaryKeyUUIDFieldModel(models.Model):
33 uuid = UUIDField(primary_key=True)
34 unique = models.IntegerField(unique=True)
35
36 def __repr__(self):
37 return {}
| 6 - refactor: too-few-public-methods
12 - refactor: too-few-public-methods
16 - refactor: too-few-public-methods
20 - refactor: too-few-public-methods
24 - refactor: too-few-public-methods
28 - refactor: too-few-public-methods
36 - error: invalid-repr-returned
32 - refactor: too-few-public-methods
|
1 import os
2
3 SITE_ID = 1
4 STATIC_URL = '/static/'
5 SECRET_KEY =';pkj;lkj;lkjh;lkj;oi'
6 db = os.environ.get('DBENGINE', None)
7 if db == 'pg':
8 DATABASES = {
9 'default': {
10 'ENGINE': 'django.db.backends.postgresql_psycopg2',
11 'NAME': 'django_uuid_pk',
12 'HOST': '127.0.0.1',
13 'PORT': '',
14 'USER': 'postgres',
15 'PASSWORD': '',
16 'OPTIONS': {
17 'autocommit': True, # same value for all versions of django (is the default in 1.6)
18 }}}
19 elif db == 'mysql':
20 DATABASES = {
21 'default': {
22 'ENGINE': 'django.db.backends.mysql',
23 'NAME': 'django_uuid_pk',
24 'HOST': '127.0.0.1',
25 'PORT': '',
26 'USER': 'aa',
27 'PASSWORD': ''}}
28 else:
29 DATABASES = {
30 'default': {
31 'ENGINE': 'django.db.backends.sqlite3',
32 'NAME': 'django_uuid_pk.sqlite',
33 'HOST': '',
34 'PORT': ''}}
35
36 INSTALLED_APPS = ('django.contrib.auth',
37 'django.contrib.contenttypes',
38 'django.contrib.sessions',
39 'django.contrib.sites',
40 'django_uuid_pk.tests')
41
42 ALLOWED_HOSTS = ('127.0.0.1',)
43
44 LOGGING = {
45 'version': 1,
46 'disable_existing_loggers': False,
47 'formatters': {
48 'simple': {
49 'format': '%(levelname)-8s: %(asctime)s %(name)10s: %(funcName)40s %(message)s'
50 }
51 },
52 'handlers': {
53 'console': {
54 'level': 'DEBUG',
55 'class': 'logging.StreamHandler',
56 'formatter': 'simple'
57 },
58 },
59 }
| Clean Code: No Issues Detected
|
1
2 import math
3 import utils
4
5 class Entity( object ) :
6
7 def __init__( self, i, j, di, dj, cellSize, canvasWidth, canvasHeight ) :
8 super( Entity, self ).__init__()
9
10 self.i = i
11 self.j = j
12
13 self._cellSize = cellSize
14 self._canvasWidth = canvasWidth
15 self._canvasHeight = canvasHeight
16 self._di = di
17 self._dj = dj
18
19 self._x, self._y = utils.grid2screen( i, j, cellSize, canvasWidth, canvasHeight )
20 self._w = di * cellSize
21 self._h = dj * cellSize
22
23 self._xc = self._x + self._cellSize * ( math.floor( ( self._di - 1 ) / 2. ) + 0.5 if self._di % 2 == 0 else 0.0 )
24 self._yc = self._y + self._cellSize * ( math.floor( ( self._dj - 1 ) / 2. ) + 0.5 if self._dj % 2 == 0 else 0.0 )
25
26 def x( self ) :
27 return self._x
28
29 def y( self ) :
30 return self._y
31
32 def xc( self ) :
33 return self._xc
34
35 def yc( self ) :
36 return self._yc
37
38 def w( self ) :
39 return self._w
40
41 def h( self ) :
42 return self._h
43
44 def update( self ) :
45 self._x, self._y = utils.grid2screen( self.i, self.j,
46 self._cellSize,
47 self._canvasWidth,
48 self._canvasHeight )
49
50 self._xc = self._x + self._cellSize * ( math.floor( ( self._di - 1 ) / 2. ) + 0.5 if self._di % 2 == 0 else 0.0 )
51 self._yc = self._y + self._cellSize * ( math.floor( ( self._dj - 1 ) / 2. ) + 0.5 if self._dj % 2 == 0 else 0.0 )
52
53 def hit( self, other ) :
54 _dx = abs( self._xc - other.xc() )
55 _dy = abs( self._yc - other.yc() )
56
57 if ( _dx < ( self._w / 2. ) + ( other.w() / 2. ) and
58 _dy < ( self._h / 2. ) + ( other.h() / 2. ) ) :
59 return True
60 else :
61 return False | 5 - refactor: useless-object-inheritance
5 - refactor: too-many-instance-attributes
7 - refactor: too-many-arguments
7 - refactor: too-many-positional-arguments
8 - refactor: super-with-arguments
57 - refactor: simplifiable-if-statement
57 - refactor: no-else-return
|
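hit() is a standard axis-aligned bounding-box overlap test, and the if/else that pylint flags as simplifiable can collapse into returning the comparison itself. A standalone sketch of the same check on plain (xc, yc, w, h) tuples:

def aabb_hit(a, b):
    # a and b are (xc, yc, w, h): center coordinates plus width/height.
    dx = abs(a[0] - b[0])
    dy = abs(a[1] - b[1])
    return dx < (a[2] + b[2]) / 2.0 and dy < (a[3] + b[3]) / 2.0

print(aabb_hit((0, 0, 2, 2), (1, 1, 2, 2)))  # True: boxes overlap
print(aabb_hit((0, 0, 2, 2), (3, 0, 2, 2)))  # False: too far apart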
1
2 import math
3
4 def grid2screen( i, j, cellSize, canvasWidth, canvasHeight ) :
5 x = ( i + 0.5 ) * cellSize
6 y = canvasHeight - ( j + 0.5 ) * cellSize
7 return x, y
8
9 def screen2grid( x, y, cellSize, canvasWidth, canvasHeight ) :
10 i = math.floor( x / cellSize - 0.5 )
11 j = math.floor( ( canvasHeight - y ) / cellSize - 0.5 )
12 return i, j | 4 - warning: unused-argument
9 - warning: unused-argument
|
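The two helpers invert each other: grid2screen maps a cell index to the pixel at the cell's center (with the y axis flipped so grid row 0 sits at the bottom of the canvas), and screen2grid maps a pixel back to its cell. A quick round-trip check, with the functions restated so the snippet runs on its own:

import math

def grid2screen(i, j, cellSize, canvasWidth, canvasHeight):
    return (i + 0.5) * cellSize, canvasHeight - (j + 0.5) * cellSize

def screen2grid(x, y, cellSize, canvasWidth, canvasHeight):
    return (math.floor(x / cellSize - 0.5),
            math.floor((canvasHeight - y) / cellSize - 0.5))

x, y = grid2screen(3, 4, 40, 800, 600)
print(screen2grid(x, y, 40, 800, 600))  # (3, 4)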
1
2 import pygame
3 import base
4
5 class Apple( base.Entity ) :
6
7 def __init__( self, i, j, cellSize, canvasWidth, canvasHeight ) :
8 super( Apple, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )
9
10 self._color = ( 255, 255, 0 )
11 self._alive = True
12
13 def draw( self, canvas ) :
14 _xleft = self._x - 0.5 * self._cellSize
15 _ytop = self._y - 0.5 * self._cellSize
16
17 pygame.draw.rect( canvas,
18 self._color,
19 (_xleft, _ytop, self._w, self._h) ) | 7 - refactor: too-many-arguments
7 - refactor: too-many-positional-arguments
8 - refactor: super-with-arguments
5 - refactor: too-few-public-methods
|
1
2 import pygame
3 import base
4
5 from collections import deque
6
7 class SnakePart( base.Entity ) :
8
9 def __init__( self, i, j, color, cellSize, canvasWidth, canvasHeight ) :
10 super( SnakePart, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )
11
12 self.color = color
13 self.lasti = i
14 self.lastj = j
15
16 def draw( self, canvas ) :
17 _xleft = self._x - 0.5 * self._cellSize
18 _ytop = self._y - 0.5 * self._cellSize
19
20 pygame.draw.rect( canvas,
21 self.color,
22 (_xleft, _ytop, self._w, self._h) )
23
24 class Snake( base.Entity ) :
25
26 def __init__( self, i, j, cellSize, canvasWidth, canvasHeight ) :
27 super( Snake, self ).__init__( i, j, 1, 1, cellSize, canvasWidth, canvasHeight )
28
29 self._bodyParts = [ SnakePart( i, j, ( 50, 50, 50 ), cellSize, canvasWidth, canvasHeight ) ]
30 self._speed = 800.
31 self._direction = 'left'
32 self._displacement = 0.0
33 self._frameTime = 0.001
34
35 self._nx = int( canvasWidth / cellSize )
36 self._ny = int( canvasHeight / cellSize )
37
38 self._alive = True
39
40 def alive( self ) :
41 return self._alive
42
43 def head( self ) :
44 return self._bodyParts[0]
45
46 def tail( self ) :
47 return self._bodyParts[-1]
48
49 def setDirection( self, direction ) :
50 if len( self._bodyParts ) > 1 :
51 # check whether the requested direction is the opposite of the current one
52 if ( self._direction == 'left' and direction == 'right' or
53 self._direction == 'right' and direction == 'left' or
54 self._direction == 'up' and direction == 'down' or
55 self._direction == 'down' and direction == 'up' ) :
56 # keep the same direction
57 self._direction = self._direction
58 else :
59 # change the direction
60 self._direction = direction
61 else :
62 self._direction = direction
63
64 def grow( self ) :
65 _i = self.tail().lasti
66 _j = self.tail().lastj
67
68 _newPart = SnakePart( _i, _j,
69 ( 50, 50, 50 ),
70 self._cellSize,
71 self._canvasWidth,
72 self._canvasHeight )
73 self._bodyParts.append( _newPart )
74
75 def update( self ) :
76 self._displacement = self._displacement + self._speed * self._frameTime
77 if self._displacement > self._cellSize :
78 self.head().lasti = self.head().i
79 self.head().lastj = self.head().j
80 # move one cell in the appropriate direction
81 if self._direction == 'up' :
82 self.head().j += 1
83 elif self._direction == 'down' :
84 self.head().j -= 1
85 elif self._direction == 'right' :
86 self.head().i += 1
87 elif self._direction == 'left' :
88 self.head().i -= 1
89
90 for k in range( 1, len( self._bodyParts ) ) :
91 self._bodyParts[k].lasti = self._bodyParts[k].i
92 self._bodyParts[k].lastj = self._bodyParts[k].j
93
94 self._bodyParts[k].i = self._bodyParts[k-1].lasti
95 self._bodyParts[k].j = self._bodyParts[k-1].lastj
96
97 # reset the accumulator
98 self._displacement = 0.0
99
100 if self.head()._x > 800. and self._direction == 'right' :
101 self.head().i = 0
102
103 if self.head()._x < 0. and self._direction == 'left' :
104 self.head().i = self._nx
105
106 if self.head()._y > 600. and self._direction == 'down' :
107 self.head().j = self._ny
108
109 if self.head()._y < 0. and self._direction == 'up' :
110 self.head().j = 0
111
112 for k in range( len( self._bodyParts ) ) :
113 self._bodyParts[k].update()
114
115 for i in range( 1, len( self._bodyParts ) ) :
116 if self.head().hit( self._bodyParts[i] ):
117 self._alive = False
118
119 def draw( self, canvas ) :
120 for k in range( len( self._bodyParts ) ) :
121 self._bodyParts[k].draw( canvas )
122
123 ## # the same way of iterating
124 ## for bodyPart in self._bodyParts :
125 ## bodyPart.draw( canvas ) | 9 - refactor: too-many-arguments
9 - refactor: too-many-positional-arguments
10 - refactor: super-with-arguments
107 - warning: attribute-defined-outside-init
110 - warning: attribute-defined-outside-init
101 - warning: attribute-defined-outside-init
104 - warning: attribute-defined-outside-init
7 - refactor: too-few-public-methods
24 - refactor: too-many-instance-attributes
26 - refactor: too-many-arguments
26 - refactor: too-many-positional-arguments
27 - refactor: super-with-arguments
52 - refactor: too-many-boolean-expressions
100 - warning: protected-access
103 - warning: protected-access
106 - warning: protected-access
109 - warning: protected-access
75 - refactor: too-many-branches
5 - warning: unused-import
|
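The six-way boolean in setDirection (the too-many-boolean-expressions in the label) checks one thing: whether the requested direction reverses the current one. A dictionary of opposites expresses that in a single comparison; a sketch of the rule as a free function:

OPPOSITE = {'left': 'right', 'right': 'left', 'up': 'down', 'down': 'up'}

def next_direction(current, requested, body_length):
    # A one-segment snake may reverse; a longer one ignores the reversal.
    if body_length > 1 and OPPOSITE.get(requested) == current:
        return current
    return requested

print(next_direction('left', 'right', 3))  # 'left' (reversal ignored)
print(next_direction('left', 'up', 3))     # 'up'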
1
2 import pygame
3 import world
4
5 class Text( object ) :
6
7 def __init__( self, x, y, message, size, color ) :
8 super( Text, self).__init__()
9
10 self._message = message
11 self._textFont = pygame.font.Font( None, size )
12 self._textSurface = self._textFont.render( message, True, color )
13 self._textRect = self._textSurface.get_rect()
14 self._textRect.center = ( x, y )
15
16 def draw( self, canvas ) :
17 canvas.blit( self._textSurface, self._textRect )
18
19 class Screen( object ) :
20
21 def __init__( self, canvas, backgroundColor ) :
22 super( Screen, self ).__init__()
23
24 self._canvas = canvas
25 self._backgroundColor = backgroundColor
26 self._texts = []
27
28 self._keys = None
29
30 def setKeys( self, keys ) :
31 self._keys = keys
32
33 def addText( self, text ) :
34 self._texts.append( text )
35
36 def draw( self ) :
37 self._canvas.fill( self._backgroundColor )
38
39 for i in range( len( self._texts ) ) :
40 self._texts[i].draw( self._canvas )
41
42 def update( self ) :
43 pass
44
45 class MenuScreen( Screen ) :
46
47 def __init__( self, canvas ) :
48 super( MenuScreen, self ).__init__( canvas, ( 255, 255, 0 ) )
49
50 self._textTitle = Text( 100, 100, 'SNAKE', 50, ( 0, 0, 0 ) )
51 self._textPlay = Text( 100, 400, 'PLAY', 40, ( 255, 255, 255 ) )
52
53 self.addText( self._textTitle )
54 self.addText( self._textPlay )
55
56 class GameOverScreen( Screen ) :
57
58 def __init__( self, canvas ) :
59 super( GameOverScreen, self ).__init__( canvas, ( 0, 0, 0 ) )
60
61 self._textGameOver = Text( 100, 100, 'GAME OVER :(', 50, ( 255, 0, 255 ) )
62 self._textContinue = Text( 100, 400, 'Continue???', 40, ( 255, 255, 255 ) )
63
64 self.addText( self._textGameOver )
65 self.addText( self._textContinue )
66
67 class GameScreen( Screen ) :
68
69 def __init__( self, canvas, canvasWidth, canvasHeight ) :
70 super( GameScreen, self ).__init__( canvas, ( 255, 255, 255 ) )
71
72 self._world = world.World( 40, canvasWidth, canvasHeight )
73
74 def draw( self ) :
75 super( GameScreen, self ).draw()
76
77 self._world.draw( self._canvas )
78
79 def update( self ) :
80 self._world.setKeys( self._keys )
81 self._world.update()
82
83 def lose( self ) :
84 return self._world.lose()
85
86 def win( self ) :
87 return self._world.win() | 5 - refactor: useless-object-inheritance
7 - refactor: too-many-arguments
7 - refactor: too-many-positional-arguments
8 - refactor: super-with-arguments
5 - refactor: too-few-public-methods
19 - refactor: useless-object-inheritance
22 - refactor: super-with-arguments
48 - refactor: super-with-arguments
59 - refactor: super-with-arguments
70 - refactor: super-with-arguments
75 - refactor: super-with-arguments
|
1
2 import pygame
3 import random
4 import time
5
6 from snake import Snake
7 from collectables import Apple
8
9 import screen
10
11 class Game :
12
13 def __init__( self ) :
14 pygame.init()
15 self._canvasWidth = 800
16 self._canvasHeight = 600
17 self._canvas = pygame.display.set_mode( ( self._canvasWidth, self._canvasHeight ) )
18 self._gameExit = False
19 self._keys = { 'up' : False,
20 'down' : False,
21 'right' : False,
22 'left' : False,
23 'enter' : False,
24 'escape' : False }
25
26 self._screen = screen.MenuScreen( self._canvas )
27 self._screenName = 'menu'
28
29 def _getEvents( self ) :
30 for event in pygame.event.get() :
31 if event.type == pygame.QUIT :
32 self._gameExit = True
33 elif event.type == pygame.KEYDOWN :
34 if event.key == pygame.K_UP :
35 self._keys['up'] = True
36 elif event.key == pygame.K_DOWN :
37 self._keys['down'] = True
38 elif event.key == pygame.K_RIGHT :
39 self._keys['right'] = True
40 elif event.key == pygame.K_LEFT :
41 self._keys['left'] = True
42 elif event.key == pygame.K_RETURN :
43 self._keys['enter'] = True
44 elif event.key == pygame.K_ESCAPE :
45 self._keys['escape'] = True
46 elif event.type == pygame.KEYUP :
47 if event.key == pygame.K_UP :
48 self._keys['up'] = False
49 elif event.key == pygame.K_DOWN :
50 self._keys['down'] = False
51 elif event.key == pygame.K_RIGHT :
52 self._keys['right'] = False
53 elif event.key == pygame.K_LEFT :
54 self._keys['left'] = False
55 elif event.key == pygame.K_RETURN :
56 self._keys['enter'] = False
57 elif event.key == pygame.K_ESCAPE :
58 self._keys['escape'] = False
59
60 def _updateScreen( self ) :
61 self._screen.setKeys( self._keys )
62 self._screen.update()
63 self._screen.draw()
64
65 if self._screenName == 'menu' and self._keys['enter'] == True :
66 self._screen = screen.GameScreen( self._canvas, self._canvasWidth, self._canvasHeight )
67 self._screenName = 'game'
68
69 elif self._screenName == 'game' and self._screen.lose() :
70 self._screen = screen.GameOverScreen( self._canvas )
71 self._screenName = 'gameover'
72
73 elif self._screenName == 'game' and self._screen.win() :
74 self._screen = screen.MenuScreen( self._canvas )
75 self._screenName = 'menu'
76
77 elif self._screenName == 'gameover' and self._keys['enter'] == True :
78 self._screen = screen.GameScreen( self._canvas, self._canvasWidth, self._canvasHeight )
79 self._screenName = 'game'
80
81 elif self._screenName == 'gameover' and self._keys['escape'] == True :
82 self._screen = screen.MenuScreen( self._canvas )
83 self._screenName = 'menu'
84
85 def run( self ) :
86
87 while not self._gameExit :
88 self._getEvents()
89 self._updateScreen()
90
91 # update the canvas
92 pygame.display.update()
93 # wait a little while
94 time.sleep( 0.001 )
95
96 if __name__ == '__main__' :
97 _game = Game()
98 _game.run() | 29 - refactor: too-many-branches
11 - refactor: too-few-public-methods
3 - warning: unused-import
6 - warning: unused-import
7 - warning: unused-import
|
1 # Generated by Django 2.2.4 on 2019-08-22 20:52
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('portafolio', '0004_auto_20190822_2251'),
10 ]
11
12 operations = [
13 migrations.AlterField(
14 model_name='project',
15 name='description',
field=models.TextField(verbose_name='Descripció'),
17 ),
18 migrations.AlterField(
19 model_name='project',
20 name='moreinfo',
21 field=models.CharField(max_length=200, verbose_name='Mes Informació'),
22 ),
23 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 2.2.4 on 2019-08-22 20:47
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('portafolio', '0001_initial'),
10 ]
11
12 operations = [
13 migrations.AlterField(
14 model_name='project',
15 name='createdDate',
16 field=models.DateTimeField(auto_now_add=True, verbose_name='Data de creació'),
17 ),
18 migrations.AlterField(
19 model_name='project',
20 name='description',
field=models.TextField(verbose_name='Descripció'),
22 ),
23 migrations.AlterField(
24 model_name='project',
25 name='image',
26 field=models.ImageField(upload_to='projects', verbose_name='Imatge'),
27 ),
28 migrations.AlterField(
29 model_name='project',
30 name='title',
31 field=models.CharField(max_length=200, verbose_name='Títol'),
32 ),
33 migrations.AlterField(
34 model_name='project',
35 name='updatedDate',
36 field=models.DateTimeField(auto_now=True, verbose_name='Data dactualització'),
37 ),
38 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 2.2.4 on 2019-08-22 20:50
2
3 from django.db import migrations, models
4 import django.utils.timezone
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 ('portafolio', '0002_auto_20190822_2247'),
11 ]
12
13 operations = [
14 migrations.AddField(
15 model_name='project',
16 name='moreinfo',
17 field=models.TextField(default=django.utils.timezone.now, verbose_name='Mes Informació'),
18 preserve_default=False,
19 ),
20 ]
| 7 - refactor: too-few-public-methods
|
1 # Generated by Django 2.2.4 on 2019-08-22 20:51
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('portafolio', '0003_project_moreinfo'),
10 ]
11
12 operations = [
13 migrations.AlterField(
14 model_name='project',
15 name='description',
field=models.CharField(max_length=200, verbose_name='Descripció'),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|
1 from django.db import models
2
3 # Create your models here.
4 class Project(models.Model):
5 title = models.CharField(max_length=200, verbose_name = 'Títol')
6 moreinfo = models.URLField(null=True, blank=True,verbose_name = 'Mes Informació')
description = models.TextField(verbose_name = 'Descripció')
8 image = models.ImageField(verbose_name = 'Imatge', upload_to = 'projects')
9 createdDate = models.DateTimeField(auto_now_add=True, verbose_name = 'Data de creació')
10 updatedDate = models.DateTimeField(auto_now=True, verbose_name = 'Data dactualització')
11
12 class Meta:
13 verbose_name = 'Projecte'
14 verbose_name_plural = 'Projectes'
15 ordering = ["-createdDate"]
16
17 def __str__(self):
18 return self.title | 12 - refactor: too-few-public-methods
4 - refactor: too-few-public-methods
|
1 # -*- coding: utf-8 -*-
2
3 import datetime, json, os
4 import flask
5 from ezb_dbprx.config import settings
6 from ezb_dbprx.utils import logger_setup, db_handler
7 from flask.ext.basicauth import BasicAuth # http://flask-basicauth.readthedocs.org/en/latest/
8
9
10 ## setup
11 app = flask.Flask(__name__)
12 log = logger_setup.setup_logger()
13 #
14 app.config['BASIC_AUTH_USERNAME'] = settings.BASIC_AUTH_USERNAME
15 app.config['BASIC_AUTH_PASSWORD'] = settings.BASIC_AUTH_PASSWORD
16 basic_auth = BasicAuth(app)
17
18
19 ## experimentation ##
20
21
22 @app.route( '/hello1/', methods=['GET'] )
23 def hi_a():
24 """ Tests simple json response return. """
25 return flask.jsonify( {'hello': 'world'} )
26
27
28 @app.route( '/hello2/', methods=['GET'] )
29 def hi_b():
30 """ Tests logging. """
31 log.info( u'hi there' )
32 return flask.jsonify( {'hello': 'world2'} )
33
34
35 @app.route( '/basic_auth/', methods=['GET'] )
36 @basic_auth.required
37 def try_basic_auth():
38 """ Tests basic-auth. """
39 log.info( u'in proxy_app.try_basic_auth()' )
40 return flask.jsonify( {'got': 'through'} )
41
42
43 @app.route( '/forbidden/', methods=['GET'] )
44 def try_forbidden():
45 """ Tests forbidden response. """
46 log.debug( u'in proxy_app.try_forbidden()' )
47 return flask.abort( 403 )
48
49
50 @app.route( '/post_test/', methods=['POST'] )
51 def handle_post():
52 """ Tests perceiving params response return. """
53 value_a = flask.request.form['key_a'].strip()
54 return flask.jsonify( {u'key_a': value_a} )
55
56
57 ## real work ##
58
59
60 @app.route( u'/my_ip/', methods=['GET'] )
61 def show_ip():
62 """ Returns ip.
63 Note: this was a test, but could be useful for debugging. """
64 ip = flask.request.remote_addr
65 log.debug( u'in proxy_app.show_ip(); remote_addr, `%s`' % ip )
66 return flask.jsonify( {u'client_ip': ip} )
67
68
69 @app.route( u'/search_new_request/', methods=['GET'] )
70 @basic_auth.required
71 def search():
72 """ Searches for new requests. """
73 client_ip = flask.request.remote_addr
74 if not client_ip in settings.LEGIT_IPS.keys():
75 log.debug( u'- in proxy_app.search_new_request(); client_ip `%s` not in LEGIT_IPS; returning forbidden' % client_ip )
76 return flask.abort( 403 )
77 db = db_handler.DB_Handler( log )
78 result_list = db.search_new_request()
79 return_dict = {
80 u'request_type': u'search_new_request',
81 u'datetime': unicode( datetime.datetime.now() ),
82 u'result': result_list }
83 return flask.jsonify( return_dict )
84
85
86 @app.route( u'/update_request_status/', methods=['POST'] )
87 @basic_auth.required
88 def update_request_status():
89 """ Updates db request status. """
90 log.debug( u'- in proxy_app.update_request_status(); starting' )
91 client_ip = flask.request.remote_addr
92 log.debug( u'- in proxy_app.update_request_status(); client_ip, `%s`' % client_ip )
93 if not client_ip in settings.LEGIT_IPS.keys():
94 log.debug( u'- in proxy_app.update_request_status(); returning forbidden' )
95 return flask.abort( 403 )
96 log.debug( u'- in proxy_app; update_request_status(); ip legit' )
97 log.debug( u'- in proxy_app; update_request_status(); flask.request.form.keys(), %s' % sorted(flask.request.form.keys()) )
98 db_id = flask.request.form[u'db_id'] # flask will return a '400 - Bad Request' if getting a value fails
99 status = flask.request.form[u'status']
100 try:
101 assert status in [ u'in_process', u'processed' ] # never changing it to its original 'not_yet_processed'
102 assert db_id.isdigit()
103 except Exception as e:
104 log.error( u'- in proxy_app; update_request_status(); params grabbed; keys good but value(s) bad; db_id, `%s`; status, `%s`' % (db_id, status) )
105 return flask.abort( 400, u'Bad data.' )
106 log.debug( u'- in proxy_app; update_request_status(); params grabbed & data is valid' )
107 db = db_handler.DB_Handler( log )
108 result_dict = db.update_request_status( db_id, status )
109 assert result_dict.keys() == [ u'status_update_result' ]
110 return_dict = {
111 u'request_type': u'update_request_status',
112 u'db_id': db_id,
113 u'requested_new_status': status,
114 u'datetime': unicode( datetime.datetime.now() ),
115 u'result': result_dict[ u'status_update_result' ]
116 }
117 return flask.jsonify( return_dict )
118
119
120 @app.route( u'/add_history_note/', methods=['POST'] )
121 @basic_auth.required
122 def add_history_note():
123 """ Adds history note. """
124 log.debug( u'- in proxy_app.add_history_note(); starting' )
125 if not flask.request.remote_addr in settings.LEGIT_IPS.keys():
126 log.debug( u'- in proxy_app.add_history_note(); returning forbidden for ip, `%s`' % flask.request.remote_addr )
127 return flask.abort( 403 )
128 ( db_id, db_h ) = ( flask.request.form[u'db_id'], db_handler.DB_Handler(log) ) # flask will return a '400 - Bad Request' if getting a value fails
129 result = db_h.add_history_note( db_id )
130 return_dict = {
131 u'request_type': u'add_history_note', u'db_id': db_id,
132 u'datetime': unicode( datetime.datetime.now() ), u'result': result }
133 return flask.jsonify( return_dict )
134
135
136
137 # if __name__ == '__main__':
138 # if os.getenv('DEVBOX') == 'true':
139 # app.run( host='0.0.0.0', debug=True )
140 # else:
141 # app.run()
| 129 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2
3 import json, os
4
5
6 ## db access
7 DB_HOST = unicode( os.environ.get(u'ezb_dbprx__DB_HOST') )
8 DB_PORT = int( unicode(os.environ.get(u'ezb_dbprx__DB_PORT')) )
9 DB_USERNAME = unicode( os.environ.get( u'ezb_dbprx__DB_USERNAME') )
10 DB_PASSWORD = unicode( os.environ.get(u'ezb_dbprx__DB_PASSWORD') )
11 DB_NAME = unicode( os.environ.get( u'ezb_dbprx__DB_NAME') )
12
13 ## db sql
14 SEARCH_SQL = unicode( os.environ.get( u'ezb_dbprx__SEARCH_SQL') ) # for db_handler.DB_Handler.search_new_request()
15 UPDATE_REQUEST_STATUS_SQL_PATTERN = unicode( os.environ.get( u'ezb_dbprx__UPDATE_REQUEST_STATUS_SQL_PATTERN') ) # for db_handler.DB_Handler.update_request_status()
16 CONFIRM_REQUEST_STATUS_SQL_PATTERN = unicode( os.environ.get( u'ezb_dbprx__CONFIRM_REQUEST_STATUS_SQL_PATTERN') ) # for db_handler.DB_Handler.update_request_status()
17 CREATE_HISTORY_ENTRY_PATTERN = unicode( os.environ.get( u'ezb_dbprx__CREATE_HISTORY_ENTRY_SQL_PATTERN') ) # for db_handler.DB_Handler.add_history_entry()
18
19
20 ## file-logger
21 LOG_DIR = unicode( os.environ.get(u'ezb_dbprx__LOG_DIR') )
22 LOG_LEVEL = unicode( os.environ.get(u'ezb_dbprx__LOG_LEVEL') )
23
24 ## basic auth
25 BASIC_AUTH_USERNAME = unicode( os.environ.get(u'ezb_dbprx__BASIC_AUTH_USERNAME') )
26 BASIC_AUTH_PASSWORD = unicode( os.environ.get(u'ezb_dbprx__BASIC_AUTH_PASSWORD') )
27
28 ## other
29 LEGIT_IPS = json.loads( unicode(os.environ.get(u'ezb_dbprx__LEGIT_IPS')) )
30
31 # end
| 7 - error: undefined-variable
7 - warning: redundant-u-string-prefix
8 - error: undefined-variable
8 - warning: redundant-u-string-prefix
9 - error: undefined-variable
9 - warning: redundant-u-string-prefix
10 - error: undefined-variable
10 - warning: redundant-u-string-prefix
11 - error: undefined-variable
11 - warning: redundant-u-string-prefix
14 - error: undefined-variable
14 - warning: redundant-u-string-prefix
15 - error: undefined-variable
15 - warning: redundant-u-string-prefix
16 - error: undefined-variable
16 - warning: redundant-u-string-prefix
17 - error: undefined-variable
17 - warning: redundant-u-string-prefix
21 - error: undefined-variable
21 - warning: redundant-u-string-prefix
22 - error: undefined-variable
22 - warning: redundant-u-string-prefix
25 - error: undefined-variable
25 - warning: redundant-u-string-prefix
26 - error: undefined-variable
26 - warning: redundant-u-string-prefix
29 - error: undefined-variable
29 - warning: redundant-u-string-prefix
|
1 # -*- coding: utf-8 -*-
2
3 """ Handles log setup. """
4
5 import logging, os
6 import logging.handlers
7 from ezb_dbprx.config import settings
8
9
10 def setup_logger():
11 """ Returns a logger to write to a file. """
12 filename = u'%s/ezb_dbprx.log' % settings.LOG_DIR
13 formatter = logging.Formatter( u'[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s' )
14 logger = logging.getLogger( u'ezb_dbprx' )
15 level_dict = { u'debug': logging.DEBUG, u'info':logging.INFO }
16 logger.setLevel( level_dict[settings.LOG_LEVEL] )
17 file_handler = logging.handlers.RotatingFileHandler( filename, maxBytes=(5*1024*1024), backupCount=1 )
18 file_handler.setFormatter( formatter )
19 logger.addHandler( file_handler )
20 return logger
| 12 - warning: redundant-u-string-prefix
13 - warning: redundant-u-string-prefix
14 - warning: redundant-u-string-prefix
15 - warning: redundant-u-string-prefix
15 - warning: redundant-u-string-prefix
5 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2
3 """ Handles db connection and executes sql. """
4
5 import datetime, json, os, pprint, random, sys
6 import MySQLdb
7 from ezb_dbprx.config import settings
8
9
10 class DB_Handler(object):
11
12 def __init__(self, file_logger ):
13 """ Sets up basics. """
14 self.db_host = settings.DB_HOST
15 self.db_port = settings.DB_PORT
16 self.db_username = settings.DB_USERNAME
17 self.db_password = settings.DB_PASSWORD
18 self.db_name = settings.DB_NAME
19 self.connection_object = None # populated during queries
20 self.cursor_object = None # populated during queries
21 self.file_logger = file_logger
22 self.key_mapper = { # converts database fields into more generic keys
23 u'alt_edition': u'preference_alternate_edition', # needed?
24 u'barcode': u'patron_barcode',
25 u'bibno': u'item_bib_number', # needed?
26 u'created': u'db_create_date',
27 u'email': u'patron_email',
28 u'eppn': u'patron_shib_eppn',
29 u'firstname': u'patron_name_first',
30 u'group': u'patron_shib_group',
31 u'id': u'db_id',
32 u'isbn': u'item_isbn',
33 u'lastname': u'patron_name_last',
34 u'loc': u'libary_location', # needed?
35 u'name': u'patron_name_firstlast',
36 u'patronId': u'patron_id', # needed?
37 u'pref': u'preference_quick', # needed?
38 u'request_status': u'db_request_status',
39 u'sfxurl': u'item_openurl',
40 u'staffnote': u'staff_note',
41 u'title': u'item_title',
42 u'volumes': u'item_volumes',
43 u'wc_accession': u'item_worldcat_id'
44 }
45
46 ## execute_sql() ##
47
48 def execute_sql(self, sql):
49         """ Executes sql; returns a list of row-dicts, or None on error.
50             Example return data: [ {row1field1key: row1field1value, row1field2key: row1field2value}, {row2field1key: row2field1value, row2field2key: row2field2value} ]
51             Called by self.search_new_request(), self.update_request_status(), and self.add_history_entry() """
52 try:
53 self._setup_db_connection()
54 if not self.cursor_object:
55 return
56 self.cursor_object.execute( sql )
57 dict_list = self.cursor_object.fetchall() # really a tuple of row-dicts
58 dict_list = self._unicodify_resultset( dict_list )
59 return dict_list
60 except Exception as e:
61 message = u'in db_handler.execute_sql(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )
62 self.file_logger.error( message )
63 return None
64 finally:
65 self._close_db_connection()
66
67 def _setup_db_connection( self ):
68 """ Sets up connection; populates instance attributes.
69 Called by execute_sql() """
70 self.file_logger.debug( u'in db_handler._setup_db_connection(); starting' )
71 try:
72 self.connection_object = MySQLdb.connect(
73 host=self.db_host, port=self.db_port, user=self.db_username, passwd=self.db_password, db=self.db_name )
74 self.file_logger.debug( u'in db_handler._setup_db_connection(); connection-object set' )
75 self.cursor_object = self.connection_object.cursor(MySQLdb.cursors.DictCursor)
76 return
77 except Exception as e:
78 message = u'in db_handler._setup_db_connection(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )
79 self.file_logger.error( message )
80
81 def _unicodify_resultset( self, dict_list ):
82 """ Takes tuple of row-dicts;
83 Makes true list and ensures all keys and values are unicode;
84 Returns list of type-corrected dicts.
85 Called by execute_sql() """
86 result_list = []
87 for row_dict in dict_list:
88 new_row_dict = {}
89 for key,value in row_dict.items():
90 if type(value) == datetime.datetime:
91 value = unicode(value)
92 new_row_dict[ unicode(key) ] = unicode(value)
93 result_list.append( new_row_dict )
94 return result_list
95
96 def _close_db_connection( self ):
97 """ Closes db connection.
98 Called by execute_sql() """
99 try:
100 self.cursor_object.close()
101 self.connection_object.close()
102 return
103 except Exception as e:
104 message = u'in db_handler._close_db_connection(); error: %s' % unicode( repr(e).decode(u'utf8', u'replace') )
105 self.file_logger.error( message )
106
107 ## search_new_request() ##
108
109 def search_new_request( self ):
110         """ Returns list of dicts on find, empty list on no-find.
111 Called by: proxy_app.search_new_request() """
112 sql = settings.SEARCH_SQL
113 self.file_logger.debug( u'in db_handler.search_new_request; sql, %s' % sql )
114 raw_dict_list = self.execute_sql( sql )
115 self.file_logger.debug( u'in db_handler.search_new_request; raw_dict_list, %s' % raw_dict_list )
116 return_val = []
117 if raw_dict_list:
118 if len( raw_dict_list ) > 0:
119 return_val = self._massage_raw_data( raw_dict_list )
120 return return_val
121
122 def _massage_raw_data( self, raw_dict_list ):
123 """ Makes keys more generic.
124 Returns list of updated dicts
125 Called by search_new_request() .
126 Possible TODO: add None to self.key_mapper if item isn't needed; test for that here and don't return it. """
127 updated_list = []
128 for entry in raw_dict_list:
129 massaged_dict = {}
130 for (key, value) in entry.items():
131 new_key = self.key_mapper[key]
132 massaged_dict[new_key] = value
133 updated_list.append( massaged_dict )
134 return updated_list
135
136 ## update_request_status ##
137
138 def update_request_status( self, db_id, status ):
139 """ Updates request table status field.
140 Called by proxy_app.update_request_status() """
141 ## update the status
142 update_sql = settings.UPDATE_REQUEST_STATUS_SQL_PATTERN % ( status, db_id )
143 self.file_logger.debug( u'in db_handler.update_request_status(); update_sql, %s' % update_sql )
144 try:
145 self.execute_sql( update_sql )
146 except Exception as e:
147 self.file_logger.error( u'in db_handler.update_request_status(); problem executing update; exception: %s' % e )
148 return { u'status_update_result': u'status_update_failed_on_exception' }
149 ## confirm the update was successful
150 confirmation_sql = settings.CONFIRM_REQUEST_STATUS_SQL_PATTERN % db_id
151 self.file_logger.debug( u'in db_handler.update_request_status(); confirmation_sql, %s' % confirmation_sql )
152 try:
153 result_dict_list = self.execute_sql( confirmation_sql )
154 self.file_logger.debug( u'in db_handler.update_request_status; result_dict_list, %s' % result_dict_list )
155 if result_dict_list[0][u'request_status'] == status:
156 return { u'status_update_result': u'status_updated' }
157 else:
158 return { u'status_update_result': u'status_confirmation_failed' }
159 except Exception as e:
160 self.file_logger.error( u'in db_handler.update_request_status(); problem executing confirmation; exception: %s' % e )
161 return { u'status_update_result': u'status_confirmation_failed_on_exception' }
162
163 ## add history note ##
164
165 def add_history_entry( self, request_id ):
166 """ Creates history table record.
167 Called by proxy_app.add_history_note() """
168 add_history_sql = settings.CREATE_HISTORY_ENTRY_PATTERN % request_id
169 self.file_logger.debug( u'in db_handler.add_history_entry(); add_history_sql, %s' % add_history_sql )
170 result = self.execute_sql( sql )
171 self.file_logger.debug( u'in db_handler.add_history_entry(); result, `%s`' % result )
172 return
173
174 # end class DB_Handler()
175
| 10 - refactor: useless-object-inheritance
10 - refactor: too-many-instance-attributes
23 - warning: redundant-u-string-prefix
23 - warning: redundant-u-string-prefix
24 - warning: redundant-u-string-prefix
24 - warning: redundant-u-string-prefix
25 - warning: redundant-u-string-prefix
25 - warning: redundant-u-string-prefix
26 - warning: redundant-u-string-prefix
26 - warning: redundant-u-string-prefix
27 - warning: redundant-u-string-prefix
27 - warning: redundant-u-string-prefix
28 - warning: redundant-u-string-prefix
28 - warning: redundant-u-string-prefix
29 - warning: redundant-u-string-prefix
29 - warning: redundant-u-string-prefix
30 - warning: redundant-u-string-prefix
30 - warning: redundant-u-string-prefix
31 - warning: redundant-u-string-prefix
31 - warning: redundant-u-string-prefix
32 - warning: redundant-u-string-prefix
32 - warning: redundant-u-string-prefix
33 - warning: redundant-u-string-prefix
33 - warning: redundant-u-string-prefix
34 - warning: redundant-u-string-prefix
34 - warning: redundant-u-string-prefix
35 - warning: redundant-u-string-prefix
35 - warning: redundant-u-string-prefix
36 - warning: redundant-u-string-prefix
36 - warning: redundant-u-string-prefix
37 - warning: redundant-u-string-prefix
37 - warning: redundant-u-string-prefix
38 - warning: redundant-u-string-prefix
38 - warning: redundant-u-string-prefix
39 - warning: redundant-u-string-prefix
39 - warning: redundant-u-string-prefix
40 - warning: redundant-u-string-prefix
40 - warning: redundant-u-string-prefix
41 - warning: redundant-u-string-prefix
41 - warning: redundant-u-string-prefix
42 - warning: redundant-u-string-prefix
42 - warning: redundant-u-string-prefix
43 - warning: redundant-u-string-prefix
43 - warning: redundant-u-string-prefix
60 - warning: broad-exception-caught
61 - warning: redundant-u-string-prefix
61 - error: undefined-variable
61 - warning: redundant-u-string-prefix
61 - warning: redundant-u-string-prefix
48 - refactor: inconsistent-return-statements
70 - warning: redundant-u-string-prefix
77 - warning: broad-exception-caught
74 - warning: redundant-u-string-prefix
78 - warning: redundant-u-string-prefix
78 - error: undefined-variable
78 - warning: redundant-u-string-prefix
78 - warning: redundant-u-string-prefix
91 - error: undefined-variable
92 - error: undefined-variable
92 - error: undefined-variable
103 - warning: broad-exception-caught
104 - warning: redundant-u-string-prefix
104 - error: undefined-variable
104 - warning: redundant-u-string-prefix
104 - warning: redundant-u-string-prefix
113 - warning: redundant-u-string-prefix
115 - warning: redundant-u-string-prefix
143 - warning: redundant-u-string-prefix
146 - warning: broad-exception-caught
147 - warning: redundant-u-string-prefix
148 - warning: redundant-u-string-prefix
148 - warning: redundant-u-string-prefix
151 - warning: redundant-u-string-prefix
159 - warning: broad-exception-caught
154 - warning: redundant-u-string-prefix
155 - refactor: no-else-return
155 - warning: redundant-u-string-prefix
156 - warning: redundant-u-string-prefix
156 - warning: redundant-u-string-prefix
158 - warning: redundant-u-string-prefix
158 - warning: redundant-u-string-prefix
160 - warning: redundant-u-string-prefix
161 - warning: redundant-u-string-prefix
161 - warning: redundant-u-string-prefix
169 - warning: redundant-u-string-prefix
170 - error: undefined-variable
171 - warning: redundant-u-string-prefix
165 - refactor: useless-return
5 - warning: unused-import
5 - warning: unused-import
5 - warning: unused-import
5 - warning: unused-import
5 - warning: unused-import
|
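Most of the errors in this label block are again Python 2's `unicode` builtin, but the `undefined-variable` at line 170 is a genuine bug rather than a version mismatch: `add_history_entry()` builds `add_history_sql` and then executes an unrelated name `sql`, so the method would raise NameError on every call. The intended line is presumably:

        result = self.execute_sql( add_history_sql )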
1 import pandas as pd
2 import numpy as np
3
4 # ------- Read CSV data ----------
5
6
7 # stop=pd.read_csv('Arkiv/stops.txt')
8 stop_times=pd.read_csv('Arkiv/stop_times.txt')
9 # calendar=pd.read_csv('Arkiv/calendar.txt')
10 calendar_dates=pd.read_csv('Arkiv/calendar_dates.txt')
11 trips=pd.read_csv('Arkiv/trips.txt')
12
13 # ----------Conditional Subset ----------
14
15 new_calendar_dates=calendar_dates[(calendar_dates.date>20200225 )& (calendar_dates.date<20200301)]
16
17
18
19 #----------- remove useless columns from calendar data----------------------------
20
21 new_csv=stop_times.iloc[0:,0:5]
22
23
24 # ---------------Merge them on service_id and make a new column named "unique_trip_id"
25 a=trips
26 b=new_calendar_dates
27 c=pd.merge(a, b, on='service_id', how='left')
28 c['unique_trip_id']=c.index+1
29 e=stop_times
30 f=pd.merge(c, e, on='trip_id', how='left')
31 df=f
32
33
34
35
36 # result['unique_trip_id'] = result.groupby(['trip_id','end_date']).ngroup()
37 # result=result.sort_values(by=['unique_trip_id', 'stop_sequence'])
38
39
40 # unique_trip_id=1
41 # new=[]
42 # for i in range(0,len(my_list)-1):
43 # if my_list[i] == my_list[i+1]:
44 # new.append(unique_trip_id)
45 # else:
46 # unique_trip_id+=1
47 # new.append(unique_trip_id)
48
49 # -------- Make int into string and combine two column on new columns-------
50
51
52 df['unique_trip_id']=df['unique_trip_id'].map(lambda x: x+1)
53 df['first']=df['unique_trip_id'].map(lambda x: str(x))
54 df['second']=df['stop_sequence'].map(lambda x: str(x))
55 df['first_date']=df['start_date'].map(lambda x: str(x))
56 df['second_date']=df['end_date'].map(lambda x: str(x))
57 df['unique_sub_trip_id']= df[['first', 'second']].apply(lambda x: '.'.join(x), axis=1)
58 df['arrival_time']= df[['second_date', 'arrival_time']].apply(lambda x: ' '.join(x), axis=1)
59 df['departure_time']= df[['first_date', 'departure_time']].apply(lambda x: ' '.join(x), axis=1)
60
61 # --------- Rerange data ---------------
62
63 df=df[['unique_trip_id','unique_sub_trip_id','trip_id','stop_id','stop_sequence','arrival_time','departure_time']]
64
65 unique_trip_id_list=df.unique_trip_id.unique().tolist()
66
67 df_list=[]
68 for i in unique_trip_id_list:
69 df1 = df.loc[df['unique_trip_id'] == i]
70 df1['arrival_time'] = df1['arrival_time'].shift(-1)
71 df1['stop_sequence'] = df1['stop_sequence'].shift(-1)
72 df_list.append(df1)
73 final_result=pd.concat(df_list)
74
75 final_result.to_csv('result.csv')
| 69 - warning: bad-indentation
70 - warning: bad-indentation
71 - warning: bad-indentation
72 - warning: bad-indentation
53 - warning: unnecessary-lambda
54 - warning: unnecessary-lambda
55 - warning: unnecessary-lambda
56 - warning: unnecessary-lambda
57 - warning: unnecessary-lambda
58 - warning: unnecessary-lambda
59 - warning: unnecessary-lambda
2 - warning: unused-import
|
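Two of the warning groups above have compact fixes. The `unnecessary-lambda` lines wrap a builtin that pandas can apply directly, and the per-trip `for` loop at the end both assigns into slices of `df` (a classic SettingWithCopyWarning trigger) and rebuilds the frame with `pd.concat`; a grouped shift does the same work without a Python-level loop. A sketch, assuming `df` as constructed above:

df['first'] = df['unique_trip_id'].astype(str)
df['second'] = df['stop_sequence'].astype(str)
df['first_date'] = df['start_date'].astype(str)
df['second_date'] = df['end_date'].astype(str)

# shift arrival_time and stop_sequence within each trip in one pass
final_result = df.copy()
final_result['arrival_time'] = final_result.groupby('unique_trip_id')['arrival_time'].shift(-1)
final_result['stop_sequence'] = final_result.groupby('unique_trip_id')['stop_sequence'].shift(-1)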
1 import pandas as pd
2 import numpy as np
3
4 stop=pd.read_csv('Arkiv/stops.txt')
5 stop_times=pd.read_csv('Arkiv/stop_times.txt')
6 calendar=pd.read_csv('Arkiv/calendar.txt')
7 calendar_dates=pd.read_csv('Arkiv/calendar_dates.txt')
8 trips=pd.read_csv('Arkiv/trips.txt')
9
10 print(stop.shape)
11 print(stop_times.shape)
12 print(calendar.shape)
13 print(calendar_dates.shape)
14 print(trips.shape)
15
16 # ----------Conditional Subset ----------
17
18 new_calendar_dates=calendar_dates[(calendar_dates.date>20200225 )& (calendar_dates.date<20200301)]
19
20 print(new_calendar_dates.date.min())
21 print(new_calendar_dates.date.max())
22 print(new_calendar_dates.shape)
23
24
25 trips=trips.iloc[0:,1:3]
26 print(trips.head())
27 print(trips.shape) | 2 - warning: unused-import
|
1 from housing.items import HousingItemBuy
2 from scrapy import Spider
3 from scrapy.http.request import Request
4
5 #To parse the JSON received
6 import json
7
8 class HousingSpider(Spider):
9 name = "housing"
10 allowed_domains = ["housing.com"]
11 custom_settings = {'USER_AGENT' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}
12
13
14 def start_requests(self):
15 #We have 1080 pages to fetch
16 for count in range(1,1081):
17
18 print "Getting page : %s" %count
19
20 yield Request("https://buy.housing.com/api/v1/buy/index/filter?poly=f97f947ffae6408ac295&results_per_page=30&p=" + str(count) + "&resale_total_count=30045&np_total_count=2329", self.parse_buy)
21
22
23 def parse_buy(self, response):
24
25 #Since the response is purely JSON
26 text = response.body
27
28 #Parsing it using the builtin json utility
29 parsed_json = json.loads(text)
30
31 #For each entry, we will store all the information we defined earlier in items.py
32 #The parsed json can be read as a dict. Examining the JSON, we can easily navigate
33 #to where we have the data we need.
34
35 for iter in range(30):
36 item = HousingItemBuy()
37 item['ad_price'] = parsed_json["hits"][iter]["formatted_price"]
38 item['ad_url'] = parsed_json["hits"][iter]["inventory_canonical_url"]
39 item['ad_title'] = parsed_json["hits"][iter]["title"]
40 item['ad_coordinates'] = parsed_json["hits"][iter]["location_coordinates"]
41 item['ad_date_added'] = parsed_json["hits"][iter]["date_added"]
42 item['ad_area'] = parsed_json["hits"][iter]["inventory_configs"][0]["area"]
43 item['ad_bedrooms'] = parsed_json["hits"][iter]["inventory_configs"][0]["number_of_bedrooms"]
44 item['ad_toilets'] = parsed_json["hits"][iter]["inventory_configs"][0]["number_of_toilets"]
45 item['ad_contact_persons_number'] = parsed_json["hits"][iter]["contact_persons_info"][0]["contact_no"]
46 item['ad_contact_persons_id'] = parsed_json["hits"][iter]["contact_persons_info"][0]["profile_id"]
47 item['ad_contact_persons_name'] = parsed_json["hits"][iter]["contact_persons_info"][0]["name"]
48
49 #Some entries do not have the ad_city/ad_locality variable.
50 try:
51 item['ad_city'] = parsed_json["hits"][iter]["display_city"][0]
52 except :
53 item['ad_city'] = "None given"
54
55 try:
56 item['ad_locality'] = parsed_json["hits"][iter]["display_city"][1]
57 except :
58 item['ad_locality'] = "None given"
59
60 item['ad_gas_pipeline'] = parsed_json["hits"][iter]["inventory_amenities"]["has_gas_pipeline"]
61 item['ad_lift'] = parsed_json["hits"][iter]["inventory_amenities"]["has_lift"]
62 item['ad_parking'] = parsed_json["hits"][iter]["inventory_amenities"]["has_parking"]
63 item['ad_gym'] = parsed_json["hits"][iter]["inventory_amenities"]["has_gym"]
64 item['ad_swimming_pool'] = parsed_json["hits"][iter]["inventory_amenities"]["has_swimming_pool"]
65 item['ad_id'] = parsed_json["hits"][iter]["id"]
66 yield item | 18 - error: syntax-error
|
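The single `syntax-error` label hides everything else pylint might have reported: `print "Getting page : %s" %count` is a Python 2 print statement, so under Python 3 the module does not parse at all and analysis stops at line 18. The Python 3 form is simply:

        print("Getting page : %s" % count)

The same Python 2 print statement is what trips the dmoz spider immediately below.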
1 from scrapy.spiders import BaseSpider
2 from scrapy101.items import Scrapy101Item
3
4 class Scrapy101Spider(BaseSpider):
5 name = "dmoz"
6 allowed_domains = ["dmoz.org/"]
7 start_urls = ["http://www.dmoz.org/"]
8
9 def parse(self, response):
10 for div in response.xpath('/html/body/div[3]/div[3]/div[1]/div'):
11 for entry in div.xpath('span'):
12 item = Scrapy101Item()
13 item['title'] = entry.xpath('a/text()').extract()
14 print item['title'] | 14 - error: syntax-error
|
1 from scrapy import Item, Field
2
3 class CardekhoItem(Item):
4 title = Field()
5 price = Field()
6 distance = Field() | 3 - refactor: too-few-public-methods
|
1 from scrapy import Item, Field
2
3 class Scrapy101Item(Item):
4 title = Field() | 4 - warning: bad-indentation
3 - refactor: too-few-public-methods
|
1 from cardekho.items import CardekhoItem
2 from scrapy import Spider
3 from scrapy.http.request import Request
4
5 class CardekhoSpider(Spider):
6 name = "cardekho"
7 allowed_domains = ["http://www.cardekho.com"]
8 start_urls = ["http://www.cardekho.com/used-cars+in+mumbai-all/"]
9
10 #This is to not get redirected by CarDekho. We are identifying ourselves as a web-browser.
11 custom_settings = {'USER_AGENT' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.111 Safari/537.36'}
12
13 def start_requests(self):
14 #There are 162 pages, we are asking Scrapy to get us all of them.
15 for i in range(162):
16 yield Request("http://www.cardekho.com/used-cars+in+mumbai-all/" + str(i), self.parse)
17
18 def parse(self, response):
19 for sel in response.xpath('/html/body/main/div/div[2]/div[2]/div[9]/form/ul/li'):
20 item = CardekhoItem()
21 item ['title'] = sel.xpath('div[1]/div[2]/div[1]/a/text()').extract()
22 item ['price'] = sel.xpath('div[1]/div[3]/div[1]/text()').extract()
23 item ['distance'] = sel.xpath('div[1]/div[2]/div[3]/ul/li[1]/div[2]/span/text()').extract()
24 yield item | Clean Code: No Issues Detected
|
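The cardekho spider comes back clean, though one Scrapy-specific detail is worth flagging that pylint cannot see: `allowed_domains` is documented to hold bare domain names, and including a scheme as above (`"http://www.cardekho.com"`) can confuse the offsite filter. The conventional value would be:

    allowed_domains = ["cardekho.com"]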
1 from scrapy import Item, Field
2
3 class HousingItemBuy(Item):
4 ad_id = Field()
5 ad_title = Field()
6 ad_price = Field()
7 ad_area = Field()
8 ad_url = Field()
9 ad_date_added = Field()
10 ad_coordinates = Field()
11 ad_bedrooms = Field()
12 ad_toilets = Field()
13 ad_gas_pipeline = Field()
14 ad_lift = Field()
15 ad_parking = Field()
16 ad_gym = Field()
17 ad_swimming_pool = Field()
18 ad_city = Field()
19 ad_locality = Field()
20 ad_contact_persons_name = Field()
21 ad_contact_persons_number = Field()
22 ad_contact_persons_id = Field()
23 count = Field() | 3 - refactor: too-few-public-methods
|
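`too-few-public-methods` recurs on every declarative container in this set (the scrapy Item subclasses here and the Django migration below). For classes whose whole job is to declare fields, the check is usually suppressed rather than satisfied, e.g.:

from scrapy import Item, Field

class HousingItemBuy(Item):  # pylint: disable=too-few-public-methods
    ad_id = Field()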
1
2 from django.urls import path
3 # from.views import address,add_to_cart,mobile,checkout,orders,ProductView,ProductDetailView,CustomerRegistrationView,ProfileView,show_cart,laptop,fashion_top,fashion_bottom,gym_product,home_decor,plus_cart,minus_cart,remove_cart,payment_done,orders
4 from django.conf import settings
5 from django.conf.urls.static import static
6 # from django.contrib.auth import views as auth_views
7 from fashion.views import HomeView,perfume_view,product_view,shoes_view,watch_view,tshirt_view,ProductDetailView,add_to_cart,CustomerRegistrationView,ProfileView,address,show_cart,remove_cart,checkout,orders
8 from django.contrib.auth import views as auth_views
9 from .forms import LoginForm,MyPasswordChangeForm
10 # ,MyPasswordResetForm,MySetPasswordForm
11
12 urlpatterns = [
13 path('',HomeView.as_view(),name='home'),
14 path('alldata/',product_view,name="alldata"),
15 path('perfume/',perfume_view,name="perfume"),
16 path('perfume/<slug:data>/',perfume_view,name="perfume"),
17 path('watches/',watch_view,name="watches"),
18 path('watches/<slug:data>/',watch_view,name="watches"),
19 path('tshirts/',tshirt_view,name="tshirts"),
20 path('tshirts/<slug:data>/',tshirt_view,name="tshirts"),
21 path('shoes/',shoes_view,name="shoes"),
22 path('shoes/<slug:data>/',shoes_view,name="shoes"),
23 path('product-detail/<int:pk>',ProductDetailView.as_view(),name="product-detail"),
24 path('add-to-cart/',add_to_cart,name="add-to-cart"),
25 path('cart/',show_cart,name='cart'),
26 path('removecart/<int:pk>/',remove_cart,name='removecart'),
27 path('profile/',ProfileView.as_view(),name="profile"),
28 path('address/',address,name="address"),
29 path('orders/',orders,name="orders"),
30 path('regestration/',CustomerRegistrationView.as_view(),name="customerregistration"),
31 path('login/', auth_views.LoginView.as_view(template_name='fashion/login.html',authentication_form=LoginForm), name='login'),
32 path('logout/', auth_views.LogoutView.as_view(next_page='login') ,name='logout'),
33 path('passwordchange/',auth_views.PasswordChangeView.as_view(template_name='fashion/passwordchange.html',form_class=MyPasswordChangeForm,success_url='/passwordchangedone/'),name="passwordchange"),
34 path('passwordchangedone/', auth_views.PasswordChangeDoneView.as_view(template_name='fashion/passwordchangedone.html'), name='passwordchangedone'),
35 path('checkout/',checkout,name='checkout'),
36
37
38
39 ]+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| 9 - error: relative-beyond-top-level
|
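The `relative-beyond-top-level` error at line 9 means pylint could not resolve `from .forms import ...` against its analysis root; the code itself is fine inside a Django app package. Where an absolute import is preferred, and assuming this urls.py lives in the `fashion` app as the neighbouring imports suggest, the equivalent would be:

from fashion.forms import LoginForm, MyPasswordChangeForm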
1 # Generated by Django 3.2.6 on 2021-09-25 07:35
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('fashion', '0002_cart_orderplaced_product'),
10 ]
11
12 operations = [
13 migrations.AlterField(
14 model_name='product',
15 name='category',
16 field=models.CharField(choices=[('TS', 'Tshirts'), ('W', 'Watches'), ('P', 'Perfumes'), ('S', 'Shoes')], max_length=2),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|
1 # Connect to Minecraft
2 from mcpi.minecraft import Minecraft
3 mc = Minecraft.create()
4
5 # Strings for the 3D variables
6
7 x = input("desired location for x ")
8 y = input("desired location for y ")
9 z = input("desired location for z ")
10
11 # Change the player's position
12
13 mc.player.setPos(x, y, z)
14
15 print("End of movement ->", x, y, z) | Clean Code: No Issues Detected
|
1 # Connect to Minecraft
2 from mcpi.minecraft import Minecraft
3 mc = Minecraft.create()
4
5 # Strings for the 3D variables
6
7 bloco = input("Desired block number:")
8
9 x = input("desired location for: x ")
10 y = input("desired location for: y ")
11 z = input("desired location for: z ")
12
13 mc.setBlock(x, y, z, bloco)
14
15 print("End of script. Note: creates 1 block at a time!") | Clean Code: No Issues Detected
|
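Both Minecraft scripts pass `input()` results straight through to mcpi. In Python 3 `input()` returns strings; depending on the mcpi version, string arguments may happen to serialize acceptably over the wire, but converting explicitly is the safer pattern. A sketch for the second script (the `int()` conversions are an assumption, not something the listing or its labels require):

from mcpi.minecraft import Minecraft
mc = Minecraft.create()

bloco = int(input("Desired block number: "))  # coerce user input before sending to the server
x = int(input("Desired x location: "))
y = int(input("Desired y location: "))
z = int(input("Desired z location: "))
mc.setBlock(x, y, z, bloco)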
1 # third party imports
2 import argparse
3 import os
4 import torch
5 from torchvision import models
6
7 # local imports
8 from model import create_dataloaders, create_model, train_model
9 from utils import determine_device
10 from validation import validate_train_args
11
12 # CLI defaults
13 HIDDEN_UNITS_DEFAULT = 2048
14 ARCH_DEFAULT = "vgg16"
15 LEARNING_RATE_DEFAULT = 0.001
16 EPOCHS_DEFAULT = 8
17
18 # other settings
19 BATCH_SIZE = 60
20 DROPOUT_PROBABILITY = 0.5
21 ARCH_CHOICES = [
22 "vgg16",
23 "vgg16_bn",
24 "vgg11",
25 "vgg11_bn",
26 "vgg13",
27 "vgg13_bn",
28 "vgg19",
29 "vgg19_bn",
30 "densenet121",
31 "densenet161",
32 "densenet169",
33 "densenet201",
34 ]
35
36 # configure argument parser
37 parser = argparse.ArgumentParser(description="Trains model and saves checkpoint")
38 parser.add_argument("data_directory", help="the directory for the training data")
39 parser.add_argument("--arch", choices=ARCH_CHOICES, default=ARCH_DEFAULT)
40 parser.add_argument("--gpu", action="store_true")
41 parser.add_argument("--learning_rate", type=float, default=LEARNING_RATE_DEFAULT)
42 parser.add_argument("--save_dir")
43 parser.add_argument("--epochs", type=int, default=EPOCHS_DEFAULT)
44 parser.add_argument("--hidden_units", type=int, default=HIDDEN_UNITS_DEFAULT)
45
46 # parse CLI args
47 args = parser.parse_args()
48
49 # do some additional validation on args
50 validate_train_args(args)
51
52 # get dataloaders and class_to_idx map
53 print("Creating dataloaders...")
54 dataloaders, class_to_idx = create_dataloaders(args.data_directory, BATCH_SIZE)
55
56 # use gpu if available and requested in args
57 device = determine_device(args.gpu)
58 print("Using device {}...".format(device.type))
59
60 print("Creating model...")
61 training_directory = args.data_directory + "/train/"
62 output_units_size = sum(
63 [os.path.isdir(training_directory + i) for i in os.listdir(training_directory)]
64 )
65 model, input_size = create_model(
66 args.arch, args.hidden_units, DROPOUT_PROBABILITY, output_units_size, device
67 )
68
69 # train the model in place
70 print("Training model...")
71 train_model(model, dataloaders, args.epochs, args.learning_rate, device)
72
73 # save checkpoint
74 print("Saving checkpoint...")
75 checkpoint = {
76 "arch": args.arch,
77 "batch_size": BATCH_SIZE,
78 "class_to_idx": class_to_idx,
79 "dropout_probability": DROPOUT_PROBABILITY,
80 "hidden_size": args.hidden_units,
81 "input_size": input_size,
82 "output_size": output_units_size,
83 "state_dict": model.state_dict(),
84 }
85 save_path = args.save_dir + "/checkpoint.pth" if args.save_dir else "checkpoint.pth"
86 torch.save(checkpoint, save_path)
87 print("Done. Checkpoint has been saved at {}".format(save_path))
| 62 - refactor: consider-using-generator
5 - warning: unused-import
|
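Of the two findings on train.py, the unused import on line 5 (`torchvision.models`, presumably used inside `create_model` instead) just needs deleting, and `consider-using-generator` on line 62 asks for the bracket-free generator form so `sum()` does not build an intermediate list:

output_units_size = sum(
    os.path.isdir(training_directory + i) for i in os.listdir(training_directory)
)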
1 from os import path
2 import torch
3 from torchvision import models
4
5 # validates train.py args
6 def validate_train_args(args):
7 # check cuda
8 if args.gpu and torch.cuda.is_available() == False:
9 # we don't want to throw sand in the user's face
10 # but let them know we are falling back to CPU
11 print("GPU is not enabled for this device, falling back to CPU")
12
13     # check data_directory existence
14 if path.exists(args.data_directory) == False:
15 raise ValueError(
16 "data directory does not exist: {}".format(args.data_directory)
17 )
18
19     # check save_dir existence
20 if args.save_dir and path.exists(args.save_dir) == False:
21 raise ValueError("save directory does not exist: {}".format(args.save_dir))
22
23
24 # validates predict.py args
25 def validate_predict_args(args):
26 # check cuda
27 if args.gpu and torch.cuda.is_available() == False:
28 # we don't want to throw sand in the user's face
29 # but let them know we are falling back to CPU
30 print("GPU is not enabled for this device, falling back to CPU")
31
32     # check image_path existence
33 if path.exists(args.image_path) == False:
34 raise ValueError("image path does not exist: {}".format(args.image_path))
35
36     # check checkpoint existence
37 if path.exists(args.checkpoint) == False:
38 raise ValueError("checkpoint does not exist: {}".format(args.checkpoint))
39
40     # check category names existence
41 if args.category_names and path.exists(args.category_names) == False:
42 raise ValueError(
43 "category names does not exist: {}".format(args.category_names)
44 )
| 3 - warning: unused-import
|
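Besides the unused `torchvision.models` import flagged above, the repeated `== False` comparisons in this module are normally written with `not`; pylint reports that as `singleton-comparison`, a convention-level message this label column does not happen to include. For example:

if args.gpu and not torch.cuda.is_available():
    print("GPU is not enabled for this device, falling back to CPU")

if not path.exists(args.data_directory):
    raise ValueError("data directory does not exist: {}".format(args.data_directory))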
1 # third party imports
2 import argparse
3 import json
4
5 # local imports
6 from model import predict, load_checkpoint
7 from utils import determine_device
8 from validation import validate_predict_args
9
10 # CLI defaults
11 TOP_K_DEFAULT = 1
12
13 # configure argument parser
14 parser = argparse.ArgumentParser(description="Trains model and saves checkpoint")
15 parser.add_argument("image_path", help="the path for the image you wish to classify")
16 parser.add_argument("checkpoint", help="the model checkpoint you would like to use")
17 parser.add_argument("--category_names")
18 parser.add_argument("--gpu", action="store_true")
19 parser.add_argument("--top_k", type=int, default=TOP_K_DEFAULT)
20
21 # parse and validate args
22 args = parser.parse_args()
23 validate_predict_args(args)
24
25 # Getting category to name mapping
26 cat_to_name = None
27 if args.category_names:
28 with open(args.category_names, "r") as f:
29 cat_to_name = json.load(f)
30
31 # use gpu if available and requested in args
32 device = determine_device(args.gpu)
33 print("Using device {}...".format(device.type))
34
35 print("Loading checkpoint...")
36 model = load_checkpoint(args.checkpoint, device)
37
38 print("Predicting class for image...")
39 chart_data = predict(args.image_path, model, device, cat_to_name, args.top_k)
40
41 print("Printing chart of classes and probabilities...")
42 print(chart_data)
| 28 - warning: unspecified-encoding
|
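The `unspecified-encoding` warning on line 28 is about `open()` silently using the platform's locale encoding; passing one explicitly makes the category-names read deterministic (utf-8 is an assumption here, but the usual choice for JSON files):

with open(args.category_names, "r", encoding="utf-8") as f:
    cat_to_name = json.load(f)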