code
stringlengths 20
13.2k
| label
stringlengths 21
6.26k
|
---|---|
1 #!usr/bin/env python3
2 # _
3 # _ __ ___ ___ | | ___ ___
4 # | '_ ` _ \ / _ \| |/ _ \/ __|
5 # | | | | | | (_) | | __/ (__
6 # |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
7 #
8 # Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
9 # Michel Breyer (mbreyer@student.ethz.ch)
10 # Florian Frei (flofrei@student.ethz.ch)
11 # Fabian Thuring (thfabian@student.ethz.ch)
12 #
13 # This file is distributed under the MIT Open Source License.
14 # See LICENSE.txt for details.
15
16 from pymolec import *
17
18 import numpy as np
19 import matplotlib.pyplot as plt
20 import seaborn as sns
21
22 # seaborn formatting
23 sns.set_context("notebook", font_scale=1.1)
24 sns.set_style("darkgrid")
25 sns.set_palette('deep')
26 deep = ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"]
27
28 def main():
29
30 periodics = ['ref', 'c4']
31 N = np.array([1000, 2000, 3000, 4000, 5000, 6000, 7000, 10000])
32
33 flops = 2 * N # mod plus addition
34
35 fig = plt.figure()
36 ax = fig.add_subplot(1,1,1);
37
38 for periodic in periodics:
39 p = pymolec(N=N, periodic=periodic )
40 output = p.run()
41
42 perf = flops / output['periodic']
43 ax.plot(N, perf, 'o-')
44
45
46 ax.set_xlim([np.min(N)-100, np.max(N)+100])
47 ax.set_ylim([0,2])
48
49 ax.set_xlabel('Number of particles')
50 ax.set_ylabel('Performance [Flops/Cycle]',
51 rotation=0,
52 horizontalalignment = 'left')
53 ax.yaxis.set_label_coords(-0.055, 1.05)
54
55 plt.legend(periodics)
56
57 filename = 'periodic.pdf'
58 print("saving '%s'" % filename )
59 plt.savefig(filename)
60
61
62 if __name__ == '__main__':
63 main()
| 36 - warning: unnecessary-semicolon
16 - warning: wildcard-import
39 - error: undefined-variable
|
1 #!usr/bin/env python3
2 # _
3 # _ __ ___ ___ | | ___ ___
4 # | '_ ` _ \ / _ \| |/ _ \/ __|
5 # | | | | | | (_) | | __/ (__
6 # |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
7 #
8 # Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
9 # Michel Breyer (mbreyer@student.ethz.ch)
10 # Florian Frei (flofrei@student.ethz.ch)
11 # Fabian Thuring (thfabian@student.ethz.ch)
12 #
13 # This file is distributed under the MIT Open Source License.
14 # See LICENSE.txt for details.
15
16 from pymolec import *
17
18 import numpy as np
19 import matplotlib.pyplot as plt
20 import seaborn as sns
21 import os.path
22
23
24
25 # seaborn formatting
26 sns.set_context("notebook", font_scale=1.1)
27 sns.set_style("darkgrid")
28 sns.set_palette('deep')
29 deep = ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"]
30
31 def measure_performance():
32
33 forces = ['q'];
34
35 N = np.logspace(4,7,8).astype(np.int32)
36 steps = np.array([100, 100, 90, 80, 65, 50, 35, 20])
37 rhos = np.array([0.5, 1., 2., 4., 6.,8.,10.])
38
39
40 rc = 2.5
41
42 if os.path.isfile("performances-grid-forces-density.npy"):
43 print("Loading data from <performances-grid-forces-density.npy")
44 performances = np.load("performances-grid-forces-density.npy")
45 return performances, N, rhos
46 else:
47
48 performances = np.zeros((len(rhos), len(N)))
49
50 for rho_idx, rho in enumerate(rhos):
51 flops = N * rc**3 * rho * (18 * np.pi + 283.5)
52
53 p = pymolec(N=N, rho=rho, force=forces, steps=steps, integrator='lf8', periodic='c4')
54 output = p.run()
55
56 perf = flops / output['force']
57 performances[len(rhos)-1-rho_idx, :] = perf
58
59 print("Saving performance data to <performances-grid-forces-density.npy>")
60 np.save("performances-grid-forces-density", performances)
61
62 return performances, N, rhos
63
64 def plot_performance(performances, N, rhos):
65 fig = plt.figure()
66 ax = fig.add_subplot(1,1,1);
67
68 # Generate a custom diverging colormap
69 cmap = sns.diverging_palette(10, 133, n = 256, as_cmap=True)
70
71 ax = sns.heatmap(performances, linewidths=1,
72 yticklabels=rhos[::-1], xticklabels=N,
73 vmax=0.2*np.round(np.max(np.max(performances))*5),
74 vmin=0.2*np.round(np.min(np.min(performances))*5),
75 cmap=cmap, annot=False
76 )
77
78
79 cax = plt.gcf().axes[-1]
80 pos_old = cax.get_position()
81 pos_new = [pos_old.x0 - 0.01, pos_old.y0 + 0, pos_old.width, pos_old.height*((len(rhos)-1)*1./len(rhos))]
82 cax.set_position(pos_new)
83 cax.tick_params(labelleft=False, labelright=True)
84 cax.set_yticklabels(['Low', '', '', '', 'High'])
85
86 ax.text(len(N)+0.35, len(rhos), 'Performance\n[flops/cycle]', ha='left', va='top')
87
88
89 rho_labels_short = ['%.2f' % a for a in rhos]
90 ax.set_yticklabels(rho_labels_short)
91
92 N_labels_short = ['10$^{%1.2f}$' % a for a in np.array(np.log10(N))]
93 ax.set_xticklabels(N_labels_short)
94
95 ax.set_xlabel('Number of particles $N$')
96 ax.set_ylabel('Particle density',
97 rotation=0, horizontalalignment = 'left')
98 ax.yaxis.set_label_coords(0., 1.01)
99 plt.yticks(rotation=0)
100
101 filename = 'forces-grid.pdf'
102 print("saving '%s'" % filename )
103 plt.savefig(filename)
104
105
106 if __name__ == '__main__':
107 perf, N, rhos = measure_performance()
108 plot_performance(perf, N, rhos)
| 33 - warning: unnecessary-semicolon
66 - warning: unnecessary-semicolon
16 - warning: wildcard-import
35 - warning: redefined-outer-name
37 - warning: redefined-outer-name
56 - warning: redefined-outer-name
42 - refactor: no-else-return
53 - error: undefined-variable
64 - warning: redefined-outer-name
64 - warning: redefined-outer-name
|
1 #!usr/bin/env python3
2 # _
3 # _ __ ___ ___ | | ___ ___
4 # | '_ ` _ \ / _ \| |/ _ \/ __|
5 # | | | | | | (_) | | __/ (__
6 # |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
7 #
8 # Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
9 # Michel Breyer (mbreyer@student.ethz.ch)
10 # Florian Frei (flofrei@student.ethz.ch)
11 # Fabian Thuring (thfabian@student.ethz.ch)
12 #
13 # This file is distributed under the MIT Open Source License.
14 # See LICENSE.txt for details.
15
16 import numpy as np
17 import matplotlib.pyplot as plt
18 import seaborn as sns
19 import sys
20 import json
21
22 # seaborn formatting
23 sns.set_context("notebook", font_scale=1.1)
24 sns.set_style("darkgrid")
25 sns.set_palette('deep')
26 deep = ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"]
27
28 try:
29 filename = sys.argv[1]
30 except IndexError as ie:
31 print('usage: plot results.txt')
32 sys.exit(1)
33
34 # load results from json object
35 with open(filename, 'r') as infile:
36 results = json.load(infile)
37
38 N = np.array(results['N'])
39 rho = np.array(results['rho'])
40
41 del results['N']
42 del results['rho']
43
44 #----- plot runtime ------
45
46 fig = plt.figure()
47 ax = fig.add_subplot(1,1,1);
48
49 for k in sorted(results):
50 if 'cell_ref' in results:
51 ax.semilogx(N, np.array(results['cell_ref']) / np.array(results[k]), 'o-', label=k)
52 elif 'lf' in results:
53 ax.semilogx(N, np.array(results['lf']) / np.array(results[k]), 'o-', label=k)
54
55
56 ax.set_xlabel('Number of particles $N$')
57 ax.set_ylabel('Runtime Speedup',
58 rotation=0,
59 horizontalalignment = 'left')
60 ax.yaxis.set_label_coords(-0.055, 1.05)
61
62 ax.set_xlim([np.min(N)*0.9, np.max(N)*1.1])
63 ax.set_ylim([0.0, 1.2 * ax.get_ylim()[1]])
64
65 ax.legend(loc='upper right')
66
67 plt.savefig(filename[:filename.rfind('.')]+'-runtime.pdf')
68
69 #----- plot performance -----
70
71 flops = dict()
72 flops['cell_ref'] = lambda N, rho : 301 * N * rho * 2.5**3
73 flops['q'] = lambda N, rho : 301 * N * rho * 2.5**3
74 flops['q_g'] = lambda N, rho : 180 * N * rho * 2.5**3
75 flops['q_g_avx'] = lambda N, rho : N * (205 * rho * 2.5**3 + 24)
76 flops['lf'] = lambda N, rho : 9 * N
77 flops['lf2'] = lambda N, rho : 9 * N
78 flops['lf4'] = lambda N, rho : 9 * N
79 flops['lf8'] = lambda N, rho : 9 * N
80 flops['lf_avx'] = lambda N, rho : 9 * N
81
82 fig = plt.figure()
83 ax = fig.add_subplot(1,1,1);
84
85 for k in sorted(results):
86 ax.semilogx(N, flops[k](N,rho) / np.array(results[k]), 'o-', label=k)
87
88 ax.set_xlabel('Number of particles $N$')
89 ax.set_ylabel('Performance [Flops/Cycles]',
90 rotation=0,
91 horizontalalignment = 'left')
92 ax.yaxis.set_label_coords(-0.055, 1.05)
93
94 ax.set_xlim([np.min(N)*0.9, np.max(N)*1.1])
95 ax.set_ylim([-0.1, 1.4 * ax.get_ylim()[1]])
96
97 ax.legend(loc='upper right')
98
99 plt.savefig(filename[:filename.rfind('.')]+'-performance.pdf')
| 47 - warning: unnecessary-semicolon
83 - warning: unnecessary-semicolon
35 - warning: unspecified-encoding
71 - refactor: use-dict-literal
|
1 #!usr/bin/env python3
2 # _
3 # _ __ ___ ___ | | ___ ___
4 # | '_ ` _ \ / _ \| |/ _ \/ __|
5 # | | | | | | (_) | | __/ (__
6 # |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
7 #
8 # Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
9 # Michel Breyer (mbreyer@student.ethz.ch)
10 # Florian Frei (flofrei@student.ethz.ch)
11 # Fabian Thuring (thfabian@student.ethz.ch)
12 #
13 # This file is distributed under the MIT Open Source License.
14 # See LICENSE.txt for details.
15
16 from pymolec import *
17
18 import numpy as np
19 import json
20 import sys
21
22 #------------------------------------------------------------------------------
23
24 integrators = ['lf', 'lf2', 'lf4', 'lf8', 'lf_avx']
25
26 N = np.logspace(2, 5, 12, base=10).astype(np.int32)
27 steps = np.array([25])
28
29 rho = 1.0
30 rc = 2.5
31
32 #------------------------------------------------------------------------------
33
34 filename = sys.argv[1]
35
36 results = {}
37
38 for integrator in integrators:
39 p = pymolec(N=N, rho=rho, steps=steps, force='q_g_avx', integrator=integrator)
40 output = p.run()
41
42 results['N'] = output['N'].tolist()
43 results['rho'] = output['rho'].tolist()
44 results[integrator] = output['integrator'].tolist()
45
46 print('Saving performance data to ' + filename)
47
48 with open(filename, 'w') as outfile:
49 json.dump(results, outfile, indent=4)
| 16 - warning: wildcard-import
39 - error: undefined-variable
48 - warning: unspecified-encoding
|
1 #!usr/bin/env python3
2 # _
3 # _ __ ___ ___ | | ___ ___
4 # | '_ ` _ \ / _ \| |/ _ \/ __|
5 # | | | | | | (_) | | __/ (__
6 # |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
7 #
8 # Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
9 # Michel Breyer (mbreyer@student.ethz.ch)
10 # Florian Frei (flofrei@student.ethz.ch)
11 # Fabian Thuring (thfabian@student.ethz.ch)
12 #
13 # This file is distributed under the MIT Open Source License.
14 # See LICENSE.txt for details.
15
16 import numpy as np
17 import time, sys, os, subprocess
18
19 class pymolec:
20
21 def __init__(self, N=np.array([1000]), rho=1.25, steps=np.array([100]),
22 force="cell_ref", integrator="lf", periodic="ref"):
23
24 self.N = N
25 self.rho = rho
26
27
28 if hasattr(steps, "__len__"):
29 if len(N) != len(steps):
30 self.steps = np.full(len(N), steps[0], dtype=np.int)
31 else:
32 self.steps = steps
33 else:
34 self.steps = np.full(len(N), steps, dtype=np.int)
35
36
37 self.force = force
38 self.integrator = integrator
39 self.periodic = periodic
40
41 def run(self, path = None):
42 """
43 runs a molec simulation for the given configurations and outputs a
44 dictionnary containing N, rho, force, integrator, periodic, simulation
45 """
46
47 # Use default path
48 if not path:
49 script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)))
50 if os.name == 'nt':
51 path = os.path.join(script_path, '..', 'build', 'molec.exe')
52 else:
53 path = os.path.join(script_path, '..', 'build', 'molec')
54
55 # Check if molec exists
56 if not os.path.exists(path):
57 raise IOError("no such file or directory: %s" % path)
58
59 times = np.zeros((4, len(self.N)))
60
61 print ("Running molec: %s" % path)
62 print ("rho = {0}, force = {1}, integrator = {2}, periodic = {3}".format(
63 self.rho, self.force, self.integrator, self.periodic))
64
65
66 output = {}
67
68 output['N'] = np.zeros(len(self.N))
69 output['rho'] = np.zeros(len(self.N))
70 output['force'] = np.zeros(len(self.N))
71 output['integrator'] = np.zeros(len(self.N))
72 output['periodic'] = np.zeros(len(self.N))
73 output['simulation'] = np.zeros(len(self.N))
74
75 for i in range(len(self.N)):
76 cmd = [path]
77 cmd += ["--N=" + str(self.N[i])]
78 cmd += ["--rho=" + str(self.rho)]
79 cmd += ["--step=" + str(self.steps[i])]
80 cmd += ["--force=" + self.force]
81 cmd += ["--integrator=" + self.integrator]
82 cmd += ["--periodic=" + self.periodic]
83 cmd += ["--verbose=0"]
84
85 # Print status
86 start = time.time()
87 print(" - N = %9i ..." % self.N[i], end='')
88 sys.stdout.flush()
89
90 try:
91 out = subprocess.check_output(cmd).decode(encoding='utf-8').split('\t')
92
93 print(" %20f s" % (time.time() - start))
94
95 output['N'][i] = int(out[0])
96 output['rho'][i] = float(out[1])
97 output['force'][i] = int(out[3])
98 output['integrator'][i] = int(out[5])
99 output['periodic'][i] = int(out[7])
100 output['simulation'][i] = int(out[9])
101
102 except subprocess.CalledProcessError as e:
103 print(e.output)
104
105 return output
106
107 def main():
108 p = pymolec()
109 print(p.run())
110
111 if __name__ == '__main__':
112 main()
| 21 - refactor: too-many-arguments
21 - refactor: too-many-positional-arguments
59 - warning: unused-variable
19 - refactor: too-few-public-methods
|
1 from selenium import webdriver
2 from selenium.common.exceptions import *
3 from selenium.webdriver.common.by import By
4 from selenium.webdriver.support.ui import WebDriverWait
5 from selenium.webdriver.support import expected_conditions as EC
6 from time import sleep
7 from getpass import getpass
8 import tkinter as tk
9 from tkinter import messagebox
10
11 class tanmay_bhat:
12 def __init__(self, username, password, channel_addr):
13
14 try:
15 #Check for Chrome webdriver in Windows
16 self.bot = webdriver.Chrome('driver/chromedriver.exe')
17 except WebDriverException:
18 try:
19 #Check for Chrome webdriver in Linux
20 self.bot = webdriver.Chrome('/usr/bin/chromedriver')
21 except WebDriverException:
22 print("Please set Chrome Webdriver path above")
23 exit()
24
25 self.username = username
26 self.password = password
27 self.channel_addr = channel_addr
28
29 def login(self):
30 bot = self.bot
31 print("\nStarting Login process!\n")
32 bot.get('https://stackoverflow.com/users/signup?ssrc=head&returnurl=%2fusers%2fstory%2fcurrent%27')
33 bot.implicitly_wait(10)
34 self.bot.find_element_by_xpath('//*[@id="openid-buttons"]/button[1]').click()
35 self.bot.find_element_by_xpath('//input[@type="email"]').send_keys(self.username)
36 self.bot.find_element_by_xpath('//*[@id="identifierNext"]').click()
37 sleep(3)
38 self.bot.find_element_by_xpath('//input[@type="password"]').send_keys(self.password)
39 self.bot.find_element_by_xpath('//*[@id="passwordNext"]').click()
40 WebDriverWait(self.bot, 900).until(EC.presence_of_element_located((By.XPATH, "/html/body/header/div/div[1]/a[2]/span")))
41 print("\nLoggedin Successfully!\n")
42 sleep(2)
43 self.bot.get(self.channel_addr + "/videos")
44
45 def start_liking(self):
46 bot = self.bot
47 scroll_pause = 2
48 last_height = bot.execute_script("return document.documentElement.scrollHeight")
49 while True:
50 bot.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")
51 sleep(scroll_pause)
52
53 new_height = bot.execute_script("return document.documentElement.scrollHeight")
54 if new_height == last_height:
55 print("\nScrolling Finished!\n")
56 break
57 last_height = new_height
58 print("\nScrolling")
59
60 all_vids = bot.find_elements_by_id('thumbnail')
61 links = [elm.get_attribute('href') for elm in all_vids]
62 links.pop()
63 for i in range(len(links)):
64 bot.get(links[i])
65
66 like_btn = bot.find_element_by_xpath('//*[@id="top-level-buttons"]/ytd-toggle-button-renderer[1]/a')
67 check_liked = bot.find_element_by_xpath('//*[@id="top-level-buttons"]/ytd-toggle-button-renderer[1]')
68 # Check if its already liked
69 if check_liked.get_attribute("class") == 'style-scope ytd-menu-renderer force-icon-button style-text':
70 like_btn.click()
71 print("Liked video! Bot Army Zindabad!!!\n")
72 sleep(0.5)
73 elif check_liked.get_attribute("class") == 'style-scope ytd-menu-renderer force-icon-button style-default-active':
74 print("Video already liked. You are a good Bot Army Member\n")
75
76
77
78
79 #************************************************** GUI AREA **********************************************
80
81 def start():
82 if email_entry.get() and password_entry.get() and url_entry.get():
83 bot_army = tanmay_bhat(email_entry.get(), password_entry.get(), url_entry.get())
84 root.destroy()
85 bot_army.login()
86 bot_army.start_liking()
87 else:
88 messagebox.showinfo('Notice', 'Please fill all the entries to proceed furthur')
89
90 def tanmay_url_inject():
91 url_entry.delete(0, tk.END)
92 url_entry.insert(0, "https://www.youtube.com/c/TanmayBhatYouTube")
93
94 root = tk.Tk()
95 root.resizable(False, False)
96 root.geometry('%dx%d+%d+%d' % (760, 330, (root.winfo_screenwidth()/2) - (760/2), (root.winfo_screenheight()/2) - (330/2)))
97
98 frame = tk.Frame(root, height=330, width=760)
99 head_label = tk.Label(frame, text='Youtube Video Liker', font=('verdana', 25))
100 email_label = tk.Label(frame, text='Email: ', font=('verdana', 15))
101 password_label = tk.Label(frame, text='Password: ', font=('verdana', 15))
102 email_entry = tk.Entry(frame, font=('verdana', 15))
103 password_entry = tk.Entry(frame, font=('verdana', 15), show="*")
104 url_label = tk.Label(frame, text='Channel\nURL', font=('verdana', 15))
105 url_entry = tk.Entry(frame, font=('verdana', 15))
106 tanmay_button = tk.Button(frame, text='Tanmay\nBhatt', font=('verdana', 15), command=tanmay_url_inject)
107 start_button = tk.Button(frame, text='Start Liking', font=('verdana', 20), command=start)
108
109 frame.pack()
110 head_label.place(y=15, relx=0.32)
111 email_label.place(x=15, y=95, anchor='w')
112 password_label.place(x=15, y=130, anchor='w')
113 email_entry.place(x=140, y=78, width=600)
114 password_entry.place(x=140, y=115, width=600)
115 url_label.place(x=15, y=190, anchor='w')
116 url_entry.place(x=140, y=175, width=600)
117 tanmay_button.place(x=400, y=240)
118 start_button.place(x=550, y=250)
119 root.mainloop()
120
121
122 """
123 Comment out the GUI area and uncomment the Console Area to use Console controls
124 ********************************************** Console Area *******************************************
125
126 print("HI BOT ARMYYYYYYY! How you doing?\nToday is the time to make our PROVIDER (BOT LEADER) proud by liking all his videos!\n\nLet's make hime proud!!\n\n")
127
128 print("Enter the link of the channel or just hit [ENTER] key for default Tanmay's Channel")
129 channel_addr = str(input("Channel Link: "))
130
131 username = str(input("\nEnter your YouTube/Google Email ID: "))
132 password = str(getpass("Enter your password: "))
133
134 if not channel_addr:
135 channel_addr = "https://www.youtube.com/c/TanmayBhatYouTube"
136
137
138 bot_army = tanmay_bhat(username, password, channel_addr)
139 bot_army.login()
140 bot_army.start_liking()
141 print("\n\nALL VIDEOS ARE LIKED!!! YOU CAN NOW OFFICIALLY CALL YOURSELF:\nA PROUD BOT ARMY MEMBERRRRR!!!!!!\n\n\nPress any key to end")
142 input()
143 """ | 2 - warning: wildcard-import
17 - error: undefined-variable
21 - error: undefined-variable
23 - refactor: consider-using-sys-exit
122 - warning: pointless-string-statement
7 - warning: unused-import
|
1 """
2 Self_compare_dist.py
3
4 Usage: This program has a function called self_seg_compare().
5 This function takes a track id (named as a parameter in the function),
6 compares every segment to every other segment, and
7 prints out the following information:
8
9 1. The number of segments that have one or more matches
10 2. The number of possible combinations that match
11 3. Saves a histogram that describes the combinations
12 4. Returns the adjacency list for the segments in the song
13
14 Takes the segments of a song, compares them using the Infinite Jukebox's
15 fields and weights, and gives a percentage of segments that have another
16 segment within 45 of itself. It also saves a histogram of these
17 distances. The histogram only shows distances <= 800, and up to 600
18 matches in each bin.
19
20 This program uses the weights and ideas on how to compare
21 segments. The following is a link to access the Infinite Jukebox:
22 http://labs.echonest.com/Uploader/index.html
23
24 Author: Chris Smith
25
26 Date: 03.11.2015
27
28 """
29
30 import matplotlib
31 matplotlib.use("Agg")
32 import echonest.remix.audio as audio
33 import matplotlib.pyplot as plt
34 import scipy.spatial.distance as distance
35 import numpy as np
36
37 '''
38 Method that uses a track id to compare every segment with
39 every other segment, supplies a histogram that shows
40 the distances between segments (tuples of segments),
41 and returns an adjacency list of segments in the song.
42 '''
43 def self_seg_compare():
44 #Defines the threshold for comparisons
45 thres = 45
46 adj_list = []
47 sim_seg_count = 0
48 sim_count = 0
49 track_id = "TRAWRYX14B7663BAE0"
50 audiofile = audio.AudioAnalysis(track_id)
51 segments = audiofile.segments
52 #Get each segment's array of comparison data
53 segs = np.array(segments.pitches)
54 segs = np.c_[segs, np.array(segments.timbre)]
55 segs = np.c_[segs, np.array(segments.loudness_max)]
56 segs = np.c_[segs, np.array(segments.loudness_begin)]
57 segs = np.c_[segs, np.ones(len(segs))]
58 #Finish creating the adjacency list
59 for i in segments:
60 adj_list.append([])
61 #Finish getting the comparison data
62 for i in range(len(segs)):
63 segs[i][26] = segments[i].duration
64 #Get the euclidean distance for the pitch vectors, then multiply by 10
65 distances = distance.cdist(segs[:,:12], segs[:,:12], 'euclidean')
66 for i in range(len(distances)):
67 for j in range(len(distances)):
68 distances[i][j] = 10 * distances[i][j]
69 #Get the euclidean distance for the timbre vectors, adding it to the
70 #pitch distance
71 distances = distances + distance.cdist(segs[:,12:24], segs[:,12:24], 'euclidean')
72 #Get the rest of the distance calculations, adding them to the previous
73 #calculations.
74 for i in range(len(distances)):
75 for j in range(len(distances)):
76 distances[i][j] = distances[i][j] + abs(segs[i][24] - segs[j][24])
77 distances[i][j] = distances[i][j] + abs(segs[i][25] - segs[j][25]) + abs(segs[i][26] - segs[j][26]) * 100
78 i_point = 0
79 j_point = 0
80 #Use i_point and j_point for the indices in the 2D distances array
81 for i_point in range(len(distances)):
82 for j_point in range(len(distances)):
83 if i_point != j_point:
84 #Check to see if the distance between segment # i_point and
85 #segment # j_point is less than 45
86 if abs(distances[i_point][j_point]) <= thres:
87 #Add to the adjacency lists if not already there
88 if j_point not in adj_list[i_point]:
89 adj_list[i_point].append(j_point)
90 if i_point not in adj_list[j_point]:
91 adj_list[j_point].append(i_point)
92 j_point = j_point + 1
93 i_point = i_point + 1
94 j_point = 0
95 #Get the count of the similarities in the adjacency lists
96 for i in adj_list:
97 if len(i) > 0:
98 sim_count = sim_count + len(i);
99 sim_seg_count = sim_seg_count + 1
100 #print i, "\n"
101 print "Num of segments with at least 1 match: ", sim_seg_count, " out of", len(segments)
102 print "Percentage of segments with at least 1 match: ", (sim_seg_count / float(len(segments)) * 100), "%"
103 print "Num of similar tuples: ", sim_count, " out of ", len(segments) ** 2 - len(segments)
104 print "Percentage of possible tuples that are similar: ", sim_count / float(len(segments) ** 2 - len(segments)) * 100, "%"
105 print "Note:This takes out comparisons between a segment and itself."
106 #Get the number of bins. Calculated by taking the max range and dividing by 50
107 bins = int(np.amax(distances)) / thres
108 #Make the histogram with titles and axis labels. Plot the line x=thres for visual comparison.
109 plt.hist(distances.ravel(), bins = bins)
110 plt.title('Distances between Tuples of Segments')
111 plt.xlabel('Distances')
112 plt.ylabel('Number of occurrences')
113 plt.axvline(thres, color = 'r', linestyle = 'dashed')
114 #Make each tick on the x-axis correspond to the end of a bin.
115 plt.xticks(range(0, int(np.amax(distances) + 2 * thres), thres))
116 #Make each tick on the y-axis correspond to each 25000th number up to the number of possible tuple combos / 2.
117 plt.yticks(range(0, (len(segments) ** 2 - len(segments))/2 + 25000, 25000))
118 plt.gcf().savefig('sim_histogram.png')
119 return adj_list
120
| 101 - error: syntax-error
|
1 import numpy as np
2 from collections import Counter
3
4 def calculate(filename):
5 data = np.load(filename)
6 checked = data[1]
7 countClusters = Counter()
8 counter = Counter()
9 for i in checked:
10 countClusters[i] += 1
11 for i in countClusters.values():
12 counter[i] += 1
13 val = counter.values()
14 key = counter.keys()
15 sum = 0
16 for i in range(len(key)):
17 sum += val[i] * key[i] ** 2
18 sum += (len(checked) * len(countClusters.values()))
19 print sum
20 fin = sum * (4376.4/4999950000)
21 print fin
| 19 - error: syntax-error
|
1 """
2 h5_seg_to_array.py
3
4 Usage: In the functions following this, the parameters are described as follows:
5
6 dir: the directory to search
7
8 filename: the filename for saving/loading the results to/from
9
10 Program that parses all .h5 files in the passed in directory and subdirectories,
11 getting the segment arrays from each .h5 file and putting them into a
12 numpy array for later use. Each segment array is in the following format:
13
14 [12 values for segment pitch, 12 values for segment timbre, 1 value for loudness
15 max, 1 value for loudness start, and 1 value for the segment duration]
16
17 This program uses the hdf5_getters, which can be found here:
18 https://github.com/tbertinmahieux/MSongsDB/blob/master/PythonSrc/hdf5_getters.py
19
20 Author: Chris Smith
21
22 Date: 02.22.2015
23 """
24 import os
25 import numpy as np
26 import hdf5_getters as getters
27
28 '''
29 Method that takes a directory, searches that directory, as well as any
30 subdirectories, and returns a list of every .h5 file.
31 '''
32 def get_h5_files(dir):
33 list = []
34 for root, dirs, files in os.walk(dir):
35 for file in files:
36 name, extension = os.path.splitext(file)
37 if extension == ".h5":
38 list.append(os.path.realpath(os.path.join(root, file)))
39 for subdir in dirs:
40 get_h5_files(subdir)
41 return list
42
43 '''
44 Method that takes a directory, gets every .h5 file in that directory (plus any
45 subdirectories), and then parses those files. The outcome is a Numpy array
46 that contains every segment in each file. Each row in the array of arrays
47 contains pitch, timbre, loudness max, loudness start, and the duration of each
48 segment.
49 '''
50 def h5_files_to_np_array(dir, filename):
51 list = get_h5_files(dir)
52 num_done = 0
53 seg_array = []
54 #Go through every file and get the desired information.
55 for file in list:
56 song = getters.open_h5_file_read(file)
57 seg_append = np.array(getters.get_segments_pitches(song))
58 seg_append = np.c_[ seg_append, np.array(getters.get_segments_timbre(song))]
59 seg_append = np.c_[seg_append, np.array(getters.get_segments_loudness_max(song))]
60 seg_append = np.c_[seg_append, np.array(getters.get_segments_loudness_start(song))]
61 start = np.array(getters.get_segments_start(song))
62 for i in range(0,len(start)-1):
63 if i != (len(start) - 1):
64 start[i] = start[i+1] - start[i]
65 start[len(start) - 1] = getters.get_duration(song) - start[len(start) - 1]
66 seg_append = np.c_[seg_append, start]
67 #Add the arrays to the bottom of the list
68 seg_array.extend(seg_append.tolist())
69 song.close()
70 num_done = num_done + 1
71 #Gives a count for every 500 files completed
72 if num_done % 500 == 0:
73 print num_done," of ",len(list)
74 #Convert the list to a Numpy array
75 seg_array = np.array(seg_array)
76 #Save the array in a file
77 seg_array.dump(filename)
78 print len(seg_array)," number of segments in the set."
79 return seg_array
80
81 '''
82 Method that opens the file with that filename. The file must contain a
83 Numpy array. This method returns the array.
84 '''
85 def open(filename):
86 data = np.load(filename)
87 return data
| 73 - error: syntax-error
|
1 import numpy as np
2
3 def check(filename):
4 clusters = np.load(filename)
5 clusters = clusters[1]
6 truths = np.load("Results/groundtruths.npy")
7 error = 0
8 total = 0
9 for i in range(len(truths)):
10 for j in range(len(truths[i])):
11 if clusters[truths[i][j]] != clusters[i]:
12 error += 1
13 total += 1
14 print error
15 print total
| 14 - error: syntax-error
|
1 #!/usr/bin/env python
2 # encoding: utf=8
3 """
4 one.py
5
6 Digest only the first beat of every bar.
7
8 By Ben Lacker, 2009-02-18.
9
10 """
11
12 '''
13 one_segment.py
14
15 Author: Chris Smith, 02-05-2015
16
17 Changes made to original one.py:
18
19 - Changes made to take the first segment out of every beat.
20 - Does not take the first beat from every bar anymore.
21
22 The original code is stored at this address: https://github.com/echonest/remix/blob/master/examples/one/one.py
23 '''
24 import echonest.remix.audio as audio
25
26 usage = """
27 Usage:
28 python one.py <input_filename> <output_filename>
29
30 Example:
31 python one.py EverythingIsOnTheOne.mp3 EverythingIsReallyOnTheOne.mp3
32 """
33
34 def main(input_filename, output_filename):
35 audiofile = audio.LocalAudioFile(input_filename)
36 '''
37 This line got the bars of the song in the previous version:
38 bars = audiofile.analysis.bars
39
40 Now, this line gets the beats in the song:
41 '''
42 beats = audiofile.analysis.beats
43 collect = audio.AudioQuantumList()
44 '''
45 This loop got the first beat in each bar and appended them to a list:
46 for bar in bars:
47 collect.append(bar.children()[0])
48
49 Now, this loop gets the first segment in each beat and appends them to the list:
50 '''
51 for b in beats:
52 collect.append(b.children()[0])
53 out = audio.getpieces(audiofile, collect)
54 out.encode(output_filename)
55
56 if __name__ == '__main__':
57 import sys
58 try:
59 input_filename = sys.argv[1]
60 output_filename = sys.argv[2]
61 except:
62 print usage
63 sys.exit(-1)
64 main(input_filename, output_filename)
| 62 - error: syntax-error
|
1 import matplotlib
2 matplotlib.use("Agg")
3 import numpy as np
4 import matplotlib.pyplot as plt
5 import time
6 from collections import Counter
7
8 def truth_generator(filename):
9 data = np.load(filename)
10 data.resize(100000, 27)
11 truths = []
12 for i in range(len(data)):
13 truths.append([])
14 t0 = time.time()
15 for i in range(0,100000,10000):
16 a = data[i:i+10000,]
17 a[:,:12:] *= 10
18 a[:,26] *= 100
19 for j in range(i,100000,10000):
20 b = data[j:j+10000,]
21 b[:,:12:] *= 10
22 b[:,26] *= 100
23 c = seg_distances(a,b)
24 for k in range(len(c)):
25 for l in range(len(c)):
26 if c[k,l] <= 80:
27 truths[k+i].append(l+j)
28 print "Done. Onto the next one..."
29 print time.time() - t0
30 np.save("Results/groundtruths", truths)
31
32 def histo_generator(filename):
33 data = np.load(filename)
34 labels = data[1]
35 counter = Counter()
36 for i in labels:
37 counter[i] += 1
38 if np.amax(len(counter)) / 50 >= 5:
39 bins = np.amax(len(counter)) / 50
40 else:
41 bins = 5
42 plt.hist(counter.values(), bins = bins)
43 plt.title('Number of members per cluster')
44 plt.xlabel('Number of members')
45 plt.ylabel('Number of occurrences')
46 ticks = range(0, bins)
47 #plt.xticks(ticks[0::50])
48 plt.gcf().savefig('Results/truthCountHistogram.png')
49 plt.close()
50
51 def seg_distances(u_, v_=None):
52 from scipy.spatial.distance import pdist, cdist, squareform
53 from numpy import diag, ones
54 if v_ is None:
55 d_ = pdist(u_[:, 0:12], 'euclidean')
56 d_ += pdist(u_[:, 12:24], 'euclidean')
57 d_ += pdist(u_[:, 24:], 'cityblock')
58 d_ = squareform(d_) + diag(float('NaN') * ones((u_.shape[0],)))
59 else:
60 d_ = cdist(u_[:, 0:12], v_[:, 0:12], 'euclidean')
61 d_ += cdist(u_[:, 12:24], v_[:, 12:24], 'euclidean')
62 d_ += cdist(u_[:, 24:], v_[:, 24:], 'cityblock')
63
64 return d_
| 28 - error: syntax-error
|
1 """
2 dir_comp.py
3
4 Usage: In the functions following this, the parameters are described as follows:
5
6 dir: the directory to search
7
8 Program that parses all .mp3 files in the passed in directory,
9 gets the segment arrays from each .mp3 file and puts them into a
10 numpy array for later use. Each segment array is in the following format:
11
12 [12 values for segment pitch, 12 values for segment timbre, 1 value for loudness
13 max, 1 value for loudness start, and 1 value for the segment duration]
14
15 Author: Chris Smith
16
17 Date: 03.27.2015
18 """
19 import matplotlib
20 matplotlib.use("Agg")
21 import echonest.remix.audio as audio
22 import matplotlib.pyplot as plt
23 import scipy.spatial.distance as distance
24 import os
25 import numpy as np
26
27 '''
28 Method that takes a directory, searches that directory, and returns a list of every .mp3 file in it.
29 '''
30 def get_mp3_files(dir):
31 list = []
32 for root, dirs, files in os.walk(dir):
33 for file in files:
34 name, extension = os.path.splitext(file)
35 if extension == ".mp3":
36 list.append(os.path.realpath(os.path.join(root, file)))
37 return list
38
39 '''
40 Method that takes two .mp3 files and compares every segment within song A to
41 every segment in song B and supplies a histogram that shows
42 the distances between segments (tuples of segments). Also supplies some data
43 about the songs that were parsed.
44 '''
45 def two_song_comp(fileA, fileB):
46 #Defines the threshold for comparisons
47 thres = 45
48 nameA = os.path.basename(os.path.splitext(fileA)[0])
49 nameB = os.path.basename(os.path.splitext(fileB)[0])
50 adj_listA = []
51 adj_listB = []
52 sim_seg_countA = 0
53 sim_seg_countB = 0
54 sim_countA = 0
55 sim_countB = 0
56 audiofileA = audio.AudioAnalysis(fileA)
57 audiofileB = audio.AudioAnalysis(fileB)
58 segmentsA = audiofileA.segments
59 segmentsB = audiofileB.segments
60 #Get each segment's array of comparison data for song A
61 segsA = np.array(segmentsA.pitches)
62 segsA = np.c_[segsA, np.array(segmentsA.timbre)]
63 segsA = np.c_[segsA, np.array(segmentsA.loudness_max)]
64 segsA = np.c_[segsA, np.array(segmentsA.loudness_begin)]
65 segsA = np.c_[segsA, np.ones(len(segsA))]
66 #Get each segment's array of comparison data for song B
67 segsB = np.array(segmentsB.pitches)
68 segsB = np.c_[segsB, np.array(segmentsB.timbre)]
69 segsB = np.c_[segsB, np.array(segmentsB.loudness_max)]
70 segsB = np.c_[segsB, np.array(segmentsB.loudness_begin)]
71 segsB = np.c_[segsB, np.ones(len(segsB))]
72
73 #Finish creating the adjacency list
74 for i in segmentsA:
75 adj_listA.append([])
76 for i in segmentsB:
77 adj_listB.append([])
78 #Finish getting the comparison data
79 for i in range(len(segsA)):
80 segsA[i][26] = segmentsA[i].duration
81 for i in range(len(segsB)):
82 segsB[i][26] = segmentsB[i].duration
83 #Get the euclidean distance for the pitch vectors, then multiply by 10
84 distances = distance.cdist(segsA[:,:12], segsB[:,:12], 'euclidean')
85 for i in range(len(distances)):
86 for j in range(len(distances[i])):
87 distances[i][j] = 10 * distances[i][j]
88 #Get the euclidean distance for the timbre vectors, adding it to the
89 #pitch distance
90 distances = distances + distance.cdist(segsA[:,12:24], segsB[:,12:24], 'euclidean')
91 #Get the rest of the distance calculations, adding them to the previous
92 #calculations.
93 for i in range(len(distances)):
94 for j in range(len(distances[i])):
95 distances[i][j] = distances[i][j] + abs(segsA[i][24] - segsB[j][24])
96 distances[i][j] = distances[i][j] + abs(segsA[i][25] - segsB[j][25]) + abs(segsA[i][26] - segsB[j][26]) * 100
97 i_point = 0
98 j_point = 0
99 #Use i_point and j_point for the indices in the 2D distances array
100 for i_point in range(len(distances)):
101 for j_point in range(len(distances[i])):
102 #Check to see if the distance between segment # i_point and
103 #segment # j_point is less than 45
104 if abs(distances[i_point][j_point]) <= thres:
105 #Add to the adjacency lists if not already there
106 if j_point not in adj_listA[i_point]:
107 adj_listA[i_point].append(j_point)
108 if i_point not in adj_listB[j_point]:
109 adj_listB[j_point].append(i_point)
110 j_point = j_point + 1
111 i_point = i_point + 1
112 j_point = 0
113 #Get the count of the similarities in the adjacency lists
114 for i in adj_listA:
115 if len(i) > 0:
116 sim_countA = sim_countA + len(i);
117 sim_seg_countA = sim_seg_countA + 1
118 for i in adj_listB:
119 if len(i) > 0:
120 sim_countB = sim_countB + len(i);
121 sim_seg_countB = sim_seg_countB + 1
122
123 #print i, "\n"
124 print "Num of segments with at least 1 match in song A: ", sim_seg_countA, " out of", len(segmentsA)
125 print "Percentage of segments with at least 1 match in song A: ", (sim_seg_countA / float(len(segmentsA)) * 100), "%"
126 print "Num of similar tuples: ", sim_countA, " out of ", len(segmentsA) *len(segmentsB)
127 print "Percentage of possible tuples that are similar: ", sim_countA / float(len(segmentsA) * len(segmentsB)) * 100, "%"
128 print "Num of segments with at least 1 match in song B: ", sim_seg_countB, " out of", len(segmentsB)
129 print "Percentage of segments with at least 1 match in song B: ", (sim_seg_countB / float(len(segmentsB)) * 100), "%"
130 #Get the number of bins. Calculated by taking the max range and dividing by 50
131 bins = int(np.amax(distances)) / thres
132 #Make the histogram with titles and axis labels. Plot the line x=thres for visual comparison.
133 plt.hist(distances.ravel(), bins = bins)
134 plt.title('Distances between Tuples of Segments' + nameA + nameB)
135 plt.xlabel('Distances')
136 plt.ylabel('Number of occurrences')
137 plt.axvline(thres, color = 'r', linestyle = 'dashed')
138 #Make each tick on the x-axis correspond to the end of a bin.
139 plt.xticks(range(0, int(np.amax(distances) + 2 * thres), thres))
140 #Make each tick on the y-axis correspond to each 25000th number up to the number of possible tuple combos / 2.
141 plt.yticks(range(0, (len(segmentsA) * len(segmentsB))/2 + 25000, 25000))
142 plt.gcf().savefig('Histograms/' + nameA + 'and' + nameB + '_histogram.png')
143 plt.close()
144
145 '''
146 Method that runs the comparison on every pair of .mp3 files in a directory
147 '''
148 def dir_comp(dir):
149 files = get_mp3_files(dir)
150 count = 0
151 total = sum(range(len(files) + 1))
152 for f1 in files:
153 for f2 in files:
154 nameA = os.path.basename(os.path.splitext(f1)[0])
155 nameB = os.path.basename(os.path.splitext(f2)[0])
156 if not os.path.isfile('Histograms/' + nameA + 'and' + nameB + '_histogram.png') and not os.path.isfile('Histograms/' + nameB + 'and' + nameA + '_histogram.png'):
157 two_song_comp(f1, f2)
158 print "Comparison completed!"
159 count = count + 1
160 print count, " out of ", total
161 print "Finished."
| 124 - error: syntax-error
|
1 """
2 seg_kmeans.py
3
4 This code performs K-Means clustering on a dataset passed in as a pickled
5 NumPy array.
6
7 There is a function (seg_kmeans) that performs K-Means on
8 the dataset not using another class's stuff. There is another function
9 (KMeans) that performs K-Means on the dataset by using Scikit-Learn's
10 K-Means class inside of the cluster package.
11 Both functions have the follwoing parameters:
12
13 1. filename: the file that contains the dataset (must be a pickled array)
14 2. clusters: the number of clusters to generate
15 3. iter: the max number of iterations to use
16
17 This also saves the results to an output in the Results folder.
18
19 Author: Chris Smith
20
21 Version: 4.19.2015
22 """
23 import matplotlib
24 matplotlib.use("Agg")
25 import numpy as np
26 from numpy import random
27 import scipy.spatial.distance as distance
28 from sklearn import metrics
29 from sklearn import cluster
30 import matplotlib.pyplot as plt
31 import time
32
33 '''
34 Figures out which cluster center that the segment x is closest to.
35 '''
36 def classify(x, size, centroids):
37 list = np.zeros(size)
38 for i in range(size):
39 list[i] = np.sqrt(np.sum((centroids[i] - x) ** 2))
40 return np.argmin(list)
41 '''
42 Figures out the cluster member counts and the max distances from the centers in each cluster.
43 Also, histograms are generated.
44 '''
45 def score(centers, centroids):
46 counts = np.zeros(len(centers))
47 maxes = np.zeros(len(centers))
48 index = 0
49 np.asarray(centers)
50 for i in range(len(centers)):
51 counts[index] = len(centers[index])
52 index += 1
53 for i in range(len(centers)):
54 maxes[i] = distance.cdist(centers[i], np.asarray(centroids[i]).reshape((1,27)), 'euclidean').max()
55 if np.amax(counts)/50 >= 5:
56 bins = np.amax(counts) / 50
57 else:
58 bins = 5
59 plt.hist(counts.ravel(), bins = bins)
60 plt.title('Number of members per cluster')
61 plt.xlabel('Number of members')
62 plt.ylabel('Number of occurrences')
63 ticks = range(0, int(np.amax(counts)))
64 plt.xticks(ticks[0::50])
65 plt.gcf().savefig('Results/countHistogram.png')
66 plt.close()
67 if np.amax(maxes)/50 >= 5:
68 bins = np.amax(maxes) / 50
69 else:
70 bins = 5
71
72 plt.hist(maxes.ravel(), bins = bins)
73 plt.title('Max distance in cluster')
74 plt.xlabel('Max distances')
75 plt.ylabel('Number of occurrences')
76 ticks = range(0, int(np.amax(maxes)))
77 plt.xticks(ticks[0::50])
78 plt.gcf().savefig('Results/maxdistHistogram.png')
79 plt.close()
80
81
82 print "Counts of each cluster:"
83 print counts
84 print "------------------------------"
85 print "The max distance from each center to a cluster member:"
86 print maxes
87 print "------------------------------"
88
89 '''
90 Performs K-Means clustering on a dataset of music segments without using a pre-made function.
91 Saves the results to a .npy file in the Results folder.
92 '''
93 def seg_kmeans(filename, clusters, iter):
94 #Initialize everything
95 data = np.load(filename)
96 #Use the first 1 million segments
97 data.resize(1000000,27)
98 centroids = np.empty((clusters, 27))
99 copyroids = np.empty((clusters, 27))
100 for i in range(0, clusters):
101 sample = random.randint(0, len(data))
102 centroids[i] = data[sample]
103 #Start the algorithm
104 stop = False
105 attempt = 1
106 numlist = []
107 while not stop and attempt <= iter:
108 #Initialize the lists
109 numlist = []
110 for i in range(clusters):
111 numlist.append([])
112 print "Attempt Number: %d" % attempt
113 #Classify stuff
114 for row in range(len(data)):
115 closest = classify(data[row], clusters, centroids)
116 numlist[closest].append(data[row])
117 if row % 10000 == 0:
118 print row
119 #Redo the centroids
120 copyroids = centroids.copy()
121 for i in range(clusters):
122 if len(numlist[i]) > 0:
123 centroids[i].put(range(27), np.average(numlist[i], axis=0).astype(np.int32))
124 attempt += 1
125 if np.any(centroids-copyroids) == 0:
126 stop = True
127 score(numlist, centroids)
128 np.save("Results/clusterdata.npy", numlist)
129
130 '''
131 Performs the K-Means clustering algorithm that Scikit-Learn's cluster package provides.
132 Saves the output into a file called clusterdata.npy. This file is located in the Results folder.
133 '''
134 def KMeans(filename, clusters, iter):
135 data = np.load(filename)
136 data.resize(100000,27)
137 print "Loaded data"
138 t0 = time.time()
139 estimator = cluster.KMeans(n_clusters=clusters, n_init = 5, max_iter=iter, verbose=1, n_jobs=5)
140 estimator.fit(data)
141 print('%.2fs %i'
142 % ((time.time() - t0), estimator.inertia_))
143 saveddata = [estimator.cluster_centers_, estimator.labels_, estimator.inertia_]
144 np.save("Results/clusterdata.npy", saveddata)
| 82 - error: syntax-error
|
1 """
2 timing.py
3
4 Usage: In the functions following this, the parameters are described as follows:
5
6 filename: the file that contains segment data
7
8 This file must have been a NumPy array of segment data that was saved. It is loaded through NumPy's load function.
9
10 Each segment array is in the following format:
11
12 [12 values for segment pitch, 12 values for segment timbre, 1 value for loudness
13 max, 1 value for loudness start, and 1 value for the segment duration]
14
15 Author: Chris Smith
16
17 Date: 04.11.2015
18 """
19
20 import time
21 import scipy.spatial.distance as distance
22 import numpy as np
23
24 '''
25 Method that takes a file of segment data (a 2D NumPy array), and compares the first 850 segments to 1000, 10000, 100000, and
26 1000000 segments. The results are ignored, as this function times the comparisons.
27 '''
28 def comp_time(filename):
29 seg_array = np.load(filename)
30 song = seg_array[:850:].copy()
31 t1 = time.time()
32 distance.cdist(song, seg_array[:1000:],'euclidean')
33 t2 = time.time()
34 distance.cdist(song, seg_array[:10000:],'euclidean')
35 t3 = time.time()
36 distance.cdist(song, seg_array[:100000:],'euclidean')
37 t4 = time.time()
38 distance.cdist(song, seg_array[:1000000:],'euclidean')
39 t5 = time.time()
40 print "Time for comparisons between a song and 1000 segments: " + str(t2-t1)
41 print "Time for comparisons between a song and 10000 segments: " + str(t3-t2)
42 print "Time for comparisons between a song and 100000 segments: " + str(t4-t3)
43 print "Time for comparisons between a song and 1000000 segments: " + str(t5-t4)
| 40 - error: syntax-error
|
1
2 # coding: utf-8
3
4 # In[2]:
5
6 import numpy as np
7 import tensorflow as tf
8 import requests
9 import urllib
10 from PIL import Image
11 import os
12 import matplotlib.pyplot as plt
13 import cv2 as cv2
14
15 get_ipython().magic('matplotlib inline')
16
17
18 # In[3]:
19
20 os.chdir("C:\\Users\\USER\\python studyspace\\Deep learning\\Project")
21 pic = Image.open("cat_test.jpg")
22 new_image = pic.resize((32,32))
23 test1 = np.array(new_image)
24 test1 = test1.reshape(1,32,32,3)
25 print(test1.shape)
26
27
28 # In[5]:
29
30 plt.imshow(pic)
31
32
33 # In[6]:
34
35 sess = tf.Session()
36
37 saver = tf.train.import_meta_graph('save2.ckpt.meta')
38
39 saver.restore(sess, tf.train.latest_checkpoint('./'))
40
41 graph = tf.get_default_graph()
42
43 y_pred = graph.get_tensor_by_name("train_pred:0")
44
45 x = graph.get_tensor_by_name("train_dataset:0")
46 y_true = graph.get_tensor_by_name("train_label:0")
47
48 y_test_images = np.zeros((1,2))
49
50 feed_dict_testing = {x: test1, y_true: y_test_images}
51
52 result=sess.run(y_pred, feed_dict=feed_dict_testing)
53
54
55 # In[7]:
56
57 print(result)
58
59
60 # In[ ]:
61
62
63
| 15 - error: undefined-variable
24 - error: too-many-function-args
8 - warning: unused-import
9 - warning: unused-import
13 - warning: unused-import
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Sun May 10 23:34:29 2020
4
5 @author: HP USER
6 """
7
8
9 import urllib.request, urllib.error, urllib.parse
10 import json
11 import sqlite3
12 import pandas as pd
13 from datetime import datetime
14 import matplotlib.pyplot as plt
15 import matplotlib
16 import numpy as np
17
18 #retrieve json file and decode it
19 jsonFile = urllib.request.urlopen('https://api.covid19india.org/data.json').read()
20 data = json.loads(jsonFile)
21
22 conn = sqlite3.connect('Covid19Data.sqlite')
23 cur = conn.cursor()
24
25 #create a table in database if the table does not exists
26 cur.executescript('''
27 CREATE TABLE IF NOT EXISTS dailyCases(
28 dailyConfirmed INTEGER NOT NULL,
29 dailyDeceased INTEGER NOT NULL,
30 dailyRecovered INTEGER NOT NULL,
31 date TEXT NOT NULL UNIQUE,
32 totalConfirmed INTEGER NOT NULL,
33 totalDeceased INTEGER NOT NULL,
34 totalRecovered INTEGER NOT NULL
35 );''')
36
37 #%%
38
39 #update the data in database for each date
40 for daily in data['cases_time_series']:
41 dailyData = list(daily.values())
42 cur.execute('''SELECT * FROM dailyCases WHERE date=?''', (dailyData[3], ))
43 result = cur.fetchone()
44 if result is None:
45 cur.execute('''
46 INSERT INTO dailyCases (dailyConfirmed, dailyDeceased, dailyRecovered, date,
47 totalConfirmed, totalDeceased, totalRecovered) VALUES ( ?, ?, ?, ?, ?, ?, ?)''',
48 (int(dailyData[0]), int(dailyData[1]), int(dailyData[2]), dailyData[3],
49 int(dailyData[4]), int(dailyData[5]), int(dailyData[6])))
50 elif result[4] < int(dailyData[4]):
51 cur.execute('''
52 UPDATE dailyCases
53 SET totalConfirmed=?
54 WHERE date=?''',
55 (int(dailyData[4]), dailyData[3]))
56 conn.commit()
57
58
59 #%%
60 total = pd.read_sql('SELECT * FROM dailyCases', conn)
61
62 #convert date to python datetime type object
63 def fun(x):
64 return datetime.strptime(x+str((datetime.today().year)), '%d %B %Y')
65 total['date'] = total['date'].apply(fun)
66
67 #plot figure for total cases for each day
68 fig = plt.figure()
69
70 plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))
71 plt.plot(total['date'], total['totalConfirmed'], '-o', ms=1)
72 plt.title('Total cases in India for each day')
73 plt.xlabel('Dates', fontsize=12)
74 plt.ylabel('Total cases', labelpad=0.1, fontsize=12)
75
76 def slide(event):
77 date = int(event.xdata)
78 print(event.xdata)
79 dateIndex = date - dateLoc[0]+2
80 date = total['date'].iloc[dateIndex]
81 strDate = date.strftime('%d %b')
82 #text for displaying the total cases for each day
83 str = 'Total cases on {} were {}'.format(strDate, total['totalConfirmed'].iloc[dateIndex])
84 plt.cla()
85 plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))
86 plt.plot(total['date'], total['totalConfirmed'], '-o', ms=1)
87 plt.text(x=dateLoc[0], y=50000, s=str)
88 plt.title('Total cases in India for each day')
89 plt.xlabel('Dates', fontsize=12)
90 plt.ylabel('Total cases', labelpad=0.1, fontsize=12)
91 plt.draw()
92
93 dateLoc = (plt.gca().xaxis.get_majorticklocs())
94 dateLoc = dateLoc.astype(np.int64)
95 fig.canvas.mpl_connect('button_press_event', slide)
96
97 #plot the figure for new cases reported for each day
98 fig2 = plt.figure()
99 fig2.set_figheight(9)
100 fig2.set_figwidth(16)
101 fig2.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))
102 plt.bar(total['date'], total['dailyConfirmed'], width=0.8, alpha=0.8)
103 plt.plot(total['date'], total['dailyConfirmed'], c='red', alpha=0.8)
104 plt.title('New cases reported in India for each day')
105 plt.xlabel('Dates', fontsize=12)
106 plt.ylabel('New cases reported', labelpad=10, fontsize=12)
107
108 def slide2(event):
109 date = int(round(event.xdata))
110 print(event.xdata)
111 dateIndex = date - dateLoc[0]+2
112 date = total['date'].iloc[dateIndex]
113 strDate = date.strftime('%d %b')
114 # print(plt.gcf().texts())
115 str = 'Total cases reported on {} were {}'.format(strDate, total['dailyConfirmed'].iloc[dateIndex])
116 plt.cla()
117 plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))
118 plt.bar(total['date'], total['dailyConfirmed'], alpha=0.8)
119 plt.plot(total['date'], total['dailyConfirmed'], c='red', alpha=0.8)
120 plt.annotate(xy=(event.xdata, total['dailyConfirmed'].iloc[dateIndex]),
121 xytext=(dateLoc[0], 4000), s=str,
122 arrowprops={'arrowstyle':'->'})
123 plt.title('New cases reported in India for each day')
124 plt.xlabel('Dates', fontsize=12)
125 plt.ylabel('New cases reported', fontsize=12, labelpad=10)
126 plt.draw()
127
128 fig2.canvas.mpl_connect('button_press_event', slide2)
129
130 plt.show()
131 conn.close()
| 19 - refactor: consider-using-with
83 - warning: redefined-builtin
115 - warning: redefined-builtin
|
1 # IPython log file
2
3 import json
4 path = 'ch02/usagov_bitly_data2012-03-16-1331923249.txt'
5 records = [json.loads(line) for line in open(path)]
6 import json
7 path = 'ch2/usagov_bitly_data2012-03-16-1331923249.txt'
8 records = [json.loads(line) for line in open(path)]
9 import json
10 path = 'ch2/usagov_bitly_data2012-11-13-1352840290.txt'
11 records = [json.loads(line) for line in open(path)]
12 time_zones = [rec['tz'] for rec in records if 'tz' in rec]
13 get_ipython().magic(u'logstart')
14 ip_info = get_ipython().getoutput(u'ifconfig eth0 | grep "inet "')
15 ip_info[0].strip()
16 ip_info = get_ipython().getoutput(u'ifconfig en0 | grep "inet "')
17 ip_info[0].strip()
18 ip_info = get_ipython().getoutput(u'ifconfig en1 | grep "inet "')
19 ip_info[0].strip()
20 pdc
21 get_ipython().magic(u'debug')
22 def f(x, y, z=1):
23 tmp = x + y
24 return tmp / z
25 get_ipython().magic(u'debug (f, 1, 2, z = 3)')
26 get_ipython().magic(u'debug (f, 1, 2, z = 3)')
27 get_ipython().magic(u'debug (f, 1, 2, z = 3)')
28 def set_trace():
29 from IPython.core.debugger import Pdb
30 Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
31
32 def debug(f, *args, **kwargs):
33 from IPython.core.debugger import Pdb
34 pdb = Pdb(color_scheme='Linux')
35 return pdb.runcall(f, *args, **kwargs)
36 debug (f, 1, 2, z = 3)
37 set_trace()
38 class Message:
39 def __init__(self, msg):
40 self.msg = msg
41 class Message:
42 def __init__(self, msg):
43 self.msg = msg
44 def __repr__(self):
45 return 'Message: %s' % self.msg
46 x = Message('I have a secret')
47 x
| 5 - refactor: consider-using-with
5 - warning: unspecified-encoding
6 - warning: reimported
8 - refactor: consider-using-with
8 - warning: unspecified-encoding
9 - warning: reimported
11 - refactor: consider-using-with
11 - warning: unspecified-encoding
13 - error: undefined-variable
13 - warning: redundant-u-string-prefix
14 - error: undefined-variable
14 - warning: redundant-u-string-prefix
16 - error: undefined-variable
16 - warning: redundant-u-string-prefix
18 - error: undefined-variable
18 - warning: redundant-u-string-prefix
20 - warning: pointless-statement
20 - error: undefined-variable
21 - error: undefined-variable
21 - warning: redundant-u-string-prefix
22 - warning: redefined-outer-name
25 - error: undefined-variable
25 - warning: redundant-u-string-prefix
26 - error: undefined-variable
26 - warning: redundant-u-string-prefix
27 - error: undefined-variable
27 - warning: redundant-u-string-prefix
30 - warning: protected-access
30 - error: undefined-variable
32 - warning: redefined-outer-name
38 - refactor: too-few-public-methods
41 - error: function-redefined
41 - refactor: too-few-public-methods
47 - warning: pointless-statement
|
1 import random
2
3 def lottery_sim(my_picks, num_tickets):
4 ticket = 1
5 winners = {3:0,4:0,5:0,6:0}
6 for i in range(num_tickets):
7 ticket+=1
8 drawing = random.sample(range(1, 53), 6)
9 correct = 0
10 for i in my_picks:
11 if i in drawing:
12 correct+=1
13 if correct == 3:
14 winners[3]+=1
15
16 elif correct == 4:
17 winners[4]+=1
18
19 elif correct == 5:
20 winners[5]+=1
21
22 elif correct == 6:
23 winners[6]+=1
24
25 return winners
26
27 lottery_sim([17,3,44,22,15,37], 100000) | Clean Code: No Issues Detected
|
1 #!/usr/bin/python3
2
3 import argparse
4 import subprocess
5 import re
6
7
8 HEIGHT_OFFSET = 60
9
10 class Rectangle:
11 def __init__(self, x, y, w, h):
12 self.x = int(x) # origin x
13 self.y = int(y) # origin y
14 self.w = int(w) # width
15 self.h = int(h) # height
16
17 def __str__(self):
18 return str(self.x) + ',' + str(self.y) + ',' \
19 + str(self.w) + ',' + str(self.h)
20
21 def __repr__(self):
22 return "position: (" + str(self.x) + \
23 "," + str(self.y) + ')'\
24 ", size: " + str(self.w) + \
25 "," + str(self.h) + ')'
26
27
28 # example ['1366x768+1024+373', '1024x768+0+0']
29 def get_displays():
30 out = str(execute('xrandr'))
31
32 # remove occurrences of 'primary' substring
33 out = out.replace("primary ", "")
34
35 # we won't match displays that are disabled (no resolution)
36 out = out.replace("connected (", "")
37
38 start_flag = " connected "
39 end_flag = " ("
40 resolutions = []
41 for m in re.finditer(start_flag, out):
42 # start substring in the end of the start_flag
43 start = m.end()
44 # end substring before the end_flag
45 end = start + out[start:].find(end_flag)
46
47 resolutions.append(out[start:end])
48
49 displays = []
50 for r in resolutions:
51 width = r.split('x')[0]
52 height, x, y = r.split('x')[1].split('+')
53 displays.append(Rectangle(x, y, width, int(height)-HEIGHT_OFFSET))
54
55 return displays
56
57
58 def parse_arguments():
59 parser = argparse.ArgumentParser(description='Tile tool')
60 parser.add_argument('-t', '--tile', dest='tile',
61 choices=['left', 'right', 'top', 'bottom'],
62 help='tile relatively to display')
63 parser.add_argument('-w', '--tile-window', dest='tile_w',
64 choices=['left', 'right', 'top', 'bottom'],
65 help='tile relatively to window itself')
66 parser.add_argument('-s', '--switch-display', dest='switch_display',
67 action='store_true',
68 help='move window to next display')
69 parser.add_argument('-c', '--change-to-display', dest='display',
70 type=int, help='move window to specified display')
71 parser.add_argument('-m', '--maximize', dest='maximize',
72 action='store_true', help='maximize window')
73 return parser.parse_args()
74
75
76 def execute(cmd):
77 print('$ ' + cmd)
78 return subprocess.check_output(['bash', '-c', cmd])
79
80
81 def get_active_window():
82 cmd = 'xdotool getactivewindow getwindowgeometry'
83 flag_pos_start = "Position: "
84 flag_pos_end = " (screen:"
85 flag_geom_start = "Geometry: "
86 flag_geom_end = "\\n"
87
88 r = str(execute(cmd))
89
90 str_pos = r[r.find(flag_pos_start) + len(flag_pos_start) \
91 : r.find(flag_pos_end)]
92 str_geom = r[r.find(flag_geom_start) + len(flag_geom_start) \
93 : r.rfind(flag_geom_end)]
94
95 pos = str_pos.split(',')
96 geom = str_geom.split('x')
97
98 return Rectangle(pos[0], pos[1], geom[0], geom[1])
99
100
101 def window_is_in_display(w, d):
102 return (d.x <= w.x <= d.x+d.w) and (d.y <= w.y <= d.y+d.h)
103
104
105 def get_display(displays, active):
106 w = get_active_window()
107 for d in displays:
108 if window_is_in_display(w, d):
109 if active:
110 return d
111 else:
112 if not active:
113 return d
114
115
116 def get_active_display(displays):
117 return get_display(displays, True)
118
119
120 def get_inactive_display(displays):
121 return get_display(displays, False)
122
123
124 def set_window(x, y, w, h):
125 cmd_header = 'wmctrl -r ":ACTIVE:" -e 0,'
126
127 cmd = cmd_header + str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)
128 execute(cmd)
129
130
131 def tile(direction, basis, display):
132 x = basis.x
133 y = basis.y
134 w = basis.w
135 h = basis.h
136
137 if direction == 'left':
138 w = int(display.w/2)
139 x = display.x
140 elif direction == 'right':
141 w = int(display.w/2)
142 x = display.x + w
143 elif direction == 'top':
144 h = int(display.h/2)
145 y = display.y
146 elif direction == 'bottom':
147 h = int(display.h/2)
148 y = display.y + h
149
150 set_window(x, y, w, h)
151
152
153 def main():
154 args = parse_arguments()
155 displays = get_displays()
156
157 if args.tile:
158 display = get_active_display(displays)
159 tile(args.tile, display, display)
160
161 if args.tile_w:
162 display = get_active_display(displays)
163 window = get_active_window()
164 # the get is 2 pixels more than the real value
165 window.x -= 2
166 tile(args.tile_w, window, display)
167
168 if args.display is not None:
169 d = displays[args.display]
170 set_window(d.x, d.y, d.w, d.h)
171
172 if args.switch_display:
173 d = get_inactive_display(displays)
174 set_window(d.x, d.y, d.w, d.h)
175
176 if args.maximize:
177 d = get_active_display(displays)
178 set_window(d.x, d.y, d.w, d.h)
179
180
181 if __name__ == "__main__":
182 main()
| 102 - warning: bad-indentation
105 - refactor: inconsistent-return-statements
|
1 import requests
2 import sqlite3
3 from sqlite3 import Error
4 from bs4 import BeautifulSoup
5
6 # Create the Cumulative database
7 CTeamStats = sqlite3.connect('CumulativeTeamStats.db')
8
9 # This vector will be used to collect every team from 2012 to 2019
10 yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
11
12 #Function to create the tables from 2012-2019
13 def cumulative_team_stats_table():
14 #cts -> cumulative team stats
15 cts = CTeamStats.cursor()
16 table_values = '(Team_Name TEXT, Wins INTEGER, Runs INTEGER, Run_Differential INTEGER, WAR INTEGER, WPA INTEGER, Dollars REAL, Batter TEXT, AVG REAL, OBP REAL, SLG REAL, OPS REAL, wOBA REAL, wRCplus REAL, BBperc TEXT, Kperc TEXT, Spd REAL, Def REAL, BWAR REAL, BWPA REAL, BDollars TEXT, Pitcher TEXT, ERA REAL, ERAminus REAL, WHIP REAL, FIPx REAL, FIPxminus REAL, Kper9 REAL, Kper9plus REAL, HRper9 REAL, GBperc REAL, PWAR REAL, PWPA REAL, PDollars TEXT)'
17 #concatenate the string
18 cts.execute('CREATE TABLE IF NOT EXISTS Cumulative_Team_Stats' + table_values)
19 cts.close()
20
21 #Fucntion used to enter the data of a team into the cts database
22 def data_entry(year, team_name, wins, runs, rd, war, wpa, dollar, batter, avg, obp, slg, ops, woba, wrc, bb, k, spd, defense, bwar, bwpa, bdollar, pitcher, era, eramin, whip, fipx, fipxmin, kper9, kper9plus, hrper9, gbperc, pwar, pwpa, pdollar):
23 cts = CTeamStats.cursor()
24 insertStatement = "INSERT INTO Cumulative_Team_Stats (Team_Name, Wins, Runs, Run_Differential, WAR, WPA, Dollars, Batter, AVG, OBP, SLG, OPS, wOBA, wRCplus, BBperc, Kperc, Spd, Def, BWAR, BWPA, BDollars, Pitcher, ERA, ERAminus, WHIP, FIPx, FIPxminus, Kper9, Kper9plus, HRper9, GBperc, PWAR, PWPA, PDollars) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
25 statTuple = (year + team_name, wins, runs, rd, war, wpa, dollar, batter, avg, obp, slg, ops, woba, wrc, bb, k, spd, defense, bwar, bwpa, bdollar, pitcher, era, eramin, whip, fipx, fipxmin, kper9, kper9plus, hrper9, gbperc, pwar, pwpa, pdollar)
26 cts.execute(insertStatement, statTuple)
27 CTeamStats.commit()
28 cts.close()
29
30 #Function used to scrape fangraphs to get all of the desired team statistics
31 def web_scrape(teamList, year):
32 #adds all the pitcher stats from the teams
33 source = requests.get('https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=0&type=c,6,117,62,119,36,301,40,48,63,60,4,59,32,17,42&season=' + year + '&month=0&season1=' + year + '&ind=0&team=0,ts&rost=0&age=0&filter=&players=0&startdate=2019-01-01&enddate=2019-12-31&sort=1,a').text
34 soup = BeautifulSoup(source, "html.parser")
35 #use the identifier class to scrape the right table
36 table = soup.find('table', class_ = 'rgMasterTable')
37 table_rows = table.find_all('tr')
38 #Scrape all the data from the table
39 for tr in table_rows:
40 td = tr.find_all('td')
41 row = [i.text for i in td]
42 del row[:1]
43 #Simple conditional checks to make sure all the data looks the same
44 if len(row) != 0:
45 row[8] = row[8][:-1]
46 if row[10] == '($1.9)':
47 row = '$1.9'
48 row[10] = row[10][1:]
49 teamList.append(row)
50 #adds all the batter stats to the teams
51 source = requests.get('https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=0&type=c,12,34,35,23,37,38,50,61,199,58,62,59,60,13,39&season=' + year + '&month=0&season1=' + year + '&ind=0&team=0,ts&rost=0&age=0&filter=&players=0&startdate=2019-01-01&enddate=2019-12-31&sort=1,a').text
52 soup = BeautifulSoup(source, "html.parser")
53 table = soup.find('table', class_ = 'rgMasterTable')
54 table_rows = table.find_all('tr')
55 #Scrape all the data from the table
56 for tr in table_rows:
57 td = tr.find_all('td')
58 row = [i.text for i in td]
59 del row[:2]
60 if len(row) != 0:
61 row[1] = row[1][:-1]
62 row[2] = row[2][:-1]
63 if row[11] == '($20.6)':
64 row[11] = '$20.6'
65 if row[11] == '($19.0)':
66 row[11] = '$19.0'
67 row[11] = row[11][1:]
68 teamList.append(row)
69 #Check to make the correct data is being added
70
71 #Main Program
72 def main():
73 cumulative_team_stats_table()
74 #for every year in the vector yearList
75 for i in range(len(yearList)):
76 teamList = []
77 #Scrape the table for the entire year
78 web_scrape(teamList, yearList[i])
79 #Enter the data for all 30 major league teams
80 for j in range(30):
81 data_entry(yearList[i], teamList[j][0], teamList[j][11], int(teamList[j][13]), int(teamList[j+30][13]) - int(teamList[j][14]), round(float(teamList[j][12]) + float(teamList[j+30][9]), 3), round(float(teamList[j][9]) + float(teamList[j+30][10]), 3), round(float(teamList[j][10]) + float(teamList[j+30][11]), 3), '-', float(teamList[j+30][3]), float(teamList[j+30][4]), float(teamList[j+30][5]), float(teamList[j+30][14]), float(teamList[j+30][6]), int(teamList[j+30][7]), float(teamList[j+30][1]), float(teamList[j+30][2]), float(teamList[j+30][12]), float(teamList[j+30][8]), float(teamList[j+30][9]), float(teamList[j+30][10]), float(teamList[j+30][11]), '-', float(teamList[j][1]), int(teamList[j][2]), float(teamList[j][15]), float(teamList[j][3]), float(teamList[j][4]), float(teamList[j][5]), float(teamList[j][6]), float(teamList[j][7]), float(teamList[j][8]), float(teamList[j][12]), float(teamList[j][9]), float(teamList[j][10]))
82
83 if __name__ == "__main__":
84 main()
| 22 - refactor: too-many-arguments
22 - refactor: too-many-positional-arguments
22 - refactor: too-many-locals
33 - warning: missing-timeout
51 - warning: missing-timeout
3 - warning: unused-import
|
1 import requests
2 import sqlite3
3 from sqlite3 import Error
4 from bs4 import BeautifulSoup
5
6 # Create the batter pool database
7 BatterPool = sqlite3.connect('TeamBatterPool.db')
8
9 positionList = ['c', '1b', '2b', 'ss', '3b', 'rf', 'cf', 'lf', 'dh']
10 yearList = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
11 teamList = ["Los_Angeles_Angels", "Baltimore_Orioles", "Boston_Red_Sox", "White_Sox", "Cleveland_Indians", "Detroit_Tigers", "Kansas_City_Royals", "Minnesota_Twins", "New_York_Yankees", "Oakland_Athletics", "Seattle_Mariners", "Tamba_Bay_Rays", "Texas_Rangers", "Toronto_Blue_Jays", "Arizona_Diamondbacks", "Atlanta_Braves", "Chicago_Cubs", "Cincinatti_Reds", "Colarado_Rockies", "Miami_Marlins", "Houston_Astros", "Los_Angeles_Dodgers", "Milwaukee_Brewers", "Washingon_Nationals", "New_York_Mets", "Philadelphia_Phillies", "Pittsburgh_Pirates", "St_Louis_Cardinals", "San_Diego_Padres", "San_Francisco_Giants"]
12 source = "https://www.baseball-reference.com/players/t/troutmi01.shtml"
13
14 def batter_pool_table(team_name, year):
15 bp = BatterPool.cursor()
16 #concanate the string
17 table_values = '(Player_Name TEXT, Age INTEGER, Position TEXT, WAR REAL, WPA REAL, wRCplus REAL, PA INTEGER, AVG REAL, OBP REAL, SLG REAL, OPS REAL, BABIP REAL, wOBA REAL, BBperc REAL, Kperc REAL, SPD REAL, DEF REAL, Worth TEXT)'
18 bp.execute('CREATE TABLE IF NOT EXISTS _' + year + team_name + table_values)
19 bp.close()
20
21 def data_entry(team_name, year, player_name, age, position, war, wpa, rcplus, pa, avg, obp, slg, ops, babip, oba, bbpec, kperc, speed, defense, worth):
22 bp = BatterPool.cursor()
23 insertStatement = "INSERT INTO _" + year + team_name + " (Player_Name, Age, Position, WAR, WPA, wRCplus, PA, AVG, OBP, SLG, OPS, BABIP, wOBA, BBperc, Kperc, SPD, DEF, Worth) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
24 statTuple = (player_name, age, position, war, wpa, rcplus, pa, avg, obp, slg, ops, babip, oba, bbpec, kperc, speed, defense, worth)
25 bp.execute(insertStatement, statTuple)
26 BatterPool.commit()
27 bp.close()
28
29 def web_scrape(playerList):
30 source = requests.get("https://www.baseball-reference.com/players/g/guerrvl01.shtml#all_br-salaries").text
31 soup = BeautifulSoup(source, "html.parser")
32 table = soup.find('table', id = 'batting_value')
33 table_rows = table.find_all('tr')
34 #Scrape all the data from the table
35 for tr in table_rows:
36 td = tr.find_all('td')
37 #th = tr.find('th')
38 row = [i.text for i in td]
39 #row.append(th.text)
40 playerList.append(row)
41 '''
42 table = soup.find('table', id = 'batting_standard')
43 table_rows = table.find_all('tr')
44 #Scrape all the data from the table
45 for tr in table_rows:
46 td = tr.find_all('td')
47 th = tr.find('th')
48 row = [i.text for i in td]
49 row.append(th.text)
50 playerList.append(row)
51 '''
52
53 playerList = []
54 web_scrape(playerList)
55 print(playerList)
| 21 - refactor: too-many-arguments
21 - refactor: too-many-positional-arguments
21 - refactor: too-many-locals
29 - warning: redefined-outer-name
30 - warning: redefined-outer-name
30 - warning: missing-timeout
41 - warning: pointless-string-statement
3 - warning: unused-import
|
1 import requests
2 import sqlite3
3 from sqlite3 import Error
4 from bs4 import BeautifulSoup
5
6 # Create the top 100 database
7 Top100 = sqlite3.connect('Top100Prospects.db')
8
9 #Year list for the top 100 prospects
10 yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
11
12 #Function to create the tables from 2012-2019
13 def top_100_table(year):
14 tp = Top100.cursor()
15 #concatenate the string
16 table_values = '(Rank INTEGER, Player_Name TEXT, Team TEXT, Organization_Rank TEXT, Age INTEGER, Position TEXT, MLB_Est TEXT)'
17 tp.execute('CREATE TABLE IF NOT EXISTS _' + year + 'Top100Prospects' + table_values)
18 tp.close()
19
20 #Function to enter the data into the respective SQLite table
21 def data_entry(year, rank, player_name, team, organization_rank, age, position, mlb_est):
22 tp = Top100.cursor()
23 insertStatement = "INSERT INTO _" + year + "Top100Prospects (Rank, Player_Name, Team, Organization_Rank, Age, Position, MLB_Est) VALUES(?, ?, ?, ?, ?, ?, ?)"
24 statTuple = (rank, player_name, team, organization_rank, age, position, mlb_est)
25 tp.execute(insertStatement, statTuple)
26 Top100.commit()
27 tp.close()
28
29 #Function to web scrape The Baseball Cube for the top 100 prospects
30 def web_scrape(playerList, year):
31 source = requests.get('http://www.thebaseballcube.com/prospects/years/byYear.asp?Y=' + year + '&Src=ba').text
32 soup = BeautifulSoup(source, "html.parser")
33 table = soup.find('table', id = 'grid2')
34 table_rows = table.find_all('tr')
35 for tr in table_rows:
36 td = tr.find_all('td')
37 row = [i.text for i in td]
38 #Manipulates the data that is not needed
39 if len(row) > 9:
40 row[9] = row[9][:4]
41 row[13] = row[13][:4]
42 del row[-2:]
43 del row[10:13]
44 del row[5:9]
45 playerList.append(row)
46 #removes the table labels that are not needed
47 del playerList[:2]
48 del playerList[25]
49 del playerList[50]
50 del playerList[75]
51 del playerList[100]
52
53
54 def main():
55 #create the database for every top 100 prospect from 2012-2019
56 for i in range(len(yearList)):
57 #call the method to create 8 tables
58 top_100_table(yearList[i])
59 #stores the data of all available free agent
60 playerList = []
61 #call web_scrape method
62 web_scrape(playerList, yearList[i])
63 for j in range(len(playerList)):
64 #insert the top100prospect data
65 data_entry(yearList[i], int(playerList[j][0]), playerList[j][1], playerList[j][2], playerList[j][3], int(yearList[i]) - int(playerList[j][5]) + 1, playerList[j][4], playerList[j][6])
66
67 if __name__ == "__main__":
68 main()
| 21 - refactor: too-many-arguments
21 - refactor: too-many-positional-arguments
31 - warning: missing-timeout
3 - warning: unused-import
|
1 import requests
2 import sqlite3
3 from sqlite3 import Error
4 from bs4 import BeautifulSoup
5
6 # Create the free agency database
7 FreeAgency = sqlite3.connect('FreeAgency.db')
8
9
10 # List to gather every year from 2012 to 2019
11 yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
12
13 #Create the Free Agency Pool from 2012-2019
14 def free_agency_table(year):
15 fa = FreeAgency.cursor()
16 #concatenate the string
17 table_values = '(Player_Name TEXT, Age INTEGER, Position TEXT, FA_Type TEXT, Rank INTEGER, Years INTEGER, Amount TEXT)'
18 fa.execute('CREATE TABLE IF NOT EXISTS _' + year + 'FA_Class' + table_values)
19 fa.close()
20
21 #Enter the data of a player into the respective table
22 def data_entry(year, player_name, age, position, fa_type, rank, years, amount):
23 fa = FreeAgency.cursor()
24 insertStatement = "INSERT INTO _" + year + "FA_Class (Player_Name, Age, Position, FA_Type, Rank, Years, Amount) VALUES(?, ?, ?, ?, ?, ?, ?)"
25 statTuple = (player_name, age, position, fa_type, rank, years, amount)
26 fa.execute(insertStatement, statTuple)
27 FreeAgency.commit()
28 fa.close()
29
30 #Scrapes ESPN for all of the Free Agents for a given year
31 def web_scrape(playerList, year):
32 source = requests.get('http://www.espn.com/mlb/freeagents/_/year/' + year).text
33 soup = BeautifulSoup(source, "html.parser")
34 table = soup.find('table')
35 table_rows = table.find_all('tr')
36 #Scrape all the data from the table
37 for tr in table_rows:
38 td = tr.find_all('td')
39 row = [i.text for i in td]
40 #Check to make the correct data is being added
41 if row[0] != 'PLAYER' and row[0] != 'Free Agents':
42 playerList.append(row)
43 #Remove 2011 team and new team
44 for i in range(len(playerList)):
45 del playerList[i][4:6]
46
47 #Function to modify the player list since some of the data from ESPN is not ideal for sorting purposes
48 def modifyPlayerList(playerList, i, j):
49 if playerList[j][3] == 'Signed (A)':
50 playerList[j][3] = 'A'
51 elif playerList[j][3] == 'Signed (B)':
52 playerList[j][3] = 'B'
53 else:
54 playerList[j][3] = 'None'
55 #set the age to the correct number
56 playerList[j][2] = int(playerList[j][2])
57 playerList[j][2] -= (2020 - int(yearList[i]))
58 #set the rank of the players, 51 is a place holder
59 if playerList[j][5] == 'NR':
60 playerList[j][5] = 51
61 else:
62 playerList[j][5] = int(playerList[j][5])
63 playerList[j][5] = 51 if playerList[j][5] == 'NR' else int(playerList[j][5])
64 #correct dollar amount FA
65 if playerList[j][6] == '--' or playerList[j][6] == 'Minor Lg':
66 playerList[j][4] = '0'
67 if playerList[j][6] == '--':
68 playerList[j][6] = 'Not Signed'
69
70 #Main function to create the free agent database which contains every free agent from 2012 to 2019
71 def main():
72 #create the database for every freeagent from 2011-2020
73 for i in range(len(yearList)):
74 #call the method to create 10 tables
75 free_agency_table(yearList[i])
76 #stores the data of all available free agent
77 playerList = []
78 #call web_scrape method
79 web_scrape(playerList, yearList[i])
80 print(playerList)
81 for j in range(len(playerList)):
82 #modify list method
83 modifyPlayerList(playerList, i, j)
84 #insert the free agent data
85 data_entry(yearList[i], playerList[j][0], int(playerList[j][2]), playerList[j][1], playerList[j][3], playerList[j][5], int(playerList[j][4]), playerList[j][6])
86
87 if __name__ == "__main__":
88 main()
| 22 - refactor: too-many-arguments
22 - refactor: too-many-positional-arguments
32 - warning: missing-timeout
3 - warning: unused-import
|
1 import requests
2 import sqlite3
3 from sqlite3 import Error
4 from bs4 import BeautifulSoup
5
6 # Create the pitcher pool database
7 PitcherPool = sqlite3.connect('TeamPitcherPool1.db')
8
9 yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
10 teamList = ["Los_Angeles_Angels", "Baltimore_Orioles", "Boston_Red_Sox", "White_Sox", "Cleveland_Indians", "Detroit_Tigers", "Kansas_City_Royals", "Minnesota_Twins", "New_York_Yankees", "Oakland_Athletics", "Seattle_Mariners", "Tamba_Bay_Rays", "Texas_Rangers", "Toronto_Blue_Jays", "Arizona_Diamondbacks", "Atlanta_Braves", "Chicago_Cubs", "Cincinatti_Reds", "Colarado_Rockies", "Miami_Marlins", "Houston_Astros", "Los_Angeles_Dodgers", "Milwaukee_Brewers", "Washingon_Nationals", "New_York_Mets", "Philadelphia_Phillies", "Pittsburgh_Pirates", "St_Louis_Cardinals", "San_Diego_Padres", "San_Francisco_Giants"]
11 source = "https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=0&type=c,3,59,45,118,6,117,42,7,13,36,40,48,60,63&season=2011&month=0&season1=2011&ind=0&team=1&rost=0&age=0&filter=&players=0&startdate=2011-01-01&enddate=2011-12-31"
12
13 #Function to create the tables from 2012-2019
14 def pitcher_pool_table(year, team_name):
15 pp = PitcherPool.cursor()
16 #concatenate the string
17 table_values = '(Player_Name TEXT, Age INTEGER, IP REAL, WAR REAL, WPA REAL, FIPx REAL, FIPXminus REAL, ERA REAL, ERAminus REAL, WHIP REAL, Kper9 REAL, HRper9 REAL, GBperc REAL, Worth TEXT)'
18 pp.execute('CREATE TABLE IF NOT EXISTS _' + year + team_name + table_values)
19 pp.close()
20
21 #Function to enter the data into the respective SQLite table
22 def data_entry(team_name, year, player_name, age, innings_pitched, war, wpa, fipx, fipx_minus, era, era_minus, whip, kPer9, hrPer9, gb_percentage, worth):
23 pp = PitcherPool.cursor()
24 insertStatement = "INSERT INTO _" + year + team_name + " (Player_Name, Age, IP, WAR, WPA, FIPx, FIPXminus, ERA, ERAminus, WHIP, Kper9, HRper9, GBperc, Worth) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
25 statTuple = (player_name, age, innings_pitched, war, wpa, fipx, fipx_minus, era, era_minus, whip, kPer9, hrPer9, gb_percentage, worth)
26 pp.execute(insertStatement, statTuple)
27 PitcherPool.commit()
28 pp.close()
29
30 #Function to web scrape FanGraphs for every the pitcher on every team
31 def web_scrape(playerList, year, team):
32 source = requests.get("https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=0&type=c,3,59,45,118,6,117,42,7,13,36,40,48,60,63&season=" + year + "&month=0&season1=" + year + "&ind=0&team=" + str(team + 1) + "&rost=0&age=0&filter=&players=0&startdate=2011-01-01&enddate=2011-12-31").text
33 soup = BeautifulSoup(source, "html.parser")
34 table = soup.find('table', class_ = 'rgMasterTable')
35 table_rows = table.find_all('tr')
36 #Scrape all the data from the table
37 for tr in table_rows:
38 td = tr.find_all('td')
39 row = [i.text for i in td]
40 if len(row) == 16:
41 playerList.append(row)
42
43 #main function to add the desired pitcher stats for every team from 2012 to 2019
44 def main():
45 counter = 0
46 #iterate through every year
47 for h in range(len(yearList)):
48 #iterate through every team
49 for i in range(30):
50 pitcher_pool_table(yearList[h], teamList[i])
51 playerList = []
52 web_scrape(playerList, yearList[h], i)
53 #iterate through every player
54 for k in range(len(playerList)):
55 counter += 1
56 data_entry(teamList[i], yearList[h], playerList[k][1], playerList[k][2], playerList[k][10], playerList[k][3], playerList[k][15], playerList[k][4], playerList[k][5], playerList[k][6], playerList[k][7], playerList[k][8], playerList[k][11], playerList[k][12], playerList[k][13], playerList[k][14])
57 print(counter)
58
59 if __name__ == "__main__":
60 main()
| 22 - refactor: too-many-arguments
22 - refactor: too-many-positional-arguments
22 - refactor: too-many-locals
32 - warning: redefined-outer-name
32 - warning: missing-timeout
3 - warning: unused-import
|
1 import requests
2 import sqlite3
3 from sqlite3 import Error
4 from bs4 import BeautifulSoup
5
6 # Create the free agency database
7 International = sqlite3.connect('InternationalProspects.db')
8
9
10 # List for the Free Agency Pool
11 yearList = ['2015', '2016', '2017', '2018', '2019']
12
13 #Create the International Table from 2015-2019
14 def international_table(year):
15 ip = International.cursor()
16 #concanate the string
17 table_values = '(Rank INTEGER, Player_Name TEXT, Position TEXT, Age INTEGER, Projected_Team TEXT, Future_Value TEXT)'
18 ip.execute('CREATE TABLE IF NOT EXISTS _' + year + 'TopInternationalClass' + table_values)
19 ip.close()
20
21 #Enter the data of a player into the respective table
22 def data_entry(year, rank, player_name, position, age, proj_team, fut_val):
23 ip = International.cursor()
24 #need the underscore because a table can't start with a number
25 insertStatement = "INSERT INTO _" + year + "International_Prospects (Rank, Player_Name, Team, Organization_Rank, Age, Position, MLB_Est) VALUES(?, ?, ?, ?, ?, ?, ?)"
26 statTuple = (rank, player_name, position, age, proj_team, fut_val)
27 ip.execute(insertStatement, statTuple)
28 International.commit()
29 ip.close()
30
31 #Scrapes ESPN for all of the Free Agents for a given year
32 def web_scrape(playerList, year):
33 #URL changes based on the year
34 source = requests.get('https://www.fangraphs.com/prospects/the-board/' + year + '-international/summary?sort=-1,1&type=0&pageitems=200&pg=0').text
35 soup = BeautifulSoup(source, "html.parser")
36 table = soup.find_all('table')
37 for table_rows in table:
38 table_row = table_rows.find_all('tr')
39 #Scrape all the data from the table
40 for tr in table_row:
41 td = tr.find_all('td')
42 row = [i.text for i in td]
43 playerList.append(row)
44
45 #main function to create the database of all the top international free agents from 2015-2019
46 def main():
47 #5 tables will be created in sqLite with all available international free agents from fangraphs
48 for i in range(len(yearList)):
49 international_table(yearList[i])
50
51 if __name__ == "__main__":
52 main() | 22 - refactor: too-many-arguments
22 - refactor: too-many-positional-arguments
34 - warning: missing-timeout
3 - warning: unused-import
|
1 import requests
2 import sqlite3
3 from sqlite3 import Error
4 from bs4 import BeautifulSoup
5
6 #Creates the player draft database
7 PlayerDraft = sqlite3.connect('PlayerDraft.db')
8
9 yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
10
11 #Function to create the player draft tables
12 def player_draft_table(year):
13 pd = PlayerDraft.cursor()
14 #concanate the string
15 table_values = '(Player_Name TEXT, Rank INTEGER, Position TEXT, School TEXT)'
16 pd.execute('CREATE TABLE IF NOT EXISTS _' + year + 'Draft_Class' + table_values)
17 pd.close()
18
19 #Inserts the data into the table
20 def data_entry(year, player_name, rank, position, school):
21 pd = PlayerDraft.cursor()
22 insertStatement = "INSERT INTO _" + year + "Draft_Class (Player_Name, Rank, Position, School) VALUES(?, ?, ?, ?)"
23 statTuple = (player_name, rank, position, school)
24 pd.execute(insertStatement, statTuple)
25 PlayerDraft.commit()
26 pd.close()
27
28 #Scrapes the internet from Baseball Almanac
29 def web_scrape(draftList, year):
30 source = requests.get('https://www.baseball-almanac.com/draft/baseball-draft.php?yr=' + year).text
31 soup = BeautifulSoup(source, "html.parser")
32 table = soup.find('table')
33 table_rows = table.find_all('tr')
34 #Scrape all the data from the table
35 for tr in table_rows:
36 td = tr.find_all('td')
37 row = [i.text for i in td]
38 #Adds the top 200 prospects for every year
39 if len(draftList) > 201:
40 break
41 draftList.append(row)
42
43 #main function to create a database for the top prospects from 2012-2019
44 def main():
45 for i in range(len(yearList)):
46 player_draft_table(yearList[i])
47 draftList = []
48 web_scrape(draftList, yearList[i])
49 #removes the heading of the table due to the structure on Baseball Almanac
50 draftList.pop(0)
51 draftList.pop(0)
52 for j in range(len(draftList)):
53 data_entry(yearList[i], draftList[j][3], draftList[j][1], draftList[j][5], draftList[j][6])
54
55 if __name__ == "__main__":
56 main()
| 30 - warning: missing-timeout
3 - warning: unused-import
|
1 # def test_no_cors_enabled():
2 # assert False | Clean Code: No Issues Detected
|
1 from flask import Response
2 from flask.testing import FlaskClient
3
4
5 # def test_with_origin(client: FlaskClient):
6 # response: Response = client.options('/some-request', headers={
7 # 'Access-Control-Request-Method': 'POST',
8 # 'Access-Control-Request-Headers': 'Content-Type, X-Custom',
9 # 'Origin': 'https://test.org'
10 # })
11 # assert response.status_code == 404
12 # assert 'Access-Control-Max-Age' in response.headers
13 # assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
14
15
16 def test_with_origin(client: FlaskClient):
17 response: Response = client.options('/some-request', headers={
18 'Origin': 'https://test.org'
19 })
20 assert response.status_code == 404
21 assert 'Access-Control-Allow-Origin'.lower() in response.headers
22 assert 'Access-Control-Max-Age'.lower() in response.headers
23 assert response.headers.get('Access-Control-Allow-Origin') is not None
24 assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
25 assert response.headers.get('Access-Control-Max-Age') is not None
26 assert response.headers.get('Access-Control-Max-Age') != ''
27
28
29 def test_without_origin(client: FlaskClient):
30 response: Response = client.options('/some-request', headers={
31 })
32 assert response.status_code == 404
33 assert 'Access-Control-Allow-Origin'.lower() not in response.headers
34 assert 'Access-Control-Max-Age'.lower() not in response.headers
35 assert 'Access-Control-Allow-Methods'.lower() not in response.headers
36 assert 'Access-Control-Allow-Headers'.lower() not in response.headers
37
38
39 def test_allow_method(client: FlaskClient):
40 response: Response = client.options('/some-request', headers={
41 'Access-Control-Request-Method': 'POST',
42 'Origin': 'https://test.org'
43 })
44 assert response.status_code == 404
45 assert 'Access-Control-Allow-Methods'.lower() in response.headers
46 assert 'POST' in response.headers.get('Access-Control-Allow-Methods')
47 assert 'Access-Control-Max-Age'.lower() in response.headers
48 assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
49 assert 'Access-Control-Allow-Headers'.lower() not in response.headers
50
51
52 def test_dont_allow_method(client: FlaskClient):
53 response: Response = client.options('/some-request', headers={
54 'Access-Control-Request-Method': 'PATCH',
55 'Origin': 'https://test.org'
56 })
57 assert response.status_code == 404
58 assert 'Access-Control-Allow-Methods'.lower() not in response.headers
59 assert 'Access-Control-Max-Age'.lower() in response.headers
60 assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
61 assert 'Access-Control-Allow-Headers'.lower() not in response.headers
62
63
64 def test_allow_headers(client: FlaskClient):
65 response: Response = client.options('/some-request', headers={
66 'Access-Control-Request-Headers': 'Content-Type, X-Test-Header',
67 'Origin': 'https://test.org'
68 })
69 assert response.status_code == 404
70 assert 'Access-Control-Allow-Headers'.lower() in response.headers
71 assert 'Content-Type' in response.headers.get('Access-Control-Allow-Headers')
72 assert 'X-Test-Header' in response.headers.get('Access-Control-Allow-Headers')
73 assert 'Access-Control-Max-Age'.lower() in response.headers
74 assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
75 assert 'Access-Control-Allow-Methods'.lower() not in response.headers
76
77
78 def test_dont_allow_headers(client: FlaskClient):
79 response: Response = client.options('/some-request', headers={
80 'Access-Control-Request-Headers': 'Content-Type, X-Test-Header, X-Not-Allowed',
81 'Origin': 'https://test.org'
82 })
83 assert response.status_code == 404
84 assert 'Access-Control-Allow-Headers'.lower() not in response.headers
85 assert 'Access-Control-Max-Age'.lower() in response.headers
86 assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
87 assert 'Access-Control-Allow-Methods'.lower() not in response.headers
| Clean Code: No Issues Detected
|
1 import pytest
2 from flask import Flask
3
4 from yafcorse import Yafcorse
5
6
7 @pytest.fixture()
8 def app():
9 app = Flask(__name__)
10
11 cors = Yafcorse({
12 'origins': '*',
13 'allowed_methods': ['GET', 'POST', 'PUT'],
14 'allowed_headers': ['Content-Type', 'X-Test-Header'],
15 'allow_credentials': True,
16 'cache_max_age': str(60 * 5)
17 })
18 cors.init_app(app)
19
20 return app
21
22
23 @pytest.fixture()
24 def client(app: Flask):
25 return app.test_client()
| 9 - warning: redefined-outer-name
24 - warning: redefined-outer-name
|
1 from flask import Flask, Response
2 from flask.testing import FlaskClient
3
4
5 def test_simple_request(client: FlaskClient):
6 response: Response = client.get('/some-request', headers={
7 'Origin': 'https://test.org'
8 })
9 assert response.status_code == 404
10 assert 'Access-Control-Allow-Origin'.lower() in response.headers
11 assert 'Access-Control-Max-Age'.lower() not in response.headers
12 assert response.headers.get('Access-Control-Allow-Origin') is not None
13 assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
| 1 - warning: unused-import
|
1 from flask.app import Flask
2
3 from yafcorse import Yafcorse
4
5
6 def test_extension(app: Flask):
7 assert app.extensions.get('yafcorse') is not None
8 assert isinstance(app.extensions.get('yafcorse'), Yafcorse)
| Clean Code: No Issues Detected
|
1 import re
2 from typing import Callable, Iterable
3 from flask import Flask, Response, request
4
5 # Yet Another Flask CORS Extension
6 # --------------------------------
7 # Based on https://developer.mozilla.org/de/docs/Web/HTTP/CORS
8
9 # DEFAULT_CONFIGURATION = {
10 # 'origins': '*',
11 # 'allowed_methods': ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE'],
12 # 'allowed_headers': '*',
13 # 'allow_credentials': True,
14 # 'cache_max_age': str(60 * 5)
15 # }
16
17 DEFAULT_CONFIGURATION = {
18 'origins': None,
19 'allowed_methods': [],
20 'allowed_headers': None,
21 'allow_credentials': False,
22 'cache_max_age': None
23 }
24
25
26 class Yafcorse(object):
27 def __init__(self, configuration: dict = DEFAULT_CONFIGURATION, app: Flask = None) -> None:
28 super().__init__()
29 self.__initialized = False
30
31 self.__origins = configuration.get('origins', DEFAULT_CONFIGURATION.get('origins'))
32 self.__regex_origin_patterns = configuration.get('origin_patterns', None)
33 self.__allowed_methods = configuration.get('allowed_methods', DEFAULT_CONFIGURATION.get('allowed_methods'))
34 self.__allowed_headers = configuration.get('allowed_headers', DEFAULT_CONFIGURATION.get('allowed_headers'))
35 self.__allow_credentials = configuration.get('allow_credentials', DEFAULT_CONFIGURATION.get('allow_credentials'))
36 self.__max_age = configuration.get('cache_max_age', DEFAULT_CONFIGURATION.get('cache_max_age'))
37
38 self.__allowed_methods_value = ''
39 self.__allowed_headers_value = ''
40
41 self.init_app(app)
42
43 def init_app(self, app: Flask):
44 if not self.__initialized and app:
45
46 self.__allowed_methods_value = ', '.join(self.__allowed_methods)
47 self.__allowed_methods = [m.strip().lower() for m in self.__allowed_methods]
48 self.__allowed_headers_value = ', '.join(self.__allowed_headers)
49 self.__allowed_headers = [h.strip().lower() for h in self.__allowed_headers]
50
51 if not isinstance(self.__origins, str) and isinstance(self.__origins, (list, tuple, Iterable)):
52 self.__validate_origin = _check_if_contains_origin(self.__origins)
53 elif isinstance(self.__origins, Callable):
54 self.__validate_origin = self.__origins
55 elif self.__regex_origin_patterns is not None:
56 self.__validate_origin = _check_if_regex_match_origin(self.__regex_origin_patterns)
57 else:
58 self.__validate_origin = _check_if_asterisk_origin(self.__origins)
59
60 app.after_request(self.__handle_response)
61
62 app.extensions['yafcorse'] = self
63 self.__initialized = True
64
65 def __append_headers(self, response: Response, origin: str, is_preflight_request: bool = False):
66 response.headers.add_header('Access-Control-Allow-Origin', origin)
67
68 if 'Access-Control-Request-Method' in request.headers \
69 and request.headers.get('Access-Control-Request-Method', '').strip().lower() in self.__allowed_methods:
70 response.headers.add_header('Access-Control-Allow-Methods', self.__allowed_methods_value)
71
72 if 'Access-Control-Request-Headers' in request.headers \
73 and _string_list_in(request.headers.get('Access-Control-Request-Headers').split(','), self.__allowed_headers):
74 response.headers.add_header('Access-Control-Allow-Headers', self.__allowed_headers_value)
75
76 if self.__allow_credentials:
77 response.headers.add_header('Access-Control-Allow-Credentials', 'true')
78 if is_preflight_request:
79 response.headers.add_header('Access-Control-Max-Age', self.__max_age)
80
81 def __handle_response(self, response: Response):
82 is_preflight_request = request.method == 'OPTIONS'
83 if not is_preflight_request and 'Origin' not in request.headers:
84 return response
85
86 origin = request.headers.get('Origin')
87
88 if not self.__validate_origin(origin):
89 return response
90
91 self.__append_headers(response, origin, is_preflight_request)
92 return response
93
94
95 def _string_list_in(target: list[str], source: list[str]):
96 contained = [element for element in target if element.strip().lower() in source]
97 return contained == target
98
99
100 def _check_if_regex_match_origin(patterns):
101 compiled_patterns = [re.compile(p) for p in patterns]
102 def execute_check(origin):
103 for matcher in compiled_patterns:
104 if matcher.match(origin):
105 return True
106 return False
107
108 execute_check.__name__ = _check_if_regex_match_origin.__name__
109 return execute_check
110
111
112 def _check_if_contains_origin(origins):
113 def execute_check(origin):
114 for o in origins:
115 if o == origin:
116 return True
117 return False
118
119 execute_check.__name__ = _check_if_contains_origin.__name__
120 return execute_check
121
122
123 def _check_if_asterisk_origin(origins):
124 allow_all = origins == '*'
125 def execute_check(origin):
126 return allow_all and origin is not None
127
128 execute_check.__name__ = _check_if_asterisk_origin.__name__
129 return execute_check
| 96 - warning: bad-indentation
97 - warning: bad-indentation
26 - refactor: useless-object-inheritance
26 - refactor: too-many-instance-attributes
27 - warning: dangerous-default-value
26 - refactor: too-few-public-methods
|
1 import pytest
2 from flask import Flask, Response
3 from flask.testing import FlaskClient
4
5 from yafcorse import Yafcorse
6
7
8 @pytest.fixture()
9 def local_app():
10 app = Flask(__name__)
11
12 cors = Yafcorse({
13 'allowed_methods': ['GET', 'POST', 'PUT'],
14 'allowed_headers': ['Content-Type', 'X-Test-Header'],
15 'origins': lambda origin: origin == 'https://from_lambda'
16 })
17 cors.init_app(app)
18
19 return app
20
21
22 @pytest.fixture()
23 def local_client(local_app: Flask):
24 return local_app.test_client()
25
26
27 def test_origin_function(local_client: FlaskClient):
28 response: Response = local_client.options('/some-request', headers={
29 'Origin': 'https://from_lambda'
30 })
31 assert response.status_code == 404
32 assert 'Access-Control-Allow-Origin'.lower() in response.headers
33 assert 'Access-Control-Max-Age'.lower() in response.headers
34 assert response.headers.get('Access-Control-Allow-Origin') is not None
35 assert response.headers.get('Access-Control-Allow-Origin') == 'https://from_lambda'
36 assert response.headers.get('Access-Control-Max-Age') is not None
37 assert response.headers.get('Access-Control-Max-Age') != ''
38
39
40 def test_origin_function_fail(local_client: FlaskClient):
41 response: Response = local_client.options('/some-request', headers={
42 'Origin': 'https://other_than_lambda'
43 })
44 assert response.status_code == 404
45 assert 'Access-Control-Allow-Origin'.lower() not in response.headers
46 assert 'Access-Control-Max-Age'.lower() not in response.headers
| 23 - warning: redefined-outer-name
27 - warning: redefined-outer-name
40 - warning: redefined-outer-name
|
1 '''
2 PDF Text Extractor Main Module
3
4 This module will read every .pdf file within a directory. It will
5 use the PDFExtractor to extract its contents to a string. That
6 string will then be passed to TextFormatter where it will be
7 properly formatted to the desired format.
8
9 The module will ask the user for a desired output file name, but
10 if one if not provided then a default name will be used.
11
12 The .exe file must be within the same directory as the .pdf files.
13 '''
14
15 import os
16 import pymsgbox
17
18 from extractor import PDFExtractor
19 from formatter import TextFormatter
20
21 # returs a name of the output file
22 def get_user_input():
23 user_input = pymsgbox.prompt('Enter name', default=add_txt_ext(''), title='FBPI .pdf Text Extractor')
24 # closes program if user clicks cancel
25 if user_input == None:
26 exit(0)
27 return user_input
28
29 # ensure the output file has a name
30 def add_txt_ext(user_input):
31 if len(user_input) < 1:
32 return '_output'
33 else:
34 return user_input
35
36 # main function, runs on program startup
37 def main():
38 #create an pdf extractor
39 extractor = PDFExtractor()
40
41 # create a text formatter
42 formatter = TextFormatter()
43
44 # stores the name of the output file
45 user_input = get_user_input()
46
47 # create the output .txt file
48 output_file = open(add_txt_ext(user_input) + '.txt', 'w')
49
50 # stores a list of all files in the current directory
51 file_list = os.listdir(os.getcwd())
52
53 # interate through all the files in the file list
54 for files in file_list:
55 # will only process .pdf files
56 if files.endswith('.pdf'):
57 # convert contents of each pdf file to a string
58 name_badge = extractor.pdf_to_text(files)
59
60 # formats the string to the propper format
61 name_badge = formatter.name_tab_title(name_badge)
62
63 # writes the formatted string to the output file
64 output_file.write(name_badge)
65
66 output_file.close()
67
68 if __name__ == '__main__':
69 main()
| 19 - warning: deprecated-module
26 - refactor: consider-using-sys-exit
31 - refactor: no-else-return
48 - warning: unspecified-encoding
48 - refactor: consider-using-with
|
1 '''
2 PDF Text Extractor Module
3
4 This module will extract the text from a .pdf file and return the
5 contents as a string.
6 '''
7
8 from io import StringIO
9 from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
10 from pdfminer.converter import TextConverter
11 from pdfminer.layout import LAParams
12 from pdfminer.pdfpage import PDFPage
13 import getopt
14
15 class PDFExtractor(object):
16
17 # takes in a parameter of a pdf file
18 # returns the contents as a string
19 def pdf_to_text(self, pdf_file, pages=None):
20 # allows multiple pages to be passed in as a parameter
21 if pages:
22 num_of_pages = set(pages)
23 else:
24 num_of_pages = set()
25
26 output = StringIO()
27 manager = PDFResourceManager()
28
29 # parameters require a resource manager and an output text stream
30 converter = TextConverter(manager, output, laparams=LAParams())
31
32 # parameters require a resource manager and a text converter
33 interpreter = PDFPageInterpreter(manager, converter)
34
35 input_file = open(pdf_file, 'rb')
36 for page in PDFPage.get_pages(input_file, num_of_pages):
37 interpreter.process_page(page)
38 input_file.close()
39 converter.close()
40
41 text = output.getvalue()
42 output.close()
43
44 return text | 15 - refactor: useless-object-inheritance
35 - refactor: consider-using-with
15 - refactor: too-few-public-methods
13 - warning: unused-import
|
1 '''
2 Text Formatter Module
3
4 This module will format the string input to match the desired output.
5 '''
6
7 class TextFormatter(object):
8
9 # takes in a string parameter
10 # returns the string formatted as: 'name TAB title'
11 def name_tab_title(self, text):
12 # stores contents of the input text into a list
13 name_badge = text.split('\n')
14
15 badges = []
16
17 # strip the whitepsace from every element
18 for element in name_badge:
19 badges.append(element.strip())
20
21 # return true from as long as the badge has a blank line
22 while badges.count(''):
23 badges.remove('')
24
25 # stores the last string added to the badge list as the title
26 title = badges.pop()
27
28 # stores the first string added to the badge list as the name
29 name = badges.pop()
30
31 # formats the string as 'name TAB title'
32 name_badge = ('%s\t%s\n' % (name, title))
33
34 return name_badge
| 7 - refactor: useless-object-inheritance
7 - refactor: too-few-public-methods
|
1 # -*- coding: utf-8 -*-
2 from datetime import datetime
3 #from googletrans import Translator
4 from translate import Translator
5 from TwitterSearch import *
6
7 import configparser
8 import random
9 import re
10 import io
11
12 weather = [u"Sunny", u"Rainy", u"Cloudy"]
13 weather_tw = [u"晴天",u"雨天", u"陰天"]
14
15 translator= Translator(to_lang='zh-TW')
16 #translator= Translator()
17
18 cf = configparser.ConfigParser()
19 cf.read('janediary.conf')
20
21 #consumer_key = cf.get('twitter', 'consumer_key')
22 #consumer_secret = cf.get('twitter', 'consumer_secret')
23 #access_token = cf.get('twitter', 'access_token')
24 #access_token_secret = cf.get('twitter', 'access_token_secret')
25
26 ts = TwitterSearch(
27 consumer_key = cf.get('twitter', 'consumer_key'),
28 consumer_secret = cf.get('twitter', 'consumer_secret'),
29 access_token = cf.get('twitter', 'access_token'),
30 access_token_secret = cf.get('twitter', 'access_token_secret')
31 )
32 data_path = cf.get('data', 'data_path')
33
34 tso = TwitterSearchOrder()
35
36 def get_tweets(keyword_list, num=20, lang='en'):
37 tweets = []
38 try:
39 tso.set_keywords(keyword_list)
40 tso.set_language(lang)
41 i = 0
42 for tweet in ts.search_tweets_iterable(tso):
43 if i == num: break
44 if tweet['retweeted']: continue
45 tweets.append(tweet)
46 i = i+1
47
48 except TwitterSearchException as e:
49 print(e)
50
51 return tweets
52
53 def generate_jane_story(num=20, lang='en'):
54 tweets = get_tweets(['jane'], num, lang)
55 story = ""
56 for tweet in tweets:
57 story = u"%s %s" % (story, tweet['text'])
58
59 return story
60
61 def clear_up_text(text):
62 text = re.sub(r'RT @\S+: ', '', text)
63 clear_text = re.sub(r'http\S+', '', text)
64 clear_text = remove_emoji(clear_text)
65
66 return clear_text.strip()
67
68 def remove_emoji(text):
69 emoji_pattern = re.compile(
70 u"(\ud83d[\ude00-\ude4f])|" # emoticons
71 u"(\ud83c[\udf00-\uffff])|" # symbols & pictographs (1 of 2)
72 u"(\ud83d[\u0000-\uddff])|" # symbols & pictographs (2 of 2)
73 u"(\ud83d[\ude80-\udeff])|" # transport & map symbols
74 u"(\ud83c[\udde0-\uddff])" # flags (iOS)
75 "+", flags=re.UNICODE)
76
77 return emoji_pattern.sub(r'', text)
78
79 def get_translation(input_text, lang='zh-TW'):
80 output = ""
81 try:
82 #output = translator.translate(input_text, dest=lang)
83 output = translator.translate(input_text)
84
85 except Exception as e:
86 print(e)
87 return ""
88
89 return output
90
91 def save_story(filename, text):
92 with io.open(filename,'w',encoding='utf8') as f:
93 f.write(text)
94 f.close()
95
96 if __name__ == '__main__':
97 jane_story_en = ""
98 clear_story = ""
99 translated_story = ""
100
101 jane_story_en = generate_jane_story(10, 'en')
102 clear_story = clear_up_text(jane_story_en)
103 print("---")
104 print(clear_story)
105 translated_story = get_translation(clear_story[:500])
106 print("----")
107 print(translated_story)
108 current_time = datetime.now()
109 weather_idx = random.randrange(3)
110 y, m, d, h = current_time.year, current_time.month, current_time.day, current_time.hour
111 clear_story = u"%s %s\n%s" % (current_time.strftime('%Y-%m-%d %H:00'), weather[weather_idx], clear_story)
112 translated_story = u"%d年%d月%d日%d時 %s\n%s" % (y, m, d, h, weather_tw[weather_idx], translated_story)
113
114 print(clear_story)
115 print("\n")
116 print(translated_story)
117 print("save file")
118 save_story("%s/%s.txt" %(data_path, current_time.strftime("%Y%m%d")), clear_story+"\n\n"+translated_story)
119 #save_story("%s/%s_en.txt" % (data_path, current_time.strftime("%Y%m%d")), clear_story)
120 #save_story("%s/%s_tw.txt" % (data_path, current_time.strftime("%Y%m%d")), translated_story)
121
| 5 - warning: wildcard-import
12 - warning: redundant-u-string-prefix
12 - warning: redundant-u-string-prefix
12 - warning: redundant-u-string-prefix
13 - warning: redundant-u-string-prefix
13 - warning: redundant-u-string-prefix
13 - warning: redundant-u-string-prefix
26 - error: undefined-variable
34 - error: undefined-variable
48 - error: undefined-variable
57 - warning: redundant-u-string-prefix
70 - warning: redundant-u-string-prefix
85 - warning: broad-exception-caught
79 - warning: unused-argument
111 - warning: redundant-u-string-prefix
112 - warning: redundant-u-string-prefix
|
1 import numpy as pd
2 import matplotlib.pyplot as plt
3 import pandas as pd
4
5 dataset=pd.read_csv('music.csv') | 2 - warning: unused-import
|
1 import mediapipe as mp
2 import numpy as np
3 import cv2
4
5 cap = cv2.VideoCapture(0)
6
7 facmesh = mp.solutions.face_mesh
8 face = facmesh.FaceMesh(static_image_mode=True, min_tracking_confidence=0.6, min_detection_confidence=0.6)
9 draw = mp.solutions.drawing_utils
10
11 while True:
12
13 _, frm = cap.read()
14 print(frm.shape)
15 break
16 rgb = cv2.cvtColor(frm, cv2.COLOR_BGR2RGB)
17
18 op = face.process(rgb)
19 if op.multi_face_landmarks:
20 for i in op.multi_face_landmarks:
21 print(i.landmark[0].y*480)
22 draw.draw_landmarks(frm, i, facmesh.FACE_CONNECTIONS, landmark_drawing_spec=draw.DrawingSpec(color=(0, 255, 255), circle_radius=1))
23
24
25 cv2.imshow("window", frm)
26
27 if cv2.waitKey(1) == 27:
28 cap.release()
29 cv2.destroyAllWindows()
30 break
| 13 - warning: bad-indentation
14 - warning: bad-indentation
15 - warning: bad-indentation
16 - warning: bad-indentation
18 - warning: bad-indentation
19 - warning: bad-indentation
20 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
25 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
16 - warning: unreachable
2 - warning: unused-import
|
1 from datetime import datetime
2
3 def log(data):
4 print('----', datetime.now(), '----')
5 print(data)
6
7
8 def logError(error):
9 print('****', datetime.now(), '****')
10 print(error) | Clean Code: No Issues Detected
|
1 from tattle_helper import register_post, upload_file
2
3 data = {
4 "type" : "image",
5 "data" : "",
6 "filename": "asdf",
7 "userId" : 169
8 }
9
10 response = upload_file(file_name='denny.txt')
11 print(response)
12
13 # register_post(data) | 1 - warning: unused-import
|
1 token = "78a6fc20-fa83-11e9-a4ad-d1866a9a3c7b" # add your token here
2 url = "<base-api-url>/api/posts"
3 try:
4 payload = d
5 payload = json.dumps(payload)
6 headers = {
7 'token': token,
8 'Content-Type': "application/json",
9 'cache-control': "no-cache",
10 }
11 r = requests.post(url, data=payload, headers=headers)
12 if r.ok:
13 print ('success')
14 else:
15 print ('something went wrong')
16
17 except:
18 logging.exception('error in POST request')
19 raise
20
21 {
22 "type" : "image", # can be image, text, video
23 "data" : "",
24 "filename": "4bf4b1cc-516b-469d-aa38-be6762d417a5", #filename you put on s3
25 "userId" : 169 # for telegram_bot this should be 169
26 } | 4 - error: undefined-variable
5 - error: undefined-variable
11 - error: undefined-variable
18 - error: undefined-variable
21 - warning: pointless-statement
|
1 import os
2 import json
3 import boto3
4 import requests
5 from logger import log, logError
6 from dotenv import load_dotenv
7 load_dotenv()
8
9 s3 = boto3.client("s3",aws_access_key_id=os.environ.get('S3_ACCESS_KEY'),aws_secret_access_key=os.environ.get('S3_SECRET_ACCESS_KEY'))
10
11 API_BASE_URL = "https://archive-server.tattle.co.in"
12 # API_BASE_URL = "https://postman-echo.com/post"
13 ARCHIVE_TOKEN = os.environ.get('ARCHIVE_TOKEN')
14
15 def register_post(data):
16 """
17 registers a post on archive server
18 """
19 url_to_post_to = API_BASE_URL+"/api/posts"
20 payload = json.dumps(data)
21 headers = {
22 'token': ARCHIVE_TOKEN,
23 'Content-Type': "application/json",
24 'cache-control': "no-cache",
25 }
26
27 try:
28 r = requests.post(url_to_post_to, data=payload, headers=headers)
29
30 if r.status_code==200:
31 log('STATUS CODE 200 \n'+json.dumps(r.json(), indent=2))
32 else:
33 log('STATUS CODE '+str(r.status_code)+'\n '+r.text)
34 except:
35 log('error with API call')
36
37
38 def upload_file(file_name, s3=s3 ,acl="public-read"):
39 bucket_name = os.environ.get('TGM_BUCKET_NAME')
40 #opens file, reads it, and uploads it to the S3 bucket.
41 try:
42 with open(file_name, 'rb') as data:
43 s3.upload_fileobj(data,bucket_name,file_name,ExtraArgs={"ACL": acl,"ContentType": file_name.split(".")[-1]})
44 except:
45 logError('ERROR_S3_UPLOAD of '+file_name)
46
47 file_url = "https://s3.ap-south-1.amazonaws.com/"+bucket_name+"/"+file_name
48 return file_url
49
50 def upload_file(file_name, s3=s3 ,acl="public-read"):
51 bucket_name = os.environ.get('TGM_BUCKET_NAME')
52 #opens file, reads it, and uploads it to the S3 bucket.
53 try:
54 with open(file_name, 'rb') as data:
55 s3.upload_fileobj(data,bucket_name,file_name,ExtraArgs={"ACL": acl,"ContentType": file_name.split(".")[-1]})
56 except:
57 logError('ERROR_S3_UPLOAD of '+file_name)
58
59 file_url = "https://s3.ap-south-1.amazonaws.com/"+bucket_name+"/"+file_name
60 return file_url | 16 - warning: bad-indentation
19 - warning: bad-indentation
20 - warning: bad-indentation
21 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
30 - warning: bad-indentation
31 - warning: bad-indentation
32 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
35 - warning: bad-indentation
39 - warning: bad-indentation
41 - warning: bad-indentation
42 - warning: bad-indentation
43 - warning: bad-indentation
44 - warning: bad-indentation
45 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
51 - warning: bad-indentation
53 - warning: bad-indentation
54 - warning: bad-indentation
55 - warning: bad-indentation
56 - warning: bad-indentation
57 - warning: bad-indentation
59 - warning: bad-indentation
60 - warning: bad-indentation
34 - warning: bare-except
28 - warning: missing-timeout
38 - warning: redefined-outer-name
44 - warning: bare-except
50 - error: function-redefined
50 - warning: redefined-outer-name
56 - warning: bare-except
|
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*- #
3 from __future__ import unicode_literals
4
5 AUTHOR = 'Georges Dubus'
6 SITENAME = 'Compile-toi toi même'
7 SITESUBTITLE = u'(Georges Dubus)' # TODO: remove in next version ?
8 SITEURL = ''
9 ABSOLUTE_SITEURL = SITEURL # TODO: remove
10
11 TIMEZONE = 'Europe/Paris'
12
13 DEFAULT_LANG = 'en'
14 LOCALE = ('en_US.UTF-8', 'fr_FR.UTF8') # TODO: toujours d'actualité ?
15
16 THEME = 'stolenidea'
17
18 # Feed generation is usually not desired when developing
19 FEED_ALL_ATOM = None
20 CATEGORY_FEED_ATOM = None
21 TRANSLATION_FEED_ATOM = None
22
23 MENUITEMS = (
24 ('Archives', SITEURL + '/archives.html'),
25 ('Tags', SITEURL + '/tags.html')
26 )
27
28 # Social widget
29 SOCIAL = (
30 ('Github', 'https://github.com/madjar'),
31 ('Twitter', 'http://twitter.com/georgesdubus'),
32 ('Google+', 'https://plus.google.com/u/0/104750974388692229541'),
33 )
34 # TWITTER_USERNAME = 'georgesdubus'
35
36 DEFAULT_PAGINATION = 10 # TODO: voir si je dois modifier quelque chose pour ça
37
38 PATH = ('content')
39 STATIC_PATHS = ['CNAME', 'images', 'slides', '.well-known', '_config.yml']
40 ARTICLE_EXCLUDES = ['slides']
41
42 # TODO : use buildout to handle the plugin deps ?
43 PLUGIN_PATHS = ['plugins']
44 PLUGINS = ['pelican_youtube']
45
46
47 # Uncomment following line if you want document-relative URLs when developing
48 #RELATIVE_URLS = True
| 7 - warning: fixme
9 - warning: fixme
14 - warning: fixme
36 - warning: fixme
42 - warning: fixme
7 - warning: redundant-u-string-prefix
|
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 @author: efourrier
5
6 Purpose : The purpose of this class is too automaticely transfrom a DataFrame
7 into a numpy ndarray in order to use an aglorithm
8
9 """
10
11
12 #########################################################
13 # Import modules and global helpers
14 #########################################################
15
16 from autoc.explorer import DataExploration, pd
17 import numpy as np
18 from numpy.random import permutation
19 from autoc.utils.helpers import cserie
20 from autoc.exceptions import NumericError
21
22
23
24
25 class PreProcessor(DataExploration):
26 subtypes = ['text_raw', 'text_categorical', 'ordinal', 'binary', 'other']
27
28 def __init__(self, *args, **kwargs):
29 super(PreProcessor, self).__init__(*args, **kwargs)
30 self.long_str_cutoff = 80
31 self.short_str_cutoff = 30
32 self.perc_unique_cutoff = 0.2
33 self.nb_max_levels = 20
34
35 def basic_cleaning(self,filter_nacols=True, drop_col=None,
36 filter_constantcol=True, filer_narows=True,
37 verbose=True, filter_rows_duplicates=True, inplace=False):
38 """
39 Basic cleaning of the data by deleting manymissing columns,
40 constantcol, full missing rows, and drop_col specified by the user.
41 """
42
43
44 col_to_remove = []
45 index_to_remove = []
46 if filter_nacols:
47 col_to_remove += self.nacols_full
48 if filter_constantcol:
49 col_to_remove += list(self.constantcol())
50 if filer_narows:
51 index_to_remove += cserie(self.narows_full)
52 if filter_rows_duplicates:
53 index_to_remove += cserie(self.data.duplicated())
54 if isinstance(drop_col, list):
55 col_to_remove += drop_col
56 elif isinstance(drop_col, str):
57 col_to_remove += [drop_col]
58 else:
59 pass
60 col_to_remove = list(set(col_to_remove))
61 index_to_remove = list(set(index_to_remove))
62 if verbose:
63 print("We are removing the folowing columns : {}".format(col_to_remove))
64 print("We are removing the folowing rows : {}".format(index_to_remove))
65 if inplace:
66 return self.data.drop(index_to_remove).drop(col_to_remove, axis=1)
67 else:
68 return self.data.copy().drop(index_to_remove).drop(col_to_remove, axis=1)
69
70 def _infer_subtype_col(self, colname):
71 """ This fonction tries to infer subtypes in order to preprocess them
72 better for skicit learn. You can find the different subtypes in the class
73 variable subtypes
74
75 To be completed ....
76 """
77 serie_col = self.data.loc[:, colname]
78 if serie_col.nunique() == 2:
79 return 'binary'
80 elif serie_col.dtype.kind == 'O':
81 if serie_col.str.len().mean() > self.long_str_cutoff and serie_col.nunique()/len(serie_col) > self.perc_unique_cutoff:
82 return "text_long"
83 elif serie_col.str.len().mean() <= self.short_str_cutoff and serie_col.nunique() <= self.nb_max_levels:
84 return 'text_categorical'
85 elif self.is_numeric(colname):
86 if serie_col.dtype == int and serie_col.nunique() <= self.nb_max_levels:
87 return "ordinal"
88 else :
89 return "other"
90
91 def infer_subtypes(self):
92 """ Apply _infer_subtype_col to the whole DataFrame as a dictionnary """
93 return {col: {'dtype': self.data.loc[:,col].dtype, 'subtype':self._infer_subtype_col(col)} for col in self.data.columns}
94
95
96 def infer_categorical_str(self, colname, nb_max_levels=10, threshold_value=0.01):
97 """ Returns True if we detect in the serie a factor variable
98 A string factor is based on the following caracteristics :
99 ther percentage of unicity perc_unique = 0.05 by default.
100 We follow here the definition of R factors variable considering that a
101 factor variable is a character variable that take value in a list a levels
102
103 Arguments
104 ----------
105 nb_max_levels: int
106 the max nb of levels you fix for a categorical variable
107 threshold_value : float
108 the nb of of unique value in percentage of the dataframe length
109 """
110 # False for numeric columns
111 if threshold_value:
112 max_levels = max(nb_max_levels, threshold_value * self._nrow)
113 else:
114 max_levels = nb_max_levels
115 if self.is_numeric(colname):
116 return False
117 # False for categorical columns
118 if self.data.loc[:, colname].dtype == "category":
119 return False
120 unique_value = set()
121 for i, v in self.data.loc[:, colname], iteritems():
122 if len(unique_value) >= max_levels:
123 return False
124 else:
125 unique_value.add(v)
126 return True
127
128 def get_factors(self, nb_max_levels=10, threshold_value=None, index=False):
129 """ Return a list of the detected factor variable, detection is based on
130 ther percentage of unicity perc_unique = 0.05 by default.
131 We follow here the definition of R factors variable considering that a
132 factor variable is a character variable that take value in a list a levels
133
134 this is a bad implementation
135
136
137 Arguments
138 ----------
139 nb_max_levels: int
140 the max nb of levels you fix for a categorical variable.
141 threshold_value : float
142 the nb of of unique value in percentage of the dataframe length.
143 index: bool
144 False, returns a list, True if you want an index.
145
146
147 """
148 res = self.data.apply(lambda x: self.infer_categorical_str(x))
149 if index:
150 return res
151 else:
152 return cserie(res)
153
154 def factors_to_categorical(self, inplace=True, verbose=True, *args, **kwargs):
155 factors_col = self.get_factors(*args, **kwargs)
156 if verbose:
157 print("We are converting following columns to categorical :{}".format(
158 factors_col))
159 if inplace:
160 self.df.loc[:, factors_col] = self.df.loc[:, factors_col].astype(category)
161 else:
162 return self.df.loc[:, factors_col].astype(category)
163
164 def remove_category(self, colname, nb_max_levels, replace_value='other', verbose=True):
165 """ Replace a variable with too many categories by grouping minor categories to one """
166 if self.data.loc[:, colname].nunique() < nb_max_levels:
167 if verbose:
168 print("{} has not been processed because levels < {}".format(
169 colname, nb_max_levels))
170 else:
171 if self.is_numeric(colname):
172 raise NumericError(
173 '{} is a numeric columns you cannot use this function'.format())
174 top_levels = self.data.loc[
175 :, colname].value_counts[0:nb_max_levels].index
176 self.data.loc[~self.data.loc[:, colname].isin(
177 top_levels), colname] = replace_value
| 29 - refactor: super-with-arguments
35 - refactor: too-many-arguments
35 - refactor: too-many-positional-arguments
65 - refactor: no-else-return
78 - refactor: no-else-return
81 - refactor: no-else-return
70 - refactor: inconsistent-return-statements
121 - error: undefined-variable
122 - refactor: no-else-return
121 - warning: unused-variable
148 - warning: unnecessary-lambda
149 - refactor: no-else-return
128 - warning: unused-argument
128 - warning: unused-argument
154 - warning: keyword-arg-before-vararg
160 - error: undefined-variable
162 - error: undefined-variable
154 - refactor: inconsistent-return-statements
173 - error: too-few-format-args
16 - warning: unused-import
17 - warning: unused-import
18 - warning: unused-import
|
1 import seaborn as sns
2 import matplotlib.pyplot as plt
3
4
5 def plot_corrmatrix(df, square=True, linewidths=0.1, annot=True,
6 size=None, figsize=(12, 9), *args, **kwargs):
7 """
8 Plot correlation matrix of the dataset
9 see doc at https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html#seaborn.heatmap
10
11 """
12 sns.set(context="paper", font="monospace")
13 f, ax = plt.subplots(figsize=figsize)
14 sns.heatmap(df.corr(), vmax=1, square=square, linewidths=linewidths,
15 annot=annot, annot_kws={"size": size}, *args, **kwargs)
| 5 - refactor: too-many-arguments
5 - refactor: too-many-positional-arguments
5 - warning: keyword-arg-before-vararg
13 - warning: unused-variable
13 - warning: unused-variable
|
1 """
2 @author: efourrier
3
4 Purpose : This is a simple experimental class to detect outliers. This class
5 can be used to detect missing values encoded as outlier (-999, -1, ...)
6
7
8 """
9
10 from autoc.explorer import DataExploration, pd
11 import numpy as np
12 #from autoc.utils.helpers import cserie
13 from exceptions import NotNumericColumn
14
15
16 def iqr(ndarray, dropna=True):
17 if dropna:
18 ndarray = ndarray[~np.isnan(ndarray)]
19 return np.percentile(ndarray, 75) - np.percentile(ndarray, 25)
20
21
22 def z_score(ndarray, dropna=True):
23 if dropna:
24 ndarray = ndarray[~np.isnan(ndarray)]
25 return (ndarray - np.mean(ndarray)) / (np.std(ndarray))
26
27
28 def iqr_score(ndarray, dropna=True):
29 if dropna:
30 ndarray = ndarray[~np.isnan(ndarray)]
31 return (ndarray - np.median(ndarray)) / (iqr(ndarray))
32
33
34 def mad_score(ndarray, dropna=True):
35 if dropna:
36 ndarray = ndarray[~np.isnan(ndarray)]
37 return (ndarray - np.median(ndarray)) / (np.median(np.absolute(ndarray - np.median(ndarray))) / 0.6745)
38
39
40 class OutliersDetection(DataExploration):
41 """
42 this class focuses on identifying outliers
43
44 Parameters
45 ----------
46 data : DataFrame
47
48 Examples
49 --------
50 * od = OutliersDetection(data = your_DataFrame)
51 * od.structure() : global structure of your DataFrame
52 """
53
54 def __init__(self, *args, **kwargs):
55 super(OutliersDetection, self).__init__(*args, **kwargs)
56 self.strong_cutoff = {'cutoff_z': 6,
57 'cutoff_iqr': 6, 'cutoff_mad': 6}
58 self.basic_cutoff = {'cutoff_z': 3,
59 'cutoff_iqr': 2, 'cutoff_mad': 2}
60
61
62 def check_negative_value(self, colname):
63 """ this function will detect if there is at leat one
64 negative value and calculate the ratio negative postive/
65 """
66 if not self.is_numeric(colname):
67 NotNumericColumn("The serie should be numeric values")
68 return sum(serie < 0)
69
70 def outlier_detection_serie_1d(self, colname, cutoff_params, scores=[z_score, iqr_score, mad_score]):
71 if not self.is_numeric(colname):
72 raise("auto-clean doesn't support outliers detection for Non numeric variable")
73 keys = [str(func.__name__) for func in scores]
74 df = pd.DataFrame(dict((key, func(self.data.loc[:, colname]))
75 for key, func in zip(keys, scores)))
76 df['is_outlier'] = 0
77 for s in keys:
78 cutoff_colname = "cutoff_{}".format(s.split('_')[0])
79 index_outliers = np.absolute(df[s]) >= cutoff_params[cutoff_colname]
80 df.loc[index_outliers, 'is_outlier'] = 1
81 return df
82
83 def check_negative_value(self):
84 """ this will return a the ratio negative/positve for each numeric
85 variable of the DataFrame
86 """
87 return self.data[self._dfnum].apply(lambda x: self.check_negative_value_serie(x.name))
88
89 def outlier_detection_1d(self, cutoff_params, subset=None,
90 scores=[z_score, iqr_score, mad_score]):
91 """ Return a dictionnary with z_score,iqr_score,mad_score as keys and the
92 associate dataframe of distance as value of the dictionnnary"""
93 df = self.data.copy()
94 numeric_var = self._dfnum
95 if subset:
96 df = df.drop(subset, axis=1)
97 df = df.loc[:, numeric_var] # take only numeric variable
98 # if remove_constant_col:
99 # df = df.drop(self.constantcol(), axis = 1) # remove constant variable
100 # df_outlier = pd.DataFrame()
101 for col in df:
102 df_temp = self.outlier_detection_serie_1d(col, cutoff_params, scores)
103 df_temp.columns = [col + '_' +
104 col_name for col_name in df_temp.columns]
105 #df_outlier = pd.concat([df_outlier, df_temp], axis=1)
106 return df_temp
| 55 - refactor: super-with-arguments
68 - error: undefined-variable
70 - warning: dangerous-default-value
72 - error: raising-bad-type
83 - error: function-redefined
89 - warning: dangerous-default-value
|
1 __all__ = ["explorer", "naimputer"]
2 from .explorer import DataExploration
3 from .naimputer import NaImputer
4 from .preprocess import PreProcessor
5 from .utils.getdata import get_dataset
6 # from .preprocess import PreProcessor
| 2 - error: relative-beyond-top-level
3 - error: relative-beyond-top-level
4 - error: relative-beyond-top-level
5 - error: relative-beyond-top-level
1 - error: undefined-all-variable
1 - error: undefined-all-variable
2 - warning: unused-import
3 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
|
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 @author: efourrier
5
6 Purpose : Get data from https://github.com/ericfourrier/autoc-datasets
7
8 """
9 import pandas as pd
10
11
12
13 def get_dataset(name, *args, **kwargs):
14 """Get a dataset from the online repo
15 https://github.com/ericfourrier/autoc-datasets (requires internet).
16 Parameters
17 ----------
18 name : str
19 Name of the dataset 'name.csv'
20 """
21 path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
22 return pd.read_csv(path, *args, **kwargs)
| Clean Code: No Issues Detected
|
1 from setuptools import setup, find_packages
2
3
4 def readme():
5 with open('README.md') as f:
6 return f.read()
7
8 setup(name='autoc',
9 version="0.1",
10 description='autoc is a package for data cleaning exploration and modelling in pandas',
11 long_description=readme(),
12 author=['Eric Fourrier'],
13 author_email='ericfourrier0@gmail.com',
14 license='MIT',
15 url='https://github.com/ericfourrier/auto-cl',
16 packages=find_packages(),
17 test_suite='test',
18 keywords=['cleaning', 'preprocessing', 'pandas'],
19 install_requires=[
20 'numpy>=1.7.0',
21 'pandas>=0.15.0',
22 'seaborn>=0.5',
23 'scipy>=0.14']
24 )
| 5 - warning: unspecified-encoding
|
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 @author: efourrier
5
6 Purpose : File with all custom exceptions
7 """
8
9 class NotNumericColumn(Exception):
10 """ The column should be numeric """
11 pass
12
13 class NumericError(Exception):
14 """ The column should not be numeric """
15 pass
16
17 # class NotFactor
| 11 - warning: unnecessary-pass
15 - warning: unnecessary-pass
|
1 #!/usr/bin/python
2
3 import sys
4 import csv
5
6 reader = csv.reader(sys.stdin, delimiter='\t')
7
8 for line in reader:
9 post_id = line[0]
10 post_type = line[5]
11 abs_parent_id = line[7]
12 post_length = len(line[4])
13
14 if post_id == "id":
15 continue
16
17 if post_type[0] == "q": # i.e. if the post is a "question"
18 print post_id ,"\t", "1", "\t", post_length # here, "1" indicates "question"
19
20 if post_type[0] == "a": # i.e. if the post is an "answer"
21 print abs_parent_id, "\t", "2", "\t", post_length
22 # here "2" indicates "answer". The double keys (id and "1", "2") will make sure that an answer always comes after the corresponding question
| 18 - error: syntax-error
|
1 #!/usr/bin/python
2
3 import sys
4
5 oldAuthor = None # save the old author's id
6 hourList = [] # save the list of hours that an author makes posts
7
8 for line in sys.stdin:
9 data = line.strip().split("\t")
10
11 author, hour = data
12
13 if oldAuthor and author!=oldAuthor:
14 # if the author changes to a new author, determine the hours of highest frequency, print each of them out
15 LstOfMostFreqHours = set([x for x in hourList if all([hourList.count(x)>=hourList.count(y) for y in hourList])])
16 for i in LstOfMostFreqHours:
17 print oldAuthor,'\t', i
18 oldAuthor = author # set author to the new author
19 hourList = []
20
21 oldAuthor = author
22 hourList.append(hour)
23
24 if oldAuthor != None:
25 # for the last author, determine the hours of highest frequency, print each of them out
26 LstOfMostFreqHours = set([x for x in hourList if all([hourList.count(x)>=hourList.count(y) for y in hourList])])
27 for i in LstOfMostFreqHours:
28 print oldAuthor, "\t", i
29
| 17 - error: syntax-error
|
1 #!/usr/bin/python
2
3 import sys
4 import csv
5
6 reader = csv.reader(sys.stdin, delimiter='\t')
7
8 for line in reader:
9 author_id = line[3]
10 added_at = line[8]
11 if len(added_at) > 11:
12 hour = int(added_at[11] + added_at[12])
13 print author_id,"\t", hour
| 13 - error: syntax-error
|
1 #!/usr/bin/python
2
3 import sys
4 import csv
5
6 reader = csv.reader(sys.stdin, delimiter='\t')
7
8 for line in reader:
9 tag = line[2]
10
11 tag_list = tag.strip().split(' ')
12 for A_tag in tag_list:
13 print A_tag
| 13 - error: syntax-error
|
1 #!/usr/bin/python
2
3 import sys
4
5 oldQuestionNode = None # save the old question's node id
6
7 Student_IDs = [] # the list of question/answers/comment id's for a forum thread
8
9 for line in sys.stdin:
10 data = line.strip().split("\t")
11
12 question_id, author_id = data
13
14 if oldQuestionNode and oldQuestionNode != question_id:
15 # print the old question's node id, and the list of student id
16 print oldQuestionNode, "\t", Student_IDs
17
18 oldQuestionNode = question_id # set question node ID to that of the new question
19 Student_IDs = [author_id]
20
21 elif oldQuestionNode:
22 Student_IDs.append(author_id)
23 else:
24 oldQuestionNode = question_id
25 Student_IDs.append(author_id)
26
27 if oldQuestionNode != None:
28 # for the last question, print question node id, and student IDs
29 print oldQuestionNode, "\t", Student_IDs
| 16 - error: syntax-error
|
1 #!/usr/bin/python
2
3 import sys
4
5 oldQuestionNode = None # save the old question's node id
6 oldQuestionLength = 0 # save the old question's length
7 AnsLengthList = [] # the list of the length of answers for a question
8
9 for line in sys.stdin:
10 data = line.strip().split("\t")
11
12 question_id, post_type, post_length = data
13
14 if oldQuestionNode and oldQuestionNode != question_id: # i.e. it's a new question
15 # print the old question's node id, question length, avg answer length
16 if AnsLengthList == []:
17 print oldQuestionNode,"\t",oldQuestionLength,"\t", 0
18 else:
19 print oldQuestionNode,"\t",oldQuestionLength,"\t", sum(AnsLengthList)/len(AnsLengthList)
20
21
22 oldQuestionNode = question_id # set question node ID to that of the new question
23 oldQuestionLength = float(post_length)
24 AnsLengthList = []
25
26 elif oldQuestionNode:
27 AnsLengthList.append(float(post_length))
28 else:
29 oldQuestionNode = question_id
30 oldQuestionLength =float(post_length)
31
32 if oldQuestionNode != None:
33 # for the last question, print id, question length, avg answer length
34 if AnsLengthList == []:
35 print oldQuestionNode,"\t",oldQuestionLength,"\t", 0
36 else:
37 print oldQuestionNode,"\t",oldQuesitionLength,"\t", sum(AnsLengthList)/len(AnsLengthList)
| 17 - error: syntax-error
|
1 #!/usr/bin/python
2
3 import sys
4
5 oldTag = None # save the oldTag
6 oldTagCount = 0 # save the oldTag's Count
7 Top10Tag = [] # the list of top 10 tags
8 Top10TagCount = [] # the list of top 1 tags' counts
9
10 for line in sys.stdin:
11 tag = line
12
13 if oldTag and oldTag != tag:
14 # check if the old tag's count beats the current 10th tag
15 # if so, replace the current 10th tag, and its count, with those of the old tag
16
17 if len(Top10TagCount) == 10:
18 if oldTagCount > min(Top10TagCount) :
19 Top10Tag[Top10TagCount.index(min(Top10TagCount))]=oldTag
20 Top10TagCount[Top10TagCount.index(min(Top10TagCount))]=oldTagCount
21 else:
22 Top10Tag.append(oldTag)
23 Top10TagCount.append(oldTagCount)
24
25 oldTag = tag # set tag to the new one
26 oldTagCount = 0
27
28 oldTag = tag
29 oldTagCount = oldTagCount+1
30
31
32 if oldTag != None:
33 # for the last tag, print id, question length, avg answer length
34 # check if the old tag's count beats the current 10th tag
35 # if so, replace the current 10th tag, and its count, with those of the old tag
36 if oldTagCount > min(Top10TagCount) :
37 Top10Tag[Top10TagCount.index(min(Top10TagCount))]=oldTag
38 Top10TagCount[Top10TagCount.index(min(Top10TagCount))]=oldTagCount
39
40 # Sort the final top 10 list, and print out
41 for i in range(10):
42
43 print Top10Tag[Top10TagCount.index(max(Top10TagCount))], "\t", max(Top10TagCount)
44
45 del Top10Tag[Top10TagCount.index(max(Top10TagCount))]
46 del Top10TagCount[Top10TagCount.index(max(Top10TagCount))]
| 43 - error: syntax-error
|
1 #!/usr/bin/python
2
3 import sys
4 import csv
5
6 reader = csv.reader(sys.stdin, delimiter='\t')
7
8 for line in reader:
9 post_id = line[0]
10 post_type = line[5]
11 author_id = line[3]
12 abs_parent_id = line[7]
13
14 if post_id == "id":
15 continue
16
17 if post_type[0] == "q": # i.e. if the post is a "question"
18 print post_id ,"\t", author_id
19
20 if post_type[0] != "q": # i.e. if the post is an "answer" or "comment"
21 print abs_parent_id, "\t", author_id
| 18 - error: syntax-error
|
1 from python_graphql_client import GraphqlClient
2
3 API_KEY = '5f8fbc2aa23e93716e7c621b'
4 client = GraphqlClient(endpoint="https://staging-api.chargetrip.io/graphql")
5 client.headers = {
6 'x-client-id': API_KEY
7 }
8
9 query = """
10 query stationListAll ($page: Int!) {
11 stationList(size: 100, page: $page) {
12 id
13 external_id
14 country_code
15 party_id
16 name
17 address
18 city
19 postal_code
20 state
21 country
22 coordinates {
23 latitude
24 longitude
25 }
26 related_locations {
27 latitude
28 longitude
29 }
30 parking_type
31 evses {
32 uid
33 evse_id
34 status
35 status_schedule {
36 period_begin
37 period_end
38 status
39 }
40 capabilities
41 connectors {
42 id
43 standard
44 format
45 power_type
46 max_voltage
47 max_amperage
48 max_electric_power
49 power
50 tariff_ids
51 terms_and_conditions
52 last_updated
53 properties
54 }
55 floor_level
56 coordinates {
57 latitude
58 longitude
59 }
60 physical_reference
61 parking_restrictions
62 images {
63 url
64 thumbnail
65 category
66 type
67 width
68 height
69 }
70 last_updated
71 parking_cost
72 properties
73 }
74 directions {
75 language
76 text
77 }
78 operator {
79 id
80 external_id
81 name
82 website
83 logo {
84 url
85 thumbnail
86 category
87 type
88 width
89 height
90 }
91 country
92 contact {
93 phone
94 email
95 website
96 facebook
97 twitter
98 properties
99 }
100 }
101 suboperator {
102 id
103 name
104 }
105 owner {
106 id
107 name
108 }
109 facilities
110 time_zone
111 opening_times {
112 twentyfourseven
113 regular_hours {
114 weekday
115 period_begin
116 period_end
117 }
118 exceptional_openings {
119 period_begin
120 period_end
121 }
122 exceptional_closings {
123 period_begin
124 period_end
125 }
126 }
127 charging_when_closed
128 images {
129 url
130 thumbnail
131 category
132 type
133 width
134 height
135 }
136 last_updated
137 location {
138 type
139 coordinates
140 }
141 elevation
142 chargers {
143 standard
144 power
145 price
146 speed
147 status {
148 free
149 busy
150 unknown
151 error
152 }
153 total
154 }
155 physical_address {
156 continent
157 country
158 county
159 city
160 street
161 number
162 postalCode
163 what3Words
164 formattedAddress
165 }
166 amenities
167 properties
168 realtime
169 power
170 speed
171 status
172 review {
173 rating
174 count
175 }
176 }
177 }
178 """
179 variables = {"page": 1}
180 result = client.execute(query=query, variables=variables, verify=False)
181
182 print(result) | Clean Code: No Issues Detected
|
1 import os
2 import json
3
4 filepath = r"/home/axel/Documents/electralign-data/stations-all.json"
5 newData = {"data": {"stationList": []}}
6
7 if os.path.isfile(filepath):
8 with open(filepath, 'r') as file:
9 print("File opened")
10 data = json.load(file)
11 print("Data loaded")
12 newData["data"]["stationList"] = data
13 print("new data set")
14
15 filepath = r"/home/axel/Documents/electralign-data/stations-all-fixed.json"
16 with open(filepath, 'w') as file:
17 print("New file opened")
18 json.dump(newData, file)
19 print("Done saving data")
| 8 - warning: unspecified-encoding
16 - warning: unspecified-encoding
|
1 import os
2 import json
3
4 path = r"/home/axel/Documents/electralign-data/"
5 stations = []
6
7 for filename in sorted(os.listdir(path)):
8 filepath = os.path.join(path, filename)
9 if os.path.isfile(filepath):
10 print(filename)
11 with open(filepath, 'r') as file:
12 data = json.load(file)
13 stations += data
14
15
16 with open(path+'stations-all.json', 'w') as file:
17 json.dump(stations, file)
18
19 print("Saved " + str(len(stations)) + " stations")
| 11 - warning: unspecified-encoding
16 - warning: unspecified-encoding
|
1 # Copyright 2017-present Open Networking Foundation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from abc import abstractmethod
16
17 import grpc
18 from p4 import p4runtime_pb2
19 from p4.tmp import p4config_pb2
20
21 from p4info import p4browser
22
23
24 def buildSetPipelineRequest(p4info, device_config, device_id):
25 request = p4runtime_pb2.SetForwardingPipelineConfigRequest()
26 config = request.configs.add()
27 config.device_id = device_id
28 config.p4info.CopyFrom(p4info)
29 config.p4_device_config = device_config.SerializeToString()
30 request.action = p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
31 return request
32
33
34 def buildTableEntry(p4info_browser,
35 table_name,
36 match_fields={},
37 action_name=None,
38 action_params={}):
39 table_entry = p4runtime_pb2.TableEntry()
40 table_entry.table_id = p4info_browser.get_tables_id(table_name)
41 if match_fields:
42 table_entry.match.extend([
43 p4info_browser.get_match_field_pb(table_name, match_field_name, value)
44 for match_field_name, value in match_fields.iteritems()
45 ])
46 if action_name:
47 action = table_entry.action.action
48 action.action_id = p4info_browser.get_actions_id(action_name)
49 if action_params:
50 action.params.extend([
51 p4info_browser.get_action_param_pb(action_name, field_name, value)
52 for field_name, value in action_params.iteritems()
53 ])
54 return table_entry
55
56
57 class SwitchConnection(object):
58 def __init__(self, name, address='127.0.0.1:50051', device_id=0):
59 self.name = name
60 self.address = address
61 self.device_id = device_id
62 self.p4info = None
63 self.channel = grpc.insecure_channel(self.address)
64 # TODO Do want to do a better job managing stub?
65 self.client_stub = p4runtime_pb2.P4RuntimeStub(self.channel)
66
67 @abstractmethod
68 def buildDeviceConfig(self, **kwargs):
69 return p4config_pb2.P4DeviceConfig()
70
71 def SetForwardingPipelineConfig(self, p4info_file_path, dry_run=False, **kwargs):
72 p4info_broswer = p4browser.P4InfoBrowser(p4info_file_path)
73 device_config = self.buildDeviceConfig(**kwargs)
74 request = buildSetPipelineRequest(p4info_broswer.p4info, device_config, self.device_id)
75 if dry_run:
76 print "P4 Runtime SetForwardingPipelineConfig:", request
77 else:
78 self.client_stub.SetForwardingPipelineConfig(request)
79 # Update the local P4 Info reference
80 self.p4info_broswer = p4info_broswer
81
82 def buildTableEntry(self,
83 table_name,
84 match_fields={},
85 action_name=None,
86 action_params={}):
87 return buildTableEntry(self.p4info_broswer, table_name, match_fields, action_name, action_params)
88
89 def WriteTableEntry(self, table_entry, dry_run=False):
90 request = p4runtime_pb2.WriteRequest()
91 request.device_id = self.device_id
92 update = request.updates.add()
93 update.type = p4runtime_pb2.Update.INSERT
94 update.entity.table_entry.CopyFrom(table_entry)
95 if dry_run:
96 print "P4 Runtime Write:", request
97 else:
98 print self.client_stub.Write(request)
99
100 def ReadTableEntries(self, table_name, dry_run=False):
101 request = p4runtime_pb2.ReadRequest()
102 request.device_id = self.device_id
103 entity = request.entities.add()
104 table_entry = entity.table_entry
105 table_entry.table_id = self.p4info_broswer.get_tables_id(table_name)
106 if dry_run:
107 print "P4 Runtime Read:", request
108 else:
109 for response in self.client_stub.Read(request):
110 yield response
111
112 def ReadDirectCounters(self, table_name=None, counter_name=None, table_entry=None, dry_run=False):
113 request = p4runtime_pb2.ReadRequest()
114 request.device_id = self.device_id
115 entity = request.entities.add()
116 counter_entry = entity.direct_counter_entry
117 if counter_name:
118 counter_entry.counter_id = self.p4info_broswer.get_direct_counters_id(counter_name)
119 else:
120 counter_entry.counter_id = 0
121 # TODO we may not need this table entry
122 if table_name:
123 table_entry.table_id = self.p4info_broswer.get_tables_id(table_name)
124 counter_entry.table_entry.CopyFrom(table_entry)
125 counter_entry.data.packet_count = 0
126 if dry_run:
127 print "P4 Runtime Read:", request
128 else:
129 for response in self.client_stub.Read(request):
130 print response
| 76 - error: syntax-error
|
1 # Copyright 2017-present Open Networking Foundation
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 import re
16
17 import google.protobuf.text_format
18 from p4 import p4runtime_pb2
19 from p4.config import p4info_pb2
20
21
22 class P4InfoBrowser(object):
23 def __init__(self, p4_info_filepath):
24 p4info = p4info_pb2.P4Info()
25 # Load the p4info file into a skeleton P4Info object
26 with open(p4_info_filepath) as p4info_f:
27 google.protobuf.text_format.Merge(p4info_f.read(), p4info)
28 self.p4info = p4info
29
30 def get(self, entity_type, name=None, id=None):
31 if name is not None and id is not None:
32 raise AssertionError("name or id must be None")
33
34 for o in getattr(self.p4info, entity_type):
35 pre = o.preamble
36 if name:
37 if (pre.name == name or pre.alias == name):
38 return o
39 else:
40 if pre.id == id:
41 return o
42
43 if name:
44 raise AttributeError("Could not find %r of type %s" % (name, entity_type))
45 else:
46 raise AttributeError("Could not find id %r of type %s" % (id, entity_type))
47
48 def get_id(self, entity_type, name):
49 return self.get(entity_type, name=name).preamble.id
50
51 def get_name(self, entity_type, id):
52 return self.get(entity_type, id=id).preamble.name
53
54 def get_alias(self, entity_type, id):
55 return self.get(entity_type, id=id).preamble.alias
56
57 def __getattr__(self, attr):
58 # Synthesize convenience functions for name to id lookups for top-level entities
59 # e.g. get_table_id() or get_action_id()
60 m = re.search("^get_(\w+)_id$", attr)
61 if m:
62 primitive = m.group(1)
63 return lambda name: self.get_id(primitive, name)
64
65 # Synthesize convenience functions for id to name lookups
66 m = re.search("^get_(\w+)_name$", attr)
67 if m:
68 primitive = m.group(1)
69 return lambda id: self.get_name(primitive, id)
70
71 raise AttributeError("%r object has no attribute %r" % (self.__class__, attr))
72
73 # TODO remove
74 def get_table_entry(self, table_name):
75 t = self.get(table_name, "table")
76 entry = p4runtime_pb2.TableEntry()
77 entry.table_id = t.preamble.id
78 entry
79 pass
80
81 def get_match_field(self, table_name, match_field_name):
82 for t in self.p4info.tables:
83 pre = t.preamble
84 if pre.name == table_name:
85 for mf in t.match_fields:
86 if mf.name == match_field_name:
87 return mf
88
89 def get_match_field_id(self, table_name, match_field_name):
90 return self.get_match_field(table_name,match_field_name).id
91
92 def get_match_field_pb(self, table_name, match_field_name, value):
93 p4info_match = self.get_match_field(table_name, match_field_name)
94 bw = p4info_match.bitwidth
95 p4runtime_match = p4runtime_pb2.FieldMatch()
96 p4runtime_match.field_id = p4info_match.id
97 # TODO switch on match type and map the value into the appropriate message type
98 match_type = p4info_pb2._MATCHFIELD_MATCHTYPE.values_by_number[
99 p4info_match.match_type].name
100 if match_type == 'EXACT':
101 exact = p4runtime_match.exact
102 exact.value = value
103 elif match_type == 'LPM':
104 lpm = p4runtime_match.lpm
105 lpm.value = value[0]
106 lpm.prefix_len = value[1]
107 # TODO finish cases and validate types and bitwidth
108 # VALID = 1;
109 # EXACT = 2;
110 # LPM = 3;
111 # TERNARY = 4;
112 # RANGE = 5;
113 # and raise exception
114 return p4runtime_match
115
116 def get_action_param(self, action_name, param_name):
117 for a in self.p4info.actions:
118 pre = a.preamble
119 if pre.name == action_name:
120 for p in a.params:
121 if p.name == param_name:
122 return p
123 raise AttributeError("%r has no attribute %r" % (action_name, param_name))
124
125
126 def get_action_param_id(self, action_name, param_name):
127 return self.get_action_param(action_name, param_name).id
128
129 def get_action_param_pb(self, action_name, param_name, value):
130 p4info_param = self.get_action_param(action_name, param_name)
131 #bw = p4info_param.bitwidth
132 p4runtime_param = p4runtime_pb2.Action.Param()
133 p4runtime_param.param_id = p4info_param.id
134 p4runtime_param.value = value # TODO make sure it's the correct bitwidth
135 return p4runtime_param | 73 - warning: fixme
97 - warning: fixme
107 - warning: fixme
134 - warning: fixme
60 - warning: anomalous-backslash-in-string
66 - warning: anomalous-backslash-in-string
22 - refactor: useless-object-inheritance
26 - warning: unspecified-encoding
30 - warning: redefined-builtin
37 - refactor: consider-using-in
43 - refactor: no-else-raise
51 - warning: redefined-builtin
54 - warning: redefined-builtin
78 - warning: pointless-statement
79 - warning: unnecessary-pass
81 - refactor: inconsistent-return-statements
98 - warning: protected-access
94 - warning: unused-variable
|
1 import tensorflow as tf
2 import numpy as np
3 import time
4
5 #help us to graph
6 import matplotlib
7 import matplotlib.pyplot as plt
8
9 #import datasets we need by scikit-learn
10 from sklearn.datasets.samples_generator import make_blobs
11 from sklearn.datasets.samples_generator import make_circles
12 #fuck Here I install scipy a matherical package
13
14 #set up data type , here i choose blobs to make it simpler
15 DATA_TYPE = "blobs"
16
17 #Set up Number of clusters in train data , if we choose circle,2 is enough
18 K = 4
19 if(DATA_TYPE == "circle"):
20 K = 2
21 else:
22 K = 4
23
24 #Set up max of iterations , if condition is not met , here I choose 1000
25 MAX_ITERS = 1000
26
27 #To caculate the time we use , record the begining time
28 start = time.time()
29
30 #Since we have chosen four clusters , We have to give four center points for training data
31 centers = [(-2, -2), (-2, 1.5), (1.5, -2), (2, 1.5)]
32 #set up the training set
33 #for blobs:
34 #n_samples:number of data,which means we have 200 points
35 #centers = centers
36 #n_features = dimmension , here we choose plane so = 2
37 #cluster_std = std
38 #shuffle:if we mix up samples,here I choose false
39 #random_state:random seed
40 #for circles:
41 #noise: random noise data set up to the sample set
42 #factor: the ratio factor between circle data set
43 if(DATA_TYPE == "circle"):
44 data, features = make_circles(n_samples=200,shuffle=True,noise=None,factor=0.4)
45 else:
46 data, features = make_blobs(n_samples=200,centers=centers,n_features=2,cluster_std=0.8,shuffle=False,random_state=42)
47
48 #Draw the four centers
49 #.transpose[0]: x .transpose[1]: y
50 fig, ax = plt.subplots()
51 ax.scatter(np.asarray(centers).transpose()[0], np.asarray(centers).transpose()[1], marker = 'o', s = 250)
52 plt.show()
53 #Draw the training data
54 fig, ax = plt.subplots()
55 if(DATA_TYPE == "blobs"):
56 ax.scatter(np.asarray(centers).transpose()[0], np.asarray(centers).transpose()[1], marker = 'o', s = 250)
57 ax.scatter(data.transpose()[0],data.transpose()[1], marker = 'o', s = 100 , c = features, cmap =plt.cm.coolwarm)
58 plt.plot()
59 plt.show()
60
61 #Set up tf.Variable
62 #points = data
63 #cluster_assignments = each points 's cluster
64 #for example:
65 #cluster_assignments[13]=2 means 13th point belong cluster 2
66 N = len(data)
67 points = tf.Variable(data)
68 cluster_assignments = tf.Variable(tf.zeros([N], dtype=tf.int64))
69
70 #centroids: each groups 's centroids
71 #tf.slice() really fuck up
72 #random pick 4 point after all
73 centroids = tf.Variable(tf.slice(points.initialized_value(), [0,0], [K,2]))
74
75 sess = tf.Session()
76 sess.run(tf.initialize_all_variables())
77
78 sess.run(centroids)
79
80 # Lost function and rep loop
81 #centroids = [[x1,y1],[x2,y2],[x3,y3],[x4,y4]] shape=[4,2]
82 #tf.tile(centroids, [N, 1]) = [N*[x1,y1], N*[x2,y2], N*[x3,y3], N*[x4,y4]] shape=[4N,2]
83 #rep_centroids = tf.reshape(tf.tile(centroids, [N,1]), [N,K,2]) = [ [N*[x1,y1]] , [N*[x2,y2]] , [N*[x3,y3]] , [N*[x4,y4]] ]
84 #The condition of stopping process is : "Centroids stop changing" :: did_assignments_change
85
86 rep_centroids = tf.reshape(tf.tile(centroids, [N,1]), [N,K,2])
87 rep_points = tf.reshape(tf.tile(points, [1, K]),[N, K, 2])
88 sum_squares = tf.reduce_sum(tf.square(rep_points - rep_centroids), reduction_indices=2)
89 best_centroids = tf.argmin(sum_squares, 1)
90 did_assignments_change = tf.reduce_any(tf.not_equal(best_centroids, cluster_assignments))
91
92 #total=[[all sum of points of group 1], [all sum of points of group 2], [all sum of points of group 3], [all sum of points of group 4]] shape=[4,2]
93 #count=[How many points of each group] shape = [4,1]
94 #total/count = [new centroids] shape = [4,1]
95 def bucket_mean(data, bucket_ids, num_buckets):
96 total = tf.unsorted_segment_sum(data, bucket_ids, num_buckets)
97 count = tf.unsorted_segment_sum(tf.ones_like(data), bucket_ids, num_buckets)
98 return total/count
99
100 means = bucket_mean(points, best_centroids, K)
101
102 #Do update
103 with tf.control_dependencies([did_assignments_change]):
104 do_updates = tf.group(centroids.assign(means), cluster_assignments.assign(best_centroids))
105
106 changed = True
107 iters = 0
108 fig, ax = plt.subplots()
109 if(DATA_TYPE == "blobs"):
110 colourindexes = [2,1,4,3]
111 else:
112 colourindexes = [2,1]
113
114 while changed and iters < MAX_ITERS:
115 fig, ax = plt.subplots()
116 iters +=1
117 [changed, _] = sess.run([did_assignments_change, do_updates])
118 [centers, assignments] = sess.run([centroids, cluster_assignments])
119 ax.scatter(sess.run(points).transpose()[0], sess.run(points).transpose()[1], marker = 'o', s = 200, c = assignments, cmap=plt.cm.coolwarm)
120 ax.scatter(centers[:,0], centers[:,1], marker = '^', s = 550, c=colourindexes, cmap=plt.cm.plasma)
121 ax.set_title("Iteration " + str(iters))
122 plt.savefig("kmeans" + str(iters) + ".png")
123
124 ax.scatter(sess.run(points).transpose()[0], sess.run(points).transpose()[1], marker='o', s=200, c=assignments, cmap=plt.cm.coolwarm)
125 plt.show()
126 end = time.time()
127 print("Found in %.2f seconds" %(end-start), iters, "iterations")
128 print("Centroids: ")
129 print(centers)
130 print("Cluster assignment", assignments) | 95 - warning: redefined-outer-name
6 - warning: unused-import
|
1 from sklearn import datasets
2 from sklearn.model_selection import train_test_split
3 from sklearn.neighbors import KNeighborsClassifier
4
5
6 iris = datasets.load_iris()
7 iris_X = iris.data
8 iris_y = iris.target
9
10 print("=====data=====")
11 print(iris_X)
12 print("===============")
13 print("data length : " + str(len(iris_X)))
14 print("====target====")
15 print(iris_y)
16 print("===============")
17 print("target length : " + str(len(iris_y)))
18 print("===============")
19 X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)
20
21 print(y_train)
22
23 knn = KNeighborsClassifier()
24 knn.fit(X_train, y_train)
25
26 print(knn.predict(X_test))
27 print(y_test) | Clean Code: No Issues Detected
|
1 class Trinangle:
2 def __init__(self,base,height):
3 self.base = base
4 self.height = height
5 def calculate_area(self):
6 area = 0.5 * self.base * self.height
7 print(f"Base: {self.base}, Height: {self.height}","Area = ",area)
8
9 t1 = Trinangle(10,20)
10 t1.calculate_area()
11 t2 = Trinangle(20,30)
12 t2.calculate_area() | 1 - refactor: too-few-public-methods
|
1 #Map Function
2
3 def square(a):
4 return a*a
5
6 num = [1,2,3,4,5]
7 result = list(map(square,num))
8 print(result)
9
10 # Filter function
11
12 num = [1,2,3,4,5]
13
14 result = list(filter(lambda x: x%2==0,num))
15 print(result)
| Clean Code: No Issues Detected
|
1 def add(a,b):
2 sum = a+b
3 return sum
4 result = add(20,30)
5 print("Result = ",result) | 2 - warning: redefined-builtin
|
1 #Stack
2 """
3 books = []
4 books.append("Learn C")
5 books.append("Learn C++")
6 books.append("Learn Java")
7 print(books)
8 books.pop()
9 print("Now the top book is :",books[-1])
10 print(books)
11 books.pop()
12 print("Now the top book is :",books[-1])
13 print(books)
14 books.pop()
15 if not books:
16 print("No books left")
17
18 """
19 #Queue
20 from collections import deque
21 bank = deque(["Alex","Sabuj","Sonia","Moeen"])
22 print(bank)
23 bank.popleft()
24 print(bank)
25 bank.popleft()
26 bank.popleft()
27 bank.popleft()
28 if not bank:
29 print("no person left") | Clean Code: No Issues Detected
|
1 # Using string Function
2 """
3 sampleStr = "Emma is good developer. Emma is a writer"
4 cnt = sampleStr.count("Emma")
5 print("Emma appeared",cnt,"times")
6 """
7
8 #Without Using String function
9
10 def count_emma(str):
11 print("Given String : ",str)
12 count = 0
13 for i in range(len(str) -1):
14 count += str[i: i+4] == 'Emma'
15 return count
16 count = count_emma("Emma is good devveloper. Emma is a writer")
17 print("Emma appeared ",count,"times") | 10 - warning: redefined-builtin
12 - warning: redefined-outer-name
|
1 """
2 num2 = int(input("Enter a number: "))
3 result = 20 / num2
4 print(result)
5 print("Done")
6 """
7 """
8 text = "Alex"
9 print(text)
10 print("Done")
11 """
12 """
13 try:
14 list = [20,0,32]
15 result = list[0] / list[3]
16 print(result)
17 print("Done")
18 except ZeroDivisionError:
19 print("Dividing by zero is not possible ")
20 except IndexError:
21 print("Index Error")
22 finally:
23 print("Thanks!!!!!!!!!!")
24 """
25 #Multiple exception hangle
26 """
27 try:
28 num1 = int(input("Enter First Number: "))
29 num2 = int(input("Enter the Second Number: "))
30 result = num1/num2
31 print(result)
32 except (ValueError,ZeroDivisionError):
33 print("You have entered incorrect input.")
34 finally:
35 print("Thanks!!!!!!!")
36 """
37 def voter (age):
38 if age < 18:
39 raise ValueError("Invalid Voter")
40 return "You are Allowed to vote"
41 try:
42 print(voter(17))
43 except ValueError as e:
44 print(e) | 7 - warning: pointless-string-statement
12 - warning: pointless-string-statement
26 - warning: pointless-string-statement
|
1 num = [1,2,3,4,5]
2
3 #[expression for item in list]
4
5 result = [x for x in num if x%2==0]
6 print(result)
| Clean Code: No Issues Detected
|
1 #Multi level inheritance
2
3 """
4 class A:
5 def display1(self):
6 print("I am inside A class")
7
8 class B(A):
9 def display2(self):
10 print("I am inside B class")
11
12 class C(B):
13 def display3(self):
14 super().display1()
15 super().display2()
16 print("I am inside C class")
17
18 ob1 = C()
19 ob1.display3()
20 """
21
22 #Multiple inheritance
23
24 class A:
25 def display(self):
26 print("I am inside A class")
27
28 class B:
29 def display(self):
30 print("I am inside B class")
31
32 class C(B,A):
33 pass
34
35 ob1 = C()
36 ob1.display()
| 24 - refactor: too-few-public-methods
28 - refactor: too-few-public-methods
32 - refactor: too-few-public-methods
|
1 def mergeList(list1, list2):
2 print("First List ", list1)
3 print("Second List ", list2)
4 thirdList = []
5 for num in list1:
6 if (num % 2 != 0):
7 thirdList.append(num)
8 for num in list2:
9 if (num % 2 == 0):
10 thirdList.append(num)
11 return thirdList
12 list1 = [10, 20, 35, 11, 27]
13 list2 = [13, 43, 33, 12, 24]
14
15 print("Result List is ", mergeList(list1, list2)) | 1 - warning: redefined-outer-name
1 - warning: redefined-outer-name
|
1
2 n = 3
3 for i in range(n + 1):
4 print((2 * i - 1) * " *")
| Clean Code: No Issues Detected
|
1 # 2 kinds of funmctions
2 """
3 Library -> print(), input()
4
5 userdefine -> make your own need
6 """
7 def add(a,b):
8 sum = a+b
9 print(sum)
10 def sub(x,y):
11 sub = x-y
12 print(sub)
13 add(10,15)
14 sub(15,7)
15 def message():
16 print("No parameter")
17 message()
| 8 - warning: redefined-builtin
11 - warning: redefined-outer-name
|
1 num = list(range(10))
2 print(num)
3 print(num[2])
4
5 num = list(range(2,5))
6 print(num)
7
8 num = list(range(2,101,2))
9 print(num) | Clean Code: No Issues Detected
|
1
2 def char(str):
3 for i in range(0, len(str), 1):
4 print("index[",i,"]", str[i])
5 str = input("Enter any name: ")
6
7 print("Print Single Charecter: ")
8 char(str)
9
10 """
11
12 def printEveIndexChar(str):
13 for i in range(0, len(str)-1, 2):
14 print("index[",i,"]", str[i] )
15
16 inputStr = "pynative"
17 print("Orginal String is ", inputStr)
18
19 print("Printing only even index chars")
20 printEveIndexChar(inputStr)
21
22 """ | 5 - warning: redefined-builtin
2 - warning: redefined-outer-name
10 - warning: pointless-string-statement
|
1 class Student:
2 roll = ""
3 gpa = ""
4 def __init__(self,roll,gpa):
5 self.roll = roll
6 self.gpa = gpa
7 def display(self):
8 print(f"Roll: {self.roll}, GPA: {self.gpa}")
9
10 rahim = Student(464,4.50)
11 rahim.display()
12
13 karim = Student(525,4.98)
14 karim.display() | 1 - refactor: too-few-public-methods
|
1 students = (
2
3 ("Alex Biswas",21,3.46),
4 ("Sabuj Chandra Das",22,3.69),
5 ("Ahad Islam Moeen",22,3.46),
6 )
7
8 print(students[0:]) | Clean Code: No Issues Detected
|
1 #Parents class , Super class, Base class
2 class Phone:
3 def call(self):
4 print("You can Call")
5
6 def message(self):
7 print("You can Message")
8
9 #Child class, Sub class, Derived class
10 class Samsung(Phone):
11 def photo(self):
12 print("You can Take Photo")
13
14 s = Samsung()
15 s.call()
16 s.message()
17 s.photo()
18
19 print(issubclass(Phone,Samsung)) | Clean Code: No Issues Detected
|
1
2 studentid = {
3
4 464 : "Alex Biswas",
5 525 : "Sabuj Chandra Das",
6 957 : "Sonia Akter",
7 770 : "Tasni Tasnim Nilima",
8 }
9 print(studentid.get(525,"Not a valid key")) | Clean Code: No Issues Detected
|
1 file = open("Hello.html","w")
2
3 file.write("<h1> This is a text</h1>")
4
5 file.close() | 1 - warning: unspecified-encoding
1 - refactor: consider-using-with
|
1 num1 = {1,2,3,4,5}
2 num2 = set([4,5,6])
3 num2.add(7)
4 num2.remove(4)
5 print(num1 | num2)
6 print(num1 & num2)
7 print(num1 - num2) | Clean Code: No Issues Detected
|
1 class Student:
2 roll = " "
3 gpa = " "
4
5 rahim = Student()
6 print(isinstance(rahim,Student))
7 rahim.roll = 101
8 rahim.gpa = 3.95
9 print(f"Roll: {rahim.roll}, GPA: {rahim.gpa}")
10
11 karim = Student()
12 print(isinstance(karim,Student))
13 karim.roll = 102
14 karim.gpa = 4.85
15 print(f"Roll: {karim.roll}, GPA: {karim.gpa}") | 1 - refactor: too-few-public-methods
|
1 """
2 import re
3 pattern = r"colour"
4 text = r"My favourite colour is Red"
5 match = re.search(pattern,text)
6 if match:
7 print(match.start())
8 print(match.end())
9 print(match.span())
10
11 """
12
13
14 #Search And Replace
15
16 """
17 import re
18 pattern = r"colour"
19 text = r"My favourite colour is Red. I love blue colour as well"
20 text1 = re.sub(pattern,"color",text,count=1)
21 print(text1)
22 """
23 #Metacharecter
24
25 import re
26 pattern = r"[A-Z] [a-z] [0-9]"
27
28 if re.match(pattern,"Ag0"):
29 print("Matched") | 16 - warning: pointless-string-statement
|
1 from area import *
2
3 rectangle_area(25,6)
4 triangle_area(10,15) | 1 - warning: wildcard-import
3 - error: undefined-variable
4 - error: undefined-variable
|
1 import random
2
3 for x in range(1,6):
4 guessNumber = int(input("Enter your guess between 1 to 5 : "))
5 randomNumber = random.randint(1,5)
6
7 if guessNumber == randomNumber:
8 print("You have won")
9 else:
10 print("You have loss", randomNumber) | Clean Code: No Issues Detected
|
1 """
2 def calculate(a,b):
3 return a*a + 2*a*b + b*b
4 lambda parameter : a*a + 2*a*b + b*b
5 print(calculate(2,3))
6 """
7 a = (lambda a,b : a*a + 2*a*b + b*b) (2,3)
8 print(a)
9 #another
10 def cube(x):
11 return x*x*x
12 a = (lambda x : x*x*x) (3)
13 print(a) | Clean Code: No Issues Detected
|
1 import pyttsx3
2 friend = pyttsx3.init()
3 friend.say('I can speak now')
4 friend.runAndWait() | Clean Code: No Issues Detected
|
1 #xargs
2 """
3 def student(id,name):
4 print(id,name)
5 student(191,"Alex Biswas")
6 """
7
8 """
9 def student(*details):
10 print(details)
11 student(191,"Alex",3.46)
12 student(192,"Alex",3.46)
13 """
14
15 """
16 def add(*numbers):
17 sum = 0
18 for num in numbers:
19 sum = sum + num
20 print(sum)
21 add(10,15)
22 add(10,15,20)
23 add(10,15,20,25)
24 """
25 #xxagrs
26 def student(**details):
27 print(details)
28
29 student(id=191,name="Alex") | 8 - warning: pointless-string-statement
15 - warning: pointless-string-statement
|
1 def isFirstLastsame(numl):
2 print("Given List is ",numl)
3 firstElement = numl[0]
4 lastElement = numl[-1]
5 if (firstElement == lastElement):
6 return True
7 else:
8 return False
9 numl = [10,15,12,17,19]
10 print("Result is ",isFirstLastsame(numl)) | 1 - warning: redefined-outer-name
5 - refactor: simplifiable-if-statement
5 - refactor: no-else-return
|
1
2 num = [10,20,30,40,50]
3 print(num)
4 """
5 index = 0
6 n = len(num)
7 while index<n:
8 print(num[index])
9 index = index+1
10 """
11 sum = 0
12 for x in num:
13 sum = sum+x
14 print(sum) | 11 - warning: redefined-builtin
4 - warning: pointless-string-statement
|
1
2 num = list(range(10))
3 print(num)
4
5 num = list(range(10))
6 j | 6 - warning: pointless-statement
6 - error: undefined-variable
|
1 #Regular method
2 """
3 a = 20
4 b = 15
5 print("a = ",a)
6 print("b = ",b)
7 temp = a #temp = 20
8 a = b #a = 15
9 b = temp # b = 15
10
11 print("After Swapping")
12 print("a = ",a)
13 print("b = ",b)
14 """
15 #Python Special Method
16 a = 20
17 b = 15
18 print("a = ",a)
19 print("b = ",b)
20 a, b = b, a
21 print("After Swapping")
22 print("a = ",a)
23 print("b = ",b) | Clean Code: No Issues Detected
|
1 def multiplication_or_sum(num1,num2):
2 product = num1 * num2
3 if product <= 1000:
4 return product
5 else:
6 return num1 + num2
7
8 num1 = int(input("Enter 1st integer number: "))
9 num2 = int(input("Enter 2nd integer number: "))
10 print("\n")
11 result = multiplication_or_sum(num1, num2)
12 print("The result is ", result)
| 1 - warning: redefined-outer-name
1 - warning: redefined-outer-name
3 - refactor: no-else-return
|
1 number = 7536
2 print("Given number", number)
3 while (number > 0):
4 digit = number % 10
5 number = number // 10
6 print(digit, end = " ") | Clean Code: No Issues Detected
|
1 file = open("student.txt","r")
2 #print(file.readable())
3 #text = file.read()
4 #print(text)
5 #size = len(text)
6 #print(size)
7 #text = file.readlines()
8 for line in file:
9 print(line)
10 #print(text)
11 file.close() | 1 - warning: unspecified-encoding
1 - refactor: consider-using-with
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.