code
stringlengths 20
13.2k
| label
stringlengths 21
6.26k
|
---|---|
1 # Merge revenue with managers on 'city': merge_by_city
2 merge_by_city = pd.merge(revenue, managers, on='city')
3
4 # Print merge_by_city
5 print(merge_by_city)
6
7 # Merge revenue with managers on 'branch_id': merge_by_id
8 merge_by_id = pd.merge(revenue, managers, on='branch_id')
9
10 # Print merge_by_id
11 print(merge_by_id)
12
13 # Add 'state' column to revenue: revenue['state']
14 revenue['state'] = ['TX','CO','IL','CA']
15
16 # Add 'state' column to managers: managers['state']
17 managers['state'] = ['TX','CO','CA', 'MO']
18
19 # Merge revenue & managers on 'branch_id', 'city', & 'state': combined
20 combined = pd.merge(revenue, managers, on=['branch_id', 'city','state'])
21
22 # Print combined
23 print(combined)
24
25 #matching columns are suffixed with _x, _y. This can be changed with 'suffixes = [..., ...]' arg
26
27
28 o2o = pd.merge(left=site, right=visited, left_on='name', right_on='site')
29 #This will handle 1-to-1, many-to-1 and many-to-many merges
30
31 # Merge revenue and sales: revenue_and_sales
32 revenue_and_sales = pd.merge(revenue, sales, how='right',on=['city', 'state'])
33
34 # Print revenue_and_sales
35 print(revenue_and_sales)
36
37 # Merge sales and managers: sales_and_managers
38 sales_and_managers = pd.merge(sales, managers, how='left',left_on=['city', 'state'], right_on=['branch', 'state'])
39
40 # Print sales_and_managers
41 print(sales_and_managers) | 2 - error: undefined-variable
2 - error: undefined-variable
2 - error: undefined-variable
8 - error: undefined-variable
8 - error: undefined-variable
8 - error: undefined-variable
14 - error: undefined-variable
17 - error: undefined-variable
20 - error: undefined-variable
20 - error: undefined-variable
20 - error: undefined-variable
28 - error: undefined-variable
28 - error: undefined-variable
28 - error: undefined-variable
32 - error: undefined-variable
32 - error: undefined-variable
32 - error: undefined-variable
38 - error: undefined-variable
38 - error: undefined-variable
38 - error: undefined-variable
|
1 import numpy as np
2
3 np.mean(data)
4 np.median(data)
5 np.var(versicolor_petal_length)
6 np.std(versicolor_petal_length)
7
8 #covariance matrix:
9 # returns a 2D array where entries [0,1] and [1,0] are the covariances.
10 # Entry [0,0] is the variance of the data in x, and entry [1,1] is the variance of the data in y
11 np.cov(versicolor_petal_length, versicolor_petal_width)
12
13 def pearson_r(x, y):
14 """Compute Pearson correlation coefficient between two arrays."""
15 # Compute correlation matrix: corr_mat
16 corr_mat = np.corrcoef(x,y)
17
18 # Return entry [0,1]
19 return corr_mat[0,1]
20
21 # Compute Pearson correlation coefficient for I. versicolor: r
22 r = pearson_r(versicolor_petal_length, versicolor_petal_width)
23
24 # Print the result
25 print(r)
26
27
28 # Specify array of percentiles: percentiles
29 percentiles = np.array([2.5, 25, 50, 75, 97.5])
30
31 # Compute percentiles: ptiles_vers
32 ptiles_vers= np.percentile(versicolor_petal_length, percentiles)
33
34 # Print the result
35 print(ptiles_vers)
36
37
38 # Create box plot with Seaborn's default settings
39 _ = sns.boxplot(x='species', y='petal length (cm)', data=df)
40
41 # Label the axes
42 plt.xlabel('species')
43 plt.ylabel('petal length (cm)')
44
45 # Show the plot
46 plt.show()
| 3 - error: undefined-variable
4 - error: undefined-variable
5 - error: undefined-variable
6 - error: undefined-variable
11 - error: undefined-variable
11 - error: undefined-variable
22 - error: undefined-variable
22 - error: undefined-variable
32 - error: undefined-variable
39 - error: undefined-variable
39 - error: undefined-variable
42 - error: undefined-variable
43 - error: undefined-variable
46 - error: undefined-variable
|
1 # Import figure from bokeh.plotting
2 from bokeh.plotting import figure
3
4 # Import output_file and show from bokeh.io
5 from bokeh.io import output_file, show
6
7 # Create the figure: p
8 p = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)')
9
10 # Add a circle glyph to the figure p
11 p.circle(fertility,female_literacy)
12
13 # Call the output_file() function and specify the name of the file
14 output_file('fert_lit.html')
15
16 # Display the plot
17 show(p)
18
19
20
21 # Create the figure: p
22 p = figure(x_axis_label='fertility', y_axis_label='female_literacy (% population)')
23
24 # Add a circle glyph to the figure p
25 p.circle(fertility_latinamerica, female_literacy_latinamerica, size=10, alpha=0.8, color='blue')
26
27 # Add an x glyph to the figure p
28 p.x(fertility_africa, female_literacy_africa)
29
30 # Specify the name of the file
31 output_file('fert_lit_separate.html')
32
33 # Display the plot
34 show(p)
35
36
37
38 #lines
39 # Import figure from bokeh.plotting
40 from bokeh.plotting import figure
41
42 # Create a figure with x_axis_type="datetime": p
43 p = figure(x_axis_type='datetime', x_axis_label='Date', y_axis_label='US Dollars')
44
45 # Plot date along the x axis and price along the y axis
46 p.line(date, price)
47 p.circle(date, price, fill_color='white', size=4)
48
49 # Specify the name of the output file and show the result
50 output_file('line.html')
51 show(p)
52
53
54 #patches
55 # Create a list of az_lons, co_lons, nm_lons and ut_lons: x
56 x = [az_lons, co_lons, nm_lons, ut_lons]
57
58 # Create a list of az_lats, co_lats, nm_lats and ut_lats: y
59 y = [az_lats, co_lats, nm_lats, ut_lats]
60
61 # Add patches to figure p with line_color=white for x and y
62 p.patches(x,y, line_color='white')
63
64 # Specify the name of the output file and show the result
65 output_file('four_corners.html')
66 show(p) | 11 - error: undefined-variable
11 - error: undefined-variable
25 - error: undefined-variable
25 - error: undefined-variable
28 - error: undefined-variable
28 - error: undefined-variable
40 - warning: reimported
46 - error: undefined-variable
46 - error: undefined-variable
47 - error: undefined-variable
47 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
56 - error: undefined-variable
59 - error: undefined-variable
59 - error: undefined-variable
59 - error: undefined-variable
59 - error: undefined-variable
|
1 # Add the first circle glyph to the figure p
2 p.circle('fertility', 'female_literacy', source=latin_america, size=10, color='red', legend='Latin America')
3
4 # Add the second circle glyph to the figure p
5 p.circle('fertility', 'female_literacy', source=africa, size=10, color='blue', legend='Africa')
6
7 # Assign the legend to the bottom left: p.legend.location
8 p.legend.location = 'bottom_left'
9
10 # Fill the legend background with the color 'lightgray': p.legend.background_fill_color
11 p.legend.background_fill_color='lightgray'
12
13 # Specify the name of the output_file and show the result
14 output_file('fert_lit_groups.html')
15 show(p)
16
17
18 # Import HoverTool from bokeh.models
19 from bokeh.models import HoverTool
20
21 # Create a HoverTool object: hover
22 hover = HoverTool(tooltips=[('Country','@Country')])
23
24 # Add the HoverTool object to figure p
25 p.add_tools(hover)
26
27 # Specify the name of the output_file and show the result
28 output_file('hover.html')
29 show(p) | 2 - error: undefined-variable
2 - error: undefined-variable
5 - error: undefined-variable
5 - error: undefined-variable
8 - error: undefined-variable
11 - error: undefined-variable
14 - error: undefined-variable
15 - error: undefined-variable
15 - error: undefined-variable
25 - error: undefined-variable
28 - error: undefined-variable
29 - error: undefined-variable
29 - error: undefined-variable
|
1 from flask_restful import Resource, reqparse
2 from werkzeug.security import safe_str_cmp
3 from flask_jwt_extended import (
4 create_access_token,
5 create_refresh_token,
6 jwt_refresh_token_required,
7 get_jwt_identity
8 )
9 from models.user import UserModel
10
11
12 class UserRegister(Resource):
13 parser = reqparse.RequestParser()
14 parser.add_argument('username',
15 type=str,
16 required=True,
17 help="This field cannot be blank."
18 )
19 parser.add_argument('password',
20 type=str,
21 required=True,
22 help="This field cannot be blank."
23 )
24
25 def post(self):
26 data = UserRegister.parser.parse_args()
27
28 if UserModel.find_by_username(data['username']):
29 return {"message": "A user with that username already exists"}, 400
30
31 user = UserModel(**data)
32 user.save_to_db()
33
34 return {"message": "User created successfully."}, 201
35
36 class User(Resource):
37 @classmethod
38 def get(cls, user_id):
39 user = UserModel.find_by_id(user_id)
40
41 if user is None:
42 return {'message' :'user not found'}, 404
43 else:
44 return user.json()
45
46
47 @classmethod
48 def delete(cls, user_id):
49 user = UserModel.find_by_id(user_id)
50
51 if user is None:
52 return {'message' : 'User not found'}, 404
53 else:
54 user.delete_from_db()
55 return {'message' : 'User deleted'}
56
57 class UserLogin(Resource):
58 parser = reqparse.RequestParser()
59 parser.add_argument('username',
60 type=str,
61 required=True,
62 help="This field cannot be blank."
63 )
64 parser.add_argument('password',
65 type=str,
66 required=True,
67 help="This field cannot be blank."
68 )
69
70 def post(self):
71 data = self.parser.parse_args()
72
73 user = UserModel.find_by_username(data['username'])
74
75 #This is what 'authenticate()' used to do
76 if user is not None and safe_str_cmp(user.password, data['password']):
77 #What the 'identity()' function used to do
78 access_token = create_access_token(identity = user.id, fresh = True)
79 refresh_token = create_refresh_token(user.id)
80
81 return {
82 'access_token' : access_token,
83 'refresh_token' : refresh_token
84 }, 200
85 else:
86 return {'message' : 'Invalid credentials'}, 401
87
88 class TokenRefresh(Resource):
89 @jwt_refresh_token_required
90 def post(self):
91 current_user = get_jwt_identity()
92 new_token = create_access_token(identity = current_user, fresh = False)
93 return {'access_token' : new_token}, 200 | 37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
41 - warning: bad-indentation
42 - warning: bad-indentation
43 - warning: bad-indentation
44 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
51 - warning: bad-indentation
52 - warning: bad-indentation
53 - warning: bad-indentation
54 - warning: bad-indentation
55 - warning: bad-indentation
58 - warning: bad-indentation
59 - warning: bad-indentation
64 - warning: bad-indentation
70 - warning: bad-indentation
71 - warning: bad-indentation
73 - warning: bad-indentation
76 - warning: bad-indentation
78 - warning: bad-indentation
79 - warning: bad-indentation
81 - warning: bad-indentation
85 - warning: bad-indentation
86 - warning: bad-indentation
89 - warning: bad-indentation
90 - warning: bad-indentation
91 - warning: bad-indentation
92 - warning: bad-indentation
93 - warning: bad-indentation
12 - refactor: too-few-public-methods
41 - refactor: no-else-return
51 - refactor: no-else-return
76 - refactor: no-else-return
57 - refactor: too-few-public-methods
88 - refactor: too-few-public-methods
|
1 #Link the ranges with panning
2 # Link the x_range of p2 to p1: p2.x_range
3 p2.x_range = p1.x_range
4
5 # Link the y_range of p2 to p1: p2.y_range
6 p2.y_range = p1.y_range
7
8 # Link the x_range of p3 to p1: p3.x_range
9 p3.x_range = p1.x_range
10
11 # Link the y_range of p4 to p1: p4.y_range
12 p4.y_range = p1.y_range
13
14 # Specify the name of the output_file and show the result
15 output_file('linked_range.html')
16 show(layout)
17
18
19
20 #Link selection
21 # Create ColumnDataSource: source
22 source = ColumnDataSource(data)
23
24 # Create the first figure: p1
25 p1 = figure(x_axis_label='fertility (children per woman)', y_axis_label='female literacy (% population)',
26 tools='box_select,lasso_select')
27
28 # Add a circle glyph to p1
29 p1.circle('fertility', 'female literacy', source=source)
30
31 # Create the second figure: p2
32 p2 = figure(x_axis_label='fertility (children per woman)', y_axis_label='population (millions)',
33 tools='box_select,lasso_select')
34
35 # Ad
36 p2.circle('fertility', 'population', source=source)
37
38
39 # Create row layout of figures p1 and p2: layout
40 layout = row(p1, p2)
41
42 # Specify the name of the output_file and show the result
43 output_file('linked_brush.html')
44 show(layout) | 3 - error: used-before-assignment
3 - error: used-before-assignment
9 - error: undefined-variable
12 - error: undefined-variable
15 - error: undefined-variable
16 - error: undefined-variable
16 - error: used-before-assignment
22 - error: undefined-variable
22 - error: undefined-variable
25 - error: undefined-variable
32 - error: undefined-variable
40 - error: undefined-variable
43 - error: undefined-variable
44 - error: undefined-variable
|
1 # Convert the sex column to type 'category'
2 tips.sex = tips.sex.astype('category') #converting to categorical vars helps with memory and further analysis
3
4 # Convert the smoker column to type 'category'
5 tips.smoker = tips.smoker.astype('category')
6
7 # Print the info of tips
8 print(tips.info())
9
10 #sometimes we may need to tell python how to deal with values it can't convert
11 tips['total_bill'] = pd.to_numeric(tips['total_bill'], errors='coerce')
| 2 - error: undefined-variable
2 - error: undefined-variable
5 - error: undefined-variable
5 - error: undefined-variable
8 - error: undefined-variable
11 - error: undefined-variable
11 - error: undefined-variable
11 - error: undefined-variable
|
1 SELECT * FROM table
2
3 SELECT COUNT(*) FROM table
4 #counts number of rows
5
6 SELECT DISTINCT row FROM table
7 #selects unique entries in row
8
9 SELECT COUNT(row) FROM table
10 #counts non-null entries
11
12 SELECT COUNT(DISTINCT row) FROM table
13 #returns count of distinct entries
14
15 SELECT * FROM table
16 WHERE column_value = 'some_value' #Use boolean operators, note that <> is !=
17
18 SELECT * FROM table
19 WHERE column1 = 'some_value' AND/OR column2 > some_value;
20
21 SELECT * FROM table
22 WHERE column BETWEEN value1 AND value2;
23 #Returns a range (inclusive)
24
25 SELECT * FROM table
26 WHERE column IN ('...', '....', '....')
27 #use this instead of multiple ORs
28
29 SELECT * FROM table
30 WHERE column IS NULL\IS NOT NULL
31 #filter column on null\not null values
32
33 SELECT * FROM table
34 WHERE column LIKE 'Data%'
35 # % wildcard matches none, one or many
36
37 SELECT * FROM table
38 WHERE column NOT LIKE 'Data%'
39 # % wildcard matches none, one or many. Here we return all entrie that DON'T match
40
41 SELECT * FROM table
42 WHERE column LIKE 'Data_'
43 # _ wildcard matches a single char
44
45
46 ###AGGREGATION####
47
48 SELECT SUM(column) FROM table #AVG, MIN, MAX
49
50 SELECT (col1 + col2)*3 AS new_col FROM table #Note: (3/2) = 1, (3.0/2.0) = 1.5
51
52 #Can combine aggregations with arithmetic
53
54
55 ####ORDERING####
56 SELECT column FROM table
57 ORDER BY col1 DESC
58
59 #NOTE comes after WHERE clauses
60
61
62 ###GROUPING###
63
64 SELECT col1, COUNT(col2) FROM table
65 GROUP BY col1
66 #NOTE can't SELECT a column that isn't the GROUP BY, unless we aggregate it
67
68
69 ###HAVING###
70
71 SELECT column FROM table
72 HAVING AVG(col1) > ...
73
74 ###FULL EG###
75 SELECT release_year, AVG(budget) AS avg_budget, AVG(gross) AS avg_gross FROM films
76 WHERE release_year > 1990
77 GROUP BY release_year
78 HAVING AVG(budget) > 60000000
79 ORDER BY avg_gross DESC
80
81 SELECT country, AVG(budget) AS avg_budget, AVG(gross) AS avg_gross FROM films
82 GROUP BY country
83 HAVING COUNT(title) > 10
84 ORDER BY country
85 LIMIT 5 | 1 - error: syntax-error
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Tue Aug 7 11:20:39 2018
4
5 @author: Damien
6 """
7
| Clean Code: No Issues Detected
|
1 #Melting data is the process of turning columns of your data into rows of data.
2 airquality_melt = pd.melt(airquality_melt, id_vars=['Month', 'Day'])
3 #id_vars = columns not wishing to melt
4 #value_vars = columns wishing to melt (deafult to all not in id_vars)
5
6 #Pivoting data is the opposite of melting it.
7 airquality_pivot = airquality_melt.pivot_table(index=["Month", "Day"], columns="measurement", values="reading")
8 #columns="measurement" : columns to pivot
9 #values="reading" : values to fill columns with
10
11 #the above create a heirarchical header format. To fix this:
12 airquality_pivot_reset = airquality_pivot.reset_index()
13
14 #Often there are duplicate values, these can be handled as follows:
15 airquality_pivot = airquality_dup.pivot_table(index=['Month', 'Day'], columns='measurement', values='reading', aggfunc=np.mean)
16 #where the mean is taken
17
18 #Note in the below that Series atributes and functions are accessed on the .str function
19 # Melt ebola: ebola_melt
20 ebola_melt = pd.melt(ebola, id_vars=['Date', 'Day'], var_name='type_country', value_name='counts')
21
22 # Create the 'str_split' column
23 ebola_melt['str_split'] = ebola_melt.type_country.str.split('_')
24
25 # Create the 'type' column
26 ebola_melt['type'] = ebola_melt.str_split.str.get(0)
27
28 # Create the 'country' column
29 ebola_melt['country'] = ebola_melt.str_split.str.get(1)
30
31 # Print the head of ebola_melt
32 print(ebola_melt.head()) | 2 - error: undefined-variable
2 - error: used-before-assignment
15 - error: undefined-variable
15 - error: undefined-variable
20 - error: undefined-variable
20 - error: undefined-variable
|
1 def ecdf(data):
2 """Compute ECDF for a one-dimensional array of measurements."""
3 # Number of data points: n
4 n = len(data)
5
6 # x-data for the ECDF: x
7 x = np.sort(data)
8
9 # y-data for the ECDF: y
10 y = np.arange(1, n+1) / n
11
12 return x, y
13
14 # Compute ECDF for versicolor data: x_vers, y_vers
15 x_vers, y_vers = ecdf(versicolor_petal_length)
16
17 # Generate plot
18 _ = plt.plot(x_vers, y_vers, marker='.', linestyle='none')
19
20 # Label the axes
21 plt.xlabel('versicolor_petal_length')
22 plt.ylabel('ECDF')
23
24 # Display the plot
25 plt.show() | 7 - error: undefined-variable
10 - error: undefined-variable
15 - error: undefined-variable
18 - error: undefined-variable
21 - error: undefined-variable
22 - error: undefined-variable
25 - error: undefined-variable
|
1 # Extract selected columns from weather as new DataFrame: temps_f
2 temps_f = weather[['Min TemperatureF', 'Mean TemperatureF', 'Max TemperatureF']]
3
4 # Convert temps_f to celsius: temps_c
5 temps_c = (temps_f - 32) * 5/9 #broadcasting
6
7 # Rename 'F' in column names with 'C': temps_c.columns
8 temps_c.columns = ['Min TemperatureC', 'Mean TemperatureC', 'Max TemperatureC']
9
10 # Print first 5 rows of temps_c
11 print(temps_c.head())
12
13
14 import pandas as pd
15
16 # Read 'GDP.csv' into a DataFrame: gdp
17 gdp = pd.read_csv('GDP.csv', index_col='DATE', parse_dates=True)
18
19 # Slice all the gdp data from 2008 onward: post2008
20 post2008 = gdp.loc['2008':, :]
21
22 # Print the last 8 rows of post2008
23 print(post2008.tail(8))
24 VALUE
25 DATE
26 2014-07-01 17569.4
27 2014-10-01 17692.2
28 2015-01-01 17783.6
29 2015-04-01 17998.3
30 2015-07-01 18141.9
31 2015-10-01 18222.8
32 2016-01-01 18281.6
33 2016-04-01 18436.5
34
35 # Resample post2008 by year, keeping last(): yearly
36 yearly = post2008.resample('A').last()
37
38 # Print yearly
39 print(yearly)
40 VALUE
41 DATE
42 2014-07-01 17569.4
43 2014-10-01 17692.2
44 2015-01-01 17783.6
45 2015-04-01 17998.3
46 2015-07-01 18141.9
47 2015-10-01 18222.8
48 2016-01-01 18281.6
49 2016-04-01 18436.5
50
51 # Compute percentage growth of yearly: yearly['growth']
52 yearly['growth'] = yearly.pct_change()*100
53
54 # Print yearly again
55 print(yearly)
56 VALUE growth
57 DATE
58 2008-12-31 14549.9 NaN
59 2009-12-31 14566.5 0.114090
60 2010-12-31 15230.2 4.556345
61 2011-12-31 15785.3 3.644732
62 2012-12-31 16297.3 3.243524
63 2013-12-31 16999.9 4.311144
64 2014-12-31 17692.2 4.072377
65 2015-12-31 18222.8 2.999062
66 2016-12-31 18436.5 1.172707
67
68 # Import pandas
69 import pandas as pd
70
71 # Read 'sp500.csv' into a DataFrame: sp500
72 sp500 = pd.read_csv('sp500.csv', index_col='Date', parse_dates=True)
73
74 # Read 'exchange.csv' into a DataFrame: exchange
75 exchange = pd.read_csv('exchange.csv', index_col='Date', parse_dates=True)
76
77 # Subset 'Open' & 'Close' columns from sp500: dollars
78 dollars = sp500[['Open', 'Close']]
79
80 # Print the head of dollars
81 print(dollars.head())
82
83 # Convert dollars to pounds: pounds
84 pounds = dollars.multiply(exchange['GBP/USD'], axis='rows')
85 #NOTE: similar add(), subtract(), divide() methods. These offer more flexibility than using standard +, -, / operators
86
87 # Print the head of pounds
88 print(pounds.head()) | 24 - error: syntax-error
|
1 # Seed random number generator
2 np.random.seed(42)
3
4 # Compute mean no-hitter time: tau
5 tau = np.mean(nohitter_times)
6
7 # Draw out of an exponential distribution with parameter tau: inter_nohitter_time
8 inter_nohitter_time = np.random.exponential(tau, 100000)
9
10 # Plot the PDF and label axes
11 _ = plt.hist(inter_nohitter_time,
12 bins=50, normed=True, histtype='step')
13 _ = plt.xlabel('Games between no-hitters')
14 _ = plt.ylabel('PDF')
15
16 # Show the plot
17 plt.show()
18
19 #Verigy using cdf
20 # Create an ECDF from real data: x, y
21 x, y = ecdf(nohitter_times)
22
23 # Create a CDF from theoretical samples: x_theor, y_theor
24 x_theor, y_theor = ecdf(inter_nohitter_time)
25
26 # Overlay the plots
27 plt.plot(x=x_theor, y=y_theor)
28 plt.plot(x=x, y=y, marker='.', linestyle='none')
29
30 # Margins and axis labels
31 plt.margins(0.02)
32 plt.xlabel('Games between no-hitters')
33 plt.ylabel('CDF')
34
35 # Show the plot
36 plt.show()
37
38
39 # Plot the theoretical CDFs
40 plt.plot(x_theor, y_theor)
41 plt.plot(x, y, marker='.', linestyle='none')
42 plt.margins(0.02)
43 plt.xlabel('Games between no-hitters')
44 plt.ylabel('CDF')
45
46 # Take samples with half tau: samples_half
47 samples_half = np.random.exponential(tau/2,10000)
48
49 # Take samples with double tau: samples_double
50 samples_double = np.random.exponential(2*tau,10000)
51
52 # Generate CDFs from these samples
53 x_half, y_half = ecdf(samples_half)
54 x_double, y_double = ecdf(samples_double)
55
56 # Plot these CDFs as lines
57 _ = plt.plot(x_half, y_half)
58 _ = plt.plot(x_double, y_double)
59
60 # Show the plot
61 plt.show()
| 2 - error: undefined-variable
5 - error: undefined-variable
5 - error: undefined-variable
8 - error: undefined-variable
11 - error: undefined-variable
13 - error: undefined-variable
14 - error: undefined-variable
17 - error: undefined-variable
21 - error: undefined-variable
21 - error: undefined-variable
24 - error: undefined-variable
27 - error: undefined-variable
28 - error: undefined-variable
31 - error: undefined-variable
32 - error: undefined-variable
33 - error: undefined-variable
36 - error: undefined-variable
40 - error: undefined-variable
41 - error: undefined-variable
42 - error: undefined-variable
43 - error: undefined-variable
44 - error: undefined-variable
47 - error: undefined-variable
50 - error: undefined-variable
53 - error: undefined-variable
54 - error: undefined-variable
57 - error: undefined-variable
58 - error: undefined-variable
61 - error: undefined-variable
|
1 # Import necessary module
2 from sqlalchemy import create_engine
3 import pandas as pd
4
5 # Create engine: engine
6 engine = create_engine('sqlite:///Chinook.sqlite')
7
8 # Save the table names to a list: table_names
9 table_names = engine.table_names()
10
11 # Print the table names to the shell
12 print(table_names)
13
14
15 #Executing a query
16 con = engine.connect()
17
18 # Perform query: rs
19 rs = con.execute("SELECT * from Album")
20
21 # Save results of the query to DataFrame: df
22 df = pd.DataFrame(rs.fetchall())
23 df.columns = rs.keys()
24
25
26 # Close connection
27 con.close()
28
29 #auto close connection
30 with engine.connect() as con:
31 rs = con.execute("SELECT LastName, Title FROM Employee")
32 df = pd.DataFrame(rs.fetchmany(3))
33 df.columns = rs.keys()
34
35
36 #ALTERNATIVELY
37 # Import packages
38 from sqlalchemy import create_engine
39 import pandas as pd
40
41 # Create engine: engine
42 engine = create_engine('sqlite:///Chinook.sqlite')
43
44 # Execute query and store records in DataFrame: df
45 df = pd.read_sql_query('SELECT * FROM Album', engine) | 38 - warning: reimported
39 - warning: reimported
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Tue Jul 24 16:43:47 2018
4
5 @author: Damien
6 """
7
8 from flask import Flask, jsonify, request, render_template
9
10 #NOTE on JSON: JSON are essentially dictionaries but in string format. Thus we need to convert of Python dicts to text
11 app = Flask(__name__) #unique __name__ - special python variable
12 stores = [
13 {
14 'name': 'My Store',
15 'items': [
16 {
17 'name':'My Item',
18 'price':15.99
19 }
20 ]
21 }
22 ]
23
24 @app.route('/')
25 def home():
26 return render_template('index.html') #Looks in template folder
27
28 #POST - recieves data
29 #GET - send data back
30
31 ##End points we are going to define
32 #POST /store data: {name:}
33 @app.route('/store', methods = ['POST']) #default to GET
34 def create_store():
35 request_data = request.get_json()
36 new_store = {
37 'name': request_data['name'],
38 'items' : []
39 }
40
41 stores.append(new_store)
42 return jsonify(new_store)
43
44 #GET /store/<string:name>
45 @app.route('/store/<string:name>') #<string:name> is a flask keyword
46 def get_store(name):
47 for store in stores:
48 if store['name'] == name:
49 return jsonify(store)
50 else:
51 return jsonify({'message' : 'No such store'})
52
53 #GET /store
54 @app.route('/store')
55 def get_stores():
56 return jsonify({'stores' : stores}) #convert list to dictionary
57
58 #POST /store/<string:name>/item {name:, price:}
59 @app.route('/store/<string:name>/item', methods = ['POST']) #default to GET
60 def create_item(name):
61 request_data = request.get_json()
62 for store in stores:
63 if store['name'] == name:
64 new_item = {
65 'name' : request_data['name'],
66 'price' : request_data['price']
67 }
68 store['items'].append(new_item)
69 return jsonify(new_item)
70 else:
71 return jsonify({"message" : " No such store"})
72 #GET /store/<string:name>/item
73 @app.route('/store/<string:name>/item') #<string:name> is a flask keyword
74 def get_item_in_store(name):
75 for store in stores:
76 if store['name'] == name:
77 return jsonify({'items' : store['items']})
78 else:
79 return jsonify({'message' : 'No such store'})
80
81
82 app.run(port=5000) #app runs on port/area of computer that the computer sends and recieces requests
83
84 #run from conda "python app.py"
85 #copy 127.0.0.1:5000 into browswer (127.0.0.1 is the IP reserved fro your computer) | 26 - warning: bad-indentation
35 - warning: bad-indentation
36 - warning: bad-indentation
41 - warning: bad-indentation
42 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
50 - warning: bad-indentation
51 - warning: bad-indentation
56 - warning: bad-indentation
61 - warning: bad-indentation
62 - warning: bad-indentation
63 - warning: bad-indentation
64 - warning: bad-indentation
68 - warning: bad-indentation
69 - warning: bad-indentation
70 - warning: bad-indentation
71 - warning: bad-indentation
75 - warning: bad-indentation
76 - warning: bad-indentation
77 - warning: bad-indentation
78 - warning: bad-indentation
79 - warning: bad-indentation
48 - refactor: no-else-return
63 - refactor: no-else-return
76 - refactor: no-else-return
|
1
2 # Read in the data file with header=None: df_headers
3 df_headers = pd.read_csv(data_file, header=None)
4
5 # Print the output of df_headers.head()
6 print(df_headers.head())
7
8 # Split on the comma to create a list: column_labels_list
9 column_labels_list = column_labels.split(",")
10
11 # Assign the new column labels to the DataFrame: df.columns
12 df.columns = column_labels_list
13
14 # Remove the appropriate columns: df_dropped
15 df_dropped = df.drop(list_to_drop, axis = 'columns')
16
17 # Print the output of df_dropped.head()
18 print(df_dropped.head())
19
20 # Convert the date column to string: df_dropped['date']
21 df_dropped['date'] = df_dropped['date'].astype(str)
22
23 # Pad leading zeros to the Time column: df_dropped['Time']
24 df_dropped['Time'] = df_dropped['Time'].apply(lambda x:'{:0>4}'.format(x))
25
26 # Concatenate the new date and Time columns: date_string
27 date_string = df_dropped.date + df_dropped.Time
28
29 # Convert the date_string Series to datetime: date_times
30 date_times = pd.to_datetime(date_string, format='%Y%m%d%H%M')
31
32 # Set the index to be the new date_times container: df_clean
33 df_clean = df_dropped.set_index(date_times)
34
35 # Print the dry_bulb_faren temperature between 8 AM and 9 AM on June 20, 2011
36 print(df_clean.loc['2011-Jun-20 08:00':'2011-Jun-20 09:00', 'dry_bulb_faren'])
37
38 # Convert the dry_bulb_faren column to numeric values: df_clean['dry_bulb_faren']
39 df_clean['dry_bulb_faren'] = pd.to_numeric(df_clean['dry_bulb_faren'], errors='coerce')
40
41 # Print the transformed dry_bulb_faren temperature between 8 AM and 9 AM on June 20, 2011
42 print(df_clean.loc['2011-Jun-20 08:00':'2011-Jun-20 09:00', 'dry_bulb_faren'])
43
44 # Convert the wind_speed and dew_point_faren columns to numeric values
45 df_clean['wind_speed'] = pd.to_numeric(df_clean['wind_speed'], errors='coerce')
46 df_clean['dew_point_faren'] = pd.to_numeric(df_clean['dew_point_faren'], errors='coerce') | 3 - error: undefined-variable
3 - error: undefined-variable
9 - error: undefined-variable
12 - error: undefined-variable
15 - error: undefined-variable
15 - error: undefined-variable
24 - warning: unnecessary-lambda
30 - error: undefined-variable
39 - error: undefined-variable
45 - error: undefined-variable
46 - error: undefined-variable
|
1 kind='scatter' uses a scatter plot of the data points
2 kind='reg' uses a regression plot (default order 1)
3 kind='resid' uses a residual plot
4 kind='kde' uses a kernel density estimate of the joint distribution
5 kind='hex' uses a hexbin plot of the joint distribution
6
7 # Generate a joint plot of 'hp' and 'mpg'
8 sns.jointplot(x='hp', y='mpg', data=auto)
9
10 # Generate a joint plot of 'hp' and 'mpg' using a hexbin plot
11 sns.jointplot(x='hp', y='mpg', data=auto, kind='hex')
12
13 # Display the plot
14 plt.show()
15
16 #Plot of all numeric columns against one another
17 # Print the first 5 rows of the DataFrame
18 print(auto.head())
19
20 # Plot the pairwise joint distributions from the DataFrame
21 sns.pairplot(auto, hue='origin', kind='reg')
22
23 # Display the plot
24 plt.show()
25
26 # Print the covariance matrix
27 print(cov_matrix)
28
29 # Visualize the covariance matrix using a heatmap
30 sns.heatmap(cov_matrix)
31
32 # Display the heatmap
33 plt.show() | 1 - error: syntax-error
|
1 # Group titanic by 'pclass'
2 by_class = titanic.groupby('pclass')
3
4 # Aggregate 'survived' column of by_class by count
5 count_by_class = by_class['survived'].count()
6
7 # Print count_by_class
8 print(count_by_class)
9
10 # Group titanic by 'embarked' and 'pclass'
11 by_mult = titanic.groupby(['embarked', 'pclass'])
12
13 # Aggregate 'survived' column of by_mult by count
14 count_mult = by_mult['survived'].count()
15
16 # Print count_mult
17 print(count_mult)
18
19 # Read life_fname into a DataFrame: life
20 life = pd.read_csv(life_fname, index_col='Country')
21
22 # Read regions_fname into a DataFrame: regions
23 regions = pd.read_csv(regions_fname, index_col='Country')
24
25 # Group life by regions['region']: life_by_region. This is doable because of the same indexes
26 life_by_region = life.groupby(regions.region)
27
28 # Print the mean over the '2010' column of life_by_region
29 print(life_by_region['2010'].mean())
30
31
32 # Group titanic by 'pclass': by_class
33 by_class = titanic.groupby('pclass')
34
35 # Select 'age' and 'fare'
36 by_class_sub = by_class[['age','fare']]
37
38 # Aggregate by_class_sub by 'max' and 'median': aggregated
39 aggregated = by_class_sub.agg(['max', 'median'])
40
41 age fare
42 max median max median
43 pclass
44 1 80.0 39.0 512.3292 60.0000
45 2 70.0 29.0 73.5000 15.0458
46 3 74.0 24.0 69.5500 8.0500
47
48 # Print the maximum age in each class
49 print(aggregated.loc[:, ('age','max')])
50 pclass
51 1 80.0
52 2 70.0
53 3 74.0
54 Name: (age, max), dtype: float64
55
56 # Print the median fare in each class
57 print(aggregated.loc[:, ('fare', 'median')])
58 pclass
59 1 80.0
60 2 70.0
61 3 74.0
62 Name: (age, max), dtype: float64
63
64
65
66 # Read the CSV file into a DataFrame and sort the index: gapminder
67 gapminder = pd.read_csv('gapminder.csv', index_col=['Year','region','Country']).sort_index()
68
69 # Group gapminder by 'Year' and 'region': by_year_region
70 by_year_region = gapminder.groupby(level = ['Year', 'region'])
71
72 # Define the function to compute spread: spread
73 def spread(series):
74 return series.max() - series.min()
75
76 # Create the dictionary: aggregator
77 aggregator = {'population':'sum', 'child_mortality':'mean', 'gdp':spread}
78
79 # Aggregate by_year_region using the dictionary: aggregated
80 aggregated = by_year_region.agg(aggregator)
81
82 # Print the last 6 entries of aggregated
83 print(aggregated.tail(6))
84
85
86 # Read file: sales
87 sales = pd.read_csv('sales.csv', index_col='Date', parse_dates=True)
88
89 # Create a groupby object: by_day
90 by_day = sales.groupby(sales.index.strftime('%a'))
91
92 # Create sum: units_sum
93 units_sum = by_day['Units'].sum()
94
95 # Print units_sum
96 print(units_sum) | 41 - error: syntax-error
|
1 #pivot tables aggregate data with duplicate indices
2
3 weekday city visitors signups
4 0 Sun Austin 139 7
5 1 Sun Dallas 237 12
6 2 Mon Austin 326 3
7 3 Mon Dallas 456 5
8
9 # Create the DataFrame with the appropriate pivot table: by_city_day
10 by_city_day = users.pivot_table(index = 'weekday', columns = "city")
11
12 # Print by_city_day
13 print(by_city_day)
14 signups visitors
15 city Austin Dallas Austin Dallas
16 weekday
17 Mon 3 5 326 456
18 Sun 7 12 139 237
19
20 # Use a pivot table to display the count of each column: count_by_weekday1
21 count_by_weekday1 = users.pivot_table(index='weekday', aggfunc='count')
22
23 # Print count_by_weekday
24 print(count_by_weekday1)
25 city signups visitors
26 weekday
27 Mon 2 2 2
28 Sun 2 2 2
29
30 # Replace 'aggfunc='count'' with 'aggfunc=len': count_by_weekday2
31 count_by_weekday2 = users.pivot_table(index='weekday', aggfunc=len)
32
33 # Create the DataFrame with the appropriate pivot table: signups_and_visitors
34 signups_and_visitors = users.pivot_table(index = "weekday", aggfunc=sum)
35
36 # Print signups_and_visitors
37 print(signups_and_visitors)
38 signups visitors
39 weekday
40 Mon 8 782
41 Sun 19 376
42
43 # Add in the margins: signups_and_visitors_total
44 signups_and_visitors_total = users.pivot_table(index = "weekday", aggfunc=sum, margins=True)
45
46 # Print signups_and_visitors_total
47 print(signups_and_visitors_total)
48 signups visitors
49 weekday
50 Mon 8 782
51 Sun 19 376
52 All 27 1158 | 3 - error: syntax-error
|
1 # Import package
2 from urllib.request import urlretrieve
3
4 # Import pandas
5 import pandas as pd
6
7 # Assign url of file: url
8 url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'
9
10 # Save file locally
11 urlretrieve(url, 'winequality-red.csv')
12
13 # Read file into a DataFrame and print its head
14 df = pd.read_csv('winequality-red.csv', sep=';')
15 #Alternatively
16 df = pd.read_csv(url, sep = ";") #does not save the file locally
17
18 #If file is an excel file
19 xl = pd.read_excel(url, sheetname = None)
20
21 # Print the sheetnames to the shell
22 print(xl.keys())
23
24 # Print the head of the first sheet (using its name, NOT its index)
25 print(xl['1700'].head())
26
27
28 ##HTTP requests
29 # Import packages
30 from urllib.request import urlopen, Request
31
32 # Specify the url
33 url = "http://www.datacamp.com/teach/documentation"
34
35 # This packages the request: request
36 request = Request(url)
37
38 # Sends the request and catches the response: response
39 response = urlopen(request)
40
41 # Print the datatype of response
42 print(type(response))
43
44 # Extract the response: html
45 html = response.read()
46
47 # Be polite and close the response!
48 response.close()
49
50
51 #The requests package simplifies this:
52
53 # Import package
54 import requests
55
56 # Specify the url: url
57 url = "http://www.datacamp.com/teach/documentation"
58
59 # Packages the request, send the request and catch the response: r
60 r = requests.get(url)
61
62 # Extract the response: text
63 text = r.text
64
65 #NO NEED TO CLOSE
66
67 # Print the html
68 print(text) | 60 - warning: missing-timeout
39 - refactor: consider-using-with
|
1 '''This was created after installing virtualenv. This allows use to create a virtual environment that mimics
2 a fresh Python install. This ensures that any updates to packages don't affect previous applications built on previous package versions.
3
4 Run: conda create -n venv python=3.5.0 anaconda
5 to create a virtual env called venv with python 3.5.0
6
7 conda activate venv
8 conda deactivate'''
9
10 from flask import Flask
11 from flask_restful import Api
12 from flask_jwt import JWT
13
14 from security import authenticate, identity
15 from user import UserRegister
16 from item import Item, ItemList
17
18 app = Flask(__name__)
19 app.secret_key = "secret_key" #this should be long and complicated in a production sense
20 api = Api(app)
21
22 jwt = JWT(app, authenticate, identity)
23 '''
24 JWT creates an endpoint /auth. When we call /auth we send a username and password, which is passed on to authenticate and identity
25 If authenticate returns a user, and that is the identity and the /auth endpoint returns a JWT
26 The JWT calls the identity function which gets the correct id and returns the user
27 '''
28
29
30 api.add_resource(Item, '/item/<string:name>') #http://127.0.0.1:5000/item/item_name
31 api.add_resource(ItemList, '/items')
32 api.add_resource(UserRegister, '/register')
33
34 if __name__ == '__main__': #This ensures that this is not run if app.py is imported, but only when called
35 app.run(port=5000, debug=True) #debug gives better error messages | 35 - warning: bad-indentation
|
1 # Import package
2 import tweepy
3
4 # Store OAuth authentication credentials in relevant variables
5 access_token = "1092294848-aHN7DcRP9B4VMTQIhwqOYiB14YkW92fFO8k8EPy"
6 access_token_secret = "X4dHmhPfaksHcQ7SCbmZa2oYBBVSD2g8uIHXsp5CTaksx"
7 consumer_key = "nZ6EA0FxZ293SxGNg8g8aP0HM"
8 consumer_secret = "fJGEodwe3KiKUnsYJC3VRndj7jevVvXbK2D5EiJ2nehafRgA6i"
9
10 # Pass OAuth details to tweepy's OAuth handler
11 auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
12 auth.set_access_token(access_token, access_token_secret)
13
14
15
16 ####################################################
17 #Need to define a Stream Listener class
18 class MyStreamListener(tweepy.StreamListener):
19 def __init__(self, api=None):
20 super(MyStreamListener, self).__init__()
21 self.num_tweets = 0
22 self.file = open("tweets.txt", "w")
23
24 def on_status(self, status):
25 tweet = status._json
26 self.file.write( json.dumps(tweet) + '\n' )
27 self.num_tweets += 1
28 if self.num_tweets < 100:
29 return True
30 else:
31 return False
32 self.file.close()
33
34 def on_error(self, status):
35 print(status)
36 #####################################################
37
38 # Initialize Stream listener
39 l = MyStreamListener()
40
41 # Create your Stream object with authentication
42 stream = tweepy.Stream(auth, l)
43
44
45 # Filter Twitter Streams to capture data by the keywords:
46 stream.filter(track = ['clinton', 'trump', 'sanders', 'cruz'])
47
48
49
50 #Once the twitter data is sitting locally:
51 # Import package
52 import json
53
54 # String of path to file: tweets_data_path
55 tweets_data_path = "tweets.txt"
56
57 # Initialize empty list to store tweets: tweets_data
58 tweets_data = []
59
60 # Open connection to file
61 tweets_file = open(tweets_data_path, "r")
62
63 # Read in tweets and store in list: tweets_data
64 for line in tweets_file:
65 tweet = json.loads(line)
66 tweets_data.append(tweet)
67
68 # Close connection to file
69 tweets_file.close()
70
71 # Import package
72 import pandas as pd
73
74 # Build DataFrame of tweet texts and languages
75 df = pd.DataFrame(tweets_data, columns=['text', 'lang'])
76
77 # Print head of DataFrame
78 print(df.head())
| 20 - refactor: super-with-arguments
22 - warning: unspecified-encoding
22 - refactor: consider-using-with
19 - warning: unused-argument
25 - warning: redefined-outer-name
25 - warning: protected-access
28 - refactor: simplifiable-if-statement
28 - refactor: no-else-return
61 - warning: unspecified-encoding
61 - refactor: consider-using-with
|
1 # Create a figure with the "box_select" tool: p
2 p = figure(x_axis_label='Year', y_axis_label='Time', tools='box_select')
3
4 # Add circle glyphs to the figure p with the selected and non-selected properties
5 p.circle('Year', 'Time', source=source,selection_color='red', nonselection_alpha=0.1)
6
7 # Specify the name of the output file and show the result
8 output_file('selection_glyph.html')
9 show(p)
10
11
12
13 # import the HoverTool
14 from bokeh.models import HoverTool
15
16 # Add circle glyphs to figure p
17 p.circle(x, y, size=10,
18 fill_color='grey', alpha=0.1, line_color=None,
19 hover_fill_color='firebrick', hover_alpha=0.5,
20 hover_line_color='white')
21
22 # Create a HoverTool: hover
23 hover = HoverTool(tooltips=None, mode='vline')
24
25 # Add the hover tool to the figure p
26 p.add_tools(hover)
27
28 # Specify the name of the output file and show the result
29 output_file('hover_glyph.html')
30 show(p)
31
32
33
34 #Import CategoricalColorMapper from bokeh.models
35 from bokeh.models import CategoricalColorMapper
36
37 # Convert df to a ColumnDataSource: source
38 source = ColumnDataSource(df)
39
40 # Make a CategoricalColorMapper object: color_mapper
41 color_mapper = CategoricalColorMapper(factors=['Europe', 'Asia', 'US'],
42 palette=['red', 'green', 'blue'])
43
44 # Add a circle glyph to the figure p
45 p.circle('weight', 'mpg', source=source,
46 color=dict(field='origin', transform=color_mapper),
47 legend='origin')
48
49 # Specify the name of the output file and show the result
50 output_file('colormap.html')
51 show(p)
| 2 - error: undefined-variable
5 - error: used-before-assignment
8 - error: undefined-variable
9 - error: undefined-variable
17 - error: undefined-variable
17 - error: undefined-variable
29 - error: undefined-variable
30 - error: undefined-variable
38 - error: undefined-variable
38 - error: undefined-variable
46 - refactor: use-dict-literal
50 - error: undefined-variable
51 - error: undefined-variable
|
1 #melting restores pivoted dfs
2
3 visitors = pd.melt(visitors_by_city_weekday, id_vars=['weekday'], value_name='visitors')
4 #id_vars specify columns to maintain
5 #value_names specify name of column containing the values
6
7 # Set the new index: users_idx
8 users_idx = users.set_index(['city', 'weekday'])
9
10 # Print the users_idx DataFrame
11 print(users_idx)
12 visitors signups
13 city weekday
14 Austin Sun 139 7
15 Dallas Sun 237 12
16 Austin Mon 326 3
17 Dallas Mon 456 5
18 # Obtain the key-value pairs: kv_pairs
19 kv_pairs = pd.melt(users_idx, col_level=0)
20
21 # Print the key-value pairs
22 print(kv_pairs)
23 variable value
24 0 visitors 139
25 1 visitors 237
26 2 visitors 326
27 3 visitors 456
28 4 signups 7
29 5 signups 12
30 6 signups 3
31 7 signups 5 | 12 - error: syntax-error
|
1 #stack does something similar to pivot using the indices
2 # Unstack users by 'weekday': byweekday
3
4 users =
5 visitors signups
6 city weekday
7 Austin Mon 326 3
8 Sun 139 7
9 Dallas Mon 456 5
10 Sun 237 12
11
12 byweekday = users.unstack(level = 'weekday')
13
14 # Print the byweekday DataFrame
15 print(byweekday)
16 visitors signups
17 weekday Mon Sun Mon Sun
18 city
19 Austin 326 139 3 7
20 Dallas 456 237 5 12
21
22 # Stack byweekday by 'weekday' and print it
23 print(byweekday.stack(level = 'weekday'))
24 visitors signups
25 city weekday
26 Austin Mon 326 3
27 Sun 139 7
28 Dallas Mon 456 5
29 Sun 237 12
30
31 # Stack 'city' back into the index of bycity: newusers
32 newusers = bycity.stack(level = "city")
33
34 # Swap the levels of the index of newusers: newusers
35 newusers = newusers.swaplevel(0,1)
36
37 # Print newusers and verify that the index is not sorted
38 print(newusers)
39
40 # Sort the index of newusers: newusers
41 newusers = newusers.sort_index() | 4 - error: syntax-error
|
1 '''This was created after installing virtualenv. This allows use to create a virtual environment that mimics
2 a fresh Python install. This ensures that any updates to packages don't affect previous applications built on previous package versions.
3
4 Run: conda create -n venv python=3.5.0 anaconda
5 to create a virtual env called venv with python 3.5.0
6
7 conda activate venv
8 conda deactivate'''
9
10 from flask import Flask, request
11 from flask_restful import Resource, Api, reqparse
12 from flask_jwt import JWT, jwt_required
13
14 from security import authenticate, identity
15
16 app = Flask(__name__)
17 app.secret_key = "secret_key" #this should be long and complicated in a production sense
18 api = Api(app)
19
20 jwt = JWT(app, authenticate, identity)
21 '''
22 JWT creates an endpoint /auth. When we call /auth we send a username and password, which is passed on to authenticate and identity
23 If authenticate returns a user, and that is the identity and the /auth endpoint returns a JWT
24 The JWT calls the identity function which gets the correct id and returns the user
25 '''
26
27 items = []
28
29 class Item(Resource):
30 parser = reqparse.RequestParser() #This prevents code duplication and now belongs to the Item class
31 parser.add_argument('price',
32 type = float,
33 required = True,
34 help = "This field cannot be left blank")
35
36
37 @jwt_required()
38 def get(self, name):
39 item = next(filter(lambda x: x['name'] == name, items), None) #if next produces nothing, return None
40 return {"item" : item}, 200 if item is not None else 404
41
42 def post(self, name):
43 #Note that the 'Header' and 'Body' need to be set
44 if next(filter(lambda x: x['name'] == name, items), None) is not None:
45 return {"message" : "an item with name '{}' already exists.".format(name)}, 400 #400 = bad request
46
47 data = Item.parser.parse_args()
48 #data = request.get_json() #args: force:Forces the content header, silent: returns None (generally don't use)
49 item = {'name' : name, 'price' : data['price']}
50 items.append(item)
51
52 return item, 201 #201 is code for created
53
54 def delete(self, name):
55 global items
56 items = list(filter(lambda x : x['name'] != name, items))
57 return {"message" : "Item deleted"}
58
59 def put(slef, name):
60 # parser = reqparse.RequestParser() #reqparse allows us to specify which items in the JSON payload can be used for the variable updates
61 # parser.add_argument('price', #we add which arguments we can allow through. The request gets run through the parser
62 # type = float,
63 # required = True,
64 # help = "This field cannot be left blank") #and many more!
65 data = Item.parser.parse_args() #any args other than "price" will just get erased
66 #data = request.get_json() #this is sone in the above
67
68 #print(data['another']) --- this would return an error, even if 'another' was in the json payload as by this point it has been removed by the parser
69
70 item = next(filter(lambda x: x['name'] == name, items), None)
71
72 if item is None:
73 item = {"name" : name, "price" : data['price']}
74 items.append(item)
75 else:
76 item.update(data) #Note, item is a reference to the items entry and so will be updated there as well
77 print(items)
78 return item
79
80 class ItemList(Resource):
81 def get(self):
82 return{"items" : items}
83
84 api.add_resource(Item, '/item/<string:name>') #http://127.0.0.1:5000/item/item_name
85 api.add_resource(ItemList, '/items')
86 app.run(port=5000, debug=True) #debug gives better error messages | 30 - warning: bad-indentation
31 - warning: bad-indentation
37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
42 - warning: bad-indentation
44 - warning: bad-indentation
45 - warning: bad-indentation
47 - warning: bad-indentation
49 - warning: bad-indentation
50 - warning: bad-indentation
52 - warning: bad-indentation
54 - warning: bad-indentation
55 - warning: bad-indentation
56 - warning: bad-indentation
57 - warning: bad-indentation
59 - warning: bad-indentation
65 - warning: bad-indentation
70 - warning: bad-indentation
72 - warning: bad-indentation
73 - warning: bad-indentation
74 - warning: bad-indentation
75 - warning: bad-indentation
76 - warning: bad-indentation
77 - warning: bad-indentation
78 - warning: bad-indentation
81 - warning: bad-indentation
82 - warning: bad-indentation
55 - warning: global-statement
59 - error: no-self-argument
80 - refactor: too-few-public-methods
10 - warning: unused-import
|
1 #Dataframes are made up of Series objects. Each Series is labelled 1D numpy array
2
3 import pandas as pd
4 #df is some DataFrame
5 df.head()
6 df.tail()
7
8 df.iloc[1, :]
9 df.loc['row_index', :]
10
11 #to return column info
12 df.info()
13
14 #to convert DataFrame to numpy array:
15 df.values
16
17 #note though that many numpy methods work on pandas dfs
18
19 ########
20 #creating Dataframes from scratch
21 ########
22
23 d = {"col1" :[1,3,4,5], "col2" : [4,5,6,7]}
24 df = pd.DataFrame(d)
25
26 col1 = [1, 3, 5, 6]
27 col2 = [6, 7, 8, 9]
28
29 cols = [col1, col2]
30 indices = ["col1", "col2"]
31
32 d = zip(indices, cols)
33 d = dict(list(d))
34 df = pd.DataFramed
35
36 df.columns = ["newcol1", "newcol2"]
37
38 #Broadcasting
39 df['col3'] = "M"
40
41 d = {"col1" : [1, 3, 4, 5], "col2" : "M"}
42 df = pd.DataFrame(d) #Broadcasts col2 | 5 - error: used-before-assignment
8 - warning: pointless-statement
9 - warning: pointless-statement
15 - warning: pointless-statement
34 - error: no-member
|
1 # Import row from bokeh.layouts
2 from bokeh.layouts import row, column
3
4 # Create the first figure: p1
5 p1 = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)')
6
7 # Add a circle glyph to p1
8 p1.circle('fertility', 'female_literacy', source=source)
9
10 # Create the second figure: p2
11 p2 = figure(x_axis_label='population', y_axis_label='female_literacy (% population)')
12
13 # Add a circle glyph to p2
14 p2.circle('population', 'female_literacy', source=source)
15
16 # Put p1 and p2 into a horizontal row: layout
17 layout = row(p1,p2)
18 #layout = column(p1, p2)
19
20 # Specify the name of the output_file and show the result
21 output_file('fert_row.html')
22 show(layout)
23
24
25 # Import column and row from bokeh.layouts
26 from bokeh.layouts import row, column
27
28 # Make a column layout that will be used as the second row: row2
29 row2 = column([mpg_hp, mpg_weight], sizing_mode='scale_width')
30
31 # Make a row layout that includes the above column layout: layout
32 layout = row([avg_mpg, row2], sizing_mode='scale_width')
33
34 # Specify the name of the output_file and show the result
35 output_file('layout_custom.html')
36 show(layout)
37
38
39
40 # Import gridplot from bokeh.layouts
41 from bokeh.layouts import gridplot
42
43 # Create a list containing plots p1 and p2: row1
44 row1 = [p1, p2]
45
46 # Create a list containing plots p3 and p4: row2
47 row2 = [p3, p4]
48
49 # Create a gridplot using row1 and row2: layout
50 layout = gridplot([row1, row2])
51
52 # Specify the name of the output_file and show the result
53 output_file('grid.html')
54 show(layout)
55
56
57
58 #TABS
59 # Import Panel from bokeh.models.widgets
60 from bokeh.models.widgets import Panel
61
62 # Create tab1 from plot p1: tab1
63 tab1 = Panel(child=p1, title='Latin America')
64
65 # Create tab2 from plot p2: tab2
66 tab2 = Panel(child=p2, title='Africa')
67
68 # Create tab3 from plot p3: tab3
69 tab3 = Panel(child=p3, title='Asia')
70
71 # Create tab4 from plot p4: tab4
72 tab4 = Panel(child=p4, title='Europe')
73
74 # Import Tabs from bokeh.models.widgets
75 from bokeh.models.widgets import Tabs
76
77 # Create a Tabs layout: layout
78 layout = Tabs(tabs=[tab1, tab2, tab3, tab4])
79
80 # Specify the name of the output_file and show the result
81 output_file('tabs.html')
82 show(layout) | 5 - error: undefined-variable
8 - error: undefined-variable
11 - error: undefined-variable
14 - error: undefined-variable
21 - error: undefined-variable
22 - error: undefined-variable
26 - warning: reimported
26 - warning: reimported
29 - error: undefined-variable
29 - error: undefined-variable
32 - error: undefined-variable
35 - error: undefined-variable
36 - error: undefined-variable
47 - error: undefined-variable
47 - error: undefined-variable
53 - error: undefined-variable
54 - error: undefined-variable
69 - error: undefined-variable
72 - error: undefined-variable
81 - error: undefined-variable
82 - error: undefined-variable
|
1
2 # Build a query to count the distinct states values: stmt
3 stmt = select([func.count(census.columns.state.distinct())])
4
5 # Execute the query and store the scalar result: distinct_state_count
6 distinct_state_count = connection.execute(stmt).scalar()
7
8 # Print the distinct_state_count
9 print(distinct_state_count)
10
11 # Import func
12 from sqlalchemy import func
13
14 # Build a query to select the state and count of ages by state: stmt
15 stmt = select([census.columns.state, func.count(census.columns.age)])
16
17 # Group stmt by state
18 stmt = stmt.group_by(census.columns.state)
19
20 # Execute the statement and store all the records: results
21 results = connection.execute(stmt).fetchall()
22
23 # Print results
24 print(results)
25
26 # Print the keys/column names of the results returned
27 print(results[0].keys())
28
29
30 # Import func
31 from sqlalchemy import func
32
33 # Build an expression to calculate the sum of pop2008 labeled as population
34 pop2008_sum = func.sum(census.columns.pop2008).label('population')
35
36 # Build a query to select the state and sum of pop2008: stmt
37 stmt = select([census.columns.state, pop2008_sum])
38
39 # Group stmt by state
40 stmt = stmt.group_by(census.columns.state)
41
42 # Execute the statement and store all the records: results
43 results = connection.execute(stmt).fetchall()
44
45 # Print results
46 print(results)
47
48 # Print the keys/column names of the results returned
49 print(results[0].keys()) | 3 - error: undefined-variable
3 - error: used-before-assignment
3 - error: undefined-variable
6 - error: undefined-variable
15 - error: undefined-variable
15 - error: undefined-variable
15 - error: undefined-variable
18 - error: undefined-variable
21 - error: undefined-variable
31 - warning: reimported
34 - error: undefined-variable
37 - error: undefined-variable
37 - error: undefined-variable
40 - error: undefined-variable
43 - error: undefined-variable
|
1 # -*- coding: utf-8 -*-
2 """
3 Created on Tue Jul 24 15:04:52 2018
4
5 @author: Damien
6 """
7
8 from flask import Flask
9
10 app = Flask(__name__) #unique __name__ - special python variable
11
12 #What requests we need to understand
13 @app.route('/') #http://www.google.com/ - '/' represents the home page [http://www.google.com/maps represents a '/maps' endpoint]
14 def home(): #whatever this does it must return a reponse to the browser
15 return "Hello, world!"
16
17 app.run(port=5000) #app runs on port/area of computer that the computer sends and recieces requests
18
19 #run from conda "python app.py"
20 #copy 127.0.0.1:5000 into browswer (127.0.0.1 is the IP reserved fro your computer) | 15 - warning: bad-indentation
|
1
2 for i in range(50):
3 # Generate bootstrap sample: bs_sample
4 bs_sample = np.random.choice(rainfall, size=len(rainfall))
5
6 # Compute and plot ECDF from bootstrap sample
7 x, y = ecdf(bs_sample)
8 _ = plt.plot(x=x, y=y, marker='.', linestyle='none',
9 color='gray', alpha=0.1)
10
11 # Compute and plot ECDF from original data
12 x, y = ecdf(rainfall)
13 _ = plt.plot(x=x, y=y, marker='.')
14
15 # Make margins and label axes
16 plt.margins(0.02)
17 _ = plt.xlabel('yearly rainfall (mm)')
18 _ = plt.ylabel('ECDF')
19
20 # Show the plot
21 plt.show()
22
23
24 def draw_bs_reps(data, func, size=1):
25 """Draw bootstrap replicates."""
26
27 # Initialize array of replicates: bs_replicates
28 bs_replicates = np.empty(size)
29
30 # Generate replicates
31 for i in range(size):
32 bs_replicates[i] = bootstrap_replicate_1d(data, func) #applies func to bootstrap sample
33
34 return bs_replicates
35
36 # Take 10,000 bootstrap replicates of the mean: bs_replicates
37 bs_replicates = draw_bs_reps(rainfall,np.mean,10000)
38
39 # Compute and print SEM
40 sem = np.std(rainfall) / np.sqrt(len(rainfall))
41 print(sem)
42
43 # Compute and print standard deviation of bootstrap replicates
44 bs_std = np.std(bs_replicates)
45 print(bs_std)
46
47 # Make a histogram of the results
48 _ = plt.hist(bs_replicates, bins=50, normed=True)
49 _ = plt.xlabel('mean annual rainfall (mm)')
50 _ = plt.ylabel('PDF')
51
52 # Show the plot
53 plt.show()
54
55
56
57 # Draw bootstrap replicates of the mean no-hitter time (equal to tau): bs_replicates
58 bs_replicates = draw_bs_reps(nohitter_times, np.mean, 10000)
59
60 # Compute the 95% confidence interval: conf_int
61 conf_int = np.percentile(bs_replicates, [2.5, 97.5])
62
63 # Print the confidence interval
64 print('95% confidence interval =', conf_int, 'games')
65
66 # Plot the histogram of the replicates
67 _ = plt.hist(bs_replicates, bins=50, normed=True)
68 _ = plt.xlabel(r'$\tau$ (games)')
69 _ = plt.ylabel('PDF')
70
71 # Show the plot
72 plt.show()
| 4 - error: undefined-variable
4 - error: undefined-variable
4 - error: undefined-variable
7 - error: undefined-variable
8 - error: undefined-variable
12 - error: undefined-variable
12 - error: undefined-variable
13 - error: undefined-variable
16 - error: undefined-variable
17 - error: undefined-variable
18 - error: undefined-variable
21 - error: undefined-variable
28 - warning: redefined-outer-name
31 - warning: redefined-outer-name
28 - error: undefined-variable
32 - error: undefined-variable
37 - error: undefined-variable
37 - error: undefined-variable
40 - error: undefined-variable
40 - error: undefined-variable
40 - error: undefined-variable
40 - error: undefined-variable
44 - error: undefined-variable
48 - error: undefined-variable
49 - error: undefined-variable
50 - error: undefined-variable
53 - error: undefined-variable
58 - error: undefined-variable
58 - error: undefined-variable
61 - error: undefined-variable
67 - error: undefined-variable
68 - error: undefined-variable
69 - error: undefined-variable
72 - error: undefined-variable
|
1 #If a df is indexed by date-time, we can perform resampling.
2 #Downsampling is when we go to a lower unit, lower unit being one with fewer units in a period (lowere frequency)
3 #Downsample from hours to days
4
5 #Upsampling is the opposite and will introduce Nana, unless otherwise catered for through filling methods
6
7 # Downsample to 6 hour data and aggregate by mean: df1
8 df1 = df.Temperature.resample('6h').mean()
9
10 # Downsample to daily data and count the number of data points: df2
11 df2 = df.Temperature.resample('D').count()
12
13 # Extract temperature data for August: august
14 august = df.Temperature.loc['2010-08']
15
16 # Downsample to obtain only the daily highest temperatures in August: august_highs
17 august_highs = august.resample('D').max()
18
19 # Extract temperature data for February: february
20 february = df.Temperature.loc['2010-02']
21
22 # Downsample to obtain the daily lowest temperatures in February: february_lows
23 february_lows = february.resample('D').min()
24
25 # Extract data from 2010-Aug-01 to 2010-Aug-15: unsmoothed
26 unsmoothed = df['Temperature']['2010-Aug-01':'2010-Aug-15']
27
28 # Apply a rolling mean with a 24 hour window: smoothed
29 smoothed = unsmoothed.rolling(window=24).mean()
30
31 # Create a new DataFrame with columns smoothed and unsmoothed: august
32 august = pd.DataFrame({'smoothed':smoothed, 'unsmoothed':unsmoothed})
33
34 # Plot both smoothed and unsmoothed data using august.plot().
35 august.plot()
36 plt.show()
| 8 - error: undefined-variable
11 - error: undefined-variable
14 - error: undefined-variable
20 - error: undefined-variable
26 - error: undefined-variable
32 - error: undefined-variable
36 - error: undefined-variable
|
1 #To read in
2 df = pd.read_csv('data.csv', parse_dates=True, index_col='Date)
3
4 # Prepare a format string: time_format
5 time_format = '%Y-%m-%d %H:%M'
6
7 # Convert date_list into a datetime object: my_datetimes
8 my_datetimes = pd.to_datetime(date_list, format=time_format)
9
10 # Construct a pandas Series using temperature_list and my_datetimes: time_series
11 time_series = pd.Series(temperature_list, index=my_datetimes)
12
13 # Extract the hour from 9pm to 10pm on '2010-10-11': ts1
14 ts1 = ts0.loc['2010-10-11 21:00:00':'2010-10-11 22:00:00']
15
16 # Extract '2010-07-04' from ts0: ts2
17 ts2 = ts0.loc['2010-07-04']
18
19 # Extract data from '2010-12-15' to '2010-12-31': ts3
20 ts3 = ts0.loc['2010-12-15':'2010-12-31']
21
22 #Sometimes we may wnat to reindex a df using the timeseries index of another df.
23 #python fills in non-matching indices with Nan values
24 # Reindex without fill method: ts3
25 ts3 = ts2.reindex(ts1.index)
26
27 # Reindex with fill method, using forward fill: ts4
28 ts4 = ts2.reindex(ts1.index, method='ffill') | 2 - error: syntax-error
|
1 #indexing as:
2 df[['...', '....']]
3 #returns a DataFrame
4
5 p_counties = election.loc['Perry':'Potter', :]
6 # Slice the row labels 'Potter' to 'Perry' in reverse order: p_counties_rev
7 p_counties_rev = election.loc['Potter':'Perry':-1, :]
8
9 # Slice the columns from the starting column to 'Obama': left_columns
10 left_columns = election.loc[:, :'Obama']
11
12 # Print the output of left_columns.head()
13 print(left_columns.head())
14
15 # Slice the columns from 'Obama' to 'winner': middle_columns
16 middle_columns = election.loc[:, 'Obama':'winner']
17
18 # Print the output of middle_columns.head()
19 print(middle_columns.head())
20
21 # Slice the columns from 'Romney' to the end: 'right_columns'
22 right_columns = election.loc[:, 'Romney':]
23
24 #inddexes are immutables, therefore to change it the whole index needs to be overwritten;
25 # Create the list of new indexes: new_idx
26 new_idx = [ind.upper() for ind in sales.index]
27
28 # Assign new_idx to sales.index
29 sales.index = new_idx
30
31 # Assign the string 'MONTHS' to sales.index.name
32 sales.index.name = 'MONTHS'
33
34 # Print the sales DataFrame
35 print(sales)
36
37 # Assign the string 'PRODUCTS' to sales.columns.name
38 sales.columns.name = 'PRODUCTS'
39
40 # Print the sales dataframe again
41 print(sales) | 2 - warning: pointless-statement
2 - error: undefined-variable
5 - error: undefined-variable
7 - error: undefined-variable
10 - error: undefined-variable
16 - error: undefined-variable
22 - error: undefined-variable
26 - error: undefined-variable
29 - error: undefined-variable
32 - error: undefined-variable
35 - error: undefined-variable
38 - error: undefined-variable
41 - error: undefined-variable
|
1 # Construct arrays of data: dems, reps
2 dems = np.array([True] * 153 + [False] * 91)
3 reps = np.array([True] * 136 + [False] * 35)
4
5 def frac_yea_dems(dems, reps):
6 """Compute fraction of Democrat yea votes."""
7 frac = np.sum(dems) / len(dems)
8 return frac
9
10 # Acquire permutation samples: perm_replicates
11 perm_replicates = draw_perm_reps(dems, reps, frac_yea_dems, 10000)
12
13 # Compute and print p-value: p
14 p = np.sum(perm_replicates <= 153/244) / len(perm_replicates)
15 print('p-value =', p)
16
17
18
19 # Compute the difference in mean sperm count: diff_means
20 diff_means = diff_of_means(control, treated)
21
22 # Compute mean of pooled data: mean_count
23 mean_count = np.mean(np.concatenate([control, treated]))
24
25 # Generate shifted data sets
26 control_shifted = control - np.mean(control) + mean_count
27 treated_shifted = treated - np.mean(treated) + mean_count
28
29 # Generate bootstrap replicates
30 bs_reps_control = draw_bs_reps(control_shifted,
31 np.mean, size=10000)
32 bs_reps_treated = draw_bs_reps(treated_shifted,
33 np.mean, size=10000)
34
35 # Get replicates of difference of means: bs_replicates
36 bs_replicates = bs_reps_control- bs_reps_treated
37
38 # Compute and print p-value: p
39 p = np.sum(bs_replicates >= np.mean(control) - np.mean(treated)) \
40 / len(bs_replicates)
41 print('p-value =', p) | 2 - error: undefined-variable
3 - error: undefined-variable
5 - warning: redefined-outer-name
5 - warning: redefined-outer-name
7 - error: undefined-variable
5 - warning: unused-argument
11 - error: undefined-variable
14 - error: undefined-variable
20 - error: undefined-variable
20 - error: undefined-variable
20 - error: undefined-variable
23 - error: undefined-variable
23 - error: undefined-variable
23 - error: undefined-variable
23 - error: undefined-variable
26 - error: undefined-variable
26 - error: undefined-variable
26 - error: undefined-variable
27 - error: undefined-variable
27 - error: undefined-variable
27 - error: undefined-variable
30 - error: undefined-variable
31 - error: undefined-variable
32 - error: undefined-variable
33 - error: undefined-variable
39 - error: undefined-variable
39 - error: undefined-variable
39 - error: undefined-variable
39 - error: undefined-variable
39 - error: undefined-variable
|
1 # Import create_engine
2 from sqlalchemy import create_engine
3
4 # Create an engine that connects to the census.sqlite file: engine
5 engine = create_engine('sqlite:///census.sqlite')# Create an engine to the census database
6 engine = create_engine('mysql+pymysql://'+'student:datacamp'+'@courses.csrrinzqubik.us-east-1.rds.amazonaws.com:3306/'+'census')
7
8 # Print table names
9 print(engine.table_names())
10
11 #Reflection is the process of reading the database and building the metadata
12 #based on that information. It's the opposite of creating a Table by hand and
13 #is very useful for working with existing databases. To perform reflection, you need to import
14 #the Table object from the SQLAlchemy package. Then, you use this Table object to read
15 #your table from the engine and autoload the columns. Using the Table object in this manner
16 #is a lot like passing arguments to a function. For example, to autoload the columns with the engine,
17 #you have to specify the keyword arguments autoload=True and autoload_with=engine to Table().
18
19 # Import Table
20 from sqlalchemy import Table, MetaData
21
22 metadata = MetaData()
23
24 # Reflect census table from the engine: census
25 census = Table('census', metadata, autoload=True, autoload_with=engine)
26
27 # Print the column names
28 print(census.columns.keys())
29
30 # Print full table metadata
31 print(repr(metadata.tables['census']))
32
33 # Print census table metadata
34 print(repr(census)) | Clean Code: No Issues Detected
|
1
2 # Import numpy as np
3 import numpy as np
4
5 # Create array using np.linspace: x
6 x = np.linspace(0,5,100)
7
8 # Create array using np.cos: y
9 y = np.cos(x)
10
11 # Add circles at x and y
12 p.circle(x,y)
13
14 # Specify the name of the output file and show the result
15 output_file('numpy.html')
16 show(p)
17
18
19 #pandas
20 # Import pandas as pd
21 import pandas as pd
22
23 # Read in the CSV file: df
24 df = pd.read_csv('auto.csv')
25
26 # Import figure from bokeh.plottin
27 from bokeh.plotting import figure
28
29 # Create the figure: p
30 p = figure(x_axis_label='HP', y_axis_label='MPG')
31
32 # Plot mpg vs hp by color
33 p.circle( df['hp'], df['mpg'], color=df['color'], size=10)
34
35 # Specify the name of the output file and show the result
36 output_file('auto-df.html')
37 show(p)
38
39
40 #ColumnDataSource
41 # Import the ColumnDataSource class from bokeh.plotting
42 from bokeh.plotting import ColumnDataSource
43
44 # Create a ColumnDataSource from df: source
45 source = ColumnDataSource(df)
46
47 # Add circle glyphs to the figure p
48 p.circle('Year', 'Time', source=source, color='color',size=8)
49
50 # Specify the name of the output file and show the result
51 output_file('sprint.html')
52 show(p)
| 12 - error: used-before-assignment
15 - error: undefined-variable
16 - error: undefined-variable
36 - error: undefined-variable
37 - error: undefined-variable
51 - error: undefined-variable
52 - error: undefined-variable
|
1 #Used for mereging when there is an ordering (eg dates)
2
3 # Perform the first ordered merge: tx_weather
4 tx_weather = pd.merge_ordered(austin, houston)
5
6 # Print tx_weather
7 print(tx_weather)
8
9 # Perform the second ordered merge: tx_weather_suff
10 tx_weather_suff = pd.merge_ordered(austin, houston, on='date', suffixes=['_aus','_hus'])
11
12 # Print tx_weather_suff
13 print(tx_weather_suff)
14
15 # Perform the third ordered merge: tx_weather_ffill
16 tx_weather_ffill = pd.merge_ordered(austin, houston, on='date', suffixes=['_aus','_hus'], fill_method='ffill')
17
18 # Print tx_weather_ffill
19 print(tx_weather_ffill)
20
21 #Similar to pd.merge_ordered(), the pd.merge_asof() function will also merge
22 #values in order using the on column, but for each row in the left DataFrame,
23 #only rows from the right DataFrame whose 'on' column values are less than the
24 #left value will be kept.
25 #This function can be used to align disparate datetime frequencies without having to first resample.
26
27 oil.head()
28 Date Price
29 0 1970-01-01 3.35
30 1 1970-02-01 3.35
31 2 1970-03-01 3.35
32 3 1970-04-01 3.35
33 4 1970-05-01 3.35
34
35 auto.head()
36 mpg cyl displ hp weight accel yr origin \
37 0 18.0 8 307.0 130 3504 12.0 1970-01-01 US
38 1 15.0 8 350.0 165 3693 11.5 1970-01-01 US
39 2 18.0 8 318.0 150 3436 11.0 1970-01-01 US
40 3 16.0 8 304.0 150 3433 12.0 1970-01-01 US
41 4 17.0 8 302.0 140 3449 10.5 1970-01-01 US
42
43 name
44 0 chevrolet chevelle malibu
45 1 buick skylark 320
46 2 plymouth satellite
47 3 amc rebel sst
48 4 ford torino
49
50
51 # Merge auto and oil: merged
52 merged = pd.merge_asof(auto, oil, left_on='yr', right_on='Date')
53
54 # Print the tail of merged
55 print(merged.tail())
56
57 mpg cyl displ hp weight accel yr origin name \
58 387 27.0 4 140.0 86 2790 15.6 1982-01-01 US ford mustang gl
59 388 44.0 4 97.0 52 2130 24.6 1982-01-01 Europe vw pickup
60 389 32.0 4 135.0 84 2295 11.6 1982-01-01 US dodge rampage
61 390 28.0 4 120.0 79 2625 18.6 1982-01-01 US ford ranger
62 391 31.0 4 119.0 82 2720 19.4 1982-01-01 US chevy s-10
63
64 Date Price
65 387 1982-01-01 33.85
66 388 1982-01-01 33.85
67 389 1982-01-01 33.85
68 390 1982-01-01 33.85
69 391 1982-01-01 33.85
70 # Resample merged: yearly
71 yearly = merged.resample('A', on='Date')[['mpg','Price']].mean()
72
73 # Print yearly
74 print(yearly)
75
76 mpg Price
77 Date
78 1970-12-31 17.689655 3.35
79 1971-12-31 21.111111 3.56
80 1972-12-31 18.714286 3.56
81 1973-12-31 17.100000 3.56
82 1974-12-31 22.769231 10.11
83 1975-12-31 20.266667 11.16
84 1976-12-31 21.573529 11.16
85 1977-12-31 23.375000 13.90
86 1978-12-31 24.061111 14.85
87 1979-12-31 25.093103 14.85
88 1980-12-31 33.803704 32.50
89 1981-12-31 30.185714 38.00
90 1982-12-31 32.000000 33.85
91
92 # print yearly.corr()
93 print(yearly.corr())
94
95 mpg Price
96 Date
97 1970-12-31 17.689655 3.35
98 1971-12-31 21.111111 3.56
99 1972-12-31 18.714286 3.56
100 1973-12-31 17.100000 3.56
101 1974-12-31 22.769231 10.11
102 1975-12-31 20.266667 11.16
103 1976-12-31 21.573529 11.16
104 1977-12-31 23.375000 13.90
105 1978-12-31 24.061111 14.85
106 1979-12-31 25.093103 14.85
107 1980-12-31 33.803704 32.50
108 1981-12-31 30.185714 38.00
109 1982-12-31 32.000000 33.85 | 28 - error: syntax-error
|
1
2 # Append names_1981 after names_1881 with ignore_index=True: combined_names
3 combined_names = names_1881.append(names_1981, ignore_index=True)
4 #ignore_index resets the index, else the indices from the original dfs are placed on top of one another
5
6
7 # Concatenate weather_max and weather_mean horizontally: weather
8 weather = pd.concat([weather_max, weather_mean], axis=1)
9 #axis=1 means concat horizontally (this does something similar to a full outer join)
10 Max TemperatureF Mean TemperatureF
11 Apr 89.0 53.100000
12 Aug NaN 70.000000
13 Dec NaN 34.935484
14 Feb NaN 28.714286
15 Jan 68.0 32.354839
16 Jul 91.0 72.870968
17 Jun NaN 70.133333
18 Mar NaN 35.000000
19 May NaN 62.612903
20 Nov NaN 39.800000
21 Oct 84.0 55.451613
22 Sep NaN 63.766667
23
24 for medal in medal_types:
25
26 # Create the file name: file_name
27 file_name = "%s_top5.csv" % medal
28
29 # Create list of column names: columns
30 columns = ['Country', medal]
31
32 # Read file_name into a DataFrame: df
33 medal_df = pd.read_csv(file_name, header=0, index_col='Country', names=columns) #names sets the column names
34
35 # Append medal_df to medals
36 medals.append(medal_df)
37
38 # Concatenate medals horizontally: medals
39 medals = pd.concat(medals, axis='columns') #same as axis=1
40
41 # Print medals
42 print(medals)
43
44
45 #using multi level indexes:
46 for medal in medal_types:
47
48 file_name = "%s_top5.csv" % medal
49
50 # Read file_name into a DataFrame: medal_df
51 medal_df = pd.read_csv(file_name, index_col='Country')
52
53 # Append medal_df to medals
54 medals.append(medal_df)
55
56 # Concatenate medals: medals
57 medals = pd.concat(medals, axis='rows', keys=['bronze', 'silver', 'gold'])
58
59 # Print medals in entirety
60 print(medals)
61 Total
62 Country
63 bronze United States 1052.0
64 Soviet Union 584.0
65 United Kingdom 505.0
66 France 475.0
67 Germany 454.0
68 silver United States 1195.0
69 Soviet Union 627.0
70 United Kingdom 591.0
71 France 461.0
72 Italy 394.0
73 gold United States 2088.0
74 Soviet Union 838.0
75 United Kingdom 498.0
76 Italy 460.0
77 Germany 407.0
78
79 # Sort the entries of medals: medals_sorted
80 medals_sorted = medals.sort_index(level=0)
81
82 # Print the number of Bronze medals won by Germany
83 print(medals_sorted.loc[('bronze','Germany')])
84
85 # Print data about silver medals
86 print(medals_sorted.loc['silver'])
87
88 # Create alias for pd.IndexSlice: idx
89 #A slicer pd.IndexSlice is required when slicing on the inner level of a MultiIndex
90 idx = pd.IndexSlice
91
92 # Print all the data on medals won by the United Kingdom
93 print(medals_sorted.loc[idx[:,'United Kingdom'], :])
94
95 # Make the list of tuples: month_list
96 month_list = [('january', jan), ('february', feb), ('march', mar)]
97
98 # Create an empty dictionary: month_dict
99 month_dict = {}
100
101 for month_name, month_data in month_list:
102
103 # Group month_data: month_dict[month_name]
104 month_dict[month_name] = month_data.groupby('Company').sum()
105
106 # Concatenate data in month_dict: sales
107 sales = pd.concat(month_dict)
108
109 # Print sales
110 print(sales)
111 Units
112 Company
113 february Acme Coporation 34
114 Hooli 30
115 Initech 30
116 Mediacore 45
117 Streeplex 37
118 january Acme Coporation 76
119 Hooli 70
120 Initech 37
121 Mediacore 15
122 Streeplex 50
123 march Acme Coporation 5
124 Hooli 37
125 Initech 68
126 Mediacore 68
127 Streeplex 40
128 # Print all sales by Mediacore
129 idx = pd.IndexSlice
130 print(sales.loc[idx[:, 'Mediacore'], :]) | 10 - error: syntax-error
|
1 from flask_restful import Resource, reqparse
2 from flask_jwt import jwt_required
3 import sqlite3
4
5
6 class Item(Resource):
7 parser = reqparse.RequestParser() #This prevents code duplication and now belongs to the Item class
8 parser.add_argument('price',
9 type = float,
10 required = True,
11 help = "This field cannot be left blank")
12
13
14 @jwt_required()
15 def get(self, name):
16 item = self.find_by_name(name)
17
18 #http://127.0.0.1:5000/item/wine?price=17 will pass 17 to the args
19 #args = Item.parser.parse_args()
20 #print(args['price'])
21
22
23 if item is not None:
24 return item, 200
25 else:
26 return {"message" : "Item not found"}, 404
27
28 @classmethod
29 def find_by_name(cls, name):
30 connection = sqlite3.connect('data.db')
31 cursor = connection.cursor()
32
33 select_query = "SELECT * FROM items WHERE name = ?"
34 result = cursor.execute(select_query, (name,))
35 item_in_db = result.fetchone()
36 connection.close()
37
38 if item_in_db is not None:
39 return {'item' : {'name' : item_in_db[0], 'price': item_in_db[1]}}
40
41 #We could use the get() method but that requires a JWT
42 #Thus we use the alternative class method
43 def post(self, name):
44
45 item = self.find_by_name(name)
46 if item is not None:
47 return {"message":"item already in database"}, 400
48
49 data = Item.parser.parse_args()
50 item = {'name' : name, 'price': data['price']}
51
52 try:
53 self.insert_item(item)
54 except:
55 return {"message" : "An error occurred"}, 500
56
57 return {'name' : name, 'price' : data['price']}, 201 #201 is code for created
58
59
60 @classmethod
61 def insert_item(cls, item):
62 connection = sqlite3.connect('data.db')
63 cursor = connection.cursor()
64
65 insert_query = "INSERT INTO items VALUES (?, ?)"
66 cursor.execute(insert_query, (item['name'], item['price']))
67
68 connection.commit()
69 connection.close()
70
71 def delete(self, name):
72
73 connection = sqlite3.connect('data.db')
74 cursor = connection.cursor()
75
76 delete_query = "DELETE FROM items WHERE name = ?"
77
78 cursor.execute(delete_query, (name,))
79
80 connection.commit()
81 connection.close()
82
83 return {"message" : "Item deleted"}
84
85 def put(self, name):
86
87 item = self.find_by_name(name)
88 data = Item.parser.parse_args()
89 updated_item = {'name' : name, 'price': data['price']}
90
91 if item is None:
92 try:
93 self.insert_item(updated_item)
94 except:
95 {"message" : "an error occurred"}, 500
96 else:
97 try:
98 self.update(updated_item)
99 except:
100 {"message" : "an error occurred"}, 500
101
102 return updated_item, 201 #201 is code for created
103
104
105 @classmethod
106 def update(cls, item):
107 connection = sqlite3.connect('data.db')
108 cursor = connection.cursor()
109
110 insert_query = "UPDATE items SET price = ? WHERE name = ?"
111 cursor.execute(insert_query, (item['price'], item['name']))
112
113 connection.commit()
114 connection.close()
115
116
117
118 class ItemList(Resource):
119 def get(self):
120 connection = sqlite3.connect('data.db')
121 cursor = connection.cursor()
122
123 query = "SELECT * FROM items"
124
125 result = cursor.execute(query)
126 items = result.fetchall()
127 connection.close()
128
129 if items is not None:
130 return {'items' : items}
131 else:
132 return {"message" : "No items in database"}
133 | 7 - warning: bad-indentation
8 - warning: bad-indentation
14 - warning: bad-indentation
15 - warning: bad-indentation
16 - warning: bad-indentation
23 - warning: bad-indentation
24 - warning: bad-indentation
25 - warning: bad-indentation
26 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
31 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
35 - warning: bad-indentation
36 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
43 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
49 - warning: bad-indentation
50 - warning: bad-indentation
52 - warning: bad-indentation
53 - warning: bad-indentation
54 - warning: bad-indentation
55 - warning: bad-indentation
57 - warning: bad-indentation
60 - warning: bad-indentation
61 - warning: bad-indentation
62 - warning: bad-indentation
63 - warning: bad-indentation
65 - warning: bad-indentation
66 - warning: bad-indentation
68 - warning: bad-indentation
69 - warning: bad-indentation
71 - warning: bad-indentation
73 - warning: bad-indentation
74 - warning: bad-indentation
76 - warning: bad-indentation
78 - warning: bad-indentation
80 - warning: bad-indentation
81 - warning: bad-indentation
83 - warning: bad-indentation
85 - warning: bad-indentation
87 - warning: bad-indentation
88 - warning: bad-indentation
89 - warning: bad-indentation
91 - warning: bad-indentation
92 - warning: bad-indentation
93 - warning: bad-indentation
94 - warning: bad-indentation
95 - warning: bad-indentation
96 - warning: bad-indentation
97 - warning: bad-indentation
98 - warning: bad-indentation
99 - warning: bad-indentation
100 - warning: bad-indentation
102 - warning: bad-indentation
105 - warning: bad-indentation
106 - warning: bad-indentation
107 - warning: bad-indentation
108 - warning: bad-indentation
110 - warning: bad-indentation
111 - warning: bad-indentation
113 - warning: bad-indentation
114 - warning: bad-indentation
119 - warning: bad-indentation
120 - warning: bad-indentation
121 - warning: bad-indentation
123 - warning: bad-indentation
125 - warning: bad-indentation
126 - warning: bad-indentation
127 - warning: bad-indentation
129 - warning: bad-indentation
130 - warning: bad-indentation
131 - warning: bad-indentation
132 - warning: bad-indentation
23 - refactor: no-else-return
29 - refactor: inconsistent-return-statements
54 - warning: bare-except
94 - warning: bare-except
95 - warning: pointless-statement
99 - warning: bare-except
100 - warning: pointless-statement
129 - refactor: no-else-return
118 - refactor: too-few-public-methods
|
1 #Sometimes we may want multiple row indexes in a heirachical order
2 # Set the index to be the columns ['state', 'month']: sales
3 sales = sales.set_index(['state', 'month'])
4
5 # Sort the MultiIndex: sales
6 sales = sales.sort_index()
7
8 sales =
9 eggs salt spam
10 state month
11 CA 1 47 12.0 17
12 2 110 50.0 31
13 NY 1 221 89.0 72
14 2 77 87.0 20
15 TX 1 132 NaN 52
16 2 205 60.0 55
17
18 # Look up data for NY in month 1: NY_month1
19 NY_month1 = sales.loc[('NY', 1)]
20
21 # Look up data for CA and TX in month 2: CA_TX_month2
22 CA_TX_month2 = sales.loc[(['CA', 'TX'], 2),:]
23
24 # Look up data for all states in month 2: all_month2
25 all_month2 = sales.loc[(slice(None), 2),:] | 8 - error: syntax-error
|
1 import pandas as pd
2
3 df = pd.read_csv('....')
4
5 df.head()
6 df.tail()
7 df.columns
8 df.shape
9
10 #Display summary stats of numeric columns
11 df.describe()
12
13
14 #Display frequencies of categorical columns
15 df['Borough'].value_counts(dropna=False)
16
17 #display means and counts of columns
18 df[['col1', 'col2']].count()
19 df[['col1', 'col2']].mean()
20
21 df['2015'].quantile([0.05, 0.95])
22
23 # Import matplotlib.pyplot
24 import matplotlib.pyplot as plt
25
26 # Plot the histogram
27 df['Existing Zoning Sqft'].plot(kind='hist', rot=70, logx=True, logy=True)
28
29 # Display the histogram
30 plt.show()
31
32 # Create the boxplot
33 df.boxplot(column='initial_cost', by='Borough', rot=90)
34
35 # Display the plot
36 plt.show() | 7 - warning: pointless-statement
8 - warning: pointless-statement
|
1 import os
2 import sqlite3
3
4 db_path = "Autocomplete/names.sqlite"
5 os.remove(db_path)
6
7 db = sqlite3.connect(db_path)
8
9 db.execute("pragma synchronous=off")
10 db.execute("pragma journal_mode=memory")
11 db.execute("pragma temp_store=memory")
12
13 db.execute("create table names (name text)")
14 db.execute("create table parts (part text collate nocase)")
15 db.execute("""create table names_parts (part_id integer, name_id integer,
16 foreign key(name_id) references names(rowid),
17 foreign key(part_id) references parts(rowid))
18 """)
19 db.execute("create index parts_idx on parts (part)")
20 db.execute("create index names_parts_idx on names_parts (part_id, name_id)")
21
22 c = db.cursor()
23
24 all_parts = {}
25
26 for name in open("Autocomplete/fake-full-names.txt", "r"):
27 name = name.replace("\n", "")
28
29 c.execute("insert into names values (?)", (name,))
30 name_id = c.lastrowid
31 for part in name.split(" "):
32 if len(part) > 1:
33 if part in all_parts:
34 part_id = all_parts[part]
35 else:
36 c.execute("insert into parts values(?)", (part,))
37 part_id = c.lastrowid
38
39 c.execute("insert into names_parts values (?, ?)", (part_id, name_id))
40
41 db.commit()
42 db.close()
| 26 - refactor: consider-using-with
26 - warning: unspecified-encoding
|
1 import numpy as np
2 import pandas as pd
3 import os
4 from random import shuffle
5
6
7
8 def generate_proposals(start_gt, end_gt, label, n_frame, alpha=5, beta=2.5, n_to_generate=100):
9 duration = end_gt - start_gt
10 proposals = []
11
12 while n_to_generate:
13 iou = np.random.beta(alpha, beta)
14 not_success = True
15 while not_success:
16 is_start = np.random.randint(2)
17 endpoint1 = np.random.randint(start_gt, end_gt)
18 if is_start:
19 start_ps = endpoint1
20 intersection = end_gt - start_ps
21 if intersection / duration < iou:
22 continue
23 x = (intersection - duration * iou) / iou
24 end_ps = round(end_gt + x)
25 if end_ps > n_frame:
26 continue
27 else:
28 end_ps = endpoint1
29 intersection = end_ps - start_gt
30 x = (intersection - duration * iou) / iou
31 if intersection / duration < iou:
32 continue
33 start_ps = round(start_gt - x)
34 if start_ps < 0:
35 continue
36 not_success = False
37 n_to_generate = n_to_generate - 1
38 proposals.append([label, iou, intersection/(end_ps - start_ps), start_ps, end_ps])
39 return proposals
40
41
42 def generate_proposal_file_per_video(index, video_path, gt_path, mapping, f, n_ps_per_gt):
43 video = pd.read_csv(gt_path, header=None)
44 video = video[video.columns[0]].values.tolist()
45 n_frame = len(video)
46 current_label = video[0]
47 start_idx = 0
48 n_gt = 0
49 gt=[]
50 proposals = []
51 for i in range(n_frame):
52 if video[i] == current_label:
53 continue
54 else:
55 end_idx = i - 1
56 label = mapping[current_label]
57
58 if label != 0:
59 n_gt = n_gt + 1
60 gt.append([label, start_idx, end_idx])
61 print(current_label, mapping[current_label], start_idx, end_idx)
62 start_idx = i
63 current_label = video[i]
64
65 print(len(proposals))
66
67 f.write("#%s\n" %index)
68 f.write(video_path + "\n")
69 f.write(str(n_frame)+"\n" + "1" + "\n")
70 f.write(str(n_gt) + "\n")
71 for i in range(n_gt):
72 f.write(str(gt[i][0]) + " " + str(gt[i][1]) + " "+ str(gt[i][2]) + "\n")
73 ps = generate_proposals(start_gt=gt[i][1], end_gt=gt[i][2], label=gt[i][0], n_frame=n_frame,
74 n_to_generate=n_ps_per_gt)
75 proposals.extend(ps)
76 shuffle(proposals)
77 f.write(str(len(proposals)) + "\n")
78 for i in range(len(proposals)):
79 f.write(str(proposals[i][0]) + " " + str(proposals[i][1]) + " " + str(proposals[i][2]) + " " +
80 str(proposals[i][3]) + " " + str(proposals[i][4]) + "\n")
81
82
83
84
85
86
87 def main():
88 path = "CS6101/"
89 mapping_filepath = path + "splits/mapping_bf.txt"
90 mapping_df = pd.read_csv(mapping_filepath, header=None, sep=" ")
91
92 mapping = dict(zip(mapping_df[mapping_df.columns[1]], mapping_df[mapping_df.columns[0]]))
93 print(mapping)
94
95 videos = os.listdir(path + "groundtruth")
96 print()
97 print(len(videos))
98
99 output_filepath = "data/breakfast_proposal.txt"
100 f = open(output_filepath, "w")
101 for i in range(len(videos)):
102 generate_proposal_file_per_video(i, video_path= path + "groundtruth/" + videos[i],
103 gt_path=path + "groundtruth/" + videos[i],
104 mapping=mapping,
105 f=f,
106 n_ps_per_gt=100)
107 f.close()
108
109 if __name__ == '__main__':
110 main()
111
112
113
114
115
| 8 - refactor: too-many-arguments
8 - refactor: too-many-positional-arguments
8 - refactor: too-many-locals
42 - refactor: too-many-arguments
42 - refactor: too-many-positional-arguments
42 - refactor: too-many-locals
52 - refactor: no-else-continue
100 - warning: unspecified-encoding
100 - refactor: consider-using-with
|
1 """
2 *This module was create for Data Mining subject in Universidad Autonóma de Chihuahua
3 *Professor: M.I.C Normando Ali Zubia Hernández
4
5 Module information:
6 The principal functions of this module are:
7 *Create violin graphs
8 *Create box-Graphs
9 *Create Histograms
10
11 Information contact:
12 email: azubiah@uach.mx
13
14 """
15
16 import pandas as pd
17 import matplotlib.pyplot as plt
18 import numpy as np
19 from pandas.tools.plotting import scatter_matrix
20
21 def open_file(fileName):
22 '''
23 This method will open a file in csv format
24 :param fileName: file to open (Complete PATH)
25 :return: Pandas Data Frame
26 '''
27 #TODO csv file validation
28
29 data = pd.read_json(fileName)
30
31 return data
32
33 def create_histogram(data):
34 data.hist(column = 'bedrooms')
35
36 plt.show()
37
38 def create_density_plot(data):
39 data.plot(kind='density', subplots=True, layout=(3, 3), sharex=False)
40 plt.show()
41
42 def create_whisker_plots(data):
43 data.plot(kind='box', subplots=True, layout=(3, 3), sharex=False, sharey=False)
44 plt.show()
45
46 def show_data_info(data):
47 print("Number of instance: " + str(data.shape[0]))
48 print("Number of fetures: " + str(data.shape[1]))
49
50 print('------------------------------------------')
51
52 print("Initial instances:\n")
53 print(data.head(10))
54
55 print("Numerical Information:\n")
56 numerical_info = data.iloc[:, :data.shape[1]]
57 print(numerical_info.describe())
58
59 def get_feature_subset(data, *args):
60 featureDict = []
61 for arg in args:
62 featureDict.append(arg)
63
64 subset = data[featureDict]
65
66 return subset
67
68 def delete_column(data, *args):
69 for arg in args:
70 data = data.drop(arg, 1)
71
72 return data
73
74 def delete_missing_objects(data, type):
75 type = 0 if type == 'instance' else 1
76
77 data = data.dropna(axis = type)
78
79 return data
80
81 def replace_missing_values_with_constant(data, column, constant):
82 temp = data[column].fillna(constant)
83 data[column] = temp
84
85 return data
86
87 def replace_missing_values_with_mean(data, column):
88 temp = data[column].fillna(data[column].mean())
89 data[column] = temp
90
91 return data
92
93 def numero_banios_influye_precio(data):
94
95 numbBath = data['bathrooms'].value_counts()
96 numbBathKeys = numbBath.keys()
97
98 priceArray = []
99 for number in numbBathKeys:
100 subset = data.loc[data['bathrooms'] == number]
101 print('Numero de banios:' + str(number))
102 print(subset['price'])
103 priceArray.append(subset["price"].mean())
104
105 print(numbBathKeys)
106 print(priceArray)
107
108 width = .2
109 plt.bar(numbBathKeys, priceArray, width, color="blue")
110
111 plt.ylabel('precio')
112 plt.xlabel('#banios')
113 plt.title('banios inlfuye precio')
114 plt.xticks(np.arange(0, max(numbBathKeys), .5))
115 plt.yticks(np.arange(0, 60000, 5000))
116
117
118 plt.show()
119
120 def numero_habitaciones_influye_precio(data):
121
122 numbHab = data['bedrooms'].value_counts()
123 numbHabKeys = numbHab.keys()
124
125 priceArray = []
126 for number in numbHabKeys:
127 subset = data.loc[data['bedrooms'] == number]
128 print('Numero de habitaciones:' + str(number))
129 print(subset['price'])
130 priceArray.append(subset["price"].mean())
131
132 print(numbHabKeys)
133 print(priceArray)
134
135 width = .2
136 plt.bar(numbHabKeys, priceArray, width, color="blue")
137
138 plt.ylabel('precio')
139 plt.xlabel('#habitaciones')
140 plt.title('Habitaciones influye precio')
141 plt.xticks(np.arange(0, max(numbHabKeys), .5))
142 plt.yticks(np.arange(0, 15000, 1000))
143
144
145 plt.show()
146
147 if __name__ == '__main__':
148 filePath = "train.json"
149
150 data = open_file(filePath)
151
152
153
154 #headers = [x for x in data]
155 #print(headers)
156 #for head in headers:
157 # if head != 'description' and head != 'features' and head != 'photos':
158 # print(data[head].value_counts())
159 #print(data.head)
160 #show_data_info(data)
161 #print(data[0:10])
162
163 #numero_banios_influye_precio(data)
164 numero_habitaciones_influye_precio(data)
165
166 #create_histogram(data)
167 #create_density_plot(data)
168 #create_whisker_plots(data)
| 27 - warning: fixme
19 - error: no-name-in-module
29 - warning: redefined-outer-name
33 - warning: redefined-outer-name
38 - warning: redefined-outer-name
42 - warning: redefined-outer-name
46 - warning: redefined-outer-name
59 - warning: redefined-outer-name
68 - warning: redefined-outer-name
74 - warning: redefined-outer-name
74 - warning: redefined-builtin
81 - warning: redefined-outer-name
87 - warning: redefined-outer-name
93 - warning: redefined-outer-name
120 - warning: redefined-outer-name
19 - warning: unused-import
|
1 import pandas as pd
2 import matplotlib.pyplot as ptl
3 import math as mt
4
5 def open_file(fileName):
6 data = pd.read_csv(fileName)
7 return data
8
9 def show_data_info(data):
10 print("Number of instance:" + str(data.shape[0]))
11 print("Number of features:" + str(data.shape[1]))
12 print("------------------------------------------")
13
14 print("Initial instance:\n")
15 print(data)
16
17 print("Numerical info:\n")
18 numerical_info = data.iloc[:, :data.shape[1]]
19 print(numerical_info.describe())
20
21 def count_words(data, column):
22 temp = []
23 array = []
24 for x in range(len(data)):
25 array = data.iloc[x][column].split(' ')
26 temp.append(len(array))
27 data[column] = temp
28 return data
29
30 def save(data):
31 data.to_csv('clean.csv', index = False)
32
33 if __name__ == '__main__':
34 data = open_file('train.csv')
35 show_data_info(data)
36 #save(data);
| 6 - warning: redefined-outer-name
9 - warning: redefined-outer-name
21 - warning: redefined-outer-name
30 - warning: redefined-outer-name
2 - warning: unused-import
3 - warning: unused-import
|
1 """
2 Author: Normando Ali Zubia Hernández
3
4 This file is created to explain the use of normalization
5 with different tools in sklearn library.
6
7 Every function contained in this file belongs to a different tool.
8 """
9
10 from sklearn import preprocessing
11
12 import pandas as pd
13 import numpy
14
15 def z_score_normalization(data):
16 # import data
17 X = data[:,0:-2]
18 Y = numpy.asarray(data[:,-1], dtype="int16")
19
20 # First 10 rows
21 print('Training Data:\n\n' + str(X[:10]))
22 print('\n')
23 print('Targets:\n\n' + str(Y[:10]))
24
25 # Data standarization
26 standardized_data = preprocessing.scale(X)
27
28 # First 10 rows of new feature vector
29 print('\nNew feature vector:\n')
30 print(standardized_data[:10])
31
32 def min_max_scaler(data):
33 # import data
34 X = data[:,0:-2]
35 Y = numpy.asarray(data[:,-1], dtype="int16")
36
37 # First 10 rows
38 print('Training Data:\n\n' + str(X[:10]))
39 print('\n')
40 print('Targets:\n\n' + str(Y[:10]))
41
42 # Data normalization
43 min_max_scaler = preprocessing.MinMaxScaler()
44
45 min_max_scaler.fit(X)
46
47 # Model information:
48 print('\nModel information:\n')
49 print('Data min: ' + str(min_max_scaler.data_min_))
50 print('Data max: ' + str(min_max_scaler.data_max_))
51
52 new_feature_vector = min_max_scaler.transform(X)
53
54 # First 10 rows of new feature vector
55 print('\nNew feature vector:\n')
56 print(new_feature_vector[:10])
57
58 def convert_data_to_numeric(data):
59 numpy_data = data.values
60
61 for i in range(len(numpy_data[0])):
62 temp = numpy_data[:,i]
63 dict = numpy.unique(numpy_data[:,i])
64 # print(dict)
65 for j in range(len(dict)):
66 # print(numpy.where(numpy_data[:,i] == dict[j]))
67 temp[numpy.where(numpy_data[:,i] == dict[j])] = j
68
69 numpy_data[:,i] = temp
70
71 return numpy_data
72
73 if __name__ == '__main__':
74 data = pd.read_csv('train.csv')
75 data = convert_data_to_numeric(data)
76 z_score_normalization(data)
77 min_max_scaler(data)
| 15 - warning: redefined-outer-name
32 - warning: redefined-outer-name
43 - warning: redefined-outer-name
58 - warning: redefined-outer-name
63 - warning: redefined-builtin
|
1 # Create choropleth map
2 #
3 # Date: Dec 2017
4
5 import plotly as py
6 import pandas as pd
7 import pycountry
8
9 def get_data(filename):
10 '''
11 Loads data from file and cleans it.
12
13 Inputs:
14 filename: file directory
15
16 Returns: a cleaned dataframe
17 '''
18 df = pd.read_csv(filename)
19
20 # Reset header row
21 df.columns = df.iloc[0]
22 df = df[1:]
23
24 # Rename column
25 df = df.rename(index=str, columns={"2016": "Estimated no. w/ HIV"})
26
27 # Remove all parenthesis and square brackets
28 df['Country'] = df.Country.apply(lambda x: x.replace(' (',', ').replace(')',''))
29 # Alternative to above: df['Country'] = df['Country'].str.replace(r"\s+\((.*)\)", r", \1")
30 df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace(r"\s+\[.*\]","")
31
32 # Lower case, remove spaces between numbers, remove strings and set to 0
33 df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace(" ","")
34 df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.strip("<>")
35 df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace("Nodata","")
36
37 # Modify names of countries not recognized by pycountry
38 df['Country'] = df['Country'].replace('Democratic Republic of the Congo','Congo, the Democratic Republic of the')
39 df['Country'] = df['Country'].replace('Republic of Korea',"Korea, Democratic People's Republic of")
40 return df
41
42
43 def get_country_code(x):
44 '''
45 Finds the 3 letter alpha code for a country.
46
47 Inputs:
48 x: country name
49
50 Returns: alpha_3 code for the country
51 '''
52 if pycountry.countries.lookup(x) != None:
53 return pycountry.countries.lookup(x).alpha_3
54
55
56 # Get and clean data
57 df = get_data('data.csv')
58 df['Code'] = df['Country'].apply(get_country_code)
59
60 # Make choropleth map using data
61 data = [ dict(
62 type = 'choropleth',
63 locations = df['Code'],
64 z = df['Estimated no. w/ HIV'],
65 text = df['Country'],
66 colorscale = [[0,"#c6dbef"],[0.2,"#6baed6"],[0.4,"#4292c6"],\
67 [0.6,"#2171b5"],[0.8,"#0e5693"],[1,"#013e7c"]],
68 autocolorscale = False,
69 reversescale = False,
70 marker = dict(
71 line = dict (
72 color = 'rgb(180,180,180)',
73 width = 0.5
74 ) ),
75 colorbar = dict(
76 autotick = False,
77 title = 'Estimated no.<br>w/ HIV'),
78 ) ]
79
80 layout = dict(
81 title = 'Number of people (all ages) living with HIV<br>Estimates by country<br><br>\
82 [Source:<a href="http://apps.who.int/gho/data/node.main.620?lang=en"> World Health Organization</a>]',
83 margin = dict(
84 l=10,
85 r=10,
86 b=50,
87 t=150,
88 pad=4
89 ),
90 geo = dict(
91 showframe = False,
92 showcoastlines = False,
93 projection = dict(
94 type = 'Mercator'
95 )
96 )
97 )
98
99 # Display map
100 fig = dict( data=data, layout=layout )
101 py.offline.plot( fig, validate=False, filename='d3-world-map' )
| 18 - warning: redefined-outer-name
43 - refactor: inconsistent-return-statements
61 - refactor: use-dict-literal
70 - refactor: use-dict-literal
71 - refactor: use-dict-literal
75 - refactor: use-dict-literal
80 - refactor: use-dict-literal
83 - refactor: use-dict-literal
90 - refactor: use-dict-literal
93 - refactor: use-dict-literal
100 - refactor: use-dict-literal
|
1 import requests
2 from lxml import etree
3 import pymysql
4
5 url = 'http://data.10jqka.com.cn/funds/ddzz/#refCountId=db_50741cd6_397,db_509381c1_860'
6 headers = {
7 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1;WOW64; rv:6.0) '
8 'Gecko/20100101 Firefox/6.0',
9 }
10
11 html = requests.get(url,headers=headers).text
12 parse_html = etree.HTML(html)
13
14 num_list = parse_html.xpath('//tbody/tr/td[2]/a/text()')
15 name_list = parse_html.xpath('//tbody/tr/td[3]/a/text()')
16 stacks = []
17 count = 0
18 for i in range(len(num_list)):
19 if count==20:
20 break
21 demo = [name_list[i],num_list[i],]
22 if demo not in stacks:
23 count+=1
24 stacks.append(demo)
25 else:
26 continue
27 print(stacks)
28 print(len(stacks))
29
30
31 # [['300785', 'N值得买'], ['002105', '信隆健康'], ['002453', '华软科技'], ['300167', '迪威迅'], ['600078', '澄星股份'], ['002473', '圣莱达'], ['002225', '濮耐股份'], ['000586', '汇源通信'], ['002124', '天邦股份'], ['300527', '中国应急'], ['603189', '网达软件'], ['300378', '鼎捷软件'], ['300417', '南华仪器'], ['300632', '光莆股份'], ['300424', '航新科技'], ['002915', '中欣氟材'], ['300769', '德方纳米'], ['603068', '博通集成'], ['002312', '三泰控股'], ['300253', '卫宁健康']]
32 db = pymysql.connect('localhost','root','123456','SSEC',charset='utf8')
33 cursor = db.cursor()
34 count = 0
35
36 for i in stacks:
37 cursor.execute('select count(id) from stacks')
38 res = cursor.fetchall()
39 if res[0][0] == 20:
40 print('数据已满')
41 break
42 try:
43
44 cursor.execute('insert into stacks values(Null,%s,%s)',[i[0],i[1]])
45 db.commit()
46 count += 1
47 print(count/20*100,'%--完成')
48 except Exception as e:
49 print(e)
50 result = input('>>r键返回')
51 if result == 'r':
52 db.rollback()
53 break
54 else:
55 continue
56 cursor.execute('select * from stacks')
57 res = cursor.fetchall()
58 print(res)
59 print(len(res))
60 cursor.close()
61 db.close()
62 for i in range(20):
63 print(i//4+1,i%4+1,end=' ')
64
65
66
| 11 - warning: missing-timeout
48 - warning: broad-exception-caught
51 - refactor: no-else-break
|
1 dic = {
2
3 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
4 'Accept-Encoding':'gzip, deflate',
5 'Accept-Language':'zh-CN,zh;q=0.9',
6 'Cache-Control':'max-age=0',
7 'Connection':'keep-alive',
8 'Cookie':'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; s_histo=601678; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',
9 'Host':'www.aigaogao.com',
10 'Referer':'http://www.aigaogao.com/tools/history.html?s=604675',
11 'Upgrade-Insecure-Requests':'1',
12 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
13
14
15 } | Clean Code: No Issues Detected
|
1 import tkinter as tk
2 from threading import Thread
3 from tkinter import messagebox
4 import pymysql as sql
5 import requests
6 import time
7 from lxml import etree
8 import json
9 from stack_detail import *
10 from gevent import monkey # monkey 插件
11 from queue import Queue
12 import os
13
14
15
16 class SSEC:
17 """
18 界面可视化
19 """
20 def __init__(self,window):
21 self.window = window
22 self.table = tk.Label(self.window,bg='#2c3842')
23 self.table.pack(fill='both', expand=1)
24 self.image = tk.PhotoImage(file='stacks_SEG.png')
25 self.db = sql.connect('localhost', 'root', '123456', 'SSEC', charset='utf8')
26 self.cursor = self.db.cursor()
27
28 self.index()
29 def index(self):
30 """
31 主页面,默认有20枚股票
32 :return:
33 """
34 messagebox.showwarning(title='SSEC',message='准备获取实时数据,这会占用您几秒钟,\n点击[ok]开始')
35 self.label = tk.Label(self.table,bg='#2c3842')
36 self.label.pack()
37 self.cursor.execute('select * from stacks') # 从数据库提取股票数据(股票名称与股票编号)
38 self.res = self.cursor.fetchall()
39 count = -1
40 stack_box = {}
41 self.url = 'http://www.aigaogao.com/tools/action.aspx?act=apr'
42 ths = []
43 self.colors = {}
44 for i in self.res:
45 """
46 使用多线程分别爬取20枚股票当前的涨跌状态
47 """
48 name = i[1]
49 number = i[2]
50 t = Thread(target=self.get_color,args=(name,number))
51 ths.append(t)
52 t.start()
53 for i in ths:
54 i.join()
55 for i in self.res:
56 """
57 根据当前的涨跌状态为每一枚股票上色
58 """
59 count += 1
60 name = i[1]
61 number = i[2]
62 stack_box[str(count)] = tk.Label(self.label, bg='#2c3842')
63 stack_box[str(count)].grid(row=count // 4 + 1, column=count % 4 + 1, pady=6, padx=3)
64 tk.Button(stack_box[str(count)], bd=1, text=name, width=10, height=2, font=('黑体', '12', 'bold'), bg=self.colors[name],
65 fg='white', command=lambda num=number, name=name: self.detail(num, name)).grid(row=1, column=1)
66 tk.Button(stack_box[str(count)], bd=1, text='X', bg='#f84b4c', font=('黑体', '12', 'bold'), fg='white',
67 height=2).grid(row=1, column=2)
68 self.entry = tk.Entry(self.table, width=30, font=('黑体', '12', 'bold'))
69 self.entry.place(x=140, y=420)
70 btn = tk.Button(self.table, width=20, text='搜索其他股票', fg='white', bg='#25a9e1')
71 btn.place(x=420, y=420)
72
73 def get_color(self,name,number):
74 """
75 每个线程爬取自己当前股票的颜色值
76 :param name:
77 :param number:
78 :return:
79 """
80 headers = {
81 'Accept': '*/*',
82 'Accept-Encoding': 'gzip, deflate',
83 'Accept-Language': 'zh-CN,zh;q=0.9',
84 'Connection': 'keep-alive',
85 'Content-Length': '11',
86 'Content-type': 'application/x-www-form-urlencoded',
87 'Cookie': 'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; __utmt=1; s_histo=601678; __utmb=90353546.12.10.1563262167; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',
88 'Host': 'www.aigaogao.com',
89 'Origin': 'http://www.aigaogao.com',
90 'Referer': 'http://www.aigaogao.com/tools/history.html?s={}'.format(number),
91 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
92 'X-Prototype-Version': '1.4.0',
93 'X-Requested-With': 'XMLHttpRequest',
94 }
95
96 data = {'s': str(number)}
97 html = requests.post(self.url, headers=headers, data=data).text
98 d = eval(html)
99 num = float(d['data'][0]['change'])
100 if num > 0:
101 self.colors[name] = '#da7252'
102 elif num == 0:
103 self.colors[name] = '#747474'
104 else:
105 self.colors[name] = '#2db67a'
106
107
108
109
110
111
112
113
114
115
116
117 def detail(self,num,name):
118
119 """
120 生成子进程,用于观察股票的走势
121 :param num:
122 :param name:
123 :return:
124 """
125 monkey.patch_all()
126 pid = os.fork()
127 if pid<0:
128 print('子进程创建失败')
129 elif pid==0:
130 Details(num,name)
131 else:
132 while True:
133 time.sleep(0.1)
134 def back_to_index(self):
135 """
136 返回首页函数
137 :return:
138 """
139 os._exit(0) # 结束子进程
140 self.label.destroy()
141 self.index()
142
143 def views(self):
144
145 self.label = tk.Label(self.table, bg='#2c3842',image=self.image)
146 tk.Button(self.table,bg='#25a9e1',command=self.back_to_index)
147
148
149
150
151
152 if __name__=='__main__':
153 window = tk.Tk(className='S-SEC')
154 window.geometry('720x500')
155
156 SSEC(window)
157 window.mainloop()
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
| 9 - warning: wildcard-import
16 - refactor: too-many-instance-attributes
20 - warning: redefined-outer-name
45 - warning: pointless-string-statement
56 - warning: pointless-string-statement
97 - warning: missing-timeout
98 - warning: eval-used
130 - error: undefined-variable
140 - warning: unreachable
145 - warning: attribute-defined-outside-init
7 - warning: unused-import
8 - warning: unused-import
11 - warning: unused-import
|
1 import sys
2 import math
3 import pretty_midi
4
5
6 class Note:
7 def __init__(self, base: str, accidental: str, octave_num: int):
8 self.base = base
9 self.accidental = accidental
10 self.octave_num = octave_num
11
12 def name(self):
13 return self.base + self.accidental + str(self.octave_num)
14
15 def __repr__(self):
16 return self.name()
17
18
19 class MidiGenerator:
20 def __init__(self, instrument_list, bpm, velocity):
21 self.dt4 = int((60 * 1000) / bpm)
22 self.t = 0
23 self.velocity = velocity
24 self.instrument_list = instrument_list
25
26 program = 20 #pretty_midi.instrument_name_to_program(instrument)
27 self.inst = pretty_midi.Instrument(program=program, is_drum=True)
28
29 def append_rest(self, rest_type):
30 dt = self.dt4 * 2**(2 - math.log2(rest_type))
31 self.t += dt
32
33 def append_note(self, note_type, index_list):
34 dt = self.dt4 * 2**(2 - math.log2(note_type))
35 print(index_list, dt)
36 for index in index_list:
37 note_number = pretty_midi.drum_name_to_note_number(
38 self.instrument_list[index])
39 note = pretty_midi.Note(velocity=self.velocity,
40 pitch=note_number,
41 start=self.t / 1000,
42 end=(self.t + dt) / 1000)
43 self.inst.notes.append(note)
44 self.t += dt
45
46 def finish_bar(self):
47 left = self.t % (4 * self.dt4)
48 if left != 0:
49 self.t += left
50
51 def write(self, filename):
52 midi = pretty_midi.PrettyMIDI()
53 midi.instruments.append(self.inst)
54 midi.write(filename)
55
56
57 class EOL(Exception):
58 pass
59
60
61 class Parser:
62 def __init__(self, midi_gen, code):
63 self.cur = 0
64 self.midi_gen = midi_gen
65 self.code = code
66 self.look_ahead = code[0]
67 self.index_list = []
68 self.index_list_reset_flag = False
69 self.last_index = 'c'
70
71 def _match(self, x):
72 if self.look_ahead == x:
73 self._consume()
74 else:
75 raise RuntimeError("not match {}".format(x))
76
77 def _consume(self):
78 self.cur += 1
79 if len(self.code) == self.cur:
80 raise EOL
81 self.look_ahead = self.code[self.cur]
82
83 def parse(self):
84 try:
85 while True:
86 if self.look_ahead == ';':
87 print('end')
88 return
89 elif self.look_ahead == '|':
90 print('finish bar')
91 self.midi_gen.finish_bar()
92 self._consume()
93 elif self.look_ahead in (' ', '\t', '\n'):
94 print('ignore')
95 self._consume()
96 elif self.look_ahead in "0123456789":
97 print('set index', self.look_ahead)
98 if self.index_list_reset_flag:
99 self.index_list = []
100 self.index_list_reset_flag = False
101 index = int(self.look_ahead)
102 self._consume()
103 self.index_list.append(index)
104 self.last_index = index
105 elif self.look_ahead in ".*":
106 print('rest')
107 if self.look_ahead == '.':
108 self.midi_gen.append_rest(16)
109 elif self.look_ahead == '*':
110 self.midi_gen.append_rest(4)
111 self._consume()
112 elif self.look_ahead in "ihqox":
113 self.index_list_reset_flag = True
114 if self.look_ahead == 'i':
115 self.midi_gen.append_note(1, self.index_list)
116 elif self.look_ahead == 'h':
117 self.midi_gen.append_note(2, self.index_list)
118 elif self.look_ahead == 'q':
119 self.midi_gen.append_note(4, self.index_list)
120 elif self.look_ahead == 'o':
121 self.midi_gen.append_note(8, self.index_list)
122 elif self.look_ahead == 'x':
123 self.midi_gen.append_note(16, self.index_list)
124 self._consume()
125 else:
126 print(self.look_ahead)
127 raise
128 except EOL:
129 print("end")
130
131
132 def main(instrument_list: str, bpm: int, filename: str, velocity: int):
133 midi_gen = MidiGenerator(instrument_list, bpm, velocity)
134 parser = Parser(midi_gen, sys.stdin.read())
135 parser.parse()
136 midi_gen.write(filename)
137
138
139 if __name__ == '__main__':
140 import fire
141 fire.Fire(main)
| 86 - refactor: no-else-return
127 - error: misplaced-bare-raise
83 - refactor: too-many-branches
61 - refactor: too-few-public-methods
|
1 import sys
2 import math
3 import pretty_midi
4
5
6 class Note:
7 def __init__(self, base: str, accidental: str, octave_num: int):
8 self.base = base
9 self.accidental = accidental
10 self.octave_num = octave_num
11
12 def name(self):
13 return self.base + self.accidental + str(self.octave_num)
14
15 def __repr__(self):
16 return self.name()
17
18
19 class MidiGenerator:
20 def __init__(self, instrument, bpm, velocity):
21 self.dt4 = int((60 * 1000) / bpm)
22 self.t = 0
23 self.velocity = velocity
24
25 program = pretty_midi.instrument_name_to_program(instrument)
26 self.inst = pretty_midi.Instrument(program=program)
27
28 def append_rest(self, rest_type):
29 dt = self.dt4 * 2**(2 - math.log2(rest_type))
30 self.t += dt
31
32 def append_note(self, note_type, note_list):
33 dt = self.dt4 * 2**(2 - math.log2(note_type))
34 print(note_list, dt)
35 for note in note_list:
36 note_number = pretty_midi.note_name_to_number(note.name())
37 note = pretty_midi.Note(velocity=self.velocity,
38 pitch=note_number,
39 start=self.t/1000,
40 end=(self.t + dt)/1000)
41 self.inst.notes.append(note)
42 self.t += dt
43
44 def finish_bar(self):
45 left = self.t % (4*self.dt4)
46 if left != 0:
47 self.t += left
48
49 def write(self, filename):
50 midi = pretty_midi.PrettyMIDI()
51 midi.instruments.append(self.inst)
52 midi.write(filename)
53
54
55 class EOL(Exception):
56 pass
57
58
59 class Parser:
60 def __init__(self, midi_gen, code):
61 self.cur = 0
62 self.midi_gen = midi_gen
63 self.code = code
64 self.look_ahead = code[0]
65 self.note_list = []
66 self.note_list_reset_flag = False
67 self.last_note_base = 'c'
68 self.last_octave = 3
69
70 def _match(self, x):
71 if self.look_ahead == x:
72 self._consume()
73 else:
74 raise RuntimeError("not match {}".format(x))
75
76 def _consume(self):
77 self.cur += 1
78 if len(self.code) == self.cur:
79 raise EOL
80 self.look_ahead = self.code[self.cur]
81
82 def parse(self):
83 try:
84 while True:
85 if self.look_ahead == '|':
86 print('finish bar')
87 self.midi_gen.finish_bar()
88 self._consume()
89 elif self.look_ahead in (' ', '\t', '\n'):
90 print('ignore')
91 self._consume()
92 elif self.look_ahead in "abcdefg":
93 print('set note', self.look_ahead)
94 if self.note_list_reset_flag:
95 self.note_list = []
96 self.note_list_reset_flag = False
97 note_base = self.look_ahead
98 self._consume()
99 if self.look_ahead in "!#":
100 accidental = self.look_ahead
101 self._consume()
102 else:
103 accidental = ''
104 if self.look_ahead in "0123456789":
105 octave = int(self.look_ahead)
106 self._consume()
107 else:
108 octave = int(self.last_octave)
109 if (ord(self.last_note_base) - ord(note_base)) > 0:
110 print("+1 octave")
111 octave += 1
112 self.note_list.append(
113 Note(note_base.capitalize(), accidental, octave))
114 self.last_note_base = note_base
115 self.last_octave = octave
116 elif self.look_ahead in ".*":
117 print('rest')
118 if self.look_ahead == '.':
119 self.midi_gen.append_rest(16)
120 elif self.look_ahead == '*':
121 self.midi_gen.append_rest(4)
122 self._consume()
123 elif self.look_ahead in "ihqox":
124 self.note_list_reset_flag = True
125 if self.look_ahead == 'i':
126 self.midi_gen.append_note(1, self.note_list)
127 elif self.look_ahead == 'h':
128 self.midi_gen.append_note(2, self.note_list)
129 elif self.look_ahead == 'q':
130 self.midi_gen.append_note(4, self.note_list)
131 elif self.look_ahead == 'o':
132 self.midi_gen.append_note(8, self.note_list)
133 elif self.look_ahead == 'x':
134 self.midi_gen.append_note(16, self.note_list)
135 self._consume()
136 else:
137 raise RuntimeError("invalid charactor: ", self.look_ahead)
138 except EOL:
139 print("end")
140
141
142 def main(instrument: str, bpm: int, filename: str, velocity: int):
143 midi_gen = MidiGenerator(instrument, bpm, velocity)
144 parser = Parser(midi_gen, sys.stdin.read())
145 parser.parse()
146 midi_gen.write(filename)
147
148
149 if __name__ == '__main__':
150 import fire
151 fire.Fire(main)
| 59 - refactor: too-many-instance-attributes
82 - refactor: too-many-branches
82 - refactor: too-many-statements
59 - refactor: too-few-public-methods
|
1 import subprocess
2
3
4 class EOL(Exception):
5 pass
6
7
8 class Parser:
9 def __init__(self, filename, code):
10 self.filename = filename
11 self.cur = 0
12 self.code = code
13 self.look_ahead = code[0]
14
15 self.bpm = 120
16 self.velocity = 90
17 self.instrument = 'Cello'
18 self.note_code = ''
19 self.note_code_reset_flag = False
20
21 self.middle_midi_list = []
22
23 self.drum_mode = False
24 self.instruments = []
25
26 def _match(self, x):
27 if self.look_ahead == x:
28 self._consume()
29 else:
30 raise RuntimeError("not match {}".format(x))
31
32 def _match_str(self, xs):
33 for x in xs:
34 self._match(x)
35
36 def _ignore_ws(self):
37 while self.look_ahead in ' \t\n':
38 self._consume()
39
40 def _ws(self):
41 if self.look_ahead not in ' \t\n':
42 raise RuntimeError("not match white space")
43 self._ignore_ws()
44
45 def _int(self):
46 int_str = ''
47 while self.look_ahead in '0123456789':
48 int_str += self.look_ahead
49 self._consume()
50 return int(int_str)
51
52 def _str(self):
53 s = ''
54 while self.look_ahead.isalpha() or self.look_ahead in "0123456789":
55 s += self.look_ahead
56 self._consume()
57 return s
58
59 def _consume(self):
60 self.cur += 1
61 if len(self.code) == self.cur:
62 raise EOL
63 self.look_ahead = self.code[self.cur]
64
65 def process_note_code(self):
66 print('note code', self.note_code)
67 filename = '{0}-{2}-{1}.mid'.format(self.filename, self.instrument,
68 len(self.middle_midi_list))
69 print("process", self.instrument)
70 if '-' in self.instrument:
71 subprocess.call(
72 'echo \'{code}\' | python3 drum_seq.py \'[{insts}]\' {bpm} {filename} {velocity}'
73 .format(code=self.note_code,
74 insts=','.join(['"' + s + '"' for s in self.instruments]),
75 bpm=self.bpm,
76 velocity=self.velocity,
77 filename=filename),
78 shell=True)
79 else:
80 subprocess.call(
81 'echo \'{code}\' | python3 chord_bass_seq.py \'{inst}\' {bpm} {filename} {velocity}'
82 .format(code=self.note_code,
83 inst=self.instrument,
84 bpm=self.bpm,
85 velocity=self.velocity,
86 filename=filename),
87 shell=True)
88 self.middle_midi_list.append(filename)
89 self.note_code = ''
90
91 def parse(self):
92 try:
93 while True:
94 self._ignore_ws()
95 if self.look_ahead == 'b':
96 self._match_str('bpm')
97 self._ignore_ws()
98 self._match('=')
99 self._ignore_ws()
100 self.bpm = self._int()
101 print('bpm', self.bpm)
102 self._ws()
103 elif self.look_ahead == 'v':
104 self._match_str('velocity')
105 self._ignore_ws()
106 self._match('=')
107 self._ignore_ws()
108 self.velocity = self._int()
109 print('velocity', self.velocity)
110 self._ws()
111 elif self.look_ahead == 'i':
112 if self.note_code != '':
113 self.process_note_code()
114 self._match_str('instrument')
115 self._ignore_ws()
116 self._match('=')
117 self._ignore_ws()
118 if self.drum_mode:
119 self._match('{')
120 self._ignore_ws()
121 instruments = []
122 instruments.append(self._str())
123 self._ignore_ws()
124 while self.look_ahead == ',':
125 self._consume()
126 self._ignore_ws()
127 instruments.append(self._str())
128 self._ignore_ws()
129 self._match('}')
130 self.instruments = instruments
131 self.instrument = '-'.join(instruments)
132 print('instrument detected', self.instrument)
133 else:
134 self.instrument = self._str()
135 print('instrument detected', self.instrument)
136 self._ws()
137 elif self.look_ahead == 'd':
138 print()
139 print(self.code[self.cur:])
140 self._match_str('drum')
141 self.drum_mode = True
142 print("drum_mode on")
143 elif self.look_ahead == '|':
144 print('note code detect')
145 while self.look_ahead != '\n':
146 self.note_code += self.look_ahead
147 self._consume()
148 except EOL:
149 print("end")
150 if self.note_code != '':
151 print('note code', self.note_code)
152 self.process_note_code()
153
154 print("stack", self.middle_midi_list)
155 subprocess.call('python3 stack_midi.py \'[{0}]\' {1}.mid'.format(
156 ','.join(['"' + s + '"' for s in self.middle_midi_list]),
157 self.filename),
158 shell=True)
159
160
161 def main(filename):
162 with open(filename, 'r') as f:
163 code = f.read()
164 parser = Parser(filename, code)
165 try:
166 parser.parse()
167 except RuntimeError as e:
168 print('"{}"'.format(parser.look_ahead))
169 raise e
170
171
172 if __name__ == "__main__":
173 import fire
174 fire.Fire(main)
| 8 - refactor: too-many-instance-attributes
91 - refactor: too-many-branches
91 - refactor: too-many-statements
162 - warning: unspecified-encoding
|
1 import pretty_midi
2
3
4 def main(src_filename_list, dst_filename):
5 dst_midi = pretty_midi.PrettyMIDI()
6 for filename in src_filename_list:
7 src_midi = pretty_midi.PrettyMIDI(filename)
8 dst_midi.instruments.extend(src_midi.instruments)
9 dst_midi.write(dst_filename)
10
11
12 if __name__ == '__main__':
13 import fire
14 fire.Fire(main)
| Clean Code: No Issues Detected
|
1 import pretty_midi
2 from scipy.io import wavfile
3
4
5 def main(midi_filename, wav_filename):
6 midi = pretty_midi.PrettyMIDI(midi_filename)
7 audio = midi.fluidsynth()
8 wavfile.write(wav_filename, 44100, audio)
9
10
11 if __name__ == '__main__':
12 import fire
13 fire.Fire(main)
| Clean Code: No Issues Detected
|
1 """empty message
2
3 Revision ID: fa590b961f4f
4 Revises: ffdc0a98111c
5 Create Date: 2021-08-16 13:55:52.581549
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = 'fa590b961f4f'
14 down_revision = 'ffdc0a98111c'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.create_table('forms',
22 sa.Column('id', sa.Integer(), nullable=False),
23 sa.Column('title', sa.String(length=50), nullable=True),
24 sa.Column('owner_id', sa.Integer(), nullable=False),
25 sa.Column('description', sa.Text(), nullable=True),
26 sa.Column('label_align', sa.String(length=10), nullable=True),
27 sa.Column('description_align', sa.String(length=10), nullable=True),
28 sa.Column('title_align', sa.String(length=10), nullable=True),
29 sa.PrimaryKeyConstraint('id')
30 )
31 # ### end Alembic commands ###
32
33
34 def downgrade():
35 # ### commands auto generated by Alembic - please adjust! ###
36 op.drop_table('forms')
37 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 from .db import db
2
3
4 class Form(db.Model):
5 __tablename__ = 'forms'
6
7 id = db.Column(db.Integer, primary_key=True)
8 title = db.Column(db.String(50), nullable=False)
9 owner_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
10 description = db.Column(db.Text)
11 label_placement = db.Column(db.String(10))
12 description_align = db.Column(db.String(10))
13 title_align = db.Column(db.String(10))
14 # creates a pseudo-column (you won't see it) in the 'fields' table called 'form' that can be assigned a Form instance when creating a Field instance -- 'form' is not the name of this table
15 fields = db.relationship('Field', backref='form')
16
17
18 # field_id = db.Column(db.Integer, db.ForeignKey('fields.id'))
19 # fields = db.relationship("Field", foreign_keys=field_id ,back_populates="forms", lazy="joined")
20
21
22 def to_dict(self):
23 # convert associated fields to serializable dictionaries
24 form_fields = [field.to_dict() for field in self.fields]
25
26 return {
27 'id': self.id,
28 'fields': form_fields,
29 'title': self.title,
30 'owner_id': self.owner_id,
31 'description': self.description,
32 'label_placement': self.label_placement,
33 'description_align': self.description_align,
34 'title_align': self.title_align
35 }
36
37 def __repr__(self):
38 return str(self.to_dict())
| 1 - error: relative-beyond-top-level
|
1 """empty message
2
3 Revision ID: 4df12f583573
4 Revises: 2453c767d036
5 Create Date: 2021-08-21 16:10:57.556468
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = '4df12f583573'
14 down_revision = '2453c767d036'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.alter_column('fields', 'form_id',
22 existing_type=sa.INTEGER(),
23 nullable=True)
24 # ### end Alembic commands ###
25
26
27 def downgrade():
28 # ### commands auto generated by Alembic - please adjust! ###
29 op.alter_column('fields', 'form_id',
30 existing_type=sa.INTEGER(),
31 nullable=False)
32 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 """empty message
2
3 Revision ID: b8ec5632d693
4 Revises: beeeac90e4ba
5 Create Date: 2021-08-20 10:05:24.638509
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = 'b8ec5632d693'
14 down_revision = 'beeeac90e4ba'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.alter_column('fields', 'label',
22 existing_type=sa.VARCHAR(length=55),
23 nullable=False)
24 # ### end Alembic commands ###
25
26
27 def downgrade():
28 # ### commands auto generated by Alembic - please adjust! ###
29 op.alter_column('fields', 'label',
30 existing_type=sa.VARCHAR(length=55),
31 nullable=True)
32 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 from app.models import db, Field
2 from app.models import Form
3
4
5 def seed_fields():
6 form = Form(
7 title='To Test Fields',
8 owner_id=1
9 )
10 db.session.add(form)
11
12 testField = Field(
13 type="text",
14 label="Test Field",
15 required=False,
16 form=form, # creates the form_id / association
17 choices='Some Stuff&&Another choice&&Hello from hell&&'
18 )
19
20 db.session.add(testField)
21 db.session.commit()
22
23
24 def undo_fields():
25 db.session.execute('TRUNCATE fields RESTART IDENTITY CASCADE;')
26 db.session.commit()
| Clean Code: No Issues Detected
|
1 """empty message
2
3 Revision ID: 94f5eda37179
4 Revises: b3e721c02f48
5 Create Date: 2021-08-20 17:15:46.455809
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = '94f5eda37179'
14 down_revision = 'b3e721c02f48'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.alter_column('fields', 'label',
22 existing_type=sa.VARCHAR(length=55),
23 nullable=False)
24 op.alter_column('forms', 'title',
25 existing_type=sa.VARCHAR(length=50),
26 nullable=False)
27 # ### end Alembic commands ###
28
29
30 def downgrade():
31 # ### commands auto generated by Alembic - please adjust! ###
32 op.alter_column('forms', 'title',
33 existing_type=sa.VARCHAR(length=50),
34 nullable=True)
35 op.alter_column('fields', 'label',
36 existing_type=sa.VARCHAR(length=55),
37 nullable=True)
38 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 from flask_wtf import FlaskForm
2 from wtforms import StringField, PasswordField
3 from wtforms.validators import Email, ValidationError, InputRequired, Length, EqualTo
4 from app.models import User
5
6
7 def user_exists(form, field):
8 # Checking if user exists
9 email = field.data
10 user = User.query.filter(User.email == email).first()
11 if user:
12 raise ValidationError('Email address is already in use.')
13
14
15 def username_exists(form, field):
16 # Checking if username is already in use
17 username = field.data
18 user = User.query.filter(User.username == username).first()
19 if user:
20 raise ValidationError('Username is already in use.')
21
22
23 class SignUpForm(FlaskForm):
24 username = StringField(
25 'username', validators=[InputRequired(message='Input Required'), Length(max=40, message='Must be less than 40 characters'), username_exists])
26 email = StringField('email', validators=[InputRequired(), Length(
27 max=40, message='Must be less than 40 characters'), Email(message='Invalid'), user_exists])
28 password = PasswordField('password', validators=[
29 InputRequired(), EqualTo('confirm', message='Passwords must match')])
30 confirm = PasswordField('confirm')
| 7 - warning: unused-argument
15 - warning: unused-argument
23 - refactor: too-few-public-methods
|
1 """empty message
2
3 Revision ID: b05fdd14ae4f
4 Revises: 4563136888fd
5 Create Date: 2021-08-20 10:34:08.171553
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = 'b05fdd14ae4f'
14 down_revision = '4563136888fd'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.alter_column('fields', 'label',
22 existing_type=sa.VARCHAR(length=55),
23 nullable=True)
24 op.alter_column('fields', 'required',
25 existing_type=sa.BOOLEAN(),
26 nullable=False)
27 # ### end Alembic commands ###
28
29
30 def downgrade():
31 # ### commands auto generated by Alembic - please adjust! ###
32 op.alter_column('fields', 'required',
33 existing_type=sa.BOOLEAN(),
34 nullable=True)
35 op.alter_column('fields', 'label',
36 existing_type=sa.VARCHAR(length=55),
37 nullable=False)
38 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 """empty message
2
3 Revision ID: d0c387e43ca4
4 Revises: 94f5eda37179
5 Create Date: 2021-08-21 11:33:10.206199
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = 'd0c387e43ca4'
14 down_revision = '94f5eda37179'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.add_column('forms', sa.Column('field_id', sa.Integer(), nullable=True))
22 op.create_foreign_key(None, 'forms', 'fields', ['field_id'], ['id'])
23 # ### end Alembic commands ###
24
25
26 def downgrade():
27 # ### commands auto generated by Alembic - please adjust! ###
28 op.drop_constraint(None, 'forms', type_='foreignkey')
29 op.drop_column('forms', 'field_id')
30 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 from app.models import db, Form
2
3
4 def seed_forms():
5 test = Form(
6 title = "Test Form Render",
7 owner_id = 1,
8 description = "",
9 label_placement = "",
10 description_align = "",
11 title_align = "",
12 )
13
14 db.session.add(test)
15 db.session.commit()
16
17
18 def undo_forms():
19 db.session.execute('TRUNCATE forms RESTART IDENTITY CASCADE;')
20 db.session.commit()
| Clean Code: No Issues Detected
|
1 from .db import db
2 from .user import User
3 from .form import Form
4 from .field import Field
| 1 - error: relative-beyond-top-level
2 - error: relative-beyond-top-level
3 - error: relative-beyond-top-level
4 - error: relative-beyond-top-level
1 - warning: unused-import
2 - warning: unused-import
3 - warning: unused-import
4 - warning: unused-import
|
1 """empty message
2
3 Revision ID: beeeac90e4ba
4 Revises: d25f4d1b7ea0
5 Create Date: 2021-08-20 10:00:09.924819
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = 'beeeac90e4ba'
14 down_revision = 'd25f4d1b7ea0'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.alter_column('fields', 'required',
22 existing_type=sa.BOOLEAN(),
23 nullable=True)
24 # ### end Alembic commands ###
25
26
27 def downgrade():
28 # ### commands auto generated by Alembic - please adjust! ###
29 op.alter_column('fields', 'required',
30 existing_type=sa.BOOLEAN(),
31 nullable=False)
32 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 """empty message
2
3 Revision ID: b3e721c02f48
4 Revises: 9aec744a6b98
5 Create Date: 2021-08-20 13:35:16.871785
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = 'b3e721c02f48'
14 down_revision = '9aec744a6b98'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.alter_column('fields', 'form_id',
22 existing_type=sa.INTEGER(),
23 nullable=False)
24 # ### end Alembic commands ###
25
26
27 def downgrade():
28 # ### commands auto generated by Alembic - please adjust! ###
29 op.alter_column('fields', 'form_id',
30 existing_type=sa.INTEGER(),
31 nullable=True)
32 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 """empty message
2
3 Revision ID: 2453c767d036
4 Revises: d0c387e43ca4
5 Create Date: 2021-08-21 14:53:11.208418
6
7 """
8 from alembic import op
9 import sqlalchemy as sa
10
11
12 # revision identifiers, used by Alembic.
13 revision = '2453c767d036'
14 down_revision = 'd0c387e43ca4'
15 branch_labels = None
16 depends_on = None
17
18
19 def upgrade():
20 # ### commands auto generated by Alembic - please adjust! ###
21 op.drop_constraint('forms_field_id_fkey', 'forms', type_='foreignkey')
22 op.drop_column('forms', 'field_id')
23 # ### end Alembic commands ###
24
25
26 def downgrade():
27 # ### commands auto generated by Alembic - please adjust! ###
28 op.add_column('forms', sa.Column('field_id', sa.INTEGER(), autoincrement=False, nullable=True))
29 op.create_foreign_key('forms_field_id_fkey', 'forms', 'fields', ['field_id'], ['id'])
30 # ### end Alembic commands ###
| Clean Code: No Issues Detected
|
1 # from flask import Blueprint, jsonify, request
2 # from flask_login import login_required
3 # from app.models import Field, db
4
5 # field_routes = Blueprint('fields', __name__)
6
7
8 # @field_routes.route('/', methods=['POST'])
9 # def fields():
10 # if request.method == 'POST':
11 # # get fields data from request body
12 # data = request.get_json()
13 # form_fields = []
14
15 # for field_info in data:
16 # field = Field(
17 # type=field_info["type"],
18 # label=field_info["label"],
19 # max_length=field_info["max_length"],
20 # required=field_info["required"],
21 # placeholder=field_info["placeholder"],
22 # instructions=field_info["instructions"],
23 # choices=field_info["choices"],
24 # form_id=field_info["form_id"]
25 # )
26
27 # # db.session.add(field)
28 # form_fields.append(field)
29
30 # # adds each instance individually, so list format is ok
31 # db.session.add_all(form_fields)
32 # db.session.commit()
33
34 # # must return dictionary, tuple, or string
35 # return {"fields": [field.to_dict for field in form_fields]}
36
37
38 # @field_routes.route('/forms/<int:id>')
39 # def form_fields(id):
40 # fields = Field.query.filter_by(form_id=id).all()
41
42 # return {'fields': [field.to_dict for field in fields]}
| Clean Code: No Issues Detected
|
1 from app.models import db, User
2
3
4 # Adds a demo user, you can add other users here if you want
5 def seed_users():
6 demo = User(
7 username='Demo', email='demo@aa.io', password='password')
8 marnie = User(
9 username='marnie', email='marnie@aa.io', password='password')
10 bobbie = User(
11 username='bobbie', email='bobbie@aa.io', password='password')
12
13 db.session.add(demo)
14 db.session.add(marnie)
15 db.session.add(bobbie)
16
17 db.session.commit()
18
19
20 # Uses a raw SQL query to TRUNCATE the users table.
21 # SQLAlchemy doesn't have a built in function to do this
22 # TRUNCATE Removes all the data from the table, and RESET IDENTITY
23 # resets the auto incrementing primary key, CASCADE deletes any
24 # dependent entities
25 def undo_users():
26 db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')
27 db.session.commit()
| Clean Code: No Issues Detected
|
1 from .db import db
2
3
4 class Field(db.Model):
5 __tablename__ = 'fields'
6
7 id = db.Column(db.Integer, primary_key=True)
8 type = db.Column(db.String(255), nullable=False)
9 label = db.Column(db.String(55), nullable=False)
10 max_length = db.Column(db.Integer)
11 required = db.Column(db.Boolean, nullable=False)
12 placeholder = db.Column(db.String(255))
13 instructions = db.Column(db.String(255))
14 choices = db.Column(db.Text)
15 form_id = db.Column(db.Integer, db.ForeignKey("forms.id"))
16 # forms = db.relationship("Form", foreign_keys=form_id, lazy="joined") # redundant
17
18 def to_dict(self):
19 return {
20 'id': self.id,
21 'form_id': self.form_id,
22 'type': self.type,
23 'label': self.label,
24 'max_length': self.max_length,
25 'required': self.required,
26 'placeholder': self.placeholder,
27 'instructions': self.instructions,
28 # splits choices into a list, removes empty list entry at the end
29 'choices': self.choices[:-2].split('&&')
30 }
| 1 - error: relative-beyond-top-level
4 - refactor: too-few-public-methods
|
1 #!/usr/bin/env python
2
3 import os
4 import sys
5 import setuptools.command.egg_info as egg_info_cmd
6 import shutil
7
8 from setuptools import setup, find_packages
9
10 SETUP_DIR = os.path.dirname(__file__)
11 README = os.path.join(SETUP_DIR, 'README')
12
13 setup(name='cwltool_service',
14 version='2.0',
15 description='Common workflow language runner service',
16 long_description=open(README).read(),
17 author='Common workflow language working group',
18 author_email='common-workflow-language@googlegroups.com',
19 url="https://github.com/common-workflow-language/cwltool-service",
20 download_url="https://github.com/common-workflow-language/cwltool-service",
21 license='Apache 2.0',
22 py_modules=["cwltool_stream", "cwl_flask", "cwltool_client"],
23 install_requires=[
24 'Flask',
25 'requests',
26 'PyYAML'
27 ],
28 entry_points={
29 'console_scripts': [ "cwltool-stream=cwltool_stream:main",
30 "cwl-server=cwl_flask:main",
31 "cwl-client=cwl_client:main"]
32 },
33 zip_safe=True
34 )
| 16 - refactor: consider-using-with
16 - warning: unspecified-encoding
4 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
8 - warning: unused-import
|
1 from db import DatabaseController as DbC
2
3 def get_results():
4 all_results = DbC.get_admission_results(1)
5 return all_results
6
7 def calculate_results():
8 specializations = DbC.get_all_specializations()
9 candidates = DbC.get_all_candidates()
10 repartition = []
11 specs = {}
12 opt_arr = {}
13
14 for item in specializations:
15 specs[item.identifier] = {}
16 specs[item.identifier]["name"] = item.name
17 specs[item.identifier]["capacity"] = item.capacity
18 specs[item.identifier]["free_spots"] = item.capacity
19
20
21 for item in candidates:
22 r = DbC.AdmissionResult()
23 r.candidate_cnp = item.cnp
24 r.final_score = max(item.info_grade, item.math_grade)*0.3 + item.high_school_avg_grade*0.2 + 0.5*item.admission_grade
25 r.specialization_id = item.first_option
26 r.allocation = DbC.AdmissionStatus.UNPROCESSED
27 repartition.append(r)
28 opt_arr[str(item.cnp)] = {}
29 opt_arr[str(item.cnp)]["first_option"] = item.first_option
30 opt_arr[str(item.cnp)]["second_option"] = item.second_option
31
32 repartition = sorted(repartition, key = lambda x: (x.specialization_id, (-1)*x.final_score, ))
33
34 for item in repartition:
35 if item.final_score < 5:
36 item.allocation = DbC.AdmissionStatus.REJECTED
37 continue
38 if specs[item.specialization_id]["free_spots"] > 2:
39 item.allocation = DbC.AdmissionStatus.FREE
40 specs[item.specialization_id]["free_spots"] -= 1
41 elif specs[item.specialization_id]["free_spots"] > 0:
42 item.allocation = DbC.AdmissionStatus.FEE
43 specs[item.specialization_id]["free_spots"] -= 1
44 else:
45 item.specialization_id = opt_arr[str(item.candidate_cnp)]["second_option"]
46 if specs[item.specialization_id]["free_spots"] > 2:
47 item.allocation = DbC.AdmissionStatus.FREE
48 specs[item.specialization_id]["free_spots"] -= 1
49 elif specs[item.specialization_id]["free_spots"] > 0:
50 item.allocation = DbC.AdmissionStatus.FEE
51 specs[item.specialization_id]["free_spots"] -= 1
52 else:
53 item.allocation = DbC.AdmissionStatus.REJECTED
54 # print("Candidate CNP: ", item.candidate_cnp)
55 # print("Admission Grade: ", item.final_score)
56 # print("AdmissionResult: ", item.allocation)
57 # print("Specialization: ", specs[item.specialization_id]["name"])
58 # print("Specialization ID: ", item.specialization_id)
59 return repartition
60
61 def set_results():
62 results = calculate_results()
63
64 for item in results:
65 if DbC.save_admission_result_for_candidate(item) != "OK":
66 raise "Error in repartition processing!"
67
68 print("Repartition completed successfully.")
69
70 # set_results()
| 4 - warning: bad-indentation
5 - warning: bad-indentation
8 - warning: bad-indentation
9 - warning: bad-indentation
10 - warning: bad-indentation
11 - warning: bad-indentation
12 - warning: bad-indentation
14 - warning: bad-indentation
15 - warning: bad-indentation
16 - warning: bad-indentation
17 - warning: bad-indentation
18 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
24 - warning: bad-indentation
25 - warning: bad-indentation
26 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
32 - warning: bad-indentation
34 - warning: bad-indentation
35 - warning: bad-indentation
36 - warning: bad-indentation
37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
41 - warning: bad-indentation
42 - warning: bad-indentation
43 - warning: bad-indentation
44 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
50 - warning: bad-indentation
51 - warning: bad-indentation
52 - warning: bad-indentation
53 - warning: bad-indentation
59 - warning: bad-indentation
62 - warning: bad-indentation
64 - warning: bad-indentation
65 - warning: bad-indentation
66 - warning: bad-indentation
68 - warning: bad-indentation
66 - error: raising-bad-type
|
1 import imageio
2 import pytest
3 import sys, os
4 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
5 import src
6
7 def test_get_digital_goban_state():
8 rgb_pix = imageio.imread('images/digital_goban.png')
9
10 # Process KGS goban grayscale and find the stones
11 assert src.get_digital_goban_state(rgb_pix) == \
12 set([(1,1,1), (1, 1, 14), (2,19,19)])
| 2 - warning: unused-import
|
1 import sys, os
2 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
3 from pynput.mouse import Button, Controller
4 import pytest
5 import imageio
6 import src
7
8
9 # Write a test of play_handsfree_GO.py using already existing frames
10 img_name = []
11 folder_name = 'images/sample_game_log/ex1/'
12 # empty board for outer board boundary detection
13 img_name.append(folder_name + 'opencv_frame_1.png')
14 UL_outer_x, UL_outer_y = 376.27419354838713, 91.34516129032261
15 UR_outer_x, UR_outer_y = 962.08064516129020, 101.66774193548395
16 BL_outer_x, BL_outer_y = 120.79032258064518, 641.0225806451613
17 BR_outer_x, BR_outer_y = 1265.3064516129032, 652.6354838709677
18 # black stones on corners and a white stone at center
19 img_name.append(folder_name + 'opencv_frame_3.png')
20 # white stones on corners and a black stone at center
21 img_name.append(folder_name + 'opencv_frame_4.png')
22 # verifying calibration
23 img_name.append(folder_name + 'opencv_frame_b_1_1.png') # black at (1,1)
24 img_name.append(folder_name + 'opencv_frame_b_1_19.png') # black at (1,19)
25 img_name.append(folder_name + 'opencv_frame_b_19_19.png') # black at (19,19)
26 img_name.append(folder_name + 'opencv_frame_b_19_1.png') # black at (19,1)
27 img_name.append(folder_name + 'opencv_frame_b_10_10.png') # black at (10,10)
28 img_name.append(folder_name + 'opencv_frame_b_4_4.png') # black at (4,4)
29 img_name.append(folder_name + 'opencv_frame_b_4_10.png') # black at (4,10)
30 img_name.append(folder_name + 'opencv_frame_b_4_16.png') # black at (4,16)
31 img_name.append(folder_name + 'opencv_frame_b_16_16.png') # black at (16,16)
32 img_name.append(folder_name + 'opencv_frame_w_1_1.png') # white at (1,1)
33 img_name.append(folder_name + 'opencv_frame_w_10_10.png') # white at (10,10)
34 img_name.append(folder_name + 'opencv_frame_w_16_16.png') # white at (16,16)
35 img_name.append(folder_name + 'opencv_frame_w_19_19.png') # white at (19,19)
36 #opencv_frame_b_10_4.png
37 #opencv_frame_b_10_16.png
38 #opencv_frame_b_16_4.png
39 #opencv_frame_b_16_10.png
40 #opencv_frame_b_19_1.png
41 #opencv_frame_w_1_19.png
42 #opencv_frame_w_4_4.png
43 #opencv_frame_w_4_10.png
44 #opencv_frame_w_4_16.png
45 #opencv_frame_w_10_16.png
46 #opencv_frame_w_16_4.png
47 #opencv_frame_w_16_10.png
48 #opencv_frame_w_19_1.png
49
50 def test_play_handsfree_GO():
51 ps = False
52 # STEP 0 - EMPTY GOBAN
53 # Get outer boundaries of pyhsical goban -- skipped for speed
54 ob = [UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
55 BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y]
56 # Remove non-goban part from the RGB matrix and make it a square matrix
57 # Find the indices of board points in the new square RGB matrix
58 #UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
59 # BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y = \
60 # src.get_pyhsical_board_outer_corners(img_name[0])
61 rgb = imageio.imread(img_name[0])
62 rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
63 x_idx, y_idx = src.find_board_points(rgb, plot_stuff=ps)
64
65 # STEP 1 - GOBAN WITH BLACK STONES ON CORNERS AND A WHITE STONE AT CENTER
66 rgb = imageio.imread(img_name[1])
67 bxy, wxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
68 rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
69 red_scale_th1, blue_scale_th1 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
70 _, _ = src.mark_stones(rgb, x_idx, y_idx, \
71 red_scale_th1, blue_scale_th1, plot_stuff=ps)
72
73 # STEP 2 - GOBAN WITH WHITE STONES ON CORNERS AND A BLACK STONE AT CENTER
74 rgb = imageio.imread(img_name[2])
75 wxy, bxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
76 rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
77 red_scale_th2, blue_scale_th2 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
78 _, _ = src.mark_stones(rgb, x_idx, y_idx, \
79 red_scale_th2, blue_scale_th2, plot_stuff=ps)
80
81 red_scale_th = 0.5 * (red_scale_th1 + red_scale_th2)
82 blue_scale_th = 0.5 * (blue_scale_th1 + blue_scale_th2)
83
84 # STEP 3 - VERIFY CALIBRATION
85 verify_calibration_for_test_purposes(img_name[3], ob, x_idx, y_idx, \
86 red_scale_th, blue_scale_th, 'black', 1, 1, ps)
87 verify_calibration_for_test_purposes(img_name[4], ob, x_idx, y_idx, \
88 red_scale_th, blue_scale_th, 'black', 1, 19, ps)
89 verify_calibration_for_test_purposes(img_name[5], ob, x_idx, y_idx, \
90 red_scale_th, blue_scale_th, 'black', 19, 19, ps)
91 verify_calibration_for_test_purposes(img_name[6], ob, x_idx, y_idx, \
92 red_scale_th, blue_scale_th, 'black', 19, 1, ps)
93 verify_calibration_for_test_purposes(img_name[7], ob, x_idx, y_idx, \
94 red_scale_th, blue_scale_th, 'black', 10, 10, ps)
95 verify_calibration_for_test_purposes(img_name[8], ob, x_idx, y_idx, \
96 red_scale_th, blue_scale_th, 'black', 4, 4, ps)
97 verify_calibration_for_test_purposes(img_name[9], ob, x_idx, y_idx, \
98 red_scale_th, blue_scale_th, 'black', 4, 10, ps)
99 verify_calibration_for_test_purposes(img_name[10], ob, x_idx, y_idx, \
100 red_scale_th, blue_scale_th, 'black', 4, 16, ps)
101 verify_calibration_for_test_purposes(img_name[11], ob, x_idx, y_idx, \
102 red_scale_th, blue_scale_th, 'black', 16, 16, ps)
103 verify_calibration_for_test_purposes(img_name[12], ob, x_idx, y_idx, \
104 red_scale_th, blue_scale_th, 'white', 1, 1, ps)
105 verify_calibration_for_test_purposes(img_name[13], ob, x_idx, y_idx, \
106 red_scale_th, blue_scale_th, 'white', 10, 10, ps)
107 verify_calibration_for_test_purposes(img_name[14], ob, x_idx, y_idx, \
108 red_scale_th, blue_scale_th, 'white', 16, 16, ps)
109 verify_calibration_for_test_purposes(img_name[15], ob, x_idx, y_idx, \
110 red_scale_th, blue_scale_th, 'white', 19, 19, ps)
111
112 # DIGITAL BOARD DETECTION
113
114 # Ask the user to open a KGS board
115 print('\n OPEN A KGS BOARD/GAME NOW')
116 input('ENTER when the digital board is open: ')
117
118 # Get the user to click on come corners to get to know the digital board
119 UL_x, UL_y, goban_step = src.get_goban_corners()
120
121 # START REPLAYING PYHSICAL BOARD MOVES ON THE DIGITAL BOARD
122 mouse = Controller() # obtain mouse controller
123 print("Placing a black stone at (10,10)")
124 bxy, wxy = [], [] # empty board in the beginning
125 color, i, j = src.scan_next_move(img_name[7], ob, x_idx, y_idx, \
126 red_scale_th, blue_scale_th, bxy, wxy, plot_stuff=ps)
127 _, _ = src.play_next_move_on_digital_board(mouse, color, i, j, bxy, wxy, \
128 UL_x, UL_y, goban_step)
129
130
131 def verify_calibration_for_test_purposes(img, ob, x, y, r, b, c, i, j, ps):
132 rgb = imageio.imread(img)
133 rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
134 print(f"Verifying a {c} stone at {src.convert_physical_board_ij_to_str(i,j)}...")
135 assert src.is_this_stone_on_the_board(rgb, x, y, r, b, c, i, j, ps)
| 50 - refactor: too-many-locals
131 - refactor: too-many-arguments
131 - refactor: too-many-positional-arguments
3 - warning: unused-import
4 - warning: unused-import
|
1 import src
2 import time
3
4 UL_x, UL_y, goban_step = src.get_goban_corners()
5
6 prev_stone_set = set()
7 print("Started scanning the board for moves every 5 seconds...")
8 while True:
9 # wait between screenshots
10 time.sleep(5)
11 # get board screenshot
12 board_rgb_screenshot = src.KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step)
13 # find the stones on the board
14 current_stone_set = src.get_goban_state(board_rgb_screenshot)
15 # is there a new stone on the board?
16 if current_stone_set > prev_stone_set:
17 # find the new stone
18 stone = current_stone_set - prev_stone_set
19 # IN THE FUTURE, ALLOW FOR OPPONENT TO MAKE A QUICK MOVE!!!
20 assert len(stone) == 1
21 # say the new moves on the board
22 player = list(stone)[0][0] # 1-black, 2-white
23 i, j = list(stone)[0][1], list(stone)[0][2]
24 pos = src.int_coords_to_str(i,j)
25 if player==1:
26 update_msg = "Black played at " + pos
27 elif player==2:
28 update_msg = "White played at " + pos
29 print(update_msg)
30 prev_stone_set = current_stone_set
31 else:
32 print("No moves made!")
| 29 - error: possibly-used-before-assignment
|
1 import matplotlib.pyplot as plt
2 import imageio
3 import numpy as np
4 import src
5
6 IMG_PATH = 'images/pyshical_goban_pic1.png'
7 #IMG_PATH = 'images/pyshical_goban_pic2.png'
8 #IMG_PATH = 'images/pyshical_goban_pic3.png'
9 UL_outer_x, UL_outer_y = 315, 24
10 UR_outer_x, UR_outer_y = 999, 40
11 BL_outer_x, BL_outer_y = 3, 585
12 BR_outer_x, BR_outer_y = 1273, 621
13 #IMG_PATH = 'images/pyshical_goban_pic4.png'
14 #UL_outer_x, UL_outer_y = 321, 235
15 #UR_outer_x, UR_outer_y = 793, 244
16 #BL_outer_x, BL_outer_y = 92, 603
17 #BR_outer_x, BR_outer_y = 933, 608
18
19 # Get RGB matrix of the picture with goban
20 rgb = imageio.imread(IMG_PATH)
21 plt.imshow(rgb)
22 plt.show()
23
24 # Remove non-goban part from the RGB matrix and make it a square matrix
25 rgb = src.rescale_pyhsical_goban_rgb(rgb, \
26 UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
27 BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)
28
29 # Find the indices of board points in the new square RGB matrix
30 x_idx, y_idx = src.find_board_points(rgb, plot_stuff=True)
31
32 bxy, wxy = [(4,4), (16,4)], [(4,16),(16,16)]
33
34 src.mark_board_points(rgb, x_idx, y_idx, bxy, wxy)
35
36 red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
37
38 bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th)
39
40 src.is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \
41 'black', 16,4)
| 3 - warning: unused-import
|
1 from pynput.mouse import Button, Controller
2 import src
3 import time
4
5 def get_goban_corners():
6 # Obtain mouse controller
7 mouse = Controller()
8
9 # Ask the user to define goban corners
10 print('Move cursor to upper-left (A19) corner of Goban and keep it there five seconds')
11 time.sleep(5)
12 (UL_x, UL_y) = mouse.position
13 print(f"Upper-Left: ({UL_x},{UL_y})")
14 print()
15
16 print('Move cursor to bottom-right (T1) corner of Goban and keep it there five seconds')
17 time.sleep(5)
18 (BR_x, BR_y) = mouse.position
19 print(f"Bottom-Right: ({BR_x},{BR_y})")
20 print()
21
22 # Compute goban step sizes
23 goban_step = 0.5 * (BR_x - UL_x) * 1/18 + 0.5 * (BR_y - UL_y) * 1/18
24 print(f"Goban-steps is {goban_step}")
25
26 return UL_x, UL_y, goban_step
27
28 def make_the_move(mouse, x, y, no_click=False):
29 (cx, cy) = mouse.position
30 time.sleep(0.5)
31 mouse.move(x - cx, y - cy)
32 time.sleep(0.2)
33 if not no_click:
34 mouse.click(Button.left, 1)
35
36 def int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step):
37 x = UL_x + (i-1) * goban_step
38 y = UL_y + (j-1) * goban_step
39 return x, y
40
41 def str_to_integer_coordinates(str):
42 # Upper-lef corner is 1,1 and Bottom-right corner is 19,19
43 # Goban boards skip the letter I
44 j = 19 - int(str[1:3]) + 1
45 if ord(str[0]) < ord('I'):
46 i = ord(str[0]) - ord('A') + 1
47 else:
48 i = ord(str[0]) - ord('A')
49 return i,j
50
51 def int_coords_to_str(i, j):
52 # Upper-lef corner is 1,1 and Bottom-right corner is 19,19
53 # Goban boards skip the letter I
54 if i <= ord('I') - ord('A'):
55 return chr(ord('A') + i-1) + f"{20-j}"
56 else:
57 return chr(ord('A') + i) + f"{20-j}"
| 41 - warning: redefined-builtin
54 - refactor: no-else-return
2 - warning: unused-import
|
1 import pyscreeze
2 import numpy as np
3 import matplotlib.pyplot as plt
4 import src
5
6 def get_digital_goban_state(rgb_pix, plot_stuff=False):
7 # RGB of Black = [ 0, 0, 0]
8 # RGB of White = [255, 255, 255]
9 # RGB of Orange = [255, 160, 16]
10 # Use red scale to find out black stones, blue scale to find out white stones
11 # (1, 1, 1) - Black A1 (upper corner)
12 # (2, 19, 19) - White T10 (lower corner)
13 idx = np.arange(19)+1
14
15 m, n, z = rgb_pix.shape
16 assert m == n
17
18 # Approximate diameter of a stone in terms of pixels
19 stone_diam = n/19
20
21 # Calculate pixels where stone centers will be positioned
22 stone_centers = np.round(stone_diam*idx) - 0.5 * np.round(stone_diam) - 1
23 stone_centers = stone_centers.astype(int)
24
25 # For every stone center, we will check a square matrix centered around
26 # the stone center and find the average color. If it is black, then the
27 # stone is black, if it is white, then the stone is white, otherwise no stone
28 square_length_in_a_stone = int(np.round((n/19) / np.sqrt(2)))
29 if square_length_in_a_stone % 2 == 0:
30 d = square_length_in_a_stone / 2
31 else:
32 d = (square_length_in_a_stone-1) / 2
33 d = int(d-1) # just in case, make square smaller and integer
34
35 # Calculate the mean of a small matrix around every board point to find out
36 # if there is a black stone or white stone or nothing
37 stones = set()
38 for posi, i in enumerate(stone_centers, start=1):
39 for posj, j in enumerate(stone_centers, start=1):
40 # Find black stones
41 mat = rgb_pix[:,:,0]
42 color = np.mean(np.mean(mat[i:i+d+1, j:j+d+1]))
43 if color < 125:
44 stones.add((1, posj, posi)) # black stone
45 rgb_pix[i-d+1:i+d, j-d+1:j+d, :] = 0
46
47 # Find white stones
48 mat = rgb_pix[:,:,2]
49 color = np.mean(np.mean(mat[i:i+d+1, j:j+d+1]))
50 if color > 125:
51 stones.add((2, posj, posi)) # white stone
52 rgb_pix[i-d+1:i+d, j-d+1:j+d] = 255
53
54 # Plot for debugging
55 if plot_stuff:
56 plt.imshow(rgb_pix)
57 plt.show()
58
59 return stones
60
61 def KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step):
62 UL_outer_x = UL_x - 0.5*goban_step
63 UL_outer_y = UL_y - 0.5*goban_step
64 BR_outer_x = UL_x + 18*goban_step + 0.5*goban_step
65 BR_outer_y = UL_y + 18*goban_step + 0.5*goban_step
66 im = pyscreeze.screenshot(region=(UL_outer_x, UL_outer_y, \
67 BR_outer_x-UL_outer_x, BR_outer_y-UL_outer_y))
68
69 pix = np.array(im)
70 rgb_pix = pix[...,:3]
71
72 return rgb_pix
| 6 - refactor: too-many-locals
15 - warning: unused-variable
4 - warning: unused-import
|
1 import pytest
2 import imageio
3 import sys, os
4 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
5 import src
6
7 # stones - upper-left corner is (1,1), lower-left corner is (19,1)
8 IMG_PATH = ['images/pyshical_goban_pic1.png', 'images/pyshical_goban_pic2.png', \
9 'images/pyshical_goban_pic3.png', 'images/pyshical_goban_pic4.png', \
10 'images/pyshical_goban_pic5.png']
11 bxy0, wxy0 = [(4,4), (16,4)], [(4,16),(16,16)]
12 bxy1, wxy1 = [(1,9), (16,8)], [(10,1),(13,19)]
13 bxy2, wxy2 = [(1,19), (17,3)], [(1,3),(19,19)]
14 bxy3, wxy3 = [(1,19), (19,1), (5,4), (6,16), (12,8), (14,6), (16,10), (19,13)], \
15 [(1,1), (4,10), (7,7), (10,4), (10,10), (12,11), (15,7), (19,19)]
16 bxy4, wxy4 = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
17 UL_outer_x0, UL_outer_y0 = 315, 24
18 UR_outer_x0, UR_outer_y0 = 999, 40
19 BL_outer_x0, BL_outer_y0 = 3, 585
20 BR_outer_x0, BR_outer_y0 = 1273, 621
21
22 UL_outer_x3, UL_outer_y3 = 321, 235
23 UR_outer_x3, UR_outer_y3 = 793, 244
24 BL_outer_x3, BL_outer_y3 = 92, 603
25 BR_outer_x3, BR_outer_y3 = 933, 608
26
27 UL_outer_x4, UL_outer_y4 = 414, 256
28 UR_outer_x4, UR_outer_y4 = 962, 269
29 BL_outer_x4, BL_outer_y4 = 217, 659
30 BR_outer_x4, BR_outer_y4 = 1211, 679
31
32 @pytest.mark.skip
33 def test_board_outer_corner():
34 UL_outer_x0_click, UL_outer_y0_click, _, _, _, _, _, _ = \
35 src.get_pyhsical_board_outer_corners(IMG_PATH[0])
36 assert abs(UL_outer_x0_click - UL_outer_x0) < 5 # five pixels
37 assert abs(UL_outer_y0_click - UL_outer_y0) < 5
38
39 def test_board_state_detection_from_camera_picture():
40 assert_board_state(IMG_PATH[4], bxy4, wxy4, 'black', bxy4[0], \
41 UL_outer_x4, UL_outer_y4, UR_outer_x4, UR_outer_y4, \
42 BL_outer_x4, BL_outer_y4, BR_outer_x4, BR_outer_y4, \
43 plot_stuff=False)
44 assert_board_state(IMG_PATH[0], bxy0, wxy0, 'black', bxy0[1], \
45 UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
46 BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0)
47 assert_board_state(IMG_PATH[1], bxy1, wxy1, 'white', wxy1[0], \
48 UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
49 BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0, \
50 plot_stuff=True)
51 assert_board_state(IMG_PATH[2], bxy2, wxy2, 'black', bxy2[0], \
52 UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
53 BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0)
54 assert_board_state(IMG_PATH[3], bxy3, wxy3, 'white', wxy3[6], \
55 UL_outer_x3, UL_outer_y3, UR_outer_x3, UR_outer_y3, \
56 BL_outer_x3, BL_outer_y3, BR_outer_x3, BR_outer_y3)
57
58 def assert_board_state(IMG_PATH, bxy, wxy, color, ij_pair, \
59 UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
60 BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y, \
61 plot_stuff=False):
62 # Get RGB matrix of the picture with goban
63 rgb = imageio.imread(IMG_PATH)
64
65 # Remove non-goban part from the RGB matrix and make it a square matrix
66 rgb = src.rescale_pyhsical_goban_rgb(rgb, \
67 UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
68 BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)
69
70 # Find the indices of board points in the new square RGB matrix
71 x_idx, y_idx = src.find_board_points(rgb, plot_stuff=plot_stuff)
72
73 # Find color thresholds for stone detection
74 red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
75
76 # Refind stones using the above thresholds
77 bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \
78 red_scale_th, blue_scale_th, plot_stuff=plot_stuff)
79
80 assert set(bxy) == set(bxy_new)
81 assert set(wxy) == set(wxy_new)
82 assert src.is_this_stone_on_the_board(rgb, x_idx, y_idx, \
83 red_scale_th, blue_scale_th, color, ij_pair[0], ij_pair[1], \
84 plot_stuff=True)
| 58 - refactor: too-many-arguments
58 - refactor: too-many-positional-arguments
58 - refactor: too-many-locals
58 - warning: redefined-outer-name
|
1 import matplotlib.pyplot as plt
2 import imageio
3 import numpy as np
4 import src
5
6 IMG_PATH = 'images/empty_pyshical_goban1.png'
7
8 board_corners = []
9 def onclick(event):
10 print(event.xdata, event.ydata)
11 board_corners.append((event.xdata, event.ydata))
12
13 # Get RGB matrix of the picture with goban
14 rgb = imageio.imread(IMG_PATH)
15 fig = plt.figure()
16 plt.imshow(rgb)
17 plt.title("Please click on UL-UR-BL-BR corners...")
18 fig.canvas.mpl_connect('button_press_event', onclick)
19 plt.show()
20
21 UL_outer_x, UL_outer_y = board_corners[0]
22 UR_outer_x, UR_outer_y = board_corners[1]
23 BL_outer_x, BL_outer_y = board_corners[2]
24 BR_outer_x, BR_outer_y = board_corners[3]
25
26 # Remove non-goban part from the RGB matrix and make it a square matrix
27 rgb = src.rescale_pyhsical_goban_rgb(rgb, \
28 UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
29 BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)
30
31 # Find the indices of board points in the new square RGB matrix
32 x_idx, y_idx = src.find_board_points(rgb, plot_stuff=True)
33
34 # Mark board points
35 src.mark_board_points(rgb, x_idx, y_idx)
36
37 #bxy, wxy = [(4,4), (16,4)], [(4,16),(16,16)]
38
39 #src.mark_board_points(rgb, x_idx, y_idx, bxy, wxy)
40
41 #red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
42 #bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th)
43
44 #src.is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \
45 # 'black', 16,4)
| 3 - warning: unused-import
|
1 import matplotlib.pyplot as plt
2
3 xy=[]
4
5 def onclick(event):
6 print(event.xdata, event.ydata)
7 xy.append((event.xdata, event.ydata))
8
9 fig = plt.figure()
10 plt.plot(range(10))
11 fig.canvas.mpl_connect('button_press_event', onclick)
12 plt.show()
13
14 print(xy)
| Clean Code: No Issues Detected
|
1 from setuptools import setup, find_packages
2 setup( name='Handsfree-KGS',
3 version='0.0',
4 description='Pay Handsfree Go on KGS',
5 author='Fatih Olmez',
6 author_email='folmez@gmail.com',
7 packages=find_packages())
| Clean Code: No Issues Detected
|
1 from .mouse_actions import get_goban_corners, str_to_integer_coordinates
2 from .mouse_actions import int_coords_to_screen_coordinates, make_the_move
3 from .mouse_actions import int_coords_to_str
4 from .screenshot_actions import KGS_goban_rgb_screenshot, get_digital_goban_state
5 from .picture_actions import plot_goban_rgb, average_RGB, make_indices_agree
6 from .picture_actions import return_int_pnts, subtract_rolling_sum
7 from .picture_actions import rolling_sum, find_custom_local_minima
8 from .picture_actions import mark_board_points, is_this_stone_on_the_board
9 from .picture_actions import mark_stones, calibrate
10 from .picture_actions import find_board_points, rescale_pyhsical_goban_rgb
11 from .picture_actions import get_pyhsical_board_outer_corners
12 from .picture_actions import convert_physical_board_ij_to_str
13 from .picture_actions import play_next_move_on_digital_board, scan_next_move
| 1 - error: relative-beyond-top-level
2 - error: relative-beyond-top-level
3 - error: relative-beyond-top-level
4 - error: relative-beyond-top-level
5 - error: relative-beyond-top-level
6 - error: relative-beyond-top-level
7 - error: relative-beyond-top-level
8 - error: relative-beyond-top-level
9 - error: relative-beyond-top-level
10 - error: relative-beyond-top-level
11 - error: relative-beyond-top-level
12 - error: relative-beyond-top-level
13 - error: relative-beyond-top-level
1 - warning: unused-import
1 - warning: unused-import
2 - warning: unused-import
2 - warning: unused-import
3 - warning: unused-import
4 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
5 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
6 - warning: unused-import
7 - warning: unused-import
7 - warning: unused-import
8 - warning: unused-import
8 - warning: unused-import
9 - warning: unused-import
9 - warning: unused-import
10 - warning: unused-import
10 - warning: unused-import
11 - warning: unused-import
12 - warning: unused-import
13 - warning: unused-import
13 - warning: unused-import
|
1 import imageio
2
3 def get_pyhsical_goban_state(rgb_pix):
4 pass
5
6 def picture_to_rgb(path):
7 return misc.imageio(path)
| 3 - warning: unused-argument
7 - error: undefined-variable
1 - warning: unused-import
|
1 from pynput.mouse import Button, Controller
2 import time
3 import sys
4 import os
5 import pytest
6
7 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
8 import src
9
10 def test_str_to_integer_coordinates():
11 assert src.str_to_integer_coordinates('A19') == (1, 1)
12 assert src.str_to_integer_coordinates('D16') == (4, 4)
13 assert src.str_to_integer_coordinates('D10') == (4, 10)
14 assert src.str_to_integer_coordinates('T1') == (19, 19)
15 assert src.str_to_integer_coordinates('K10') == (10, 10)
16
17 def test_integer_coordinates_to_str():
18 assert src.int_coords_to_str(1, 1) == 'A19'
19 assert src.int_coords_to_str(4, 4) == 'D16'
20 assert src.int_coords_to_str(4, 10) == 'D10'
21 assert src.int_coords_to_str(19, 19) == 'T1'
22 assert src.int_coords_to_str(10, 10) == 'K10'
23
24 @pytest.mark.slow
25 def test_place_stones_on_all_stars():
26 print()
27 # Get goban corners
28 UL_x, UL_y, goban_step = src.get_goban_corners()
29
30 # Obtain mouse controller
31 mouse = Controller()
32
33 # Place stones on stars
34 print('\n', 41*'-')
35 print(5*'-', 'Placing stones on all stars', 5*'-')
36 print(41*'-', '\n')
37 for str in ['D16', 'K16', 'Q16', 'D10', 'K10', 'Q10', 'D4', 'K4', 'Q4']:
38 i, j = src.str_to_integer_coordinates(str)
39 x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)
40 src.make_the_move(mouse, x, y)
41
42 # Get KGS goban as a square grayscale
43 rgb_pix = src.KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step)
| 37 - warning: redefined-builtin
43 - warning: unused-variable
1 - warning: unused-import
2 - warning: unused-import
|
1 import numpy as np
2 import pandas as pd
3 import os
4 import win32com.client as win32
5 import datetime
6
7 path = r"D:/python/EventDriven/result/"
8 date_format = "%Y-%m-%d"
9
10 def formatDate(date, fm=date_format):
11 return date.strftime(fm)
12
13 def moveDate(date, dayDelta=0, hourDelta=0):
14 if type(date) == str:
15 return datetime.datetime.strptime(date, date_format) + datetime.timedelta(days=dayDelta) + datetime.timedelta(hours=hourDelta)
16 else:
17 return date + datetime.timedelta(days=dayDelta) + + datetime.timedelta(hours=hourDelta)
18
19 def email(to, sub, HTMLBody, attachmentURLList):
20 outlook = win32.Dispatch('outlook.application')
21 mail = outlook.CreateItem(0)
22 mail.To = to
23 mail.Subject = sub
24 mail.HTMLBody = HTMLBody
25 for url in attachmentURLList:
26 if os.path.exists(url): mail.Attachments.Add(url)
27 mail.Send()
28
29 def main():
30 today = datetime.datetime.now()
31 url1 = path + formatDate(today) + "_HK.csv"
32 url2 = path + formatDate(today) + "_AX.csv"
33 url3 = path + formatDate(today) + "_SI.csv"
34 email("isaac.law@rbccm.com", "_News_", "", [url1, url2, url3])
35
36 if __name__=="__main__":
37 main()
| 14 - refactor: no-else-return
1 - warning: unused-import
2 - warning: unused-import
|
1 import eikon as ek
2 import numpy as np
3 import pandas as pd
4 import os
5 import zipfile
6 import datetime
7 import cufflinks as cf
8 import configparser as cp
9
10 ek.set_app_key('e4ae85e1e08b47ceaa1ee066af96cabe6e56562a')
11
12 dataRootPath = r"D:/Eikon_Data/"
13 dataRootPathNews = r"D:/Eikon_Data/News/"
14 dataRootPathMarketData = r"D:/Eikon_Data/Market_Data/"
15 databasePath = r"D:/Database/"
16 zipFolderPath = r"D:/Zip_Folder/"
17 date_format = "%Y-%m-%d"
18
19 def checkFolderExist(path):
20 return os.path.isdir(path)
21
22 def checkFileExist(path):
23 return os.path.isfile(path)
24
25 def createFolder(rootPath, folderName):
26 if rootPath[-1] == "/":
27 myRootPath = rootPath[:-1]
28 else:
29 myRootPath = rootPath
30 if not checkFolderExist(myRootPath+"/"+folderName):
31 os.mkdir(myRootPath+"/"+folderName)
32 return True
33 else:
34 return "Folder already exist"
35
36 def formatDate(date, fm=date_format):
37 return date.strftime(fm)
38
39 def moveDate(date, dayDelta=0, hourDelta=0):
40 if type(date) == str:
41 return datetime.datetime.strptime(date, date_format) + datetime.timedelta(days=dayDelta) + datetime.timedelta(hours=hourDelta)
42 else:
43 return date + datetime.timedelta(days=dayDelta) + + datetime.timedelta(hours=hourDelta)
44
45 def zipdir(path, ziph):
46 # ziph is zipfile handle
47 for root, dirs, files in os.walk(path):
48 for file in files:
49 ziph.write(os.path.join(root, file))
50
51 def iaZipFolder(path):
52 if path[-1] == '/':
53 zipFileName = path.split("/")[-2] + "_zip.zip"
54 else:
55 zipFileName = path.split("/")[-1] + "_zip.zip"
56
57 if checkFileExist(zipFolderPath + zipFileName): os.remove(zipFolderPath + zipFileName)
58 zipf = zipfile.ZipFile(zipFolderPath + zipFileName, 'w', zipfile.ZIP_DEFLATED)
59 zipdir(path, zipf)
60 zipf.close()
61
62 def downloadNews(undlName, date, savePath):
63 if not checkFolderExist(savePath + formatDate(date)):
64 createFolder(savePath, formatDate(date))
65
66 # download data
67 df = ek.get_news_headlines("R:"+undlName+" and english",
68 date_from=formatDate(moveDate(date,-1)) + "T16:00:00",
69 date_to=formatDate(moveDate(date)) + "T16:00:00",
70 count=100)
71
72 # move date back to HK time
73 df.index = moveDate(np.array(list(df.index)),0,8)
74 df.versionCreated = moveDate(np.array(list(df.versionCreated)),0,8)
75 # save data
76 df.to_csv(savePath + formatDate(date) + "/" + undlName + "_headlines.csv")
77
78 def downloadHistoricalNews(undlName, dateFrom, dateTo, savePath):
79 if type(dateFrom) == str:
80 myDateFrom = datetime.datetime.strptime(dateFrom, date_format)
81 else:
82 myDateFrom = dateFrom
83
84 if type(dateTo) == str:
85 myDateTo = datetime.datetime.strptime(dateTo, date_format)
86 else:
87 myDateTo = dateTo
88
89 dateRef = myDateFrom
90 while dateRef <= myDateTo:
91 print("Download", undlName, dateRef)
92 downloadNews(undlName, dateRef, savePath)
93 dateRef = moveDate(dateRef, 1)
94
95 def downloadMarketData(undlName, date, savePath):
96
97 # download data
98 try:
99 df_new = ek.get_timeseries(undlName, fields=["CLOSE", "HIGH", "LOW", "OPEN", "VOLUME"],
100 start_date=formatDate(date), end_date=formatDate(date), interval="daily", corax="adjusted")
101 except:
102 df_new = []
103
104 if type(df_new) == pd.core.frame.DataFrame:
105 myUndlName = undlName.split('.')[0] + '_' + undlName.split('.')[1]
106 df_new.index = pd.Series(df_new.index).apply(formatDate)
107 if checkFileExist(savePath + myUndlName + ".csv"):
108 df = pd.read_csv(savePath + myUndlName + ".csv")
109 df = df.set_index("Date")
110 if df_new.index[0] not in list(df.index):
111 df = pd.concat([df, df_new], axis=0)
112 df.to_csv(savePath + myUndlName + ".csv")
113 else:
114 df_new.to_csv(savePath + myUndlName + ".csv")
115
116 def downloadHistoricalMarketData(undlName, dateFrom, dateTo, savePath):
117
118 # download data
119 df = ek.get_timeseries(undlName, fields=["CLOSE", "HIGH", "LOW", "OPEN", "VOLUME"],
120 start_date=dateFrom, end_date=dateTo, interval="daily", corax="adjusted")
121 df.index = pd.Series(df.index).apply(formatDate)
122 myUndlName = undlName.split('.')[0] + '_' + undlName.split('.')[1]
123 df.to_csv(savePath + myUndlName + ".csv")
124
125 def main():
126 today = datetime.datetime.now()
127 df = pd.read_csv(r'D:/Database/Underlying_Database/undlNameList.csv')
128 undlNameList = list(df.undlName.values)
129
130 # download News Headlines
131 for undlName in undlNameList:
132 print("Download", undlName, today)
133 downloadNews(undlName, today, dataRootPathNews)
134
135 if __name__=="__main__":
136 main()
| 30 - refactor: no-else-return
40 - refactor: no-else-return
47 - warning: unused-variable
58 - refactor: consider-using-with
101 - warning: bare-except
7 - warning: unused-import
8 - warning: unused-import
|
1 import time
2 import RPi.GPIO as GPIO
3 import Adafruit_ADS1x15
4
5 THERMISTORVALUE 100000
6 SERIESRESISTOR 100000 #series resistor to thermistor
7 BCOEFFICIENT 4072
8
9 thermistorR2Temp = {3.2575:0, 2.5348:5, 1.9876:10, 1.5699:15, 1.2488:20, 1.0000:25, 0.80594:30, 0.65355:35, 0.53312:40, 0.43735:45, 0.36074:50, 0.29911:55, 0.24925:60, 0.20872:65, 0.17558:70, 0.14837:75, 0.12592:80, 0.10731:85, 0.091816:90, 0.078862:95, 0.067988:100, 0.058824:105, 0.051071:110}
10
11 GPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins
12 GPIO.setup(19,GPIO.out) #pin 19, GPIO12 output
13 GPIO.setup(26,GPIO.out) #pin 26, GPIO07 output
14
15 adc = Adafruit_ADS1x15.ADS1015() #create an ADS1015 ADC (12-bit) instance.
16 # Choose a gain of 1 for reading voltages from 0 to 4.09V.
17 # Or pick a different gain to change the range of voltages that are read:
18 # - 2/3 = +/-6.144V
19 # - 1 = +/-4.096V
20 # - 2 = +/-2.048V
21 # - 4 = +/-1.024V
22 # - 8 = +/-0.512V
23 # - 16 = +/-0.256V
24 # See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
25 GAIN = 1
26
27 while True:
28 reading = adc.read_adc(0, gain=GAIN) #read A0, 12 bit signed integer, -2048 to 2047 (0=GND, 2047=4.096*gain)
29 voltReading = reading * 4.096 / 2047.0 #convert adc to voltage
30 thermoR = SERIESRESISTOR / ((4.0/voltReading) - 1)#convert voltage to thermoster resistance
31 #7002 thermistor
32 #temp =
33
34 print ("reading: " + reading)
35 print ("thermistor resistance: " + thermoR)
36 #print ("temp: " + temp)
| 5 - error: syntax-error
|
1 import time
2 import RPi.GPIO as GPIO
3 import Adafruit_ADS1x15
4
5 GPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins
6 GPIO.setup(8,GPIO.OUT) #pin 8, GPIO15
7
8 adc = Adafruit_ADS1x15.ADS1015() #create an ADS1015 ADC (12-bit) instance.
9
10 # Choose a gain of 1 for reading voltages from 0 to 4.09V.
11 # Or pick a different gain to change the range of voltages that are read:
12 # - 2/3 = +/-6.144V
13 # - 1 = +/-4.096V
14 # - 2 = +/-2.048V
15 # - 4 = +/-1.024V
16 # - 8 = +/-0.512V
17 # - 16 = +/-0.256V
18 # See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
19 GAIN = 1
20
21 while True:
22 '''test adc'''
23 reading = adc.read_adc(0, gain=GAIN) #read A0, 12 bit signed integer, -2048 to 2047 (0=GND, 2047=4.096*gain)
24 #reading = adc.read_adc(0) #gain defaults to 1
25 print(reading)
26 if reading < 1000:
27 GPIO.output(8,0)
28 else:
29 GPIO.output(8.1)
30 time.sleep(0.5)
| 22 - warning: bad-indentation
23 - warning: bad-indentation
25 - warning: bad-indentation
26 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
2 - refactor: consider-using-from-import
22 - warning: pointless-string-statement
|
1 import time
2 import RPi.GPIO as GPIO
3
4 GPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins
5 GPIO.setup(8,GPIO.OUT) #pin 8, GPIO15
6
7 while True:
8 '''test pin8 GPIO15'''
9 GPIO.output(8,1) #output high to pin 8
10 time.sleep(0.5) #delay 0.5 sec
11 GPIO.output(8,0) #output low to pin 8
12 time.sleep(0.5)
13
| 8 - warning: bad-indentation
9 - warning: bad-indentation
10 - warning: bad-indentation
11 - warning: bad-indentation
12 - warning: bad-indentation
2 - refactor: consider-using-from-import
8 - warning: pointless-string-statement
|
1 import os
2 import csv
3 import cv2
4 import numpy as np
5 import sklearn
6 from sklearn.model_selection import train_test_split
7 from sklearn.utils import shuffle
8
9 data_path = 'record'
10
11
12 samples = []
13 with open( data_path + '/driving_log.csv') as csvfile:
14 reader = csv.reader(csvfile)
15 for line in reader:
16 samples.append(line)
17
18
19 train_samples, validation_samples = train_test_split(samples, test_size=0.2)
20
21
22 def generator(samples, batch_size=32):
23 num_samples = len(samples)
24 while 1: # Loop forever so the generator never terminates
25 shuffle(samples)
26 for offset in range(0, num_samples, batch_size):
27 batch_samples = samples[offset:offset+batch_size]
28
29 images = []
30 angles = []
31 for batch_sample in batch_samples:
32 def get_image_path(row):
33 return data_path + '/IMG/'+batch_sample[row].split('/')[-1]
34
35 def read_image(path):
36 img = cv2.imread(path)
37 return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
38
39 center_image_path = get_image_path(0)
40 left_image_path = get_image_path(1)
41 right_image_path = get_image_path(2)
42
43 center_image = read_image(center_image_path)
44 left_image = read_image(left_image_path)
45 right_image = read_image(right_image_path)
46
47 correction = 0.25 # this is a parameter to tune
48 center_angle = float(batch_sample[3])
49 left_angle = center_angle + correction
50 right_angle = center_angle - correction
51
52 fliped_center_image = cv2.flip(center_image, 1)
53 fliped_center_angle = center_angle*-1.0
54
55 images.extend((center_image, left_image, right_image, fliped_center_image))
56 angles.extend((center_angle, left_angle, right_angle, fliped_center_angle))
57
58 # trim image to only see section with road
59 X_train = np.array(images)
60 y_train = np.array(angles)
61 yield sklearn.utils.shuffle(X_train, y_train)
62
63 # compile and train the model using the generator function
64 train_generator = generator(train_samples, batch_size=32)
65 validation_generator = generator(validation_samples, batch_size=32)
66
67 ch, row, col = 3, 80, 320 # Trimmed image format
68
69 from keras.models import Sequential, Model
70 from keras.layers import Cropping2D, Lambda, Convolution2D, Flatten, Dense, Dropout
71 import tensorflow as tf
72 import cv2
73
74 def resize_image(x):
75 from keras.backend import tf as ktf
76 return ktf.image.resize_images(x, (66, 200))
77
78 model = Sequential()
79 model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3)))
80 model.add(Cropping2D(cropping=((70,25),(0,0))))
81 model.add(Lambda(resize_image))
82 model.add(Convolution2D(24,5,5, subsample=(2,2), activation ="relu"))
83 model.add(Convolution2D(36,5,5, subsample=(2,2), activation ="relu"))
84 model.add(Convolution2D(48,5,5, subsample=(2,2), activation ="relu"))
85 model.add(Convolution2D(64,3,3, activation ="relu"))
86 model.add(Convolution2D(64,3,3, activation ="relu"))
87 model.add(Flatten())
88 model.add(Dense(100))
89 model.add(Dense(50))
90 model.add(Dropout(0.7))
91 model.add(Dense(10))
92 model.add(Dropout(0.7))
93 model.add(Dense(1))
94
95 model.compile(loss='mse', optimizer='adam')
96 model.summary()
97 history_object = model.fit_generator(train_generator, samples_per_epoch=
98 len(train_samples), validation_data=validation_generator,
99 nb_val_samples=len(validation_samples), nb_epoch=40)
100
101
102 model.save("model.h5") | 13 - warning: unspecified-encoding
22 - refactor: too-many-locals
22 - warning: redefined-outer-name
32 - warning: redefined-outer-name
33 - warning: cell-var-from-loop
72 - warning: reimported
1 - warning: unused-import
69 - warning: unused-import
71 - warning: unused-import
|
1 # Simple script that generates a random
2 # combination of words from separate strings
3
4 colours = ['red','blue','green'.'yellow']
5 shapes = ['circle','square','triangle','star']
6
7 import random
8 x = random.randint(0, 2)
9 y = random.randint(0, 2)
10
11 combination = colours[x] + (" ") + shapes[y]
12
13 print(combination)
| 4 - error: syntax-error
|
1 import requests
2 response=requests.get("https://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=text")
3 print(response.content)
| 2 - warning: missing-timeout
|
1 from django.urls import path, include
2
3 from store import views
4
5
6 urlpatterns = [
7 path('',views.store,name='store'),
8 path('cart',views.cart, name='cart'),
9 path('checkout',views.checkout, name='checkout')
10 ]
| 1 - warning: unused-import
|
1 """
2 * ____ _
3 * | _ \ ___ __| |_ __ _ _ _ __ ___
4 * | |_) / _ \ / _` | '__| | | | '_ ` _ \
5 * | __/ (_) | (_| | | | |_| | | | | | |
6 * |_| \___/ \__,_|_| \__,_|_| |_| |_|
7 *
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published by
10 * the Free Software Foundation, either version 3 of the License, or
11 * (at your option) any later version.
12 """
13
14 from podrum.network.protocol.ClientToServerHandshakePacket import ClientToServerHandshakePacket
15 from podrum.network.protocol.DataPacket import DataPacket
16 from podrum.network.protocol.DisconnectPacket import DisconnectPacket
17 from podrum.network.protocol.LoginPacket import LoginPacket
18 from podrum.network.protocol.PlayStatusPacket import PlayStatusPacket
19 from podrum.network.protocol.ResourcePacksInfoPacket import ResourcePacksInfoPacket
20 from podrum.network.protocol.ServerToClientHandshakePacket import ServerToClientHandshakePacket
21
22 class PacketPool:
23 packetPool = {}
24
25 def __init__(self):
26 self.registerPackets()
27
28 def registerPacket(packet):
29 self.pool[packet.NID] = packet.copy()
30
31 def registerPackets(self):
32 self.registerPacket(ClientToServerHandshakePacket)
33 self.registerPacket(DisconnectPacket)
34 self.registerPacket(LoginPacket)
35 self.registerPacket(PlayStatusPacket)
36 self.registerPacket(ResourcePacksInfoPacket)
37 self.registerPacket(ServerToClientHandshakePacket)
38
| 3 - warning: anomalous-backslash-in-string
4 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
28 - error: no-self-argument
29 - error: undefined-variable
29 - error: no-member
29 - error: no-member
32 - error: too-many-function-args
33 - error: too-many-function-args
34 - error: too-many-function-args
35 - error: too-many-function-args
36 - error: too-many-function-args
37 - error: too-many-function-args
15 - warning: unused-import
|
1 """
2 * ____ _
3 * | _ \ ___ __| |_ __ _ _ _ __ ___
4 * | |_) / _ \ / _` | '__| | | | '_ ` _ \
5 * | __/ (_) | (_| | | | |_| | | | | | |
6 * |_| \___/ \__,_|_| \__,_|_| |_| |_|
7 *
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published by
10 * the Free Software Foundation, either version 3 of the License, or
11 * (at your option) any later version.
12 """
13
14 import time
15 import os
16
17 from podrum.lang.Base import Base
18 from podrum.utils.Logger import Logger
19 from podrum.utils.ServerFS import ServerFS
20 from podrum.utils.Utils import Utils
21 from podrum.wizard.Wizard import Wizard
22
23 from pyraklib.server.PyRakLibServer import PyRakLibServer
24 from pyraklib.server.ServerHandler import ServerHandler
25
26
27 class Server:
28
29 path = None
30 withWizard = None
31 port = 19132
32 podrumLogo = """
33 ____ _
34 | _ \ ___ __| |_ __ _ _ _ __ ___
35 | |_) / _ \ / _` | '__| | | | '_ ` _ \
36 | __/ (_) | (_| | | | |_| | | | | | |
37 |_| \___/ \__,_|_| \__,_|_| |_| |_|
38 """
39
40 def __init__(self, path, withWizard, isTravisBuild = False):
41 super().__init__()
42 startTime = Utils.microtime(True)
43 self.path = path
44 self.withWizard = withWizard
45 if(withWizard):
46 ServerFS.checkAllFiles(path)
47 else:
48 Wizard.skipWizard(path, True)
49 port = self.port
50 print(str(self.podrumLogo))
51 Wizard.isInWizard = False
52 Logger.log('info', str(Base.get("startingServer")).replace("{ip}", str(Utils.getPrivateIpAddress())).replace("{port}", str(port)))
53 Logger.log('info', str(Base.get("extIpMsg")).replace("{ipPublic}", str(Utils.getPublicIpAddress())))
54 Logger.log('info', str(Base.get("license")))
55 server = PyRakLibServer(port=19132)
56 handler = ServerHandler(server, None)
57 handler.sendOption("name", "MCPE;Podrum powered server;407;1.16.0;0;0;0;PodrumPoweredServer;0")
58 doneTime = Utils.microtime(True)
59 finishStartupSeconds = "%.3f" % (doneTime - startTime)
60 Logger.log('info', f'Done in {str(finishStartupSeconds)}s. Type "help" to view all available commands.')
61 if (isTravisBuild):
62 Server.checkTravisBuild(path)
63 else:
64 while Wizard.isInWizard == False:
65 cmd = input('> ')
66 Server.command(cmd, True)
67 cmd = None
68 ticking = True
69 while ticking:
70 time.sleep(0.002)
71
72 def command(string, fromConsole):
73 if string.lower() == 'stop':
74 Logger.log('info', 'Stopping server...')
75 Utils.killServer()
76 elif string.lower() == '':
77 return
78 elif string.lower() == 'help':
79 Logger.log('info', '/stop: Stops the server')
80 else:
81 Logger.log('error', str(Base.get("invalidCommand")))
82
83 def checkTravisBuild(path):
84 if not ServerFS.checkForFile(path, "server.json"):
85 Logger.log("error", "Couldn't find server.json file.")
86 os._exit(1)
87 if os.path.getsize(f'{path}/server.json') == 0:
88 Logger.log("error", "The server.json file is empty.")
89 os._exit(1)
90 print("Build success.")
91 os._exit(0)
| 3 - warning: anomalous-backslash-in-string
4 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
34 - warning: anomalous-backslash-in-string
35 - warning: anomalous-backslash-in-string
35 - warning: anomalous-backslash-in-string
37 - warning: anomalous-backslash-in-string
37 - warning: anomalous-backslash-in-string
37 - warning: anomalous-backslash-in-string
72 - error: no-self-argument
73 - error: no-member
76 - error: no-member
78 - error: no-member
72 - warning: unused-argument
83 - error: no-self-argument
|
1 """
2 * ____ _
3 * | _ \ ___ __| |_ __ _ _ _ __ ___
4 * | |_) / _ \ / _` | '__| | | | '_ ` _ \
5 * | __/ (_) | (_| | | | |_| | | | | | |
6 * |_| \___/ \__,_|_| \__,_|_| |_| |_|
7 *
8 * This program is free software: you can redistribute it and/or modify
9 * it under the terms of the GNU Lesser General Public License as published by
10 * the Free Software Foundation, either version 3 of the License, or
11 * (at your option) any later version.
12 """
13
14 class ResourcePack:
15 def getPath(): pass
16
17 def getPackName(): pass
18
19 def getPackId(): pass
20
21 def getPackSize(): pass
22
23 def getPackVersion(): pass
24
25 def getSha256(): pass
26
27 def getPackChunk(start, length): pass
| 3 - warning: anomalous-backslash-in-string
4 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
6 - warning: anomalous-backslash-in-string
15 - error: no-method-argument
17 - error: no-method-argument
19 - error: no-method-argument
21 - error: no-method-argument
23 - error: no-method-argument
25 - error: no-method-argument
27 - error: no-self-argument
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.