code | label
---|---|
1 #Write a Python program to count the number of alphabets, digits and spaces in a file.
2
3 f = open("demo.txt", "r")
4 alphabets = 0
5 digits = 0
6 spaces = 0
7 others = 0
8 lines = f.readlines()
9 for line in lines:
10 for c in line:
11 if c.isalpha():
12 alphabets += 1
13 elif c.isdigit():
14 digits += 1
15 elif c.isspace():
16 spaces += 1
17 else:
18 others += 1
19
20
21 print("Number of alphabets", alphabets)
22 print("Number of digits", digits)
23 print("Number of spaces", spaces)
24 print("Others", others) | 3 - warning: unspecified-encoding
3 - refactor: consider-using-with
|
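A minimal sketch of how both findings above could be resolved, using a context manager and an explicit encoding (UTF-8 is an assumption; the source never states the file's encoding):

```python
# Counting loop only: `with` closes the file automatically
# (consider-using-with) and the explicit encoding removes the
# unspecified-encoding warning. UTF-8 is assumed here.
alphabets = digits = spaces = others = 0
with open("demo.txt", "r", encoding="utf-8") as f:
    for line in f:
        for c in line:
            if c.isalpha():
                alphabets += 1
            elif c.isdigit():
                digits += 1
            elif c.isspace():
                spaces += 1
            else:
                others += 1
```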
1 def is_leap(y):
2 return y % 4 == 0
3
4 y = int(input("Enter a year\n"))
5 if is_leap(y):
6 print("Leap year")
7 else:
8 print("Not a Leap Year") | 1 - warning: redefined-outer-name
|
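The `redefined-outer-name` warning fires because the module-level `y` shadows the parameter of `is_leap`. A sketch that renames the outer variable and, while at it, applies the full Gregorian rule (the original `y % 4 == 0` misclassifies century years such as 1900):

```python
def is_leap(y):
    # Gregorian rule: every 4th year, except centuries not divisible by 400.
    return y % 4 == 0 and (y % 100 != 0 or y % 400 == 0)

if __name__ == "__main__":
    year = int(input("Enter a year\n"))  # distinct name, no shadowing
    print("Leap year" if is_leap(year) else "Not a Leap Year")
```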
1 # To find a factorial of a number
2 # 5! = 5 * 4 * 3 * 2 * 1
3
4 # fact(5) = 5 * fact(4)
5 # fact(4) = 4 * fact(3)
6 # fact(3) = 3 * fact(2)
7 # fact(2) = 2 * fact(1)
8 # fact(1) = 1
9
10 # fact(5) = 5 * 4 * 3 * 2 * 1
11
12 def fact(n):
13 if n == 1:
14 return 1
15 return n * fact(n-1)
16
17 n = 5
18 result = fact(n)
19 print(result) | 12 - warning: redefined-outer-name
|
1 for i in range(5):
2 for j in range(5-i):
3 print("X", end=" ")
4
5 print()
6
7 for i in range(5):
8 print("X " * int(5-i)) | Clean Code: No Issues Detected
|
1 from django.shortcuts import render,redirect
2 from .models import Jogo
3 from django.views.decorators.csrf import csrf_protect
4
5 # method to fetch all the objects of the Jogo class when the home page is opened
6 def home_page(request):
7 jogo = Jogo.objects.all()
8 return render (request,'home.html',{'jogo':jogo})
9
10 # method to insert the data into the table when the button is clicked
11 def inserir(request):
12 placar = request.POST.get('nPlacar')
13
14 # method to fetch the values of the previous object
15 try:
16 placarMin = int(Jogo.objects.earliest('placarMin').placarMin)
17 placarMax = int(Jogo.objects.latest('placarMax').placarMax)
18 quebraRecMin = int(Jogo.objects.latest('quebraRecMin').quebraRecMin)
19 quebraRecMax = int(Jogo.objects.latest('quebraRecMax').quebraRecMax)
20 except:
21 placarMin = False
22 placarMax = False
23 quebraRecMin = False
24 quebraRecMax = False
25
26 placar = int(placar)
27
28 # condition to propagate the score into the other attributes besides itself
29 if placarMin is False:
30 placarMin = placar
31 placarMax = placar
32 elif placar < placarMin:
33 placarMin = placar
34 quebraRecMin += 1
35 elif placar > placarMax or placarMax is False:
36 placarMax = placar
37 quebraRecMax += 1
38 else:
39 quebraRecMin = quebraRecMin+ 0
40 quebraRecMmax = quebraRecMax+ 0
41
42 # method to create the object with its attributes already populated
43 jogo = Jogo.objects.create(placarMin=placarMin,placar=placar,placarMax=placarMax,quebraRecMin=quebraRecMin,quebraRecMax=quebraRecMax)
44 return redirect('/') # return to the home page after inserting the data and clicking the insert button | 2 - error: relative-beyond-top-level
20 - warning: bare-except
40 - warning: unused-variable
43 - warning: unused-variable
3 - warning: unused-import
|
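A hedged sketch of how the `bare-except` in the view above could be narrowed: `earliest()`/`latest()` raise the model's `DoesNotExist` on an empty table, so that is the exception worth catching. The `unused-variable` warnings disappear by not binding the result of `Jogo.objects.create(...)` and by dropping the `quebraRecMmax` typo line:

```python
from .models import Jogo  # same relative import as the original module


def load_previous():
    """Fetch the previous records, with a fallback for an empty table."""
    try:
        # earliest()/latest() raise Jogo.DoesNotExist when no rows exist,
        # so catch exactly that instead of a bare `except:`.
        return (int(Jogo.objects.earliest('placarMin').placarMin),
                int(Jogo.objects.latest('placarMax').placarMax),
                int(Jogo.objects.latest('quebraRecMin').quebraRecMin),
                int(Jogo.objects.latest('quebraRecMax').quebraRecMax))
    except Jogo.DoesNotExist:
        return False, False, False, False
```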
1 # Generated by Django 3.1 on 2020-10-01 01:54
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('core', '0001_initial'),
10 ]
11
12 operations = [
13 migrations.RemoveField(
14 model_name='jogo',
15 name='id',
16 ),
17 migrations.AlterField(
18 model_name='jogo',
19 name='idJogo',
20 field=models.AutoField(primary_key=True, serialize=False),
21 ),
22 migrations.AlterField(
23 model_name='jogo',
24 name='placar',
25 field=models.IntegerField(),
26 ),
27 ]
| 6 - refactor: too-few-public-methods
|
1 from django.db import models
2
3 # creation of the class with its attributes
4 class Jogo(models.Model):
5 idJogo = models.AutoField(primary_key=True)
6 placar = models.IntegerField()
7 placarMin = models.IntegerField()
8 placarMax = models.IntegerField()
9 quebraRecMin = models.IntegerField()
10 quebraRecMax = models.IntegerField()
11
12 def __str__(self):
13 return str(self.idJogo)
14
| 4 - refactor: too-few-public-methods
|
1 # Generated by Django 3.1.1 on 2020-09-28 18:50
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 initial = True
9
10 dependencies = [
11 ]
12
13 operations = [
14 migrations.CreateModel(
15 name='Jogo',
16 fields=[
17 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
18 ('idJogo', models.IntegerField()),
19 ('placar', models.IntegerField(max_length=3)),
20 ('placarMin', models.IntegerField()),
21 ('placarMax', models.IntegerField()),
22 ('quebraRecMin', models.IntegerField()),
23 ('quebraRecMax', models.IntegerField()),
24 ],
25 ),
26 ]
| 6 - refactor: too-few-public-methods
|
1 import os
2 import sys
3 import logging
4 from time import time
5
6
7 class AppFilter(logging.Filter):
8 def filter(self, record):
9 record.function_id = os.environ.get("function_id", 'no_function_id')
10 record.request_id = os.environ.get("request_id", 'no_request_id')
11 return True
12
13
14 logger = logging.getLogger('pratai')
15 logger.setLevel(logging.DEBUG)
16
17 # Docker can log stdout and stderr
18 handler = logging.StreamHandler(sys.stdout)
19 handler.setLevel(logging.DEBUG)
20
21 formatter = logging.Formatter('%(asctime)s - %(function_id)s - %(request_id)s - %(levelname)s - %(message)s')
22 logger.addFilter(AppFilter())
23
24 handler.setFormatter(formatter)
25 logger.addHandler(handler)
26
27
28 def load_function_from_filesystem(path='/etc/pratai/'):
29 sys.path.append(path)
30 from new_module import main
31 return main
32
33
34 def load_payload():
35 payload = os.environ.get("pratai_payload", None)
36 return payload
37
38
39 def execute_function():
40 f = load_function_from_filesystem()
41
42 payload = load_payload()
43
44 start = time()
45
46 logger.debug("function started with payload {0}".format(str(payload)))
47
48 result = None
49 try:
50 result = f(payload)
51 status = 'succeeded'
52 except Exception as err:
53 status = 'failed'
54 logger.error(err.message, exc_info=True)
55
56 finish = time()
57
58 logger.debug("function {0}, it took {1} seconds with response {2}"
59 .format(status, str(finish-start), str(result)))
60
61 return result
62
63
64 if __name__ == '__main__':
65 r = execute_function()
66
67
| 7 - refactor: too-few-public-methods
46 - warning: logging-format-interpolation
52 - warning: broad-exception-caught
54 - error: no-member
58 - warning: logging-format-interpolation
|
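Three of the findings above have mechanical fixes: pass format arguments to the logger lazily (they are only rendered if the record is emitted), and use `str(err)` instead of `err.message`, which no longer exists in Python 3. A simplified sketch of the runner, with `f` and `payload` passed in rather than loaded from disk and the environment:

```python
import logging
import sys
from time import time

logger = logging.getLogger('pratai')
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)


def execute_function(f, payload):
    start = time()
    # Lazy %-style arguments fix logging-format-interpolation.
    logger.debug("function started with payload %s", payload)
    result = None
    try:
        result = f(payload)
        status = 'succeeded'
    except Exception as err:  # pylint: disable=broad-exception-caught
        status = 'failed'
        # Python 3 exceptions have no .message attribute; str(err) is safe.
        logger.error(str(err), exc_info=True)
    logger.debug("function %s, it took %s seconds with response %s",
                 status, time() - start, result)
    return result
```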
1 #Problem Statement
2 #Convert a string containing only lower case letters to a string with upper case
3 #It is expected to solve the problem within O(len(str)) time
4 #Auxiliary Space O(1)
5
6 #function to convert string into upper
7 def to_upper(str):
8 #temp will store the integer value of the 1st letter of the string
9 temp = 0
10 #loop will run till the end of string
11 for i in range(len(str)):
12 #ord converts the char into its equivalent integer value
13 #ord(str[0]) - 32, so we will get ASCII value of upper case
14 temp = ord(str[0])-32
15 #storing string in the same string but removing the first element
16 str = str[1::]
17 #chr converts integer into its equivalent char value
18 #adding or concatenating the str and temp together then storing it in str
19 str = str+chr(temp)
20
21 #return str
22 return str
23
24 if __name__ == "__main__":
25 n = input()
26 print(to_upper(n)) | 9 - warning: bad-indentation
11 - warning: bad-indentation
14 - warning: bad-indentation
16 - warning: bad-indentation
19 - warning: bad-indentation
22 - warning: bad-indentation
7 - warning: redefined-builtin
11 - warning: unused-variable
|
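The warnings above come from shadowing the built-in `str` and from the unused loop variable `i`; the repeated slicing also makes the loop quadratic. A sketch that keeps the same ord/chr idea (and the same assumption that the input is ASCII lowercase only) in linear time:

```python
def to_upper(text):
    # chr(ord(c) - 32) maps ASCII 'a'..'z' onto 'A'..'Z'.
    return "".join(chr(ord(c) - 32) for c in text)

if __name__ == "__main__":
    print(to_upper(input()))  # e.g. "hello" -> "HELLO"
```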
1 import pandas as pd
2 protein_data=pd.read_csv('../data/protein_classification.csv')
3 X=protein_data['Sequence']
4 def split(word):
5 return [char for char in word]
6
7 sequences = [split(x) for x in X]
8 protein_data=pd.read_csv('../data/protein_classification.csv')
9 X=protein_data['Sequence']
10
11 import string
12 # import sgtdev as sgt
13 from sgt import Sgt
14 # Spark
15 from pyspark import SparkContext
16 sc = SparkContext("local", "app")
17 rdd = sc.parallelize(sequences)
18 sgt_sc = Sgt(kappa = 1, lengthsensitive = False, mode="spark", alphabets=list(string.ascii_uppercase))
19 rdd_embedding = sgt_sc.fit_transform(corpus=rdd)
20
21 sc.stop()
22 # Multi-processing
23 sgt_mp = Sgt(kappa = 1, lengthsensitive = False, mode="multiprocessing", processors=3)
24 mp_embedding = sgt_mp.fit_transform(corpus=sequences)
25 mp_embedding = sgt_mp.transform(corpus=sequences)
26 # Default
27 sgt = Sgt(kappa = 1, lengthsensitive = False)
28 embedding = sgt.fit_transform(corpus=sequences)
29
30 # Spark again
31 corpus = [["B","B","A","C","A","C","A","A","B","A"], ["C", "Z", "Z", "Z", "D"]]
32
33 sc = SparkContext("local", "app")
34
35 rdd = sc.parallelize(corpus)
36
37 sgt_sc = Sgt(kappa = 1,
38 lengthsensitive = False,
39 mode="spark",
40 alphabets=["A", "B", "C", "D", "Z"],
41 lazy=False)
42 s = sgt_sc.fit_transform(corpus=rdd)
43 print(s)
44 sc.stop() | 5 - warning: bad-indentation
5 - refactor: unnecessary-comprehension
|
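The `unnecessary-comprehension` refers to `[char for char in word]`, an identity copy; the built-in `list()` constructor does the same thing directly:

```python
def split(word):
    # list() already iterates the string into its characters.
    return list(word)

print(split("MKVL"))  # ['M', 'K', 'V', 'L']
```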
1 # Generated by Django 3.0.2 on 2020-03-28 23:19
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('ohjelma', '0003_song_release_year'),
10 ]
11
12 operations = [
13 migrations.CreateModel(
14 name='Track',
15 fields=[
16 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
17 ('track_name', models.CharField(max_length=500)),
18 ('track_artist', models.CharField(max_length=500)),
19 ('track_duration', models.IntegerField(default=200000)),
20 ('track_popularity', models.IntegerField(default=100)),
21 ],
22 ),
23 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.2 on 2020-03-29 10:13
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('ohjelma', '0004_track'),
10 ]
11
12 operations = [
13 migrations.AlterField(
14 model_name='track',
15 name='track_duration',
16 field=models.CharField(max_length=5),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.2 on 2020-03-13 17:36
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('ohjelma', '0001_initial'),
10 ]
11
12 operations = [
13 migrations.CreateModel(
14 name='Song',
15 fields=[
16 ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
17 ('song_name', models.CharField(max_length=200)),
18 ('song_artist', models.CharField(max_length=200)),
19 ],
20 ),
21 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.2 on 2020-03-15 16:01
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('ohjelma', '0002_song'),
10 ]
11
12 operations = [
13 migrations.AddField(
14 model_name='song',
15 name='release_year',
16 field=models.IntegerField(default=2000),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.2 on 2020-04-11 18:42
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('ohjelma', '0006_auto_20200329_1329'),
10 ]
11
12 operations = [
13 migrations.AddField(
14 model_name='track',
15 name='track_id',
16 field=models.CharField(default=0, max_length=30),
17 preserve_default=False,
18 ),
19 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.2 on 2020-03-29 10:29
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('ohjelma', '0005_auto_20200329_1313'),
10 ]
11
12 operations = [
13 migrations.AlterField(
14 model_name='track',
15 name='track_duration',
16 field=models.CharField(max_length=10),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|
1 from django.apps import AppConfig
2
3
4 class OhjelmaConfig(AppConfig):
5 name = 'ohjelma'
| 4 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.2 on 2020-04-11 19:11
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('ohjelma', '0008_track_track_danceability'),
10 ]
11
12 operations = [
13 migrations.AddField(
14 model_name='track',
15 name='track_acousticness',
16 field=models.FloatField(default=0, max_length=10),
17 preserve_default=False,
18 ),
19 migrations.AddField(
20 model_name='track',
21 name='track_energy',
22 field=models.FloatField(default=0, max_length=10),
23 preserve_default=False,
24 ),
25 migrations.AddField(
26 model_name='track',
27 name='track_instrumentalness',
28 field=models.FloatField(default=0, max_length=10),
29 preserve_default=False,
30 ),
31 migrations.AddField(
32 model_name='track',
33 name='track_key',
34 field=models.IntegerField(default=0, max_length=3),
35 preserve_default=False,
36 ),
37 migrations.AddField(
38 model_name='track',
39 name='track_liveness',
40 field=models.FloatField(default=0, max_length=10),
41 preserve_default=False,
42 ),
43 migrations.AddField(
44 model_name='track',
45 name='track_loudness',
46 field=models.FloatField(default=0, max_length=10),
47 preserve_default=False,
48 ),
49 migrations.AddField(
50 model_name='track',
51 name='track_speechiness',
52 field=models.FloatField(default=0, max_length=10),
53 preserve_default=False,
54 ),
55 migrations.AddField(
56 model_name='track',
57 name='track_tempo',
58 field=models.FloatField(default=0, max_length=10),
59 preserve_default=False,
60 ),
61 migrations.AddField(
62 model_name='track',
63 name='track_valence',
64 field=models.FloatField(default=0, max_length=10),
65 preserve_default=False,
66 ),
67 ]
| 6 - refactor: too-few-public-methods
|
1 from django.urls import path
2 from . import views
3
4 urlpatterns = [
5
6 path('', views.index, name = 'home'),
7 path('songs/', views.SongList.as_view(), name = 'song_list'),
8 path('view/<int:pk>', views.SongView.as_view(), name = 'song_view'),
9 path('new', views.SongCreate.as_view(), name = 'song_new'),
10 path('view/<int:pk>', views.SongView.as_view(), name = 'song_view'),
11 path('edit/<int:pk>', views.SongUpdate.as_view(), name = 'song_edit'),
12 path('delete/<int:pk>', views.SongDelete.as_view(), name = 'song_delete'),
13 path('tracks/', views.TrackView, name = 'track_list'),
14 path('yearanalysis/', views.YearAnalysis, name = 'year_analysis'),
15 path('analysis/<int:pk>', views.Analysis.as_view(), name = 'track_detail'),
16
17 #url(r'^tracks/(?P<tracksyear>\w+)/$', views.TrackView, name = "TrackView")
18 path('tracks/<int:tracksyear>', views.TrackView, name = "TrackView")
19 ]
| 2 - error: no-name-in-module
|
1 from django.db import models
2 from django.urls import reverse
3
4 class Question(models.Model):
5 question_text = models.CharField(max_length=200)
6 pub_date = models.DateTimeField('Date published')
7
8
9 class Choice(models.Model):
10 question = models.ForeignKey(Question, on_delete=models.CASCADE)
11 choice_text = models.CharField(max_length=200)
12 votes = models.IntegerField(default=0)
13
14 class Song(models.Model):
15 song_name = models.CharField(max_length=200)
16 song_artist = models.CharField(max_length = 200)
17 release_year = models.IntegerField(default=2000)
18
19 def __str__(self):
20 return self.song_name
21
22 def get_absolute_url(self):
23 return reverse('song_edit', kwargs={'pk': self.pk})
24
25 class Track(models.Model):
26 track_id = models.CharField(max_length=30)
27 track_name = models.CharField(max_length=500)
28 track_artist = models.CharField(max_length = 500)
29 track_duration = models.CharField(max_length = 10)
30 track_popularity = models.IntegerField(default=100)
31
32 track_danceability = models.FloatField(max_length=10)
33 track_energy = models.FloatField(max_length=10)
34 track_key = models.IntegerField(max_length=3)
35 track_loudness = models.FloatField(max_length=10)
36 track_speechiness = models.FloatField(max_length=10)
37 track_acousticness = models.FloatField(max_length=10)
38 track_instrumentalness = models.FloatField(max_length=10)
39 track_liveness = models.FloatField(max_length=10)
40 track_valence = models.FloatField(max_length=10)
41 track_tempo = models.FloatField(max_length=10)
42
43 def __str__(self):
44 return self.track_name
45
46
| 4 - refactor: too-few-public-methods
9 - refactor: too-few-public-methods
25 - refactor: too-few-public-methods
|
1 import logging
2 from logging.handlers import RotatingFileHandler
3
4 from flask import Flask
5 from flask import url_for as _url_for
6 from flask.ext.sqlalchemy import SQLAlchemy
7 from flask.ext.oauth import OAuth
8 from flask.ext.assets import Environment
9
10 import certifi
11 from kombu import Exchange, Queue
12 from celery import Celery
13
14 from nomenklatura import default_settings
15
16 logging.basicConfig(level=logging.DEBUG)
17
18 app = Flask(__name__)
19 app.config.from_object(default_settings)
20 app.config.from_envvar('NOMENKLATURA_SETTINGS', silent=True)
21 app_name = app.config.get('APP_NAME')
22
23 file_handler = RotatingFileHandler('/var/log/nomenklatura/errors.log',
24 maxBytes=1024 * 1024 * 100,
25 backupCount=20)
26 file_handler.setLevel(logging.DEBUG)
27 formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
28 file_handler.setFormatter(formatter)
29 app.logger.addHandler(file_handler)
30
31 if app.debug is not True:
32 from raven.contrib.flask import Sentry
33 sentry = Sentry(app, dsn=app.config.get('SENTRY_DSN'))
34
35 db = SQLAlchemy(app)
36 assets = Environment(app)
37
38 celery = Celery('nomenklatura', broker=app.config['CELERY_BROKER_URL'])
39
40 queue_name = app_name + '_q'
41 app.config['CELERY_DEFAULT_QUEUE'] = queue_name
42 app.config['CELERY_QUEUES'] = (
43 Queue(queue_name, Exchange(queue_name), routing_key=queue_name),
44 )
45
46 celery = Celery(app_name, broker=app.config['CELERY_BROKER_URL'])
47 celery.config_from_object(app.config)
48
49 oauth = OAuth()
50 github = oauth.remote_app('github',
51 base_url='https://github.com/login/oauth/',
52 authorize_url='https://github.com/login/oauth/authorize',
53 request_token_url=None,
54 access_token_url='https://github.com/login/oauth/access_token',
55 consumer_key=app.config.get('GITHUB_CLIENT_ID'),
56 consumer_secret=app.config.get('GITHUB_CLIENT_SECRET'))
57
58 github._client.ca_certs = certifi.where()
59
60
61 def url_for(*a, **kw):
62 try:
63 kw['_external'] = True
64 return _url_for(*a, **kw)
65 except RuntimeError:
66 return None
| 58 - warning: protected-access
|
1 from normality import normalize
2 from flask.ext.script import Manager
3 from flask.ext.assets import ManageAssets
4
5 from nomenklatura.core import db
6 from nomenklatura.model import Entity
7 from nomenklatura.views import app
8 from nomenklatura.assets import assets
9
10 manager = Manager(app)
11 manager.add_command('assets', ManageAssets(assets))
12
13
14 @manager.command
15 def createdb():
16 """ Make the database. """
17 db.engine.execute("CREATE EXTENSION IF NOT EXISTS hstore;")
18 db.engine.execute("CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;")
19 db.create_all()
20
21
22 @manager.command
23 def flush(dataset):
24 ds = Dataset.by_name(dataset)
25 for alias in Alias.all_unmatched(ds):
26 db.session.delete(alias)
27 db.session.commit()
28
29
30 if __name__ == '__main__':
31 manager.run()
| 24 - error: undefined-variable
25 - error: undefined-variable
1 - warning: unused-import
6 - warning: unused-import
|
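The two `undefined-variable` errors mean `flush()` cannot run as written. `Dataset` is re-exported by the `nomenklatura.model` package `__init__` shown further below; where `Alias` is defined is not visible in this dump, so its import in this sketch is an assumption:

```python
from nomenklatura.core import db
from nomenklatura.model import Dataset  # re-exported by the package __init__
# Assumed location for Alias - its module is not shown in this dump.
from nomenklatura.model import Alias


def flush(dataset):
    """Delete all unmatched aliases of a dataset."""
    ds = Dataset.by_name(dataset)
    for alias in Alias.all_unmatched(ds):
        db.session.delete(alias)
    db.session.commit()
```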
1 import os
2
3 def bool_env(val):
4 """Replaces string based environment values with Python booleans"""
5 return True if os.environ.get(val, 'False').lower() == 'true' else False
6
7 #DEBUG = True
8 SECRET_KEY = os.environ.get('SECRET_KEY')
9 SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL',
10 os.environ.get('SHARED_DATABASE_URL'))
11
12 APP_NAME = os.environ.get('APP_NAME', 'nomenklatura')
13
14 GITHUB_CLIENT_ID = os.environ.get('GITHUB_CLIENT_ID')
15 GITHUB_CLIENT_SECRET = os.environ.get('GITHUB_CLIENT_SECRET')
16
17 MEMCACHE_HOST = os.environ.get('MEMCACHIER_SERVERS')
18
19 S3_BUCKET = os.environ.get('S3_BUCKET', 'nomenklatura')
20 S3_ACCESS_KEY = os.environ.get('S3_ACCESS_KEY')
21 S3_SECRET_KEY = os.environ.get('S3_SECRET_KEY')
22
23 CELERY_BROKER = os.environ.get('CLOUDAMQP_URL')
24
25 SIGNUP_DISABLED = bool_env('SIGNUP_DISABLED')
| 5 - refactor: simplifiable-if-expression
|
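The `simplifiable-if-expression` points at the `True if ... else False` pattern in `bool_env`; the comparison already yields a boolean and can be returned directly:

```python
import os


def bool_env(val):
    """Replaces string based environment values with Python booleans"""
    # `==` already returns a bool, so the conditional expression is redundant.
    return os.environ.get(val, 'False').lower() == 'true'
```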
1
2 from nomenklatura.model.dataset import Dataset
3 from nomenklatura.model.entity import Entity
4 from nomenklatura.model.account import Account
5 from nomenklatura.model.upload import Upload
| 2 - warning: unused-import
3 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
|
1 from setuptools import setup, find_packages
2
3 setup(
4 name='nomenklatura',
5 version='0.1',
6 description="Make record linkages on the web.",
7 long_description='',
8 classifiers=[
9 ],
10 keywords='data mapping identity linkage record',
11 author='Open Knowledge Foundation',
12 author_email='info@okfn.org',
13 url='http://okfn.org',
14 license='MIT',
15 packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
16 namespace_packages=[],
17 include_package_data=False,
18 zip_safe=False,
19 install_requires=[
20 ],
21 tests_require=[],
22 entry_points=\
23 """ """,
24 )
| Clean Code: No Issues Detected
|
1 DEBUG = False
2 APP_NAME = 'nomenklatura'
3
4 CELERY_BROKER_URL = 'amqp://guest:guest@localhost:5672//'
5
6 ALLOWED_EXTENSIONS = set(['csv', 'tsv', 'ods', 'xls', 'xlsx', 'txt'])
7
8 SIGNUP_DISABLED = False
| Clean Code: No Issues Detected
|
1 from flask.ext.assets import Bundle
2
3 from nomenklatura.core import assets
4
5 deps_assets = Bundle(
6 'vendor/jquery/dist/jquery.js',
7 'vendor/bootstrap/js/collapse.js',
8 'vendor/angular/angular.js',
9 'vendor/angular-route/angular-route.js',
10 'vendor/angular-bootstrap/ui-bootstrap-tpls.js',
11 'vendor/ngUpload/ng-upload.js',
12 filters='uglifyjs',
13 output='assets/deps.js'
14 )
15
16 app_assets = Bundle(
17 'js/app.js',
18 'js/services/session.js',
19 'js/directives/pagination.js',
20 'js/directives/keybinding.js',
21 'js/directives/authz.js',
22 'js/controllers/app.js',
23 'js/controllers/import.js',
24 'js/controllers/home.js',
25 'js/controllers/docs.js',
26 'js/controllers/review.js',
27 'js/controllers/datasets.js',
28 'js/controllers/entities.js',
29 'js/controllers/profile.js',
30 filters='uglifyjs',
31 output='assets/app.js'
32 )
33
34 css_assets = Bundle(
35 'vendor/bootstrap/less/bootstrap.less',
36 'vendor/font-awesome/less/font-awesome.less',
37 'style/style.less',
38 filters='less,cssrewrite',
39 output='assets/style.css'
40 )
41
42 assets.register('deps', deps_assets)
43 assets.register('app', app_assets)
44 assets.register('css', css_assets)
| Clean Code: No Issues Detected
|
1 import logging
2
3 import requests
4 from flask import url_for, session, Blueprint, redirect
5 from flask import request
6 from apikit import jsonify
7 from werkzeug.exceptions import Forbidden
8
9 from nomenklatura import authz
10 from nomenklatura.core import app, db, github
11 from nomenklatura.model import Account, Dataset
12
13 section = Blueprint('sessions', __name__)
14
15
16 @section.route('/sessions')
17 def status():
18 return jsonify({
19 'logged_in': authz.logged_in(),
20 'api_key': request.account.api_key if authz.logged_in() else None,
21 'account': request.account,
22 'base_url': url_for('index', _external=True)
23 })
24
25
26 @section.route('/sessions/authz')
27 def get_authz():
28 permissions = {}
29 dataset_name = request.args.get('dataset')
30 if dataset_name is not None:
31 dataset = Dataset.find(dataset_name)
32 permissions[dataset_name] = {
33 'view': True,
34 'edit': authz.dataset_edit(dataset),
35 'manage': authz.dataset_manage(dataset)
36 }
37 return jsonify(permissions)
38
39
40 @section.route('/sessions/login')
41 def login():
42 callback = url_for('sessions.authorized', _external=True)
43 return github.authorize(callback=callback)
44
45
46 @section.route('/sessions/logout')
47 def logout():
48 logging.info(authz.require(authz.logged_in()))
49 session.clear()
50 return redirect('/')
51
52
53 @section.route('/sessions/callback')
54 @github.authorized_handler
55 def authorized(resp):
56 if 'access_token' not in resp:
57 return redirect(url_for('index', _external=True))
58 access_token = resp['access_token']
59 session['access_token'] = access_token, ''
60 res = requests.get('https://api.github.com/user?access_token=%s' % access_token,
61 verify=False)
62 data = res.json()
63 for k, v in data.items():
64 session[k] = v
65 account = Account.by_github_id(data.get('id'))
66 if account is None:
67 if app.config.get('SIGNUP_DISABLED'):
68 raise Forbidden("Sorry, account creation is disabled")
69 account = Account.create(data)
70 db.session.commit()
71 return redirect('/')
| 60 - warning: missing-timeout
|
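The `missing-timeout` warning targets the `requests.get` call in `authorized()`, which can block indefinitely. A drop-in replacement with an explicit bound (the 10-second value is an arbitrary choice, not from the source):

```python
res = requests.get(
    'https://api.github.com/user?access_token=%s' % access_token,
    verify=False,
    timeout=10,  # arbitrary bound so a stalled GitHub call cannot hang the view
)
```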
1 # shut up useless SA warning:
2 import warnings
3 warnings.filterwarnings('ignore', 'Unicode type received non-unicode bind param value.')
| Clean Code: No Issues Detected
|
1 # Where I am currently: function makeOutfitMyself allows the user to select an outfit choice from each category, adds it to a list, and returns the complete outfit.
2 # function computerChooses() has not been designed yet
3 # I plan on later adding in color options or allowing the user to add their own options
4 import random
5 gChoices = []
6 DictionaryClothing = {'head options:': 'baseball cap wig sombrero beret fedora toupee'.split(),
7 'chest options': 'blouse dress shirt tanktop bikini t-shirt sweater chestplate corset'.split(),
8 'leg options:':
9 'leggings skinny-jeans khaki\'s shorts daisy-dukes skirt bike-shorts tutu'.split(),
10 'feet options:':
11 'running-shoes tap-dance-shoes clogs stilettos platform-shoes sandals flipflops cowboy-boots'.split(),
12 'accessory options:':
13 'belt purse necklace headband hoop-earrings sword bow mustache goatee glasses'.split()}
14 # def computerChooses():
15 # The computer selects a random clothing option for each clothing category
16 # for every keyValues in DictionaryClothing:
17 # randomIndex = (random.randint(1, len((keyValues)-1)
18 # Return key[randomIndex]
19
20 def makeOutfitMyself():
21 # The user selects a choice for each category
22 Choices = []
23 for item in DictionaryClothing:
24 print(item)
25 print(DictionaryClothing[item])
26 response = ''
27 while response not in DictionaryClothing[item] and response != 'CC':
28 print("please select one of the choices, or type ‘CC’ to have the computer do it for you")
29 response = input()
30 Choices.append(response)
31 return Choices
32 # If input() in values:
33 # Return input()
34 # Else:
35 # randomIndex = (random.randint(1, len((key values)-1)
36 # Return key[randomIndex]
37
38
39 print("""Everyday most people must choose an outfit to wear.This game, 'Dress My Day', is here to help you design outfits.
40 Type MC (my choice) to make one yourself, or CC (computer choice) to have the computer make it for you.
41 If you make it yourself, you will be asked a series of questions about clothing type and color.
42 Select one of the given options by typing it in.
43 At any point you can respond to a question by typing "CC" and the computer will make that specific choice.
44 At the end, you will be told your outfit.""")
45 response = input()
46
47 if response == 'MC':
48 gChoices = makeOutfitMyself()
49 # Else:
50 # Choices.append(ComputerChooses())
51 # print('The outfit is now done. The outfit is: ')
52 # print(Choices)
53 print('Looks like your outfit is: ')
54 for item in gChoices:
55 print(item)
56 print('Hope you enjoyed')
| 23 - warning: redefined-outer-name
26 - warning: redefined-outer-name
4 - warning: unused-import
|
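The commented-out `computerChooses()` pseudocode above amounts to picking one random option per clothing category; a minimal working version of that idea, using `random.choice` on each category list (it relies on the module's existing `DictionaryClothing` and `import random`):

```python
def computerChooses():
    # One random garment from every category, in dictionary order.
    return [random.choice(options) for options in DictionaryClothing.values()]

# gChoices = computerChooses()  # drop-in alternative to makeOutfitMyself()
```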
1 import numpy as np
2 import matplotlib.pyplot as plt
3 from matplotlib import colors
4
5 rpms = np.array([4000,3500,3000,2500,2000,1500,1000,500])
6 throttle = np.array([0,0,10,20,40,60,80,100,120])
7 efi_map = np.array([[17.2, 16.8, 15.5, 14.8, 13.8, 13.0, 12.2],
8 [17.0, 16.5, 15.0, 14.0, 13.4, 13.0, 12.4],
9 [16.8, 16.0, 14.6, 14.2, 13.6, 13.2, 12.6],
10 [16.6, 15.8, 14.8, 14.4, 13.8, 13.4, 12.8],
11 [16.4, 15.5, 15.0, 14.6, 14.0, 13.6, 13.0],
12 [16.2, 15.6, 15.2, 14.8, 14.2, 13.8, 13.2],
13 [16.0, 15.8, 15.5, 15.1, 14.6, 14.0, 13.5]])
14
15 def ShowEFIMap():
16 plt.figure(figsize = (6, 6))
17 ax = plt.subplot(111)
18 ax.set_ylabel("RPM")
19 ax.set_xlabel("Throttle")
20
21 plt.imshow(efi_map, cmap = "autumn")
22
23 ax.set_xticklabels(throttle)
24 ax.set_yticklabels(rpms)
25
26 for a in range(len(efi_map)):
27 for b in range(len(efi_map[a])):
28 ax.text(a,b,efi_map[b,a], ha = "center", va = "center", color = "b")
29
30 ax.set_title("EFI MAP")
31 plt.colorbar()
32
33 plt.show()
34
35 ShowEFIMap() | 3 - warning: unused-import
|
1 from Data import MNIST_Data
2 from sklearn.preprocessing import OneHotEncoder
3 from sklearn.model_selection import train_test_split
4 from sklearn.linear_model import LogisticRegression
5 import numpy as np
6 import csv
7
8 mnist_loader = MNIST_Data(base_dir = "/home/hrishi/1Hrishi/ECE542_Neural_Networks/Homeworks/2/Data/", img_size = 784)
9
10 X_train = mnist_loader._load_imgs("train-images-idx3-ubyte.gz")
11 y_train = mnist_loader._load_labels("train-labels-idx1-ubyte.gz")
12 X_test = mnist_loader._load_imgs("t10k-images-idx3-ubyte.gz")
13 y_test = mnist_loader._load_labels("t10k-labels-idx1-ubyte.gz")
14
15 # np.random.seed(1) # Reset random state
16 # np.random.shuffle(X_train)
17 # np.random.shuffle(y_train)
18
19 input = np.append(X_train, y_train[:,None], axis=1)
20 # print(input.shape)
21 np.random.shuffle(input)
22 X_train = input[:,0:784]
23 y_train = input[:,784]
24
25 # X_train, X_test, y_train, y_test = train_test_split(train_images, train_labels, test_size=0.33, shuffle = True, random_state=42)
26
27 # from sklearn.preprocessing import StandardScaler
28 # scaler = StandardScaler()
29 # X_train = scaler.fit_transform(X_train)
30
31 # from sklearn.decomposition import PCA
32 # pca = PCA(n_components = 256)
33 # X_train = pca.fit_transform(X_train)
34 # X_test = pca.fit_transform(X_test)
35
36 # l2-sag-ovr = 91.25% acc without standard scaling
37 # l2-sag-multinomial = 91.91% acc without standard scaling
38 # l1-saga-ovr = 91.37% acc without standard scaling
39 # l1-saga-multinomial = 92.29% acc without standard scaling
40
41 # logistic_regressor = LogisticRegression(penalty = 'l1', solver = 'saga', tol = 1e-1, multi_class = 'multinomial', verbose = 1, n_jobs = -1)
42 # logistic_regressor.fit(X_train, y_train)
43 #
44 # predictions = logistic_regressor.predict(X_test)
45 # from sklearn.metrics import accuracy_score
46 # print(accuracy_score(y_test, predictions))
47 #
48 # onehot_encoder = OneHotEncoder(n_values = 10, sparse = False, dtype = np.int8)
49 # predictions = onehot_encoder.fit_transform(y_train.reshape(-1,1))
50 # np.savetxt('lr.csv', predictions, delimiter = ',', fmt = '%i')
51
52 from sklearn.ensemble import RandomForestClassifier
53 random_forest_regressor = RandomForestClassifier(criterion = 'entropy', verbose = 1)
54 random_forest_regressor.fit(X_train, y_train)
55
56 predictions = random_forest_regressor.predict(X_test)
57 from sklearn.metrics import accuracy_score
58 print(accuracy_score(y_test, predictions))
59
60 onehot_encoder = OneHotEncoder(n_values = 10, sparse = False, dtype = np.int8)
61 predictions = onehot_encoder.fit_transform(y_train.reshape(-1,1))
62 np.savetxt('rf.csv', predictions, delimiter = ',', fmt = '%i')
| 19 - warning: redefined-builtin
10 - warning: protected-access
11 - warning: protected-access
12 - warning: protected-access
13 - warning: protected-access
3 - warning: unused-import
4 - warning: unused-import
6 - warning: unused-import
|
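The `redefined-builtin` warning is the `input` variable on line 19 shadowing the built-in; renaming it also makes the intent of the block (shuffling features and labels together) clearer. A drop-in sketch:

```python
# Stack features and labels so one shuffle keeps them aligned,
# then split them back apart. `train_data` avoids shadowing input().
train_data = np.append(X_train, y_train[:, None], axis=1)
np.random.shuffle(train_data)
X_train = train_data[:, 0:784]
y_train = train_data[:, 784]
```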
1 import numpy as np
2 import gzip
3 from sklearn.preprocessing import OneHotEncoder
4
5 class MNIST_Data(object):
6
7 def __init__(self, base_dir, img_size):
8 self.base_dir = base_dir
9 self.img_size = img_size
10
11 def _load_labels(self, file_name):
12 file_path = self.base_dir + file_name
13
14 with gzip.open(file_path, 'rb') as f:
15 labels = np.frombuffer(f.read(), np.uint8, offset=8)
16
17 return np.array(labels)
18
19 def _load_imgs(self, file_name):
20 file_path = self.base_dir + file_name
21
22 with gzip.open(file_path, 'rb') as f:
23 images = np.frombuffer(f.read(), np.uint8, offset=16)
24 images = images.reshape(-1, self.img_size)
25
26 return np.array(images)
27
28 if __name__ == '__main__':
29 mnist_loader = MNIST_Data(base_dir = "/home/hrishi/1Hrishi/ECE542_Neural_Networks/Homeworks/2/Data/", img_size = 784)
30 train_labels = mnist_loader._load_labels("train-labels-idx1-ubyte.gz")
31 onehot_encoder = OneHotEncoder(n_values = 10, sparse=False)
32 onehot_encoded = onehot_encoder.fit_transform(train_labels.reshape(-1,1))
33 print(train_labels)
34 print(onehot_encoded)
| 15 - warning: bad-indentation
23 - warning: bad-indentation
5 - refactor: useless-object-inheritance
5 - refactor: too-few-public-methods
30 - warning: protected-access
|
1 import sys
2 import traceback
3 from flask import jsonify, request
4
5 from . import api
6
7 class InvalidAPIUsage(Exception):
8 status_code = 400
9
10 def __init__(self, message='', status_code=None):
11 super().__init__()
12 self.message = message
13 self.path = request.path
14 if status_code is None:
15 self.status_code = InvalidAPIUsage.status_code
16
17 def to_dict(self):
18 rv = {}
19 rv['path'] = self.path
20 rv['status'] = self.status_code
21 rv['message'] = self.message
22 return rv
23
24
25 class IncorrectVideoFormat(InvalidAPIUsage):
26 def __init__(self, message_id):
27 super().__init__()
28 self.message = self.msg[message_id]
29
30 msg = {1:'Incorrect video type: only RGB - Type=video/mp4 allowed',
31 2:'Incorrect video dimensions: only 720p supported (1280*720)'}
32
33
34 class InvalidFilterParams(InvalidAPIUsage):
35 def __init__(self, message_id, filter_name=''):
36 super().__init__()
37 self.message = self.msg(message_id, filter_name)
38
39 def msg(self, id, filter_name):
40 # TODO:Lukas [07252021] messages could be stored in static files as JSON
41 avail_msg = {1:'Incorrect filter parameters: should be {"fps": "<fps: float>", "filter_params":{"type":"<filter: str>"}} \
42 or for default preview, {"filter_params":{"type":""}}',
43 2:f'Incorrect filter parameters: filter does not exist, for more go to /api/v1/help/filters/',
44 3:f'Incorrect filter parameters: required parameters are missing or invalid, for more go to /api/v1/help/filters/{filter_name}/',
45 4:f'Incorrect download parameters: for more go to /api/v1/help/download/',
46 }
47 return avail_msg[id]
48
49
50 @api.errorhandler(InvalidAPIUsage)
51 def invalid_api_usage(e):
52 return jsonify(e.to_dict()), 400
53
| 40 - warning: fixme
5 - error: no-name-in-module
39 - warning: redefined-builtin
43 - warning: f-string-without-interpolation
45 - warning: f-string-without-interpolation
1 - warning: unused-import
2 - warning: unused-import
|
1 from flask_swagger_ui import get_swaggerui_blueprint
2
3
4 swagger_ui = get_swaggerui_blueprint(
5 '/docs',
6 '/static/swagger.json',
7 config={
8 "app_name": "videoApi"
9 }
10 )
| Clean Code: No Issues Detected
|
1 from flask import request, jsonify
2 from functools import wraps
3
4 from .errors import InvalidAPIUsage, InvalidFilterParams, IncorrectVideoFormat
5
6
7
8 """
9 Almost like an Architect - makes decorations
10 """
11 def decorator_maker(func):
12 def param_decorator(fn=None, does_return=None, req_c_type=None, req_type=None, arg=None, session=None):
13 def deco(fn):
14 @wraps(fn)
15 def wrapper(*args, **kwargs):
16 result = func(does_return, req_c_type, req_type, arg, session)
17 if does_return:
18 return fn(result)
19 return fn(*args, **kwargs)
20 return wrapper
21 if callable(fn): return deco(fn)
22 return deco
23 return param_decorator
24
25
26
27 """
28 Checks if user input is not out of bounds, and also Content-Type
29 """
30 def wrap_param_check(does_return, req_c_type, req_type, arg, session):
31 check_content_type(req_c_type)
32 return check_correct_filter_params(session)
33
34 def check_content_type(req_c_type):
35 if not request.content_type.startswith(req_c_type):
36 raise InvalidAPIUsage(f'Content-Type should be of type: {req_c_type}', 400)
37
38 def check_correct_filter_params(session):
39 if request.data:
40 data = request.get_json()
41 f_params = data['filter_params']
42 if 'filter_params' not in data:
43 raise InvalidFilterParams(1)
44 elif 'type' not in f_params:
45 raise InvalidFilterParams(1)
46 if 'download' in request.url:
47 if 'fps' not in data:
48 raise InvalidFilterParams(1)
49 if 'max_f' in f_params and 'min_f' in f_params:
50 max_fr = session['video_frame_count']
51 min_f_raw = f_params['min_f']
52 max_f_raw = f_params['max_f']
53
54 if min_f_raw == "": min_f_raw = 0
55 if max_f_raw == "": max_f_raw = max_fr
56
57 min_f = _check_for_req_type(int, min_f_raw, 4)
58 max_f = _check_for_req_type(int, max_f_raw, 4)
59 a = check_bounds(min_f_raw, max_fr)
60 b = check_bounds(max_f_raw, max_fr)
61 return sorted([a, b])
62
63
64 def _check_for_req_type(req_type, val, ex):
65 try:
66 req_type(val)
67 except Exception:
68 raise InvalidFilterParams(ex)
69 return val
70
71 parameter_check = decorator_maker(wrap_param_check)
72
73
74
75 """
76 Checks if user input is not out of bounds, and also Content-Type
77 """
78 def wrap_url_arg_check(does_return, req_c_type, req_type, arg, session):
79 check_arg_urls(req_type, arg)
80 frame_idx = request.view_args[arg]
81 return check_bounds(frame_idx, session['video_frame_count'])
82
83
84 def check_arg_urls(req_type, arg):
85 try:
86 req_type(request.view_args[arg])
87 except ValueError:
88 raise InvalidAPIUsage(f'Content-Type should be of type: {req_type.__name__}', 400)
89
90 def check_bounds(frame_idx, max_frames):
91 f_max = int(max_frames)
92 f_idx = int(frame_idx)
93 if f_idx > f_max:
94 f_idx = f_max-50
95 elif f_idx < 1:
96 f_idx = 1
97 return f_idx
98
99 url_arg_check = decorator_maker(wrap_url_arg_check)
100
101
102
103 """
104 Checks Video Metadata
105 """
106 def wrap_metadata_check(does_return, req_c_type, req_type, arg, session):
107 check_metadata(req_type)
108
109 def check_metadata(req_type):
110 byteStream = request.files['file']
111 vid_type = byteStream.__dict__['headers'].get('Content-Type')
112 if vid_type != req_type:
113 raise IncorrectVideoFormat(1)
114
115 metadata_check = decorator_maker(wrap_metadata_check)
116
117
118
119 """
120 Exception Handler for non-Endpoints
121 """
122 def exception_handler(fn=None, ex=None, type=None, pas=False):
123 def deco(fn):
124 @wraps(fn)
125 def wrapper(*args, **kwargs):
126 try:
127 fn(*args, **kwargs)
128 except Exception:
129 if not pas:
130 raise ex(type)
131 pass
132 return fn(*args, **kwargs)
133 return wrapper
134 if callable(fn): return deco(fn)
135 return deco
| 4 - error: relative-beyond-top-level
8 - warning: pointless-string-statement
12 - refactor: too-many-arguments
12 - refactor: too-many-positional-arguments
27 - warning: pointless-string-statement
30 - warning: unused-argument
30 - warning: unused-argument
30 - warning: unused-argument
42 - refactor: no-else-raise
38 - refactor: inconsistent-return-statements
57 - warning: unused-variable
58 - warning: unused-variable
68 - warning: raise-missing-from
78 - warning: unused-argument
78 - warning: unused-argument
88 - warning: raise-missing-from
106 - warning: unused-argument
106 - warning: unused-argument
106 - warning: unused-argument
106 - warning: unused-argument
122 - warning: redefined-builtin
128 - warning: broad-exception-caught
130 - warning: raise-missing-from
131 - warning: unnecessary-pass
1 - warning: unused-import
|
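The `raise-missing-from` warnings share one fix: bind the caught exception and chain it with `raise ... from`, preserving the original traceback. For example, `_check_for_req_type` could read (using the module's own `InvalidFilterParams`):

```python
def _check_for_req_type(req_type, val, ex):
    try:
        req_type(val)
    except Exception as exc:  # pylint: disable=broad-exception-caught
        # `from exc` keeps the original cause visible in the traceback.
        raise InvalidFilterParams(ex) from exc
    return val
```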
1 from flask import Blueprint
2
3 api = Blueprint('videoApi', __name__)
4
5 from . import videoApi, errors, help
| 5 - warning: redefined-builtin
5 - error: no-name-in-module
5 - error: no-name-in-module
5 - error: no-name-in-module
5 - warning: unused-import
5 - warning: unused-import
5 - warning: unused-import
|
1 from flask import Flask
2 from config import config
3 from flask_caching import Cache
4
5 from flask_swagger_ui import get_swaggerui_blueprint
6
7 VIDEO_EXTENSION=None
8 VIDEO_WIDTH=None
9 VIDEO_HEIGHT=None
10
11 VIDEO_UPLOAD_PATH=None
12 FRAMES_UPLOAD_PATH=None
13 IMG_EXTENSION=None
14
15 HELP_MSG_PATH=None
16
17 CACHE=None
18
19
20 def create_app(config_name):
21
22 global VIDEO_EXTENSION
23 global VIDEO_WIDTH
24 global VIDEO_HEIGHT
25
26 global VIDEO_UPLOAD_PATH
27 global FRAMES_UPLOAD_PATH
28
29 global IMG_EXTENSION
30 global HELP_MSG_PATH
31 global CACHE
32
33 app = Flask(__name__)
34 app.config.from_object(config[config_name])
35 config[config_name].init_app(app)
36
37 cache = Cache(config={"CACHE_TYPE": "filesystem",
38 "CACHE_DIR": app.root_path + '/static/cache'})
39 cache.init_app(app)
40
41 CACHE = cache
42
43 VIDEO_EXTENSION = app.config["VIDEO_EXTENSION"]
44 VIDEO_WIDTH = int(app.config["VIDEO_WIDTH"])
45 VIDEO_HEIGHT = int(app.config["VIDEO_HEIGHT"])
46
47 IMG_EXTENSION = app.config["IMG_EXTENSION"]
48
49 VIDEO_UPLOAD_PATH = app.root_path + '/static/uploads/videos'
50 FRAMES_UPLOAD_PATH = app.root_path + '/static/uploads/frames'
51
52 HELP_MSG_PATH = app.root_path + '/static/helpmessages'
53
54 #TODO: video max dimensions, video max length
55
56 from .main import main as main_blueprint
57 app.register_blueprint(main_blueprint)
58
59 from .api import api as api_blueprint
60 app.register_blueprint(api_blueprint, url_prefix='/videoApi/v1')
61
62 from .docs import swagger_ui
63 app.register_blueprint(swagger_ui, url_prefix="/docs")
64
65
66 return app
| 54 - warning: fixme
22 - warning: global-statement
23 - warning: global-statement
24 - warning: global-statement
26 - warning: global-statement
27 - warning: global-statement
29 - warning: global-statement
30 - warning: global-statement
31 - warning: global-statement
56 - error: relative-beyond-top-level
59 - error: relative-beyond-top-level
62 - error: relative-beyond-top-level
5 - warning: unused-import
|
1 import cv2
2 import math
3 import string
4 import random
5 import numpy as np
6 import skvideo.io
7 from PIL import Image
8
9 from .. import VIDEO_EXTENSION, VIDEO_UPLOAD_PATH, \
10 FRAMES_UPLOAD_PATH, IMG_EXTENSION, CACHE
11
12 FPS = 23.98
13 SK_CODEC = 'libx264'
14
15
16 def create_vid_path(name):
17 return f'{VIDEO_UPLOAD_PATH}/{name}{VIDEO_EXTENSION}'
18
19 def create_frame_path(name):
20 return f'{FRAMES_UPLOAD_PATH}/{name}{IMG_EXTENSION}'
21
22 def framecount_from_vid_id(video_id):
23 video_path = create_vid_path(video_id)
24 cap = cv2.VideoCapture(video_path)
25 return math.floor(cap.get(7))
26
27 def id_generator(size, chars=string.ascii_lowercase + string.digits) -> str:
28 return ''.join(random.choice(chars) for _ in range(size))
29
30
31 def create_sk_video_writer(video_f_path, fps = None):
32 if not fps : fps = FPS
33 return skvideo.io.FFmpegWriter(video_f_path,
34 outputdict={'-c:v':SK_CODEC, '-profile:v':'main',
35 '-pix_fmt': 'yuv420p', '-r':str(fps)})
36
37
38 def set_cache_f_count(s_id: str, ud: str, fc: str) -> None:
39 CACHE.set(f'{s_id}_{ud}', fc)
40
41
42 def bgr_to_rgb(frame: np.ndarray) -> np.ndarray:
43 return frame[:, :, ::-1]
44
45
46 def is_greyscale(frame) -> bool:
47 return frame.ndim == 2
48
49
50 def is_rgb(frame) -> bool:
51 return frame.ndim == 3
52
53
54 def img_from_greyscale(frame: np.ndarray) -> Image:
55 return Image.fromarray(frame).convert("L")
56
57
58 def img_from_bgr(frame: np.ndarray) -> Image:
59 return Image.fromarray(bgr_to_rgb(frame))
60
61
62
| 9 - error: relative-beyond-top-level
|
1 import os
2 basedir = os.path.abspath(os.path.dirname(__file__))
3
4
5 class Config:
6
7 """
8 """
9 SECRET_KEY = os.environ.get('SECRET_KEY')
10 FLASK_CONFIG = os.environ.get('FLASK_CONFIG')
11
12 VIDEO_EXTENSION = os.environ.get('VIDEO_EXTENSION')
13 VIDEO_WIDTH = os.environ.get('VIDEO_WIDTH')
14 VIDEO_HEIGHT = os.environ.get('VIDEO_HEIGHT')
15
16 IMG_EXTENSION = os.environ.get('IMG_EXTENSION')
17
18
19 @staticmethod
20 def init_app(app):
21 pass
22
23
24 class DevelopmentConfig(Config):
25
26 """
27 """
28 DEBUG = True
29
30 config = {
31 'development': DevelopmentConfig,
32 'default': DevelopmentConfig
33 }
| 5 - refactor: too-few-public-methods
24 - refactor: too-few-public-methods
|
1 from werkzeug.utils import secure_filename
2 from functools import partial
3 import subprocess as sp
4 import time
5
6 import skvideo.io
7 import numpy as np
8 import threading
9 import ffmpeg
10 import shlex
11 import cv2
12 import re
13
14 from PIL import Image
15
16 from werkzeug.datastructures import FileStorage as FStorage
17 from .. import VIDEO_EXTENSION, VIDEO_WIDTH, VIDEO_HEIGHT, \
18 VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION
19
20 from . import utils
21 from . errors import IncorrectVideoFormat, InvalidFilterParams, InvalidAPIUsage
22 from . decorators import exception_handler
23
24 FRAME_SIZE = VIDEO_WIDTH * VIDEO_HEIGHT * 3
25 FRAME_WH = (VIDEO_WIDTH, VIDEO_HEIGHT)
26 FFMPEG_COMMAND = 'ffmpeg -i pipe: -f rawvideo -pix_fmt bgr24 -an -sn pipe: -loglevel quiet'
27
28 ID_LEN = 32
29
30
31
32 class Frame:
33
34 def __init__(self, id=None):
35 self.id = id
36
37 @exception_handler(ex=IncorrectVideoFormat, type=2)
38 def from_bytes(self, in_bytes: bytes) -> np.ndarray:
39 """
40 """
41 frame_arr = np.frombuffer(in_bytes, np.uint8)
42 f_arr = frame_arr.reshape([VIDEO_HEIGHT, VIDEO_WIDTH, 3])
43 return utils.bgr_to_rgb(f_arr)
44
45 def f_save(self, frame: np.ndarray, frame_id: str) -> None:
46 upload_path = utils.create_frame_path(frame_id)
47 if utils.is_rgb(frame):
48 Image.fromarray(frame).save(upload_path)
49 return
50 utils.img_from_greyscale(frame).save(upload_path)
51 return
52
53 def get_by_idx(self, frame_idx):
54 vid = utils.create_vid_path(self.id)
55 cap = cv2.VideoCapture(vid)
56 cap.set(1, frame_idx)
57 _, frame = cap.read()
58 return frame
59
60
61
62 class VideoUploader(Frame):
63
64 def __init__(self):
65 id = utils.id_generator(ID_LEN)
66 super().__init__(id)
67 self.frame_count = 0
68
69 def upload_from_bytestream(self, byte_stream: FStorage):
70 video_f_path = utils.create_vid_path(self.id)
71 sk_writer = utils.create_sk_video_writer(video_f_path)
72
73 sh_command = shlex.split(FFMPEG_COMMAND)
74 process = sp.Popen(sh_command, stdin=sp.PIPE, stdout=sp.PIPE, bufsize=10**8)
75 thread = threading.Thread(target=self._writer, args=(process, byte_stream, ))
76 thread.start()
77
78 while True:
79 in_bytes = process.stdout.read(FRAME_SIZE)
80 if not in_bytes: break
81 frame = self.from_bytes(in_bytes)
82 self.frame_count += 1
83 if self.frame_count == 1: self.f_save(frame, self.id)
84 sk_writer.writeFrame(frame)
85 thread.join()
86 sk_writer.close()
87
88 def _writer(self, process, byte_stream):
89 for chunk in iter(partial(byte_stream.read, 1024), b''):
90 process.stdin.write(chunk)
91 try:
92 process.stdin.close()
93 except (BrokenPipeError):
94 pass
95
96
97
98 class Filter:
99
100 def __init__(self, img=None):
101 self.img = img
102
103 def applyCanny(self, params):
104 if 'thresh1' in params and 'thresh2' in params:
105 gs_img = self.applyGreyScale(params)
106 return cv2.Canny(gs_img,
107 int(params['thresh1']),
108 int(params['thresh2']))
109 raise InvalidFilterParams(3, 'canny')
110
111 def applyGauss(self, params):
112 if 'ksize_x' and 'ksize_y' in params and \
113 params['ksize_x'] % 2 != 0 and \
114 params['ksize_y'] % 2 != 0:
115 g_img = self.img.copy()
116 if np.ndim(g_img) == 3: g_img = utils.bgr_to_rgb(g_img)
117 return cv2.GaussianBlur(g_img,
118 (int(params["ksize_x"]), int(params["ksize_y"])), 0)
119 raise InvalidFilterParams(3, 'gauss')
120
121 def applyGreyScale(self, _):
122 c_img = self.img.copy()
123 return cv2.cvtColor(c_img, cv2.COLOR_RGB2GRAY)
124
125 def applyLaplacian(self, params):
126 gs_img = self.applyGreyScale(params)
127 return cv2.Laplacian(gs_img, cv2.CV_8U)
128
129 def run_func(self, params):
130 if params["type"] in self.filter_map:
131 func = self.filter_map[params["type"]].__get__(self, type(self))
132 return func(params)
133 raise InvalidFilterParams(2)
134
135 def _default(self, _):
136 return utils.bgr_to_rgb(self.img)
137
138 filter_map = {'canny': applyCanny,
139 'gauss': applyGauss,
140 'greyscale': applyGreyScale,
141 'laplacian': applyLaplacian,
142 '': _default}
143
144
145
146 class VideoDownloader(Frame, Filter):
147
148 def __init__(self, fps, vid_range=None):
149 Frame.__init__(self)
150 Filter.__init__(self)
151 self.fps = fps
152 self.vid_range = vid_range
153 self.curr_f_frame = None
154 if vid_range:
155 self.range_min = vid_range[0]
156 self.range_max = vid_range[1]
157
158 def download(self, s_id, tot_video_frames, params):
159 f_vid_name = f'{s_id}_{params["type"]}'
160 video_f_path = utils.create_vid_path(f_vid_name)
161 local_vid = cv2.VideoCapture(utils.create_vid_path(s_id))
162 vid_writer = utils.create_sk_video_writer(video_f_path, self.fps)
163
164 for i in range(tot_video_frames-1):
165 utils.set_cache_f_count(s_id, 'd', i)
166 _, curr_frame = local_vid.read()
167 if curr_frame is None: break
168 self.img = curr_frame
169 f_frame = self._filter_apply(i, params)
170 vid_writer.writeFrame(f_frame)
171 vid_writer.close()
172 return f_vid_name
173
174 def _filter_apply(self, i, params):
175 """
176 we simply check if a range is given,
177 then if we get a gs-img from the filter we add three dimensions
178 """
179 if self.vid_range:
180 if(i >= self.vid_range[0] and
181 i <= self.vid_range[1]):
182 f_frame = self.run_func(params)
183 if not utils.is_rgb(f_frame):
184 return np.dstack(3*[f_frame])
185 return f_frame
186 else:
187 return self.run_func({"type":""})
188 else:
189 return self.run_func(params)
| 70 - warning: bad-indentation
71 - warning: bad-indentation
73 - warning: bad-indentation
74 - warning: bad-indentation
75 - warning: bad-indentation
76 - warning: bad-indentation
78 - warning: bad-indentation
79 - warning: bad-indentation
80 - warning: bad-indentation
81 - warning: bad-indentation
82 - warning: bad-indentation
83 - warning: bad-indentation
84 - warning: bad-indentation
85 - warning: bad-indentation
86 - warning: bad-indentation
159 - warning: bad-indentation
160 - warning: bad-indentation
161 - warning: bad-indentation
162 - warning: bad-indentation
164 - warning: bad-indentation
165 - warning: bad-indentation
166 - warning: bad-indentation
167 - warning: bad-indentation
168 - warning: bad-indentation
169 - warning: bad-indentation
170 - warning: bad-indentation
171 - warning: bad-indentation
172 - warning: bad-indentation
17 - error: relative-beyond-top-level
20 - error: no-name-in-module
21 - error: relative-beyond-top-level
22 - error: relative-beyond-top-level
34 - warning: redefined-builtin
65 - warning: redefined-builtin
74 - refactor: consider-using-with
112 - refactor: simplifiable-condition
180 - refactor: no-else-return
180 - refactor: chained-comparison
1 - warning: unused-import
4 - warning: unused-import
6 - warning: unused-import
9 - warning: unused-import
12 - warning: unused-import
17 - warning: unused-import
17 - warning: unused-import
17 - warning: unused-import
17 - warning: unused-import
21 - warning: unused-import
|
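The `simplifiable-condition` on line 112 hides a real bug: `'ksize_x' and 'ksize_y' in params` parses as `'ksize_x' and ('ksize_y' in params)`, and a non-empty string is always truthy, so `ksize_x` is never actually checked. A sketch of `applyGauss` with both membership tests spelled out (same class context and helpers as above):

```python
def applyGauss(self, params):
    # Each key needs its own `in params` test; a bare string is truthy.
    if ('ksize_x' in params and 'ksize_y' in params and
            params['ksize_x'] % 2 != 0 and params['ksize_y'] % 2 != 0):
        g_img = self.img.copy()
        if np.ndim(g_img) == 3:
            g_img = utils.bgr_to_rgb(g_img)
        return cv2.GaussianBlur(
            g_img, (int(params['ksize_x']), int(params['ksize_y'])), 0)
    raise InvalidFilterParams(3, 'gauss')
```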
1 from flask import jsonify, request, send_from_directory
2 from . decorators import parameter_check
3 from . import api
4 from .. import HELP_MSG_PATH
5 import json
6
7 AV_EP = ["upload", "preview", "download", "stats", "filters"]
8 AV_FILTERS = ["canny", "greyscale", "laplacian", "gauss"]
9
10 @api.route('/help/', methods=['GET'])
11 @api.route('/help/<endpts>/', methods=['GET'])
12 @api.route('/help/filters/<filter_type>/', methods=['GET'])
13 @parameter_check(req_c_type='application/json')
14 def help(endpts=None, filter_type=None):
15 if endpts and endpts in AV_EP:
16 return jsonify(load_json_from_val(endpts)), 200
17 elif filter_type and filter_type in AV_FILTERS:
18 return jsonify(load_json_from_val(filter_type)), 200
19 else:
20 return jsonify(load_json_from_val('help')), 200
21
22
23 def load_json_from_val(val):
24 f = open(HELP_MSG_PATH+f'/{val}.json')
25 return json.load(f)
| 14 - warning: redefined-builtin
2 - error: relative-beyond-top-level
3 - error: no-name-in-module
4 - error: relative-beyond-top-level
15 - refactor: no-else-return
24 - warning: unspecified-encoding
24 - refactor: consider-using-with
1 - warning: unused-import
1 - warning: unused-import
|
1 import os
2 from flask import Flask, request, redirect, \
3 url_for, session, jsonify, send_from_directory, make_response, send_file
4
5 from . import api
6 from . import utils
7 from .. import VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION, VIDEO_EXTENSION, CACHE
8
9 from . VideoProcessing import Frame, VideoUploader, VideoDownloader, Filter
10 from . decorators import parameter_check, url_arg_check, metadata_check
11 from . errors import InvalidAPIUsage
12
13
14
15 @api.route('/upload/', methods=['POST'])
16 @parameter_check(does_return=False, req_c_type='multipart/form-data')
17 @metadata_check(does_return=False, req_type='video/mp4')
18 def upload_video():
19 """
20 uploads the video
21 """
22
23 byteStream = request.files['file']
24 vu = VideoUploader()
25 vu.upload_from_bytestream(byteStream)
26
27 session['s_id'] = vu.id
28 f_c = utils.framecount_from_vid_id(vu.id)
29 session['video_frame_count'] = f_c
30 session['is_uploaded'] = True
31
32 return jsonify({'status' : '201',
33 'message' : 'video uploaded!'}), 201
34
35
36
37 @api.route('/preview/', defaults={'frame_idx':1}, methods=['GET'])
38 @api.route('/preview/<frame_idx>/', methods=['GET', 'POST'])
39 @parameter_check(does_return=False, req_c_type='application/json')
40 @url_arg_check(does_return=True, req_type=int, arg='frame_idx', session=session)
41 def preview_thumbnail(frame_idx):
42 """
43 Preview a frame by index, given filter parameters
44 """
45 if session.get('is_uploaded'):
46 data = request.get_json()
47 filter_params = data['filter_params']
48 session['filter_params'] = filter_params
49 frame = Frame(session['s_id'])
50 frame_i = frame.get_by_idx(frame_idx)
51 filter_frame = Filter(frame_i).run_func(filter_params)
52 frame.f_save(filter_frame, session['s_id'])
53
54 return send_from_directory(directory=f'{FRAMES_UPLOAD_PATH}',
55 path=f'{session["s_id"]}{IMG_EXTENSION}',
56 as_attachment=True), 200
57
58 raise InvalidAPIUsage('Invalid usage: please upload a video first')
59
60
61
62 @api.route('/download/', methods=['POST'])
63 @parameter_check(does_return=True, req_c_type='application/json', session=session)
64 def download_video(vid_range):
65 """
66 Download a video given filter parameters
67 """
68
69 if session.get('is_uploaded'):
70 data = request.get_json()
71 fps = data['fps']
72 filter_params = data['filter_params']
73 frame_count = session['video_frame_count']
74 vd = VideoDownloader(fps, vid_range)
75 filter_vid = vd.download(session['s_id'], frame_count, filter_params)
76
77 session['is_downloaded'] = True
78 return send_from_directory(directory=f'{VIDEO_UPLOAD_PATH}',
79 path=f'{filter_vid}{VIDEO_EXTENSION}',
80 as_attachment=True), 200
81
82 raise InvalidAPIUsage('Invalid usage: please upload a video first')
83
84
85 @api.route('/status/', methods=['GET'])
86 @parameter_check(req_c_type='application/json')
87 def status():
88 """
89 The progress of the user, uploaded, download / frames
90 """
91
92 resp = {}
93 try:
94 if session['is_uploaded']:
95 resp["upload"] = "done"
96 if CACHE.get(f"{session['s_id']}_d"):
97 d_status = CACHE.get(f"{session['s_id']}_d")
98 resp["downloaded_frames"] = f'{d_status}/{session["video_frame_count"]}'
99 if session["is_downloaded"]:
100 resp["is_downloaded"] = True
101 except KeyError:
102 pass
103 return jsonify({"status" : resp}), 200
| 5 - error: no-name-in-module
6 - error: no-name-in-module
7 - error: relative-beyond-top-level
9 - error: relative-beyond-top-level
10 - error: relative-beyond-top-level
11 - error: relative-beyond-top-level
1 - warning: unused-import
2 - warning: unused-import
2 - warning: unused-import
2 - warning: unused-import
2 - warning: unused-import
2 - warning: unused-import
|
1 from flask import redirect, url_for, jsonify
2 from . import main
3
4 @main.app_errorhandler(404)
5 def page_not_found(e):
6 return jsonify(error=str(e)), 404
7
8 @main.app_errorhandler(405)
9 def method_not_allowed(e):
10 return jsonify(error=str(e)), 405
11
12
| 2 - error: no-name-in-module
1 - warning: unused-import
1 - warning: unused-import
|
1 #!/usr/bin/env python3
2 """Combine all hours of data into one CSV"""
3 import pandas as pd
4
5 #output combined CSV file
6 def concat_file(file_list, output_file):
7 """concat .csv file according to list of files
8 :param str file_list: List of CSV from provided dataset
9 :param str output_file: Output filename to save the concat CSV of files
10 :return: New file written to <output_file>
11 :rtype: void
12 """
13 combined_csv = pd.concat([pd.read_csv(f) for f in file_list ]) #combine all files in the list
14 combined_csv.to_csv( output_file, index=False, encoding='utf-8-sig') #export to csv with uft-8 encoding
15
16 # hold paths for each hour
17 acc_file_locations=[]
18 gyro_file_locations=[]
19
20 # loop to add path hours to list
21 for hour in range (12,18):
22 acc_file_locations.append("Data-raw/Accelerometer/2019-11-12/" + str(hour) + "/accel_data.csv")
23 gyro_file_locations.append("Data-raw/Gyroscope/2019-11-12/" + str(hour) + "/accel_data.csv")
24
25 concat_file(acc_file_locations, 'acc_data.csv')
26 concat_file(gyro_file_locations, 'gyro_data.csv') | Clean Code: No Issues Detected
|
1 #!/usr/bin/env python3
2 """Module to calculate reliability of samples of raw accelerometer files."""
3
4 import pandas as pd
5 import matplotlib.pyplot as plt
6 import datetime
7 import argparse
8 import os
9
10
11 def main():
12 """
13 Application entry point responsible for parsing command line requests
14 """
15 parser = argparse.ArgumentParser(description='Process accelerometer data.')
16 parser.add_argument('input_file', metavar='file', type=str, nargs='+',
17 help='filename for csv accelerometer data')
18
19 # parse command line arguments
20 args = parser.parse_args()
21 for file in args.input_file:
22 reliability_score(file)
23
24
25 def reliability_score(input_file):
26 """ calculate reliability score based on input file
27 :param str input_file: CSV from provided dataset
28 :return: New file written to csv output naming convention and new png image of plot
29 :rtype: void
30 """
31 sampling_rate=20 # Sample rate (Hz) for target device data
32
33 # save file name
34 base_input_name = os.path.splitext(input_file)[0]
35
36 # timestamp for filename
37 now = datetime.datetime.now()
38 timestamp = str(now.strftime("%Y%m%d_%H-%M-%S"))
39
40 df = pd.read_csv(input_file) # read data
41
42     df['Time'] = pd.to_datetime(df['Time'], unit='ms') # convert millisecond timestamps to datetime
43 df = df.set_index('Time') #index as timestamp to count
44 samples_seconds = df.resample('1S').count() # count sample in each 1s time period
45
46 # reliability by second
47 samples_seconds['Reliability']= samples_seconds['Hour'] / sampling_rate
48 samples_seconds.loc[samples_seconds['Reliability'] >= 1, 'Reliability'] = 1 #if sample rate greater than one set to 1
49
50 # save csv of reliability by second
51 header = ["Reliability"]
52 samples_seconds.to_csv("reliability_csv_by_seconds_" + base_input_name + "_" + timestamp + ".csv" , columns=header)
53
54 print("Reliability for data set = " + str(samples_seconds["Reliability"].mean(axis=0)))
55
56 # set and display plot
57 plot_df = samples_seconds.reset_index() # add index column
58 plot_df.plot(x='Time', y='Reliability', rot=45, style=".", markersize=5)
59
60 # save png image
61 plt.savefig("reliability_plot_" + base_input_name + "_" + timestamp + ".png", bbox_inches='tight')
62
63 #show plot
64 plt.title("Reliability Score by Second")
65 plt.show()
66
67 if __name__ == '__main__':
68 main() # Standard boilerplate to call the main() function to begin the program.
69
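70 # Usage sketch (the script and data filenames below are assumptions):
71 #   python3 reliability_score.py acc_data.csv gyro_data.csv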
| Clean Code: No Issues Detected
|
1 #!/usr/bin/env python3
2 """Module to calculate energy and non-wear time of accelerometer data."""
3
4 import os
5 import pandas as pd
6 import numpy as np
7 import matplotlib.pyplot as plt
8 import datetime
9 import argparse
10
11
12 def main():
13 """
14 Application entry point responsible for parsing command line requests
15 """
16 parser = argparse.ArgumentParser(description='Process Non-wear-time accelerometer data.')
17 parser.add_argument('input_file', metavar='file', type=str, nargs='+',
18 help='filename for csv accelerometer data')
19
20 # parse command line arguments
21 args = parser.parse_args()
22 for file in args.input_file:
23 energy_calculations(file)
24
25 def energy_calculations(input_file):
26 """calculate energy and non-wear time stamps based on input file
27 :param str input_file: CSV from provided dataset
28 :return: New file written to csv output naming convention and new png image of plot
29 :rtype: void
30 """
31 df = pd.read_csv(input_file) # read data
32 df['Time'] = pd.to_datetime(df['Time'], unit='ms') # convert timestamp to datetime object
33
34 # save file name
35 base_input_name = os.path.splitext(input_file)[0]
36
37 # timestamp for filename
38 now = datetime.datetime.now()
39 timestamp = str(now.strftime("%Y%m%d_%H-%M-%S"))
40
41 # Simple smoothing signal with rolling window
42 # use rolling window of 10 samples ~ .5 second
43 df['accX'] = df['accX'].rolling(window=10, min_periods=1).mean() # smoothing
44 df['accY'] = df['accY'].rolling(window=10, min_periods=1).mean() # smoothing
45 df['accZ'] = df['accZ'].rolling(window=10, min_periods=1).mean() # smoothing
46
47 #rolling std
48 df['stdX'] = df['accX'].rolling(300).std()*1000 # rolling std of 15 seconds is 300 samples
49 df['stdY'] = df['accY'].rolling(300).std()*1000
50 df['stdZ'] = df['accZ'].rolling(300).std()*1000 # 1000 X to convert g to mg
51
52     # Calculate non-wear time using the rolling std: if 2 of the 3 axes are below the target, the point can be marked as non-wear
53 target_std=13 # set target std to check against
54 df["Non_Wear"] = (df['stdX'] < target_std) & (df['stdY'] < target_std) | (df['stdX'] < target_std) & (df['stdZ'] < target_std) | (df['stdY'] < target_std) & (df['stdZ'] < target_std)
55
56     # Vector magnitude (energy) of the smoothed signal
57 df["Energy"]= np.sqrt((df['accX']**2) + (df['accY']**2) + (df['accZ']**2)) # energy calculation
58
59 # plot the energy expenditure
60 ax = df.plot(x="Time", y='Energy', rot=45, markersize=5)
61 ax = plt.gca()
62
63 # run gridlines for each hour bar
64 ax.get_xaxis().grid(True, which='major', color='grey', alpha=0.5)
65 ax.get_xaxis().grid(True, which='minor', color='grey', alpha=0.25)
66
67 # mask the blocks for wear and non_wear time
68 df['block'] = (df['Non_Wear'].astype(bool).shift() != df['Non_Wear'].astype(bool)).cumsum() # checks if next index label is different from previous
69 df.assign(output=df.groupby(['block']).Time.apply(lambda x:x - x.iloc[0])) # calculate the time of each sample in blocks
70
71 # times of blocks
72 start_time_df = df.groupby(['block']).first() # start times of each blocked segment
73 stop_time_df = df.groupby(['block']).last() # stop times for each blocked segment
74
75 # lists of times stamps
76 non_wear_starts_list=start_time_df[start_time_df['Non_Wear'] == True]['Time'].tolist()
77 non_wear_stops_list=stop_time_df[stop_time_df['Non_Wear'] == True]['Time'].tolist()
78
79 # new df from all non-wear periods
80 data = { "Start": non_wear_starts_list, "Stop": non_wear_stops_list}
81 df_non_wear=pd.DataFrame(data) # new df for non-wear start/stop times
82 df_non_wear['delta'] = [pd.Timedelta(x) for x in (df_non_wear["Stop"]) - pd.to_datetime(df_non_wear["Start"])]
83
84 # check if non-wear is longer than target
85 valid_no_wear = df_non_wear["delta"] > datetime.timedelta(minutes=5) # greater than 5 minutes
86 no_wear_timestamps=df_non_wear[valid_no_wear]
87
88 # list of valid non-wear starts and stops
89 non_wear_start = no_wear_timestamps["Start"]
90 non_wear_stop = no_wear_timestamps["Stop"]
91
92 # calculate total capture time
93 capture_time_df = df[['Time']].copy()
94 # capture_time_df = capture_time_df.set_index('Time')
95
96 # plot non-wear periods
97 for non_start, non_stop in zip(non_wear_start, non_wear_stop):
98 capture_time_df['Non_Wear'] = (capture_time_df['Time'] > non_start ) & (capture_time_df['Time'] < non_stop )
99 ax.axvspan(non_start, non_stop, alpha=0.5, color='red')
100
101 # blocking validated wear and non wear time
102 capture_time_df['block'] = (capture_time_df['Non_Wear'].astype(bool).shift() != capture_time_df['Non_Wear'].astype(bool)).cumsum() # checks if next index label is different from previous
103 capture_time_df.assign(output=capture_time_df.groupby(['block']).Time.apply(lambda x: x - x.iloc[0])) # calculate the time of each sample in blocks
104 # times of blocks
105 start_time_df = capture_time_df.groupby(['block']).first() # start times of each blocked segment
106 stop_time_df = capture_time_df.groupby(['block']).last() # stop times for each blocked segment
107
108
109 start_time_df.rename(columns={'Time': 'StartTime'}, inplace=True)
110 stop_time_df.rename(columns={'Time': 'StopTime'}, inplace=True)
111
112 # combine start and stop dataframes
113 time_marks = pd.concat([start_time_df, stop_time_df], axis=1)
114 print("Capture Segment Periods:")
115 print(time_marks)
116
117     #save csv of individual time periods (worn and non-worn timestamps)
118 time_marks.to_csv("wear_periods_csv_" + base_input_name + "_" + timestamp + ".csv")
119
120 # save png image
121 plt.savefig("non_wear_time_plot_" + base_input_name + "_" + timestamp + ".png", bbox_inches='tight')
122
123 #show plot
124 plt.title("Non-wear Time")
125 plt.show()
126
127 if __name__ == '__main__':
128 main() # Standard boilerplate to call the main() function to begin the program.
129
130
| 25 - refactor: too-many-locals
|
1 import numpy as np
2 import cv2
3 import pyyolo
4
5 cap = cv2.VideoCapture('gun4_2.mp4')
6 meta_filepath = "/home/unknown/yolo/darknet.data"
7 cfg_filepath = "/home/unknown/yolo/darknet-yolov3.cfg"
8 weights_filepath = "/home/unknown/yolo/yolov3.weights"
9
10
11 meta = pyyolo.load_meta(meta_filepath)
12 net = pyyolo.load_net(cfg_filepath, weights_filepath, False)
13
14 while(cap.isOpened()):
15 ret, frame = cap.read()
16 if not ret:
17 break
18
19 yolo_img = pyyolo.array_to_image(frame)
20 res = pyyolo.detect(net, meta, yolo_img)
21
22 for r in res:
23 cv2.rectangle(frame, r.bbox.get_point(pyyolo.BBox.Location.TOP_LEFT, is_int=True),
24 r.bbox.get_point(pyyolo.BBox.Location.BOTTOM_RIGHT, is_int=True), (0, 255, 0), 2)
25
26
27 cv2.imshow('frame', frame)
28 if cv2.waitKey(1) & 0xFF == ord('q'):
29 break
30
31 cap.release()
32 cv2.destroyAllWindows()
| 1 - warning: unused-import
|
1 import urllib.request
2 import re
3 import ssl
4
5 #Globally disable SSL certificate verification
6 ssl._create_default_https_context = ssl._create_unverified_context
7
8 #Set the keyword for the Taobao search
9 keyword = urllib.request.quote("毛衣")
10
11 #Disguise the crawler as a Firefox browser
12 headers=("User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:57.0) Gecko/20100101 Firefox/57.0")
13
14 #Create an opener object
15 opener = urllib.request.build_opener()
16
17 #Set the header for the opener
18 opener.addheaders = [headers]
19
20 #Install the opener globally
21 urllib.request.install_opener(opener)
22
23 #For easier testing, download only two pages of data
24 for i in range(0,2):
25
26     #Add the keyword to the URL and request the data
27 url = "https://s.taobao.com/search?q="+ keyword +"&imgfile=&js=1&stats_click=search_radio_all%3A1&initiative_id=staobaoz_20180121&ie=utf8&bcoffset=4&ntoffset=4&p4ppushleft=1%2C48&s="+str(44*i)
28 data = urllib.request.urlopen(url).read().decode("utf-8","ignore")
29
30     #Check whether any data was retrieved
31 print(len(data))
32
33     #Match image URLs with a regular expression
34 pat = '"pic_url":"//(.*?)"'
35 imgs = re.compile(pat).findall(data)
36
37 for j in range(0,len(imgs)):
38
39         #Build the full URL from the matched address and download it locally
40 thisurl = "http://" + imgs[j]
41 file = "/Users/Rocky1/Desktop/imgs/" + str(i) + "-" + str(j) + ".jpg"
42 urllib.request.urlretrieve(thisurl,file)
43
44
45
46
47
| 6 - warning: protected-access
6 - warning: protected-access
28 - refactor: consider-using-with
|
1 import sqlite3
2
3 """ Demo Data Query Using Sqlite 3 """
4
5 def data_operations(conn,query):
6 """ Function which performs database operations and returns results """
7 try:
8 #creating a cursor for connection
9 cursor = conn.cursor()
10 #executing the query on the database and get the results
11 results = cursor.execute(query).fetchall()
12 except:
13 return "error in data operations"
14
15     #If no error is encountered, the cursor is closed
16 cursor.close()
17     #Committing the data operations
18 conn.commit()
19 #On successful completion return the results
20 return results
21
22
23
24 def count_query(connect):
25 """
26 Function to find the number of rows in the demo table
27 Expected Output :The number of rows in demo table is 6
28
29 """
30 #How many rows in the demo table
31 count_query = """SELECT COUNT(*) FROM demo; """
32     #counting all rows in the table
33 result = data_operations(connect,count_query)
34 for row in result:
35 print(f'The number of rows in demo table is {row[0]}')
36
37 def xy_query(connect):
38 """
39     Function for finding the number of rows where x and y are at least 5
40     Expected Output: The number of rows where x and y are at least 5: 4
41
42 """
43     #How many rows where x and y are at least 5
44     xy_query = """ SELECT COUNT(*) FROM demo WHERE x >= 5 AND y >= 5; """
45     #checking for rows where both x and y are at least 5
46 result = data_operations(connect,xy_query)
47 for row in result:
48         print(f'The number of rows where x and y are at least 5: {row[0]}')
49
50
51 def y_unique(connect):
52 """
53 Function for finding the Number of Unique values of Y
54 Expected output : The number of distinct values of y : 2
55
56 """
57 #Query for unique values of y
58 y_unique = """ SELECT COUNT(DISTINCT y) FROM demo ; """
59 #checking for distinct values of y
60 result = data_operations(connect,y_unique)
61 for row in result:
62 print(f'The number of distinct values of y : {row[0]}')
63
64
65 #Create a connection within a try/except block to swallow errors without raising exceptions
66 try:
67 #Creating a database with name demo_data
68 connect = sqlite3.connect('demo_data.sqlite3')
69 #SQL Query for creating the table
70 create_table_query = """CREATE TABLE "demo" (
71 "s" TEXT,
72 "x" INTEGER,
73 "y" INTEGER
74 ); """
75
76 #Creating the table by sending to data operations function
77 result = data_operations(connect,create_table_query)
78
79 #Inserting values into the demo table
80 insert_query = """INSERT INTO demo (s,x,y) values
81 ("\'g\'",3,9),
82 ("\'v\'",5,7),
83 ("\'f\'",8,7) ;"""
84
85 #inserting the values into the demo table
86 data_operations(connect,insert_query)
87
88 #Now checking the demo table for data
89 count_query(connect) # Number of rows in the table
90 xy_query(connect) # Number of rows with x y values more than 5
91 y_unique(connect) # Number of distinct y values in the table
92
93 except:
94 pass
95
96 #closing the connection to the database
97 connect.close() | 3 - warning: pointless-string-statement
12 - warning: bare-except
24 - warning: redefined-outer-name
31 - warning: redefined-outer-name
33 - warning: redefined-outer-name
37 - warning: redefined-outer-name
44 - warning: redefined-outer-name
46 - warning: redefined-outer-name
51 - warning: redefined-outer-name
58 - warning: redefined-outer-name
60 - warning: redefined-outer-name
93 - warning: bare-except
|
1 import psycopg2
2 from sqlalchemy import create_engine
3 import pandas as pd
4
5 #Read the titanic CSV file into a pandas dataframe
6 titanic = pd.read_csv('titanic.csv')
7 #Print the shape of titanic and print the top 5 rows
8 print(titanic.shape)
9 print(titanic.head())
10 #Print the columns of the titanic dataframe
11 print(titanic.columns)
12
13 from sqlalchemy import create_engine
14
15 dbname = "rgfajssc"
16 username = "rgfajssc"
17 pass_word = "SECRET"
18 host = "john.db.elephantsql.com"
19 #create the engine and insert the titanic dataframe into postgres
20 try:
21 engine = create_engine(f'postgresql://{username}:{pass_word}@{host}/{username}')
22 titanic.to_sql('titanic', engine)
23 except:
24 pass
25
26 # pg_connect = psycopg2.connect(dbname=dbname, user=username,
27 # password=pass_word, host=host)
28 # cur = pg_connect.cursor() | 13 - warning: reimported
23 - warning: bare-except
1 - warning: unused-import
|
1 import psycopg2
2 import sqlite3
3
4 dbname = "rgfajssc"
5 username = "rgfajssc"
6 pass_word = "U0W4kG-Um-Pug_wj8ec9OnbkQ70deuZR"
7 host = "john.db.elephantsql.com"
8
9 pg_connect = psycopg2.connect(dbname=dbname, user=username,
10 password=pass_word, host=host)
11 cur = pg_connect.cursor()
12 #Query for Survived people by class
13 query = 'SELECT survived,pclass, COUNT(pclass) FROM titanic \
14 GROUP BY (pclass,survived) order by survived asc ,pclass asc;'
15 try:
16 cur.execute(query)
17 for row in cur:
18 print(row)
19 except :
20 pass
21
22 cur.close()
23 pg_connect.close() | 19 - warning: bare-except
2 - warning: unused-import
|
1 import sqlite3
2
3 """Database operations for the NorthWind data """
4
5 def data_operations(conn,query):
6 """ Function which performs database operations and returns results """
7 try:
8 #creating a cursor for connection
9 cursor = conn.cursor()
10 #executing the query on the database and get the results
11 results = cursor.execute(query).fetchall()
12 except:
13 return "error in data operations"
14
15     #If no error is encountered, the cursor is closed
16     cursor.close()
17     #Committing the data operations
18 conn.commit()
19 #On successful completion return the results
20 return results
21
22 def top_10_expensive_items(connect):
23 """
24 Function to find the Top 10 expensive items per unit price
25 Expected Output
26 The 10 Most expensive Items in the database are:
27 Côte de Blaye
28 Thüringer Rostbratwurst
29 Mishi Kobe Niku
30 Sir Rodney's Marmalade
31 Carnarvon Tigers
32 Raclette Courdavault
33 Manjimup Dried Apples
34 Tarte au sucre
35 Ipoh Coffee
36 Rössle Sauerkraut
37 """
38 #query for ten most expensive items in the database
39 expensive_query = """ SELECT ProductName
40 FROM Product
41 ORDER BY UnitPrice DESC LIMIT 10 """
42 result = data_operations(connect,expensive_query)
43 print("The 10 Most expensive Items in the database are:\n")
44 for row in result:
45 print(f'{row[0]}')
46
47 def avg_age_employee(connect):
48 """
49 Function to find the average age of the employee at the time of hire
50 Expected Output
51     The Average age of the employee at the time of hire is:37.22
52 """
53 #Query for the Avg age of the employee at the time of hiring
54 avg_age_employee = """ SELECT AVG(HireDate - BirthDate) FROM Employee; """
55 result = data_operations(connect,avg_age_employee)
56 print("\n")
57 for row in result:
58         print(f'The Average age of the employee at the time of hire is:{row[0]:0.2f}')
59
60 def avg_employee_age_bycity(connect):
61 """
62 Function to find the average age of employees at the time of hire by city
63 Expected Output
64     The Average age of the employee at the time of hire by City is:
65 Kirkland : 29.0
66 London : 32.5
67 Redmond : 56.0
68 Seattle : 40.0
69 Tacoma : 40.0
70 """
71 #Query for avg age of the employee by city
72 avg_employee_age_bycity = """ SELECT city, AVG(HireDate - BirthDate)
73 FROM Employee GROUP BY City; """
74 result = data_operations(connect,avg_employee_age_bycity)
75     print("\nThe Average age of the employee at the time of hire by City is:")
76 for row in result:
77 print(f'{row[0]} : {row[1]}')
78
79 def expensive_items_supplier(connect):
80 """
81 Function to find the Top 10 expensive items per unit price and the Name of the Supplier
82 Expected Output
83     The 10 Most expensive Items in the database with Supplier Names followed by Item Name:
84 Aux joyeux ecclésiastiques : Côte de Blaye
85 Plutzer Lebensmittelgroßmärkte AG : Thüringer Rostbratwurst
86 Tokyo Traders : Mishi Kobe Niku
87 Specialty Biscuits, Ltd. : Sir Rodney's Marmalade
88 Pavlova, Ltd. : Carnarvon Tigers
89 Gai pâturage : Raclette Courdavault
90 G'day, Mate : Manjimup Dried Apples
91 Forêts d'érables : Tarte au sucre
92 Leka Trading : Ipoh Coffee
93 Plutzer Lebensmittelgroßmärkte AG : Rössle Sauerkraut
94 """
95 #Query for Top 10 expensive items along with Suppliet names
96 expensive_query_supplier = """ SELECT CompanyName,ProductName
97 FROM Product, Supplier
98 WHERE Supplier.Id = Product.SupplierId
99 ORDER BY UnitPrice DESC LIMIT 10"""
100 result = data_operations(connect,expensive_query_supplier)
101 print("\nThe 10 Most expensive Items in the database with Supplier Names followed by Item Name:\n")
102 for row in result:
103 print(f'{row[0]} : {row[1]}')
104
105 def largest_category(connect):
106 """
107 Function to find the Top category with largest unique products
108 Expected Output
109 The Category with largest unique products is :Confections
110
111 """
112 #Query for the name of category with largest number of unique products
113 largest_category = """ SELECT CategoryName FROM Category WHERE id = (
114 SELECT CategoryId FROM Product
115 GROUP BY CategoryId
116 ORDER BY COUNT(CategoryId) DESC LIMIT 1) """
117 result = data_operations(connect,largest_category)
118 print("\n")
119 for row in result:
120 print(f'The Category with largest unique products is :{row[0]}')
121
122 def most_territories_employee(connect):
123 """
124     Function to find the employee with the most territories
125 Expected Output
126 The Employee with most territories is :Robert King
127
128 """
129 #Query for name of the Employee who has the most territories
130 most_territories_employee = """ SELECT FirstName,LastName FROM Employee WHERE id = (
131 SELECT EmployeeId FROM EmployeeTerritory
132 GROUP BY EmployeeId
133 ORDER BY COUNT(TerritoryId) DESC LIMIT 1
134 ) """
135 result = data_operations(connect,most_territories_employee)
136 print("\n")
137 for row in result:
138 print(f'The Employee with most territories is :{row[0]} {row[1]}')
139
140
141
142
143 #Creating a try/except block to operate safely on the database
144 try:
145 #Creating a connection to the Northwind database
146 connect = sqlite3.connect('northwind_small.sqlite3')
147
148 top_10_expensive_items(connect)
149 avg_age_employee(connect)
150 avg_employee_age_bycity(connect)
151 expensive_items_supplier(connect)
152 largest_category(connect)
153 most_territories_employee(connect)
154
155
156 except :
157 pass
158 #closing the connection to the database
159 connect.close() | 3 - warning: pointless-string-statement
12 - warning: bare-except
22 - warning: redefined-outer-name
47 - warning: redefined-outer-name
54 - warning: redefined-outer-name
60 - warning: redefined-outer-name
72 - warning: redefined-outer-name
79 - warning: redefined-outer-name
105 - warning: redefined-outer-name
113 - warning: redefined-outer-name
122 - warning: redefined-outer-name
130 - warning: redefined-outer-name
156 - warning: bare-except
|
1 import pandas as pd
2 import sqlite3
3 #Import buddymove dataset by reading csv
4 buddymove_df = pd.read_csv('buddymove_holidayiq.csv')
5 #Printing the shape of the dataset and first five rows
6 print(buddymove_df.shape)
7 print(buddymove_df.head())
8 #Printing the number of null values in the dataset
9 print("The number of null values in the dataset are:\n",buddymove_df.isna().sum())
10
11 #Opening a sqlite connection and creating a database
12 database_name = 'buddymove_holidayiq.sqlite3'
13 conn = sqlite3.connect(database_name)
14 #Dumping the dataframe to the database
15 buddymove_df.to_sql('buddymove_tbl',con=conn,if_exists='replace')
16
17 #Checking for the first five rows to ensure the database dump was complete
18 query = 'SELECT * FROM buddymove_tbl LIMIT 5;'
19 #Query for number of rows in database
20 query_rows = 'SELECT COUNT("User Id") FROM buddymove_tbl;'
21 try:
22 answer = conn.execute(query)
23 for row in answer:
24 print(row)
25 except:
26 pass
27
28 #Getting the number of rows in the table
29 try:
30 answer = conn.execute(query_rows)
31 for row in answer:
32 print(f'Number of rows in the table buddymove_tbl is :{row[0]}')
33 except:
34 pass
35
36 #Number of users who have rated at least 100 in the Nature and Shopping categories
37 query_users = 'SELECT COUNT("User Id") FROM buddymove_tbl WHERE\
38 "Nature" >=100 AND "Shopping" >=100;'
39 try:
40 answer = conn.execute(query_users)
41 for row in answer:
42         print(f'Number of users who have rated at least 100 in Nature and Shopping: {row[0]}')
43 except:
44 pass
45 #Query for getting average rating for all categories
46 query_avg_rating = 'SELECT AVG("Sports"),AVG("Religious"),AVG("Nature"),AVG("Theatre"),AVG("Shopping"),AVG("Picnic") FROM buddymove_tbl;'
47 try:
48 answer = conn.execute(query_avg_rating)
49 for row in answer:
50 print(f'Avg rating for Sports:{row[0]:.2f}')
51 print(f'Avg rating for Religious:{row[1]:.2f}')
52 print(f'Avg rating for Nature:{row[2]:.2f}')
53 print(f'Avg rating for Theatre:{row[3]:.2f}')
54 print(f'Avg rating for Shopping:{row[4]:.2f}')
55 print(f'Avg rating for Picnic:{row[5]:.2f}')
56 except:
57 pass
58 #committing the changes and closing the connection
59 conn.commit()
60 conn.close()
| 25 - warning: bare-except
33 - warning: bare-except
43 - warning: bare-except
56 - warning: bare-except
|
1 import pymongo
2
3
4 client = pymongo.MongoClient("mongodb+srv://mongoadmin:2BlYV2t3X4jws3XR@cluster0-uosnx.mongodb.net/test?retryWrites=true&w=majority")
5 db = client.test
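6 
7 # A minimal usage sketch (the 'demo' collection name and document are assumptions):
8 # db.demo.insert_one({'x': 1})
9 # print(db.demo.count_documents({}))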
| Clean Code: No Issues Detected
|
1 import sqlite3
2
3 #Creating a connection to the rpg database
4 conn = sqlite3.connect('rpg_db.sqlite3')
5 #creating a cursor for rpg database connection
6 curs = conn.cursor()
7 #Query for number of characters in the game
8 query = 'SELECT COUNT(name) FROM charactercreator_character;'
9 #Executing the query
10 answer = curs.execute(query)
11 for row in answer:
12     print(f'There are a total of {row[0]} characters in the game')
13 no_of_characters = row[0]
14 #Different classes of character by table name
15 character_class = ['mage','thief','cleric','fighter']
16 for subclass in character_class:
17 query = f'SELECT COUNT(character_ptr_id) FROM charactercreator_{subclass}'
18 answer = curs.execute(query)
19 for row in answer:
20         print(f'There are {row[0]} characters of the class {subclass}')
21
22 #Total items in the armoury
23 query = 'SELECT COUNT(name) FROM armory_item;'
24 answer = curs.execute(query)
25 for row in answer:
26     print(f'There are a total of {row[0]} items in the armoury')
27 no_of_items = row[0]
28 #Number of weapons in the items
29 query = f'SELECT COUNT(item_ptr_id) FROM armory_weapon;'
30 answer = curs.execute(query)
31 for row in answer:
32     print(f'There are a total of {row[0]} weapons in the items')
33 no_of_weapons = row[0]
34 #Number of non-weapon items
35 print(f'There are a total of {(no_of_items-no_of_weapons)} non-weapon items')
36
37 #No. of items for the top 20 characters by name
38 query = 'select count(item_id) AS no_of_items,name\
39 from charactercreator_character_inventory,charactercreator_character\
40 where charactercreator_character.character_id = charactercreator_character_inventory.character_id\
41 GROUP BY charactercreator_character_inventory.character_id ORDER BY name ASC LIMIT 20;'
42 answer = curs.execute(query).fetchall()
43 print('The Number of items of the top 20 characters by name are')
44 for row in answer:
45 print(f'No. of Items:{row[0]}, Name:{row[1]}')
46
47 #No. of weapons for the top 20 characters by name
48 query = 'select count(armory_weapon.item_ptr_id) AS no_of_items,name\
49 from charactercreator_character_inventory,charactercreator_character,armory_weapon\
50 where charactercreator_character.character_id = charactercreator_character_inventory.character_id\
51 AND charactercreator_character_inventory.item_id = armory_weapon.item_ptr_id\
52 GROUP BY charactercreator_character_inventory.character_id ORDER BY name ASC LIMIT 20;'
53 answer = curs.execute(query).fetchall()
54 print('The number of weapons of the top 20 characters by name are')
55 for row in answer:
56 print(f'No. of Items:{row[0]}, Name:{row[1]}')
57
58 #Total Number of items held by characters
59 query = 'select count(id) from charactercreator_character_inventory'
60 answer = curs.execute(query)
61 for row in answer:
62 total_no_of_items = row[0]
63 #Average number of items for each character
64 print(f'The average number of items per character is {total_no_of_items/no_of_characters:0.2f}')
65 #closing the cursor and connection
66 curs.close()
67 conn.close() | 13 - warning: undefined-loop-variable
29 - warning: f-string-without-interpolation
|
1 import requests
2 from datetime import datetime
3 import time
4 import argparse
5 import getpass
6 import json
7 from rich import print
8 import logging
9 import urllib3
10 from netmiko import ConnectHandler
11 from eve_up import get_nodes, get_links
12 from ipaddress import *
13 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
14 logging.basicConfig(level=logging.DEBUG,
15 format=f'%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
16
17 def base_config():
18 counter = 0
19 nodes = get_nodes(topo="NornirJunos.unl")
20 ips = ['192.168.20.191',
21 '192.168.20.192',
22 '192.168.20.193',
23 '192.168.20.194',
24 '192.168.20.195',
25 '192.168.20.196',
26 '192.168.20.197',
27 '192.168.20.198',
28 '192.168.20.199']
29 for key, value in nodes.items():
30 try:
31 if value["template"] == "vmx":
32                 ## Parse the Telnet address and port into variables
33 url = value["url"].split(":")
34 ip = url[1].replace("//", "")
35 port = (url[2])
36 node_conn = {
37 'device_type' : 'juniper_junos_telnet',
38 'host' : ip,
39 'port': port,
40 }
41 # Initiate connection to EVE
42 net_connect = ConnectHandler(**node_conn)
43 hostname = f'vMX{str(counter+1)}'
44 # counter += 1
45 # Send commands and view output
46 config_commands = [ 'set system root-authentication encrypted-password "$1$hBBaQcLY$AZYmNq9VbicPSNbl4KDcf0"',
47 'delete chassis auto-image-upgrade',
48 f'set system host-name {hostname}',
49 'set system domain-name abc.co',
50 'set system services ssh',
51 'set system services netconf ssh',
52 'set system login user cisco class super-user authentication encrypted-password "$1$hBBaQcLY$AZYmNq9VbicPSNbl4KDcf0"',
53 f'set interfaces ge-0/0/4 unit 0 family inet address {ips[counter]}/24']
54 output = net_connect.send_config_set(config_commands, exit_config_mode=False)
55 counter += 1
56 print(output)
57 # Commit
58 output = net_connect.commit(and_quit=True)
59 print(output)
60
61 except Exception as err:
62 continue
63
64 # def ip_addresser(subnet: str = '192.168.20.0/24'):
65 # subnets = list(ip_network(subnet).subnets(new_prefix = 30))
66 # print(subnets)
67 # for subn in subnets:
68 # print(list(subn.hosts()))
69 # links = get_links("NornirJunos.unl")
70 # print(links)
71 # return subnets
72
73
74 if __name__ == "__main__":
75 base_config() | 7 - warning: redefined-builtin
12 - warning: wildcard-import
15 - warning: f-string-without-interpolation
61 - warning: broad-exception-caught
29 - warning: unused-variable
61 - warning: unused-variable
1 - warning: unused-import
2 - warning: unused-import
3 - warning: unused-import
4 - warning: unused-import
5 - warning: unused-import
6 - warning: unused-import
11 - warning: unused-import
12 - warning: unused-wildcard-import
|
1 #!/bin/env python
2 # the graphical interface of resql compiler client
3 import wx
4 import wx.grid
5 from subprocess import *
6 import sys
7
8 # call the client to connect to the server for rewriting the sql statement
9 def call_clnt(sql):
10 try:
11 # under windows
12 # p1 = Popen(['echo',sql],stdout=PIPE,shell=True)
13 # p2 = Popen(['python','clnt.py'],stdin=p1.stdout,stdout=PIPE,shell=True)
14 p1 = Popen(['echo',sql],stdout=PIPE)
15 p2 = Popen(['python','clnt.py'],stdin=p1.stdout,stdout=PIPE)
16 fout,ferr = p2.communicate()
17 return fout
18 except OSError, e:
19 pass
20
21 # define my application form
22 class MyApp(wx.App):
23 def OnInit(self):
24 frame = MyFrame("ReSQL Client v0.1",(50,60),(460,420))
25 frame.Show()
26 self.SetTopWindow(frame)
27 return True
28
29 # define the output table of the query result
30 class MyTable(wx.grid.PyGridTableBase):
31 def __init__(self):
32 wx.grid.PyGridTableBase.__init__(self)
33 self.data = {}
34 self.odd = wx.grid.GridCellAttr()
35 self.odd.SetBackgroundColour("white")
36 self.even = wx.grid.GridCellAttr()
37 self.even.SetBackgroundColour("wheat")
38 self.rows = self.cols = 5
39
40 def setData(self,data):
41 self.data = data
42
43 def GetNumberRows(self):
44 return self.rows
45
46 def GetNumberCols(self):
47 return self.cols
48
49 def SetNumberRows(self,rows):
50 self.rows = rows
51
52 def SetNumberCols(self,cols):
53 self.cols = cols
54
55 def IsEmptyCell(self,row,col):
56 return self.data.get((row,col)) is None
57
58 def GetValue(self,row,col):
59 value = self.data.get((row,col))
60 if value is not None:
61 return value
62 else:
63 return ''
64
65 def SetValue(self,row,col,value):
66 self.data[(row,col)] = value
67
68 def GetAttr(self,row,col,kind):
69 attr = [self.even,self.odd][row % 2]
70 attr.IncRef()
71 return attr
72
73 # define the Frame with menus, buttons, and the output table in it
74 class MyFrame(wx.Frame):
75 def __init__(self,title,post,sizet):
76 wx.Frame.__init__(self,None,-1,title, pos=post,size=sizet)
77 menuFile = wx.Menu()
78 menuFile.Append(1,"&About")
79 menuFile.AppendSeparator()
80 menuFile.Append(2,"E&xit")
81 menuOutput = wx.Menu()
82 menuOutput.Append(3,"Output rewiting")
83 menuOutput.Append(4,"Output result")
84 menuBar = wx.MenuBar()
85 menuBar.Append(menuFile,"&File")
86 menuBar.Append(menuOutput,"&Output")
87
88 panel = wx.Panel(self,-1)
89 self.schemaBtn = wx.Button(panel,-1,"S",pos=(20,0),size=(40,30))
90 self.rewriteBtn = wx.Button(panel,-1,"R",pos=(70,0),size=(40,30))
91 self.rewriteBtn.Disable()
92 self.execBtn = wx.Button(panel,-1,"Exec",pos=(120,0),size=(40,30))
93 self.execBtn.Disable()
94 self.Bind(wx.EVT_BUTTON,self.OnSchemaClick,self.schemaBtn)
95 self.Bind(wx.EVT_BUTTON,self.OnRewrite,self.rewriteBtn)
96 self.Bind(wx.EVT_BUTTON,self.OnExecQuery,self.execBtn)
97
98 self.text = wx.TextCtrl(panel,-1,"",size=(440,100),pos=(5,40),style=wx.TE_MULTILINE)
99
100 from wx.grid import Grid
101 self.grid = Grid(panel,pos=(5,140),size=(440,200))
102 self.table = MyTable()
103 self.grid.SetTable(self.table)
104
105 self.SetMenuBar(menuBar)
106 self.CreateStatusBar()
107 self.SetStatusText("Welcome to ReSQL Client")
108 self.Bind(wx.EVT_MENU,self.OnAbout,id=1)
109 self.Bind(wx.EVT_MENU,self.OnQuit,id=2)
110 self.Bind(wx.EVT_MENU,self.OnOutputRewriting,id=3)
111 self.Bind(wx.EVT_MENU,self.OnOutputResult,id=4)
112
113 def OnQuit(self,event):
114 self.Close()
115
116 def OnAbout(self,event):
117 msg = "The Rewriting SQL client v0.1 !\n\nContact:wanghit2006@gmail.com"
118 wx.MessageBox(msg,"About ReSQL client", wx.OK | wx.ICON_INFORMATION, self)
119
120 def OnSchemaClick(self,event):
121 schemaStr = 'schema: ' + self.text.GetValue().decode('utf-8').encode('ascii')
122 back = call_clnt(schemaStr.replace('\n',' '))
123 wx.MessageBox(back,"Schema",wx.OK | wx.ICON_INFORMATION,self)
124 self.rewriteBtn.Enable()
125
126 def OnRewrite(self,event):
127 sql = "sql: " + self.text.GetValue().decode('utf-8').encode('ascii')
128 self.resql = call_clnt(sql.replace('\n',' '))
129 wx.MessageBox(self.resql,"Rewrite", wx.OK | wx.ICON_INFORMATION,self)
130 self.execBtn.Enable()
131
132 def OnExecQuery(self,event):
133 #rows,cols,data = self.execQuery(self.resql)
134 #self.table.SetNumberRows(rows)
135 #self.table.SetNumberCols(cols)
136 #self.table.setData(data)
137 #self.grid.SetTable(self.table)
138 wx.MessageBox("Exec " + self.resql,"Rewrite", wx.OK | wx.ICON_INFORMATION,self)
139
140 def OnOutputRewriting(self,event):
141 msg = "output writing query"
142 wx.MessageBox(msg,"Rewriting query",wx.OK | wx.ICON_INFORMATION,self)
143
144 def OnOutputResult(self,event):
145 rows,cols = self.table.GetNumberRows(),self.table.GetNumberCols()
146 wd = []
147 for i in range(rows):
148 tmp = []
149 for j in range(cols):
150 tmp.append(self.table.data[(i,j)])
151 wd.append(tmp)
152
153 import csv
154 writer = csv.writer(open('sample.csv','wb'))
155 writer.writerows(wd)
156
157 msg = "output query result"
158 wx.MessageBox(msg,"Query result",wx.OK | wx.ICON_INFORMATION,self)
159
160 def execQuery(self,sql):
161 import pymssql
162 cxn = pymssql.connect(password='61813744',host=r'.\SQLEXPRESS',database='hello')
163 cur = cxn.cursor()
164 cur.execute(sql)
165
166 data = {}
167 i = coln = 0
168 for eachRow in cur.fetchall():
169 coln = len(eachRow)
170 for x in range(coln):
171 data[(i,x)] = eachRow[x]
172 i += 1
173
174 cxn.commit()
175 cxn.close()
176 return cur.rownumber,coln,data
177
178 if __name__ == '__main__':
179 app = MyApp(False)
180 app.MainLoop()
| 18 - error: syntax-error
|
1 # the rewrite sql compiler server end
2 import socket
3 from subprocess import *
4
5 HOST = ''
6 PORT = 9999
7 BUFSIZE=1024
8 ADDR = (HOST,PORT)
9
10 # call the resql compiler
11 # pass in the sql statement
12 # return the rewriting sql result
13 def rewrite_sql(sql):
14     print 'In rewrite_sql: ',sql
15 p1 = Popen(["echo",sql],stdout=PIPE)
16 p2 = Popen(["./resql"],stdin=p1.stdout,stdout=PIPE)
17 fout = p2.communicate()[0]
18 if p2.returncode == 0:
19 return fout
20 else:
21 return "can't rewrite"
22
23 # parse the create table statement
24 # and generate the schema file
25 # for convenient processing by the compiler written in C.
26 def parse_query(query):
27 table = []
28 keys = []
29 attrs = []
30
31 # parse table name
32 s = query.find('c')
33 e = query.find('(')
34 st = query[s:e]
35 stab = st.split()
36 table.append(stab[-1])
37
38 s = e+1
39 e = query.rfind(')');
40 sat = query[s:e]
41 sats = sat.split(',')
42 for attab in sats:
43 if attab.find("foreign") != -1:
44 pass
45 elif attab.find('primary') == -1:
46 attrs.append((attab.split())[0])
47 else:
48 s = attab.find('(')
49 e = attab.rfind(')')
50 keys = attab[s+1:e].split(',')
51
52 for i in range(len(keys)):
53 keys[i] = keys[i].strip()
54
55 for key in keys:
56 attrs.remove(key)
57
58 table.append(keys)
59 table.append(attrs)
60
61 return table
62
63 # read in the schema definitions and
64 # pass each definition to the parse_query function
65 # get the parsed schema and write it into the schema.txt file
66 def extract_schema(schemadef):
67 querys = schemadef.split(';')
68 tables = []
69 for query in querys:
70 query = query.strip()
71 if query != '':
72 tables.append(parse_query(query))
73
74 fout = open('schema.txt','w')
75 fout.write(str(len(tables)) + '\n')
76 for table in tables:
77 fout.write(table[0]+'\n')
78 fout.write(str(len(table[1]))+' ')
79 fout.write(str(len(table[2]))+'\n')
80 for key in table[1]:
81 fout.write(key+'\n')
82 for attr in table[2]:
83 fout.write(attr+'\n')
84 fout.write('\n')
85
86 # deal with the user request
87 def deal_with(conn,data):
88 if not data.rstrip().endswith(';'):
89 data = data + ';'
90
91 if data.startswith("\""):
92 data = data[1:]
93
94 if data.endswith("\";"):
95 data = data[:-2]
96
97 if data.startswith('schema:'):
98 print 'create schema with'
99 create_sql = data.split(':')[1]
100 print create_sql
101 extract_schema(create_sql)
102 conn.send('SCHEMA')
103 elif data.startswith("sql:"):
104 ts = data.split(':')[1]
105 print 'try rewriting'
106 print ts
107 resql = rewrite_sql(ts)
108 conn.send(resql)
109 else:
110 print 'wrong format'
111 print data
112 conn.send('WRONG')
113
114 if __name__ == '__main__':
115 sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
116 sock.bind(ADDR)
117 sock.listen(5)
118 while True:
119 print 'waiting... for connection...'
120 connection,address = sock.accept()
121 print '...connect from',address
122
123 buf = connection.recv(1024)
124 deal_with(connection,buf)
125 connection.close()
126
127 sock.close()
128
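129 # A minimal client sketch (commented out; the host and the example SQL are
130 # assumptions; the 'schema:'/'sql:' prefixes follow deal_with() above):
131 # c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
132 # c.connect(('localhost', 9999))
133 # c.send('sql: select name from person;')
134 # print c.recv(1024)
135 # c.close()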
| 14 - error: syntax-error
|
1 import logging
2 import os
3
4 import pandas as pd
5 import wget as wget
6 from tqdm import tqdm
7
8 from sean_logger import setup_logging
9 from toolbox import make_directory
10
11
12 def scrape_election_results(prov_id=35, base_url=None, results_format=1):
13 setup_logging()
14 if results_format == 1:
15 results_format = "pollbypoll_bureauparbureau"
16 elif results_format == 2:
17 results_format = "pollresults_resultatsbureau"
18 if base_url is None:
19 base_url = "https://www.elections.ca/res/rep/off/ovr2015app/41/data_donnees/"
20 num_except_in_a_row = 0
21 exceptions = []
22 for fed_num in tqdm(range(prov_id * 1000, ((prov_id + 1) * 1000) - 1)):
23 logging.info(f"fed num {fed_num}")
24 try:
25 url = f"{base_url}{results_format}{fed_num}.csv"
26 outfile = f"./data_donnees/{results_format}{fed_num}.csv"
27 logging.debug(url)
28 logging.debug(outfile)
29 make_directory(outfile)
30 wget.download(url, outfile)
31 num_except_in_a_row = 0
32 except:
33 logging.exception(f"Exception!! {fed_num}")
34 exceptions.append(fed_num)
35 num_except_in_a_row += 1
36 if num_except_in_a_row > 10:
37 logging.info(f"Probably finished at {fed_num - num_except_in_a_row}")
38 break
39 logging.info(f"Missed FED Nums:")
40 for fed in exceptions:
41 logging.info(fed)
42 logging.info()
43 print('fin')
44
45
46 def combine_result_csvs(folder=None, cols=None):
47 if folder is None:
48 folder = "./data_donnees/"
49 files = os.listdir(folder)
50 if cols is None:
51 cols = "Electoral District Number/Numéro de circonscription," \
52 "Electoral District Name/Nom de circonscription," \
53 "Polling Station Number/Numéro du bureau de scrutin," \
54 "Polling Station Name/Nom du bureau de scrutin," \
55 "Rejected Ballots/Bulletins rejetés," \
56 "Total Votes/Total des votes," \
57 "Electors/Électeurs".split(',')
58 print("Reading...")
59 frames = [pd.read_csv(folder + file, usecols=cols) for file in tqdm(files)]
60 print("Combining...")
61 data = pd.concat(frames)
62 print("Writing...")
63 data.to_csv("turnout_data_ontario_42nd_federal.csv", index=False)
64 print("Fin.")
65
66
67 if __name__ == '__main__':
68 scrape_election_results()
69 combine_result_csvs()
| 23 - warning: logging-fstring-interpolation
32 - warning: bare-except
33 - warning: logging-fstring-interpolation
37 - warning: logging-fstring-interpolation
39 - warning: logging-fstring-interpolation
39 - warning: f-string-without-interpolation
42 - error: no-value-for-parameter
|
1 '''
2 Author: Robert Cabral
3 File Name: Post_Module.py
4 Purpose: To create an Article Post in the database that has the Article Title and Article URL properties
5 associated with the Article Post.
6 Date: 2/16/2013
7 '''
8 import datastore
9 import webapp2
10 import cgi
11 from google.appengine.api import users
12
13 form = """
14 <html>
15 <body>
16 <form method="post">
17 <div><h1>Post Page</h1></div>
18 <div>Title:</div>
19 <div><textarea name="link_title" rows="2" cols="60"></textarea></div>
20 <div>Location/URL:<br></div>
21 <div><textarea name="link_url" rows="2" cols="60"></textarea></div>
22 <div><input type="submit" value="Post"></div>
23 </form>
24 </body>
25 </html>
26 """
27
28 def escape_html(s):
29 return cgi.escape(s, quote = True)
30
31 class PostPage(webapp2.RequestHandler):
32 def write_form(self, error="", title="", url=""):
33 self.response.out.write(form %{"error": error,
34 "link_title": escape_html(title),
35 "link_url": escape_html(url)})
36 def get(self):
37 #We should check to see if the user is logged in here instead of after our POST.
38 if users.get_current_user():
39 self.write_form()
40 else:
41 self.redirect(users.create_login_url(self.request.uri))
42
43 def post(self):
44 user = users.get_current_user()
45 user_link_url = self.request.get('link_url')
46 user_link_title = self.request.get('link_title')
47 user_name = user.nickname()
48 datastore.Post_Article(user_link_url,user_link_title,user_name)
49 self.redirect("/") | 29 - warning: bad-indentation
32 - warning: bad-indentation
33 - warning: bad-indentation
36 - warning: bad-indentation
38 - warning: bad-indentation
39 - warning: bad-indentation
40 - warning: bad-indentation
41 - warning: bad-indentation
43 - warning: bad-indentation
44 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
10 - warning: deprecated-module
11 - error: no-name-in-module
29 - error: no-member
|
1 # This file contains hardcoded strings and values
2
3 main_page="article_list.html"
4 main_title="Oratorical Decaf"
| Clean Code: No Issues Detected
|
1 import datetime
2 from google.appengine.ext import db
3 from google.appengine.api import users
4
5 '''
6 DATASTORE CLASSES
7 '''
8 class Articles(db.Model):
9 link = db.LinkProperty()
10 text = db.StringProperty()
11 votes = db.IntegerProperty()
12 posted = db.DateTimeProperty()
13 owner = db.StringProperty()
14
15 class Votes(db.Model):
16 article_id = db.IntegerProperty()
17 users = db.ListProperty(db.Email)
18
19
20 class Comments(db.Model):
21 article_id = db.IntegerProperty()
22 comment_owner = db.EmailProperty()
23 comment_text = db.StringProperty()
24 posted = db.DateTimeProperty()
25
26 '''
27 DATASTORE FUNCTIONS
28 '''
29 '''
30 Function: Post Article
31 Properties:
32 input:
33 link = URL link passed from script
34 text = Article title text passed from script
35 output:
36 None
37 required:
38 None
39 '''
40 def Post_Article(link,text,owner):
41 article_info = Articles()
42
43 #set the article data
44 article_info.link = link
45 article_info.text = text
46 article_info.votes = 0
47 article_info.posted = datetime.datetime.now()
48 article_info.owner = owner
49 #store it!
50 article_info.put()
51
52 '''
53 Function: Get Article List
54 Properties:
55 input:
56 None
57 output:
58 Articles -> list
59 [0] = database index id
60 [1] = article link (URL)
61 [2] = article text
62 [3] = article vote amount
63 required:
64 None
65 '''
66 def Get_Articles():
67 articles = []
68 result = []
69 for i in Articles.all().order('-posted'):
70 result = [i.key().id(),i.link,i.text,i.votes]
71 articles.append(result)
72 return(articles)
73
74 '''
75 Function: Post Comment
76 Properties:
77 input:
78 article_id = entity id of article from script
79 commentor = comment author (username)
80 comment_text = comment text body passed from script
81 output:
82 None
83 required:
84 None
85 '''
86 def Post_Comment(article_id,commentor,comment_text):
87 #note that article_id is actually an entity id which can be pulled when we load the comments
88 new_comment = Comments(Articles().get_by_id(ids = article_id).key())
89
90 #setup the comment data
91 new_comment.article_id = article_id
92 new_comment.comment_owner = commentor
93 new_comment.comment_text = comment_text
94 new_comment.posted = datetime.datetime.now()
95 new_comment.put()
96
97 '''
98 Function: Article Vote
99 Properties:
100 input:
101
102 output:
103
104 required:
105
106 '''
107 def Vote_Article(username,article_id,vote):
108 '''
109 note, vote can only be -1 or 1, 0 IS NOT acceptable
110 also note this is a two prong function, we must make sure the user has not voted prior; if they have not voted than
111 we must add the vote to the Articles() table and then also add an entry to the Votes() table.
112 '''
113 new_vote = Votes().all().filter("article_id =",int(article_id))
114     #we should always have an article that matches its ID; if not, then we are in serious trouble!
115 article_add_vote = Articles().get_by_id(ids = int(article_id))
116 email_address = db.Email(username)
117
118 #make sure the votes for this article exist, if not create a new entry for them.
119 if new_vote.get() is None:
120 #WARNING: we are redefining new_vote!
121 new_vote = Votes(Articles().get_by_id(ids = int(article_id)).key())
122 new_vote.article_id = int(article_id)
123 new_vote.users = [email_address]
124
125 article_add_vote.votes = int(vote)
126 #add the vote to the article first
127 article_add_vote.put()
128 #now add the votes entity
129 new_vote.put()
130 return
131 else:
132 #check to see if we have already voted for this article!
133 already_voted = Votes.all().filter("article_id =",article_id).filter("users in",[email_address]).get()
134 if already_voted is None:
135 return 1
136
137 new_vote = Votes().all().filter("article_id =",int(article_id)).get()
138 new_vote = Votes(Articles().get_by_id(ids = int(article_id)).key()).get_by_id(ids = new_vote.key().id())
139 new_vote.users.append(email_address)
140
141 article_add_vote.votes = int(article_add_vote.votes) + int(vote)
142
143 new_vote.put()
144 article_add_vote.put() | 9 - warning: bad-indentation
10 - warning: bad-indentation
11 - warning: bad-indentation
12 - warning: bad-indentation
13 - warning: bad-indentation
16 - warning: bad-indentation
17 - warning: bad-indentation
21 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
24 - warning: bad-indentation
41 - warning: bad-indentation
44 - warning: bad-indentation
45 - warning: bad-indentation
46 - warning: bad-indentation
47 - warning: bad-indentation
48 - warning: bad-indentation
50 - warning: bad-indentation
67 - warning: bad-indentation
68 - warning: bad-indentation
69 - warning: bad-indentation
70 - warning: bad-indentation
71 - warning: bad-indentation
72 - warning: bad-indentation
88 - warning: bad-indentation
91 - warning: bad-indentation
92 - warning: bad-indentation
93 - warning: bad-indentation
94 - warning: bad-indentation
95 - warning: bad-indentation
108 - warning: bad-indentation
113 - warning: bad-indentation
115 - warning: bad-indentation
116 - warning: bad-indentation
119 - warning: bad-indentation
121 - warning: bad-indentation
122 - warning: bad-indentation
123 - warning: bad-indentation
125 - warning: bad-indentation
127 - warning: bad-indentation
129 - warning: bad-indentation
130 - warning: bad-indentation
131 - warning: bad-indentation
133 - warning: bad-indentation
134 - warning: bad-indentation
135 - warning: bad-indentation
137 - warning: bad-indentation
138 - warning: bad-indentation
139 - warning: bad-indentation
141 - warning: bad-indentation
143 - warning: bad-indentation
144 - warning: bad-indentation
2 - error: no-name-in-module
3 - error: no-name-in-module
5 - warning: pointless-string-statement
8 - refactor: too-few-public-methods
15 - refactor: too-few-public-methods
20 - refactor: too-few-public-methods
26 - warning: pointless-string-statement
29 - warning: pointless-string-statement
52 - warning: pointless-string-statement
74 - warning: pointless-string-statement
97 - warning: pointless-string-statement
119 - refactor: no-else-return
107 - refactor: inconsistent-return-statements
3 - warning: unused-import
|
1 import webapp2
2 import os
3 import datastore
4 import config
5 import vote
6 import articles
7 import comment
8 import jinja2
9 from google.appengine.ext import db
10 from google.appengine.api import users
11
12 # jinja2 file loading copied from
13 # https://github.com/fRuiApps/cpfthw/blob/master/webapp2/views.py
14 TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), 'templates')
15 j_env = jinja2.Environment(loader=jinja2.FileSystemLoader(TEMPLATE_DIR))
16
17
18 class MainHandler(webapp2.RequestHandler):
19 def get(self):
20 template = j_env.get_template(config.main_page)
21 self.response.write('''
22 <a href="/article">Post new article</a>
23 ''')
24 articles = datastore.Get_Articles()
25 self.response.write(template.render(title=config.main_title,data = articles))
26
27 app = webapp2.WSGIApplication([
28 ('/', MainHandler),
29 ('/vote/(.*)', vote.RequestHandler),
30 ('/article', articles.PostPage),
31 ('/comment/(.*)', comment.RequestHandler)
32 ],
33 debug=True)
| 21 - error: syntax-error
|
1 import cgi
2 import datetime
3 import urllib
4 import webapp2
5 import datastore
6
7 from google.appengine.ext import db
8 from google.appengine.api import users
9
10 class RequestHandler(webapp2.RequestHandler):
11 def get(self, article_id):
12 self.response.out.write('<html><body>')
13
14 #article_key = self.request.get('article_key')
15
16 my_article = datastore.Articles().get_by_id(ids = int(article_id))
17 article_name = my_article.text
18
19 #user login check
20 user = users.get_current_user()
21 if not user:
22 self.redirect(users.create_login_url(self.request.uri))
23
24 #article name
25 self.response.out.write('Article Name: <b>%s</b>' % article_name)
26 self.response.out.write('<br><a href="/">Back</a>')
27
28 #comment query
29 comment_list = datastore.Comments().all().filter("article_id =",int(article_id))
30
31 #comment submission form
32 self.response.out.write("""
33 <form method="post">
34 <div><textarea name="comment_text" rows="3" cols="60"></textarea></div>
35 <div><input type="submit" value="Post"></div>
36 </form>""")
37
38
39 for comments in comment_list:
40 #sub-note - comments will always have an author
41 self.response.out.write('<b>%s</b> wrote:' % comments.comment_owner)
42 self.response.out.write('<blockquote>%s</blockquote>' % cgi.escape(comments.comment_text))
43
44 self.response.out.write("""</body></html>""" )
45
46 def post(self, article_id):
47
48 comment_text = self.request.get('comment_text')
49 datastore.Post_Comment(int(article_id),users.get_current_user().email(),cgi.escape(comment_text))
50
51 self.redirect('/comment/%s'% (article_id)) | 16 - error: syntax-error
|
1 #!/usr/bin/env python
2
3 from __future__ import print_function
4
5 import argparse
6 import socket
7 import sys
8
9 parser = argparse.ArgumentParser()
10 parser.add_argument('--socket', help="Socket to connect to", type=str,
11 default="/var/run/pdns.controlsocket")
12 parser.add_argument('--timeout', help="Socket timeout", type=int, default=5)
13 args = parser.parse_args()
14
15 s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
16 s.settimeout(args.timeout)
17
18 try:
19 s.connect(args.socket)
20 s.send('PING\n')
21 data = s.recv(1024)
22 except socket.timeout:
23 print ("CRITICAL: Socket timeout, please investigate!")
24 sys.exit(2)
25 except socket.error, e:
26 print ("CRITICAL: %s" % e)
27 sys.exit(2)
28
29 s.close()
30
31 if data != 'PONG':
32     print('CRITICAL: Ping error, didn\'t receive PONG!')
33 sys.exit(2)
34 else:
35 print("OK: Socket is responding with PONG")
36 sys.exit(0) | 25 - error: syntax-error
|
1 import json
2 import sys
3 import random
4 import os
5 import numpy as np
6 from collections import deque
7 from keras.models import Sequential
8 from keras.layers import *
9 from keras.optimizers import *
10
11
12 class KerasAgent:
13
14 def __init__(self, shape, action_size):
15 self.weight_backup = "bombergirl_weight.model"
16 self.shape = shape
17 self.action_size = action_size
18 self.memory = deque(maxlen=2000)
19 self.learning_rate = 0.001
20 self.gamma = 0.95
21 self.exploration_rate = 1.0
22 self.exploration_min = 0.01
23 self.exploration_decay = 0.995
24 self.model = self._build_model()
25
26 def _build_model(self):
27 model = Sequential()
28
29 # Convolutions.
30 model.add(Conv2D(
31 16,
32 kernel_size=(3, 3),
33 strides=(1, 1),
34 #data_format='channels_first',
35 input_shape=self.shape
36 ))
37 model.add(Activation('relu'))
38 model.add(Conv2D(
39 32,
40 kernel_size=(3, 3),
41 strides=(1, 1),
42 data_format='channels_first'
43 ))
44 model.add(Activation('relu'))
45
46         # Dense layers.
47 model.add(Flatten())
48 model.add(Dense(256))
49 model.add(Activation('relu'))
50 model.add(Dense(self.action_size))
51
52 model.summary()
53 model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
54 #model.compile(RMSprop(), 'MSE')
55
56 if os.path.isfile(self.weight_backup):
57 model.load_weights(self.weight_backup)
58 self.exploration_rate = self.exploration_min
59
60 return model
61
62 def save_model(self, name):
63 self.model.save(self.weight_backup)
64 self.model.save(name)
65
66 def act(self, state):
67 if np.random.rand() <= self.exploration_rate:
68 return random.randrange(self.action_size)
69 act_values = self.model.predict(state)
70 return np.argmax(act_values[0])
71
72 def remember(self, state, action, reward, next_state, done):
73 self.memory.append((state, action, reward, next_state, done))
74
75 def replay(self, sample_batch_size=256):
76 if len(self.memory) < sample_batch_size:
77 sample_batch_size=len(self.memory)
78 sample_batch = random.sample(self.memory, sample_batch_size)
79 for state, action, reward, next_state, done in sample_batch:
80 target = reward
81 if not done:
82 target = (reward + self.gamma *
83 np.amax(self.model.predict(next_state)[0]))
84 target_f = self.model.predict(state)
85 target_f[0][action] = target
86 self.model.fit(state, target_f, epochs=1, verbose=0)
87 if self.exploration_rate > self.exploration_min:
88 self.exploration_rate *= self.exploration_decay
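89 
90 # A minimal training-loop sketch (commented out; the `env` object and its
91 # reset()/step() API are assumptions, not part of this class):
92 # agent = KerasAgent(shape=(11, 13, 3), action_size=6)
93 # for episode in range(1000):
94 #     state, done = env.reset(), False
95 #     while not done:
96 #         action = agent.act(state)
97 #         next_state, reward, done = env.step(action)
98 #         agent.remember(state, action, reward, next_state, done)
99 #         state = next_state
100 #     agent.replay()
101 # agent.save_model('bombergirl_final.model')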
| 8 - warning: wildcard-import
9 - warning: wildcard-import
12 - refactor: too-many-instance-attributes
30 - error: undefined-variable
37 - error: undefined-variable
38 - error: undefined-variable
44 - error: undefined-variable
47 - error: undefined-variable
48 - error: undefined-variable
49 - error: undefined-variable
50 - error: undefined-variable
53 - error: undefined-variable
72 - refactor: too-many-arguments
72 - refactor: too-many-positional-arguments
76 - refactor: consider-using-min-builtin
1 - warning: unused-import
2 - warning: unused-import
|
1 #from .memreader import MemoryReader
2 import time
3
4 class Game:
5 enemies = [] #{x,y}
6 bombs = [] #{x,y}
7 bonus = []
8 girl = {"x": 0, "y": 0}
9 start_time = 0
10 time = 0
11 game_inputs = {
12 0: "MoveUp",
13 1: "MoveDown",
14 2: "MoveLeft",
15 3: "MoveRight",
16 4: "LeaveBomb",
17 5: "None"
18 }
19 girl_alive = True
20 done = False
21 lose = False
22 victory = False
23
24 ##const
25 TIME_NORM = 10
26 MOVEMENT_RW = 5
27 BONUS_RW = 10
28 ALIVE_RW = 20
29 ENEMIES_NORM = 5
30 REWARD_BOMB = 25
31 REWARD_VICTORY = 100
32 REWARD_LOSE = 50
33
34 MAX_DISTANCE = 8
35
36 def restartState(self):
37 self.girl_alive = True
38 self.done = False
39 self.lose = False
40 self.victory = False
41 self.time = 0
42 self.start_time = time.time()
43
44 def getCurrentTimeNormalized(self):
45 return self.time / self.TIME_NORM
46
47 def getDistanceNormalized(self, elem1, elem2):
48 return abs(elem1['x'] - elem2['x']) + abs(elem1['y'] - elem2['y'])
49
50 def updateTime(self):
51 self.time = time.time() - self.start_time
52
53 def getReward(self, action):
54 reward = 0
55         # Penalize for the number of enemies
56         reward -= self.ENEMIES_NORM*len(self.enemies)
57         # Penalize as time passes
58         reward -= self.getCurrentTimeNormalized()
59 
60         # Penalize/reward the girl being close to/far from a bomb
61         for bomb in self.bombs:
62             distance = self.getDistanceNormalized(bomb, self.girl)
63             if distance < self.MAX_DISTANCE:
64                 reward -= distance
65             else:
66                 reward += distance
67 
68         if(action == 4):
69             # Reward placing a bomb
70             reward += self.REWARD_BOMB
71             for enemy in self.enemies:
72                 # Reward the bomb being closer to an enemy
73                 distance = self.getDistanceNormalized(enemy, self.girl)
74                 if distance < self.MAX_DISTANCE:
75                     reward += self.REWARD_BOMB/distance
76 
77         if(action < 4):
78             # Reward moving
79             reward += self.MOVEMENT_RW
80             # Reward being closer to a bonus
81             for bonus in self.bonus:
82                 reward += self.BONUS_RW / self.getDistanceNormalized(bonus, self.girl)
83 
84         # Reward still being alive and playing
85         if(self.girl_alive):
86             reward += self.ALIVE_RW
87 
88         # Penalize losing
89         if self.lose:
90             reward -= self.REWARD_LOSE
91 
92         # Reward winning
93         if self.victory:
94             reward += self.REWARD_VICTORY
95
96 return reward
| 65 - error: syntax-error
|
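A quick usage sketch of the reward shaping above, assuming the Game class is in scope; the positions are made up for illustration:

game = Game()
game.restartState()
game.girl = {"x": 2, "y": 3}
game.enemies = [{"x": 5, "y": 3}]
game.bombs = [{"x": 2, "y": 4}]
game.updateTime()
print(game.getReward(4))  # bomb dropped near an enemy: bonuses outweigh penalties
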
1 from serpent.game import Game
2
3 from .api.api import BombermanAPI
4
5 from serpent.utilities import Singleton
6
7 from serpent.game_launchers.web_browser_game_launcher import WebBrowser
8
9
10 class SerpentBombermanGame(Game, metaclass=Singleton):
11
12 def __init__(self, **kwargs):
13 kwargs["platform"] = "web_browser"
14
15 kwargs["window_name"] = "Safari"
16
17 kwargs["url"] = "http://0.0.0.0:8000"
18 kwargs["browser"] = WebBrowser.DEFAULT
19
20 super().__init__(**kwargs)
21
22 self.api_class = BombermanAPI
23 self.api_instance = None
24
25 @property
26 def screen_regions(self):
27
28         # per-platform pixel offsets for the capture window
29 dic_offset = {
30 "WINDOWS_CHROME": {
31 # "top": 81,
32 # "left": 5
33 "top": 0,
34 "left": 0
35 }
36 }
37
38 offset = dic_offset["WINDOWS_CHROME"]
39
40 regions = {
41 "GAME_REGION": (offset["top"], offset["left"], 416 + offset["top"], 544 + offset["left"]), #544x416
42 "GAME_OVER_REGION": (118 + offset["top"], 163 + offset["left"], 151 + offset["top"], 383 + offset["left"]), #220x33 - 163,118
43 "WIN_REGION": (118 + offset["top"], 171 + offset["left"], 149 + offset["top"], 372 + offset["left"]), # 201x31 - 171,118
44 }
45
46 return regions
47
48 @property
49 def ocr_presets(self):
50 presets = {
51 "SAMPLE_PRESET": {
52 "extract": {
53 "gradient_size": 1,
54 "closing_size": 1
55 },
56 "perform": {
57 "scale": 10,
58 "order": 1,
59 "horizontal_closing": 1,
60 "vertical_closing": 1
61 }
62 }
63 }
64
65 return presets
66
67 # from serpent.game import Game
68 #
69 # from .api.api import BombermanAPI
70 #
71 # from serpent.utilities import Singleton
72 #
73 # from serpent.game_launchers.web_browser_game_launcher import WebBrowser
74 #
75 #
76 # class SerpentBombermanGame(Game, metaclass=Singleton):
77 #
78 # def __init__(self, **kwargs):
79 # kwargs["platform"] = "web_browser"
80 #
81 # kwargs["window_name"] = "Safari"
82 #
83 # kwargs["url"] = "http://0.0.0.0:8000"
84 # kwargs["browser"] = WebBrowser.DEFAULT
85 #
86 # super().__init__(**kwargs)
87 #
88 # self.api_class = BombermanAPI
89 # self.api_instance = None
90 #
91 # @property
92 # def screen_regions(self):
93 # regions = {
94 # "GAME_REGION": (0, 0, 480, 549), ##545x416
95 # "GAME_OVER_REGION": (160,160, 225, 404),
96 # "WIN_REGION": (175,130, 220, 421),
97 # }
98 #
99 # return regions
100 #
101 # @property
102 # def ocr_presets(self):
103 # presets = {
104 # "SAMPLE_PRESET": {
105 # "extract": {
106 # "gradient_size": 1,
107 # "closing_size": 1
108 # },
109 # "perform": {
110 # "scale": 10,
111 # "order": 1,
112 # "horizontal_closing": 1,
113 # "vertical_closing": 1
114 # }
115 # }
116 # }
117 #
118 # return presets
| 12 - warning: bad-indentation
13 - warning: bad-indentation
15 - warning: bad-indentation
17 - warning: bad-indentation
18 - warning: bad-indentation
20 - warning: bad-indentation
22 - warning: bad-indentation
23 - warning: bad-indentation
25 - warning: bad-indentation
26 - warning: bad-indentation
29 - warning: bad-indentation
38 - warning: bad-indentation
40 - warning: bad-indentation
46 - warning: bad-indentation
48 - warning: bad-indentation
49 - warning: bad-indentation
50 - warning: bad-indentation
65 - warning: bad-indentation
3 - error: relative-beyond-top-level
|
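The region tuples above appear to follow Serpent's (top, left, bottom, right) convention (the `#544x416` comment matches a 544-wide, 416-high crop); a small NumPy sketch of how such a tuple would slice a captured frame, using a synthetic frame:

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # fake full screenshot, H x W x C
top, left, bottom, right = (0, 0, 416, 544)      # GAME_REGION with zero offsets
game_region = frame[top:bottom, left:right]
print(game_region.shape)  # (416, 544, 3)
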
1 from tensorforce.agents import PPOAgent
2
3 from serpent.utilities import SerpentError
4
5 import numpy as np
6 import os
7
8 # This file is borrowed from SerpentAIsaacGameAgentPlugin:
9 # https://github.com/SerpentAI/SerpentAIsaacGameAgentPlugin/blob/master/files/helpers/ppo.py
10 class SerpentPPO:
11
12 def __init__(self, frame_shape=None, game_inputs=None):
13
14 if frame_shape is None:
15 raise SerpentError("A 'frame_shape' tuple kwarg is required...")
16
17 states_spec = {"type": "float", "shape": frame_shape}
18
19 if game_inputs is None:
20 raise SerpentError("A 'game_inputs' dict kwarg is required...")
21
22 self.game_inputs = game_inputs
23 self.game_inputs_mapping = self._generate_game_inputs_mapping()
24
25 actions_spec = {"type": "int", "num_actions": len(self.game_inputs)}
26
27 network_spec = [
28 {"type": "conv2d", "size": 1, "window": 2, "stride": 1},
29 {"type": "flatten"},
30 # {"type": "dense", "size": 64},
31 {"type": "dense", "size": 6}
32 ]
33
34 self.agent = PPOAgent(
35 states=states_spec,
36 actions=actions_spec,
37 network=network_spec,
38
39 batched_observe=256,
40 batching_capacity=1000,
41 # BatchAgent
42 #keep_last_timestep=True,
43 # PPOAgent
44 step_optimizer=dict(
45 type='adam',
46 learning_rate=1e-4
47 ),
48 optimization_steps=10,
49 # Model
50 scope='ppo'
51 #discount=0.97,
52 # DistributionModel
53 #distributions=None,
54 #entropy_regularization=0.01,
55 # PGModel
56 #baseline_mode=None,
57 #baseline=None,
58 #baseline_optimizer=None,
59 #gae_lambda=None,
60 # PGLRModel
61 #likelihood_ratio_clipping=None,
62 #summary_spec=summary_spec,
63 #distributed_spec=None,
64 # More info
65 #device=None,
66 #session_config=None,
67 #saver=None,
68 #variable_noise=None,
69 #states_preprocessing_spec=None,
70 #explorations_spec=None,
71 #reward_preprocessing_spec=None,
72 #execution=None,
73 #actions_exploration=None,
74 #update_mode=None,
75 #memory=None,
76 #subsampling_fraction=0.1
77 )
78
79 def generate_action(self, game_frame_buffer):
80 states = np.stack(
81 game_frame_buffer,
82 axis=2
83 )
84
85 # Get prediction from agent, execute
86 action = self.agent.act(states)
87 label = self.game_inputs_mapping[action]
88
89 return action, label, self.game_inputs[label]
90
91 def observe(self, reward=0, terminal=False):
92 self.agent.observe(reward=reward, terminal=terminal)
93
94 def _generate_game_inputs_mapping(self):
95 mapping = dict()
96
97 for index, key in enumerate(self.game_inputs):
98 mapping[index] = key
99
100 return mapping
101
102 def save_model(self):
103 self.agent.save_model(directory=os.path.join(os.getcwd(), "datasets", "bomberman", "ppo_model"), append_timestep=False)
104
105 def restore_model(self):
106 self.agent.restore_model(directory=os.path.join(os.getcwd(), "datasets", "bomberman"))
| 44 - refactor: use-dict-literal
95 - refactor: use-dict-literal
|
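A hedged sketch of the two pure-Python pieces of SerpentPPO that run without tensorforce installed: the index-to-label mapping built by _generate_game_inputs_mapping() and the frame stacking done in generate_action(). The inputs dict here is hypothetical:

import numpy as np

# hypothetical inputs dict; the real one maps labels to key presses
game_inputs = {"MoveUp": ["w"], "MoveDown": ["s"], "None": []}

# same mapping _generate_game_inputs_mapping() builds
mapping = dict(enumerate(game_inputs))

# generate_action() stacks the frame buffer on a new last axis
frame_buffer = [np.zeros((100, 100)) for _ in range(4)]
states = np.stack(frame_buffer, axis=2)
print(states.shape, mapping[0])  # (100, 100, 4) MoveUp
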
1 import smtplib, re, socket, optparse, sys
2 import os.path
3 import pickle
4 from email.mime.text import MIMEText
5 import utilitaires
6
7 parser = optparse.OptionParser()
8 parser.add_option("-a", "--address", action="store", dest="address", default="localhost")
9 parser.add_option("-p", "--port", action="store", dest="port", type=int, default=1337)
10 opts = parser.parse_args(sys.argv[1:])[0]
11
12 destination = (opts.address, opts.port)
13 #Create the socket
14 serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
15 serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
16 serversocket.bind(destination)
17 serversocket.listen(5)
18 print("Listening on port " + str(serversocket.getsockname()[1]))
19 nbConnexions = 0
20 nbDeconnexions = 0
21
22 while True:
23     #A client connects to the server
24 (s, address) = serversocket.accept()
25 nbConnexions += 1
26 print(str(nbConnexions) + "e connexion au serveur")
27
28     #Receive the chosen option from the connection menu.
29 option = s.recv(1024).decode()
30
31     #If the user chooses to log in
32 if option == "1":
33
34         #Check that the account exists and the password is valid
35 id = s.recv(1024).decode()
36 mdp = s.recv(1024).decode()
37 verificationID = utilitaires.verifierID(id)
38 s.send(verificationID.encode())
39 if verificationID != "0":
40 verificationMDP = utilitaires.connexion(id, mdp)
41 s.send(verificationMDP.encode())
42
43 while verificationID != "1" or verificationMDP != "1":
44 id = s.recv(1024).decode()
45 mdp = s.recv(1024).decode()
46 verificationID = utilitaires.verifierID(id)
47 s.send(verificationID.encode())
48 if verificationID != "0":
49 verificationMDP = utilitaires.connexion(id, mdp)
50 s.send(verificationMDP.encode())
51 if verificationMDP == "-1":
52 continue
53
54
55     #If the user chooses to create an account
56 elif option == "2":
57         #Create the identifier
58 id = s.recv(1024).decode()
59 mdp = s.recv(1024).decode()
60 verificationID = utilitaires.verifierID(id)
61 s.send(verificationID.encode())
62 if verificationID != "1":
63 verificationMDP = utilitaires.veififierMDP(mdp)
64 s.send(verificationMDP.encode())
65 while verificationID != "0" or verificationMDP != "1":
66 id = s.recv(1024).decode()
67 mdp = s.recv(1024).decode()
68 verificationID = utilitaires.verifierID(id)
69 s.send(verificationID.encode())
70 if verificationID != "1":
71 verificationMDP = utilitaires.veififierMDP(mdp)
72 s.send(verificationMDP.encode())
73 verificationErreur = utilitaires.creerCompte(id, mdp)
74 s.send(verificationErreur.encode())
75 if verificationErreur == "0":
76 continue
77
78
79
80 while True:
81         # Receive the chosen option from the main menu.
82 option = s.recv(1024).decode()
83
84         #Send an email
85 if option == "1":
86             # receive the email address and check that it is valid
87 emailFrom = s.recv(1024).decode()
88 emailAddress = s.recv(1024).decode()
89 while not re.search(r"^[^@]+@[^@]+\.[^@]+$", emailAddress):
90 msg = "-1"
91 s.send(msg.encode())
92 emailAddress = s.recv(1024).decode()
93 msg = "0"
94 s.send(msg.encode())
95
96             # build the email
97 subject = s.recv(1024).decode()
98 data = s.recv(1024).decode()
99 courriel = MIMEText(data)
100 courriel["From"] = emailFrom
101 courriel["To"] = emailAddress
102 courriel["Subject"] = subject
103
104             #External
105 use_smtp_ulaval = False
106 if(re.match(r"^[^@]+@reseauglo\.ca$", emailAddress) == None):
107 use_smtp_ulaval = True
108
109 if use_smtp_ulaval == True:
110
111                 # send the email through the school's SMTP server
112 try:
113 smtpConnection = smtplib.SMTP(host="smtp.ulaval.ca", timeout=10)
114 smtpConnection.sendmail(courriel["From"], courriel["To"], courriel.as_string())
115 smtpConnection.quit()
116 msg = "0"
117 s.send(msg.encode())
118 except:
119 msg = "-1"
120 s.send(msg.encode())
121 else:
122 chemin_dossier = emailAddress.replace("@reseauglo.ca", "")
123 verification = utilitaires.courrielLocal(chemin_dossier, courriel['Subject'], courriel.as_string())
124 if(verification != "0"):
125 utilitaires.courrielDump(courriel['Subject'], courriel.as_string())
126 s.send(verification.encode())
127
128 elif option == "2":
129 id = s.recv(1024).decode()
130 files = os.listdir(id)
131 files.remove("config.txt")
132 files = utilitaires.sortDate(id, files)
133 mails = []
134
135 for file in files:
136 file = file.replace(".txt", "")
137 mails.append(file)
138
139 data_string = pickle.dumps(mails)
140 s.send(data_string)
141
142 email_id = int(s.recv(1024).decode()) - 1
143 email_content = utilitaires.ouvrirLocal(id, files[email_id])
144 s.send(email_content.encode())
145
146 elif option == "3":
147 id = s.recv(1024).decode()
148 filesize = utilitaires.getSize(id)
149 s.send(str(filesize).encode())
150 files = os.listdir(id)
151 files.remove("config.txt")
152 files = sorted(files, key=str)
153 mails = []
154
155 for file in files:
156 file = file.replace(".txt", "")
157 print(file)
158 mails.append(file)
159
160 data_string = pickle.dumps(mails)
161 s.send(data_string)
162
163 elif option == "4":
164 nbDeconnexions += 1
165 print(str(nbDeconnexions) + "e deconnexion au serveur")
166 break
| 35 - warning: redefined-builtin
1 - warning: deprecated-module
118 - warning: bare-except
|
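The server validates recipient addresses with the regex on line 89; a quick standalone check of what that pattern accepts and rejects:

import re

pattern = r"^[^@]+@[^@]+\.[^@]+$"
for addr in ["alice@reseauglo.ca", "bob@ulaval.ca", "no-at-sign", "a@b"]:
    print(addr, bool(re.search(pattern, addr)))
# alice@reseauglo.ca True / bob@ulaval.ca True / no-at-sign False / a@b False
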
1 import os.path
2 import re
3 from hashlib import sha256
4 from os.path import getsize
5
6 #Creates a new account in the server directory
7 #id: folder name
8 #mdp: password
9 #return: "0" if a problem occurred with the file, "1" if the account was created
10 def creerCompte(id, mdp):
11 state = "1"
12 try:
13 os.makedirs(id)
14 file = open(id + "/config.txt", "w")
15 file.write(sha256(mdp.encode()).hexdigest())
16 file.close()
17 except:
18 state = "0"
19 return state
20
21 #Checks whether the account exists
22 #id: name of the account folder
23 #return: "1" if the account exists, "0" otherwise
24 def verifierID(id):
25 state = "0"
26 if os.path.exists(id + "/config.txt"):
27 state = "1"
28 return state
29
30 #Checks whether the password meets the requirements
31 #mdp: the password
32 #return: "1" if the password meets the requirements, "0" otherwise.
33 def veififierMDP(mdp):
34 state = "0"
35 if (re.search(r"^[a-zA-Z0-9]{6,12}$", mdp) and re.search(r".*[0-9].*", mdp) and re.search(r".*[a-zA-Z].*",mdp)):
36 state = "1"
37 return state
38
39 #Logs a user in by opening their folder
40 #id, mdp: the user's identifier and password
41 #Return: "-1" if a problem occurs while opening the file
42 #        "0" if the password does not match
43 #        "1" if the login succeeds
44 def connexion(id, mdp):
45 state = "1"
46 try:
47 file = open(id + "/config.txt", "r")
48 password = file.readline()
49 file.close()
50 if sha256(mdp.encode()).hexdigest() != password:
51 state = "0"
52 except:
53 state = "-1"
54
55 return state
56
57 #Stores an email in a user's folder
58 #id, subject, data: the user's identifier, the subject and the body of the message
59 #Return: "-1" if a problem occurs while opening the file
60 #        "0" if everything goes well
61 def courrielLocal(id, subject, data):
62 state = "0"
63 try:
64 file = open(id + "/" + subject + ".txt", "w")
65 file.write(data)
66 file.close()
67 state = "0"
68 except:
69 state = "-1"
70
71 return state
72
73 #Opens a local email
74 #id, filename: the user's identifier and the email file name
75 def ouvrirLocal(id, filename):
76 try:
77 file = open( id + "/" + filename, "r")
78 str_content = file.read();
79 file.close()
80 return str_content
81 except:
82 print("Fichier introuvable.")
83
84
85
86 #Saves an email addressed to a nonexistent user
87 #subject, data: subject and body of the email
88 def courrielDump(subject, data):
89 try:
90 if not os.path.exists("DESTERREUR"):
91 os.makedirs("DESTERREUR")
92 file = open("DESTERREUR/" + subject + ".txt", "w")
93 file.write(data)
94 file.close()
95 except:
96 print("Guess somebody fucked up good.")
97
98
99 #Returns the size of a directory
100 #id: the directory
101 def getSize(id):
102 try:
103 size = getsize(id)
104 return size
105 except:
106 print("Mauvais nom de repertoire")
107
108
109 #Returns the list sorted by date
110 #id, liste: the list to sort
111 def sortDate(id, liste):
112 liste.sort(key=lambda x: os.path.getmtime(id + "/" + x))
113 return liste
114
115 #Returns the list sorted alphabetically
116 #liste: the list to sort
117 def sortAlpha(liste):
118     liste.sort()
119 return liste
| 78 - warning: unnecessary-semicolon
10 - warning: redefined-builtin
17 - warning: bare-except
14 - warning: unspecified-encoding
14 - refactor: consider-using-with
24 - warning: redefined-builtin
44 - warning: redefined-builtin
52 - warning: bare-except
47 - warning: unspecified-encoding
47 - refactor: consider-using-with
61 - warning: redefined-builtin
68 - warning: bare-except
64 - warning: unspecified-encoding
64 - refactor: consider-using-with
75 - warning: redefined-builtin
81 - warning: bare-except
77 - warning: unspecified-encoding
75 - refactor: inconsistent-return-statements
77 - refactor: consider-using-with
95 - warning: bare-except
92 - warning: unspecified-encoding
92 - refactor: consider-using-with
101 - warning: redefined-builtin
105 - warning: bare-except
101 - refactor: inconsistent-return-statements
111 - warning: redefined-builtin
|
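Several labels above (unspecified-encoding, consider-using-with) target the bare open() calls; a minimal sketch of the pattern pylint prefers, applied to creerCompte's logic with a hypothetical folder name:

import os
from hashlib import sha256

def creer_compte(dossier, mdp):
    # a with-block closes the file even on error, and an explicit encoding
    # silences pylint's unspecified-encoding warning
    os.makedirs(dossier, exist_ok=True)
    with open(os.path.join(dossier, "config.txt"), "w", encoding="utf-8") as f:
        f.write(sha256(mdp.encode()).hexdigest())
    return "1"

print(creer_compte("demo_account", "abc123"))  # "1"
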
1 import smtplib, re, socket, optparse, sys
2 import os.path
3 from email.mime.text import MIMEText
4 from hashlib import sha256
5 import getpass
6 import pickle
7
8 parser = optparse.OptionParser()
9 parser.add_option("-a", "--address", action="store", dest="address", default="localhost")
10 parser.add_option("-p", "--port", action="store", dest="port", type=int, default=1337)
11 opts = parser.parse_args(sys.argv[1:])[0]
12
13 destination = (opts.address, opts.port)
14 #Connect to the server
15 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
16 s.settimeout(10)
17 s.connect(destination)
18 s.settimeout(None)
19
20 while True:
21     #Connection menu: choose an option
22 option = input("Menu de connexion \n1. Se connecter \n2. Creer un compte \n")
23 while option != "1" and option != "2":
24 option = input("Veuillez saisir une option valide:\n")
25 s.send(option.encode())
26
27     #Log in
28 if option == "1":
29 id = input("Veuillez saisir votre identifiant:\n")
30 mdp = getpass.getpass("Veuillez saisir votre mot de passe:\n")
31 s.send(id.encode())
32 s.send(mdp.encode())
33 reponseID = s.recv(1024).decode()
34 if reponseID != "0":
35 reponseMDP = s.recv(1024).decode()
36 while reponseID != "1" or reponseMDP != "1":
37 if reponseID != "1":
38 id = input("Veuillez saisir un identifiant valide:\n")
39 mdp = getpass.getpass("Veuillez saisir votre mot de passe:\n")
40 elif reponseMDP == "-1":
41 print("Desole, un probleme est survenu.")
42 continue
43 else:
44 print("Ce n'est pas le bon mot de passe. Veuillez reessayer.")
45 id = input("Veuillez saisir votre identifiant:\n")
46 mdp = getpass.getpass("Veuillez saisir votre mot de passe:\n")
47 s.send(id.encode())
48 s.send(mdp.encode())
49 reponseID = s.recv(1024).decode()
50 if reponseID != "0":
51 reponseMDP = s.recv(1024).decode()
52
53
54
55     #Create an account
56 elif option == "2":
57 id = input("Veuillez choisir un identifiant:\n")
58 mdp = getpass.getpass("Veuillez choisir un mot de passe contenant de 6 à 12 carateres, dont au moins une lettre et un chiffre:\n")
59 s.send(id.encode())
60 s.send(mdp.encode())
61 reponseID = s.recv(1024).decode()
62 if reponseID != "1":
63 reponseMDP = s.recv(1024).decode()
64 while reponseID != "0" or reponseMDP != "1":
65 if reponseID != "0":
66 id = input("Cet identifiant est deja pris, veuillez en choisir un autre:\n")
67 mdp = getpass.getpass("Veuillez saisir votre mot de passe:\n")
68 else:
69 print("Ce mot de passe ne respecte pas les conditions, veuilelz en choisir un autre.")
70 id = input("Veuillez saisir votre identifiant a nouveau:\n")
71 mdp = getpass.getpass("Veuillez saisir votre nouveau mot de passe:\n")
72 s.send(id.encode())
73 s.send(mdp.encode())
74 reponseID = s.recv(1024).decode()
75 if reponseID != "1":
76 reponseMDP = s.recv(1024).decode()
77 reponseCreationCompte = s.recv(1024).decode()
78 if reponseCreationCompte == "0":
79 print("Desole, un probleme est survenu")
80 continue
81
82
83 while True:
84 option = input("\nMenu principale\n1. Envoi de courriels\n2. Consultation de courriels\n3. Statistiques\n4. Quitter\n")
85 while option not in ["1", "2", "3", "4"]:
86 option = input("Veuillez saisir une option valide:\n")
87
88 s.send(option.encode())
89
90 if option == "1":
91 email_from = id + "@reseauglo.ca"
92 s.send(email_from.encode())
93
94 response = "-1"
95 while(response == "-1"):
96 email_to = input("\nÀ: ")
97 s.send(email_to.encode())
98 response = s.recv(1024).decode()
99
100 subject = input("\nSujet: ")
101 s.send(subject.encode())
102 data = input("\nMessage: ")
103 s.send(data.encode())
104
105 response = s.recv(1024).decode()
106 if(response == "-1"):
107 print("\nErreur lors de l'envoie du courriel.")
108 continue
109 else:
110 print("\nCourriel envoyé avec succès!")
111 elif option == "2":
112 s.send(id.encode())
113 data_string = s.recv(1024)
114 mails = pickle.loads(data_string)
115
116 print("\nListe de vos courriels: \n")
117
118 compteur = 1;
119 for mail in mails:
120 print("\n" + str(compteur) + ". " + mail)
121 compteur += 1
122
123 email_id = input("\nQuel courriel souhaitez-vous visionner? \n")
124
125 s.send(email_id.encode())
126
127 email_content = s.recv(1024).decode()
128 print("\n" + email_content)
129 input("\nAppuyez sur Enter pour continuer...")
130 continue
131 elif option == "3":
132 s.send(id.encode())
133
134 filesize = s.recv(1024).decode()
135
136 data_string = s.recv(1024)
137 mails = pickle.loads(data_string)
138
139 print("\nNombre de messages: " + str(len(mails)) + "\n")
140 print("\nTaille du repertoire personnel (en octets): " + filesize + "\n")
141 print("\nListe de vos courriels: \n")
142
143 compteur = 1;
144 for mail in mails:
145 print("\n" + str(compteur) + ". " + mail)
146 compteur += 1
147 input("\nAppuyez sur Enter pour continuer...")
148 continue
149 elif option == "4":
150 break;
151 s.close()
152 exit() | 118 - warning: unnecessary-semicolon
143 - warning: unnecessary-semicolon
150 - warning: unnecessary-semicolon
29 - warning: redefined-builtin
1 - warning: deprecated-module
23 - refactor: consider-using-in
36 - error: possibly-used-before-assignment
106 - refactor: no-else-continue
152 - refactor: consider-using-sys-exit
1 - warning: unused-import
1 - warning: unused-import
2 - warning: unused-import
3 - warning: unused-import
4 - warning: unused-import
|
1 import os
2 from collections import OrderedDict
3
4 import torch
5 import torch.nn as nn
6 import torchvision.models as models
7
8 class LinearEvalModel(nn.Module):
9 def __init__(self, arch='vgg19', dim=512, num_classes=10):
10 super().__init__()
11
12 if arch == 'vgg19':
13 self.features = models.vgg19().features
14         elif arch == 'vgg19_bn':
15 self.features = models.vgg19_bn().features
16 elif arch == 'resnet18':
17 resnet18 = models.resnet18(pretrained=False)
18 self.features = nn.Sequential(*list(resnet18.children())[:-1])
19
20 self.avg_pool = nn.AdaptiveAvgPool2d((1,1))
21
22 self.fc = nn.Linear(dim, num_classes)
23
24 def weight_init(self, weight_path, device, arch):
25 state_dict = torch.load(os.path.join(weight_path, 'best_model.pth'), device)
26 new_state_dict = OrderedDict()
27
28 if 'resnet' in arch:
29 for k, v in state_dict.items():
30 if 'encoder' in k:
31 k = k.replace('encoder.', '')
32 new_state_dict[k] = v
33
34 self.features.load_state_dict(new_state_dict)
35 elif 'vgg' in arch:
36 for k, v in state_dict.items():
37 if 'encoder' in k:
38 k = k.replace('encoder.0.', '')
39 new_state_dict[k] = v
40
41 self.features.load_state_dict(new_state_dict)
42
43 for m in self.features.parameters():
44 m.requires_grad = False
45
46 def forward(self, x):
47 x = self.features(x)
48 x = self.avg_pool(x)
49 x = x.squeeze()
50 out = self.fc(x)
51
52 return out
| 5 - refactor: consider-using-from-import
6 - refactor: consider-using-from-import
|
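Assuming the class is importable under the path the eval script uses, a quick forward-pass sanity check of LinearEvalModel on random input (no pretrained weights loaded):

import torch
from models.create_linear_eval_model import LinearEvalModel  # path used by the eval script

model = LinearEvalModel(arch='resnet18', dim=512, num_classes=10)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(4, 3, 64, 64))  # batch of 4 RGB images
print(logits.shape)  # torch.Size([4, 10])
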
1 import os
2 import sys
3 import time
4 import logging
5
6 import numpy as np
7
8 import torch
9 from torch import optim
10 from torch.optim import lr_scheduler
11 from torch.utils.data import DataLoader
12 import torchvision.models as models
13 import torchvision.datasets as datasets
14
15 from models.simple_siamese_net import SiameseNetwork
16 from models.cosine_contrastive_loss import CosineContrastiveLoss
17 from utils.trainer import Trainer
18 from utils.helper import Save_Handle, AverageMeter, worker_init_fn
19 from utils.visualizer import ImageDisplayer, LossGraphPloter
20 from datasets.spatial import SpatialDataset
21 from datasets.cifar10 import PosNegCifar10, get_simsiam_dataset
22
23 class SimSiamTrainer(Trainer):
24 def setup(self):
25 """initialize the datasets, model, loss and optimizer"""
26 args = self.args
27 self.vis = ImageDisplayer(args, self.save_dir)
28 self.tr_graph = LossGraphPloter(self.save_dir)
29 self.vl_graph = LossGraphPloter(self.save_dir)
30
31 if torch.cuda.is_available():
32 self.device = torch.device("cuda")
33 self.device_count = torch.cuda.device_count()
34 logging.info('using {} gpus'.format(self.device_count))
35 else:
36 raise Exception("gpu is not available")
37
38 if args.cifar10:
39 self.datasets = {x: get_simsiam_dataset(args, x) for x in ['train', 'val']}
40
41 else:
42 self.datasets = {x: SpatialDataset(x,
43 args.data_dir,
44 args.crop_size,
45 (args.div_row, args.div_col),
46 args.aug) for x in ['train', 'val']}
47
48 self.dataloaders = {x: DataLoader(self.datasets[x],
49 batch_size=args.batch_size,
50 shuffle=(True if x == 'train' else False),
51 num_workers=args.num_workers*self.device_count,
52 pin_memory=(True if x == 'train' else False),
53 worker_init_fn=worker_init_fn) for x in ['train', 'val']}
54
55 # Define model, loss, optim
56 self.model = SiameseNetwork(args)
57 self.model.to(self.device)
58
59 self.criterion = CosineContrastiveLoss()
60 self.criterion.to(self.device)
61
62 self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
63
64 #self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[80, 120, 160, 200, 250], gamma=0.1)
65 self.scheduler = lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=args.max_epoch)
66
67 self.start_epoch = 0
68 self.best_loss = np.inf
69 if args.resume:
70 suf = args.resume.rsplit('.', 1)[-1]
71 if suf == 'tar':
72 checkpoint = torch.load(args.resume, self.device)
73 self.model.load_state_dict(checkpoint['model_state_dict'])
74 self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
75 self.start_epoch = checkpoint['epoch'] + 1
76 elif suf == 'pth':
77 self.model.load_state_dict(torch.load(args.resume, self.device))
78
79 self.save_list = Save_Handle(max_num=args.max_model_num)
80
81 def train(self):
82 """training process"""
83 args = self.args
84 for epoch in range(self.start_epoch, args.max_epoch):
85 logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-'*5)
86 self.epoch = epoch
87 self.train_epoch(epoch)
88 if epoch % args.val_epoch == 0 and epoch >= args.val_start:
89 self.val_epoch(epoch)
90
91 def train_epoch(self, epoch):
92 epoch_loss = AverageMeter()
93 epoch_start = time.time()
94 self.model.train() # Set model to training mode
95
96 for step, ((input1, input2), label) in enumerate(self.dataloaders['train']):
97 input1 = input1.to(self.device)
98 input2 = input2.to(self.device)
99
100 with torch.set_grad_enabled(True):
101 (z1, z2), (p1, p2) = self.model(input1, input2)
102 loss = self.criterion(z1, z2, p1, p2)
103 epoch_loss.update(loss.item(), input1.size(0))
104 self.optimizer.zero_grad()
105 loss.backward()
106 self.optimizer.step()
107 self.scheduler.step()
108
109 # visualize
110 if step == 0:
111 self.vis(epoch, 'train', input1, input2, label)
112 pass
113
114 logging.info('Epoch {} Train, Loss: {:.5f}, lr: {:.5f}, Cost {:.1f} sec'
115 .format(self.epoch, epoch_loss.get_avg(), self.optimizer.param_groups[0]['lr'], time.time()-epoch_start))
116
117 self.tr_graph(self.epoch, epoch_loss.get_avg(), 'tr')
118
119 if epoch % self.args.check_point == 0:
120 model_state_dic = self.model.state_dict()
121 save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
122 torch.save({
123 'epoch': self.epoch,
124 'optimizer_state_dict': self.optimizer.state_dict(),
125 'model_state_dict': model_state_dic
126 }, save_path)
127 self.save_list.append(save_path) # control the number of saved models
128
129 def val_epoch(self, epoch):
130 epoch_start = time.time()
131 self.model.eval() # Set model to evaluate mode
132 epoch_loss = AverageMeter()
133
134 for step, ((input1, input2), label) in enumerate(self.dataloaders['val']):
135 input1 = input1.to(self.device)
136 input2 = input2.to(self.device)
137
138 with torch.set_grad_enabled(False):
139 (z1, z2), (p1, p2) = self.model(input1, input2)
140 loss = self.criterion(z1, z2, p1, p2)
141 epoch_loss.update(loss.item(), input1.size(0))
142
143 # visualize
144 if step == 0:
145 self.vis(epoch, 'val', input1, input2, label)
146 pass
147
148 logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
149 .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
150
151 self.vl_graph(self.epoch, epoch_loss.get_avg(), 'vl')
152
153 model_state_dic = self.model.state_dict()
154 if self.best_loss > epoch_loss.get_avg():
155 self.best_loss = epoch_loss.get_avg()
156 logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
157 torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth')) | 12 - refactor: consider-using-from-import
13 - refactor: consider-using-from-import
20 - error: no-name-in-module
21 - error: no-name-in-module
23 - refactor: too-many-instance-attributes
34 - warning: logging-format-interpolation
36 - warning: broad-exception-raised
50 - refactor: simplifiable-if-expression
52 - refactor: simplifiable-if-expression
85 - warning: logging-not-lazy
112 - warning: unnecessary-pass
146 - warning: unnecessary-pass
27 - warning: attribute-defined-outside-init
28 - warning: attribute-defined-outside-init
29 - warning: attribute-defined-outside-init
32 - warning: attribute-defined-outside-init
33 - warning: attribute-defined-outside-init
39 - warning: attribute-defined-outside-init
42 - warning: attribute-defined-outside-init
48 - warning: attribute-defined-outside-init
56 - warning: attribute-defined-outside-init
59 - warning: attribute-defined-outside-init
62 - warning: attribute-defined-outside-init
65 - warning: attribute-defined-outside-init
67 - warning: attribute-defined-outside-init
75 - warning: attribute-defined-outside-init
68 - warning: attribute-defined-outside-init
155 - warning: attribute-defined-outside-init
79 - warning: attribute-defined-outside-init
86 - warning: attribute-defined-outside-init
2 - warning: unused-import
12 - warning: unused-import
13 - warning: unused-import
21 - warning: unused-import
|
1 import torch
2 import torch.nn as nn
3
4 class projection_MLP(nn.Module):
5 def __init__(self, in_dim=512, hidden_dim=512, out_dim=512): # bottleneck structure
6 super().__init__()
7
8 self.layers = nn.Sequential(
9 nn.Linear(in_dim, hidden_dim),
10 nn.ReLU(),
11 nn.Linear(hidden_dim, hidden_dim),
12 nn.ReLU(),
13 nn.Linear(hidden_dim, out_dim)
14 )
15
16 def forward(self, x):
17 if x.dim() != 2:
18 x = x.squeeze()
19 x = self.layers(x)
20 return x
21
22 class prediction_MLP(nn.Module):
23 def __init__(self, in_dim=512, hidden_dim=256, out_dim=512): # bottleneck structure
24 super().__init__()
25 self.layer1 = nn.Sequential(
26 nn.Linear(in_dim, hidden_dim),
27 nn.ReLU(inplace=True)
28 )
29 self.layer2 = nn.Linear(hidden_dim, out_dim)
30
31 def forward(self, x):
32 if x.dim() != 2:
33 x = x.squeeze()
34 x = self.layer1(x)
35 x = self.layer2(x)
36 return x
37
38 class SiameseNetwork(nn.Module):
39 def __init__(self, model, pattern_feature = 'conv-512x1x1', projection=False, prediction=False):
40 super(SiameseNetwork, self).__init__()
41 self.projection = projection
42 self.prediction = prediction
43
44 if pattern_feature == 'conv-512x1x1':
45 features = model().features
46 max_pool = nn.AdaptiveAvgPool2d((1,1))
47 self.encoder = nn.Sequential(features, max_pool)
48
49 if projection:
50 self.projector = projection_MLP(in_dim=512, hidden_dim=512, out_dim=512)
51
52 if prediction:
53 self.predictor = prediction_MLP(in_dim=512, out_dim=512)
54
55 elif pattern_feature == 'fc-4096':
56 features = model()
57             self.encoder = nn.Sequential(*[features.classifier[0]])
58
59 if projection:
60 self.projector = projection_MLP(in_dim=4096, hidden_dim=4096, out_dim=4096)
61
62 if prediction:
63 self.predictor = prediction_MLP(in_dim=4096, out_dim=4096)
64
65
66 def forward(self, input1, input2):
67 if self.prediction:
68 f, h = self.encoder, self.predictor
69 z1, z2 = f(input1), f(input2)
70
71 if self.projection:
72                 z1, z2 = self.projector(z1), self.projector(z2)
73
74 p1, p2 = h(z1), h(z2)
75
76 else:
77 f = self.encoder
78 z1, z2 = f(input1), f(input2)
79
80 if self.projection:
81                 z1, z2 = self.projector(z1), self.projector(z2)
82
83 p1, p2 = None, None
84
85 return (z1, z2), (p1, p2)
86
| 2 - refactor: consider-using-from-import
40 - refactor: super-with-arguments
1 - warning: unused-import
|
1 from typing import Callable, Optional
2 import random
3
4 from PIL import Image
5 import numpy as np
6
7 import torch
8 import torchvision
9 from torchvision import transforms
10 from torchvision.datasets import CIFAR10
11
12 np.random.seed(765)
13 random.seed(765)
14
15 class SupervisedPosNegCifar10(torch.utils.data.Dataset):
16 def __init__(self, dataset, phase):
17 # split by some thresholds here 80% anchors, 20% for posnegs
18 lengths = [int(len(dataset)*0.8), int(len(dataset)*0.2)]
19 self.anchors, self.posnegs = torch.utils.data.random_split(dataset, lengths)
20
21 if phase == 'train':
22 self.anchor_transform = transforms.Compose([transforms.Resize(64),
23 transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
24 transforms.RandomHorizontalFlip(0.5),
25 transforms.ToTensor(),
26 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
27 self.posneg_transform = transforms.Compose([transforms.Resize(64),
28 transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
29 transforms.RandomHorizontalFlip(0.5),
30 transforms.ToTensor(),
31 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
32 else:
33 self.anchor_transform = transforms.Compose([transforms.Resize(64),
34 transforms.ToTensor(),
35 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
36 self.posneg_transform = transforms.Compose([transforms.Resize(64),
37 transforms.ToTensor(),
38 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
39
40 def __len__(self):
41 return len(self.anchors)
42
43 def __getitem__(self, index):
44 anchor, label = self.anchors[index]
45 if self.anchor_transform is not None:
46 anchor = self.anchor_transform(anchor)
47
48 # now pair this up with an image from the same class in the second stream
49 if random.random() > 0.5:
50 A = np.where(np.array(self.posnegs.dataset.targets) == label)[0]
51 posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
52 posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
53 target = torch.tensor([1]).long()
54 else:
55 A = np.where(np.array(self.posnegs.dataset.targets) != label)[0]
56 posneg_idx = np.random.choice(A[np.in1d(A, self.posnegs.indices)])
57 posneg, label = self.posnegs[np.where(self.posnegs.indices==posneg_idx)[0][0]]
58 target = torch.tensor([0]).long()
59
60 if self.posneg_transform is not None:
61 posneg = self.posneg_transform(posneg)
62
63 return anchor, posneg, target, label
64
65 class PosNegCifar10(torch.utils.data.Dataset):
66 def __init__(self, dataset, phase):
67 # split by some thresholds here 80% anchors, 20% for posnegs
68 self.dataset = dataset
69
70 if phase == 'train':
71 self.anchor_transform = transforms.Compose([transforms.Resize(64),
72 transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
73 transforms.RandomHorizontalFlip(0.5),
74 transforms.ToTensor(),
75 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
76 self.posneg_transform = transforms.Compose([transforms.Resize(64),
77 transforms.RandomResizedCrop(scale=(0.16, 1), ratio=(0.75, 1.33), size=64),
78 transforms.RandomHorizontalFlip(0.5),
79 transforms.ToTensor(),
80 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
81 else:
82 self.anchor_transform = transforms.Compose([transforms.Resize(64),
83 transforms.ToTensor(),
84 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
85 self.posneg_transform = transforms.Compose([transforms.Resize(64),
86 transforms.ToTensor(),
87 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
88
89 def __len__(self):
90 return len(self.dataset)
91
92 def __getitem__(self, index):
93 anchor, label = self.dataset[index]
94
95 # now pair this up with an image from the same class in the second stream
96 if random.random() > 0.5:
97 posneg = anchor
98 target = torch.tensor([1]).long()
99 else:
100 while True:
101 neg_idx = random.randint(0, len(self.dataset)-1)
102 if neg_idx != index:
103 break
104 posneg, label = self.dataset[neg_idx]
105 target = torch.tensor([0]).long()
106
107 if self.anchor_transform is not None:
108 anchor = self.anchor_transform(anchor)
109
110 if self.posneg_transform is not None:
111 posneg = self.posneg_transform(posneg)
112
113 return anchor, posneg, target, label
114
115 ### Simple Siamese code
116
117 imagenet_mean_std = [[0.485, 0.456, 0.406],[0.229, 0.224, 0.225]]
118
119 class SimSiamTransform():
120 def __init__(self, image_size, train, mean_std=imagenet_mean_std):
121 self.train = train
122 if self.train:
123             image_size = 224 if image_size is None else image_size # by default SimSiam uses image size 224
124             p_blur = 0.5 if image_size > 32 else 0 # exclude cifar
125             # the paper didn't specify this, so feel free to change the value;
126             # I use the SimCLR setting: a 50% chance of applying the Gaussian blur
127             # (the 32 threshold is for CIFAR training, where the blur is disabled)
128 self.transform = transforms.Compose([
129 transforms.RandomResizedCrop(image_size, scale=(0.2, 1.0)),
130 transforms.RandomHorizontalFlip(),
131 transforms.RandomApply([transforms.ColorJitter(0.4,0.4,0.4,0.1)], p=0.8),
132 transforms.RandomGrayscale(p=0.2),
133 transforms.RandomApply([transforms.GaussianBlur(kernel_size=image_size//20*2+1, sigma=(0.1, 2.0))], p=p_blur),
134 transforms.ToTensor(),
135 transforms.Normalize(*mean_std)
136 ])
137
138 else:
139 self.transform = transforms.Compose([
140 transforms.Resize(int(image_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256
141 transforms.CenterCrop(image_size),
142 transforms.ToTensor(),
143 transforms.Normalize(*mean_std)
144 ])
145
146 def __call__(self, x):
147 x1 = self.transform(x)
148 x2 = self.transform(x)
149 return x1, x2
150
151
152 def get_simsiam_dataset(args, phase, download=True, debug_subset_size=None):
153 if phase == 'train':
154 train = True
155 transform = SimSiamTransform(args.crop_size, train)
156 elif phase == 'val':
157 train = False
158 transform = SimSiamTransform(args.crop_size, train)
159 elif phase == 'linear_train':
160 train = True
161 transform = transforms.Compose([
162 transforms.RandomResizedCrop(args.crop_size, scale=(0.08, 1.0), ratio=(3.0/4.0,4.0/3.0), interpolation=Image.BICUBIC),
163 transforms.RandomHorizontalFlip(),
164 transforms.ToTensor(),
165 transforms.Normalize(*imagenet_mean_std)
166 ])
167 elif phase == 'linear_val':
168 train = False
169 transform = transforms.Compose([
170 transforms.Resize(int(args.crop_size*(8/7)), interpolation=Image.BICUBIC), # 224 -> 256
171 transforms.CenterCrop(args.crop_size),
172 transforms.ToTensor(),
173 transforms.Normalize(*imagenet_mean_std)
174 ])
175
176 dataset = torchvision.datasets.CIFAR10(root="CIFAR10_Dataset", train=train, transform=transform, download=download)
177
178 if debug_subset_size is not None:
179 dataset = torch.utils.data.Subset(dataset, range(0, debug_subset_size)) # take only one batch
180 dataset.classes = dataset.dataset.classes
181 dataset.targets = dataset.dataset.targets
182
183 return dataset | 120 - warning: dangerous-default-value
140 - error: no-member
119 - refactor: too-few-public-methods
162 - error: no-member
170 - error: no-member
176 - error: possibly-used-before-assignment
1 - warning: unused-import
1 - warning: unused-import
10 - warning: unused-import
|
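SimSiamTransform returns two independently augmented views of the same image; a small sketch on a synthetic PIL image, with the module path assumed from the trainer's imports:

from PIL import Image
import numpy as np
from datasets.cifar10 import SimSiamTransform  # module path taken from the trainer

transform = SimSiamTransform(image_size=32, train=True)
img = Image.fromarray(np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8))
x1, x2 = transform(img)
print(x1.shape, x2.shape)  # torch.Size([3, 32, 32]) twice, with different crops
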
1 import os
2 import numpy as np
3 import torch
4
5 def worker_init_fn(worker_id):
6 np.random.seed(np.random.get_state()[1][0] + worker_id)
7
8 class Save_Handle(object):
9     """Handle the number of saved model files, deleting the oldest when over max_num."""
10 def __init__(self, max_num):
11 self.save_list = []
12 self.max_num = max_num
13
14 def append(self, save_path):
15 if len(self.save_list) < self.max_num:
16 self.save_list.append(save_path)
17 else:
18 remove_path = self.save_list[0]
19 del self.save_list[0]
20 self.save_list.append(save_path)
21 if os.path.exists(remove_path):
22 os.remove(remove_path)
23
24
25 class AverageMeter(object):
26 """Computes and stores the average and current value"""
27 def __init__(self):
28 self.reset()
29
30 def reset(self):
31 self.val = 0
32 self.avg = 0
33 self.sum = 0
34 self.count = 0
35
36 def update(self, val, n=1):
37 self.val = val
38 self.sum += val * n
39 self.count += n
40 self.avg = 1.0 * self.sum / self.count
41
42 def get_avg(self):
43 return self.avg
44
45 def get_count(self):
46 return self.count
47
48 ## cannot use in training
49 @torch.no_grad()
50 def accuracy(meter, output1, output2, target):
51     """Computes the accuracy over the predictions"""
52
53 for logit in [output1, output2]:
54 corrects = (torch.max(logit, 1)[1].data == target.squeeze().long().data).sum()
55 accu = float(corrects) / float(target.size()[0])
56 meter.update(accu)
57
58 return meter
| 8 - refactor: useless-object-inheritance
8 - refactor: too-few-public-methods
25 - refactor: useless-object-inheritance
37 - warning: attribute-defined-outside-init
40 - warning: attribute-defined-outside-init
|
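AverageMeter keeps a count-weighted running average; a short usage sketch (import path taken from the trainers):

from utils.helper import AverageMeter  # import path used by the trainers

meter = AverageMeter()
meter.update(0.5, n=10)   # e.g. batch loss 0.5 averaged over 10 samples
meter.update(1.0, n=30)   # batch loss 1.0 over 30 samples
print(meter.get_avg())    # (0.5*10 + 1.0*30) / 40 = 0.875
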
1 # in : original image
2 # out : cropped img1 (anchor)
3 # cropped img2 (compete)
4 # target (positive img1 - img2 : 1, negative img1 - img2 : 0)
5
6 import os
7 from glob import glob
8 import random
9
10 import numpy as np
11 from PIL import Image
12 from PIL import ImageFilter
13
14 import torch
15 import torch.utils.data as data
16 import torchvision.transforms.functional as F
17 from torchvision import transforms
18
19 random.seed(765)
20
21 def divide_patches(img, row, col):
22 patche_size_w = int(img.size[0] / col)
23 patche_size_h = int(img.size[1] / row)
24
25 patches = []
26 for cnt_i, i in enumerate(range(0, img.size[1], patche_size_h)):
27 if cnt_i == row:
28 break
29 for cnt_j, j in enumerate(range(0, img.size[0], patche_size_w)):
30 if cnt_j == col:
31 break
32 box = (j, i, j+patche_size_w, i+patche_size_h)
33 patches.append(img.crop(box))
34
35 return patches
36
37 def create_pos_pair(patches):
38 idx = random.randint(0, len(patches)-1)
39 img1 = patches[idx]
40 img2 = patches[idx]
41 target = np.array([1])
42 return img1, img2, target
43
44 def create_neg_pair(patches):
45 idx = random.sample(range(0, len(patches)-1), k=2)
46 img1 = patches[idx[0]]
47 img2 = patches[idx[1]]
48 target = np.array([0])
49 return img1, img2, target
50
51 def random_crop(im_h, im_w, crop_h, crop_w):
52 res_h = im_h - crop_h
53 res_w = im_w - crop_w
54 i = random.randint(0, res_h)
55 j = random.randint(0, res_w)
56 return i, j, crop_h, crop_w
57
58 class GaussianBlur(object):
59 """Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
60
61 def __init__(self, sigma=[.1, 2.]):
62 self.sigma = sigma
63
64 def __call__(self, x):
65 sigma = random.uniform(self.sigma[0], self.sigma[1])
66 x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
67 return x
68
69 class PosNegSpatialDataset(data.Dataset):
70     # divide_num : 3 -> 3x3 = 9 patches
71 def __init__(self, data_path, crop_size, divide_num=(3,3), aug=True):
72 self.data_path = data_path
73 self.im_list = sorted(glob(os.path.join(self.data_path, '*.jpg')))
74
75 self.c_size = crop_size
76 self.d_row = divide_num[0]
77 self.d_col = divide_num[1]
78
79 if aug:
80 self.aug = transforms.Compose([
81 transforms.CenterCrop(self.c_size),
82 transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
83 transforms.RandomGrayscale(p=0.2),
84 transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
85 transforms.RandomHorizontalFlip()
86 ])
87 else:
88 self.aug = transforms.CenterCrop(self.c_size)
89
90 self.trans = transforms.Compose([
91 transforms.ToTensor(),
92 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
93 ])
94
95 def __len__(self):
96 return len(self.im_list)
97
98 def __getitem__(self, index):
99 img_path = self.im_list[index]
100 img = Image.open(img_path).convert('RGB')
101 patches = divide_patches(img, self.d_row, self.d_col)
102
103 if random.random() > 0.5:
104 img1, img2, target = create_pos_pair(patches)
105 else:
106 img1, img2, target = create_neg_pair(patches)
107
108 img1 = self.aug(img1)
109 img2 = self.aug(img2)
110
111 target = torch.from_numpy(target).long()
112
113 img1 = self.trans(img1)
114 img2 = self.trans(img2)
115
116 return img1, img2, target, None
117
118 class SpatialDataset(data.Dataset):
119     # divide_num : 3 -> 3x3 = 9 patches
120 def __init__(self, phase, data_path, crop_size, divide_num=(3,3), aug=True):
121
122 with open(os.path.join(data_path, '{}.txt'.format(phase)), 'r') as f:
123 im_list = f.readlines()
124
125 self.im_list = [im_name.replace('\n', '') for im_name in im_list]
126
127 self.c_size = crop_size
128 self.d_row = divide_num[0]
129 self.d_col = divide_num[1]
130
131 self.trans = transforms.Compose([
132 transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
133 transforms.RandomGrayscale(p=0.2),
134 transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
135 transforms.RandomHorizontalFlip(),
136 transforms.ToTensor(),
137 transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
138 ])
139
140
141 def __len__(self):
142 return len(self.im_list)
143
144 def __getitem__(self, index):
145 img_path = self.im_list[index]
146 img = Image.open(img_path).convert('RGB')
147 patches = divide_patches(img, self.d_row, self.d_col)
148
149 img1, img2, label = create_pos_pair(patches)
150
151 assert img1.size == img2.size
152 wd, ht = img1.size
153 i, j, h, w = random_crop(ht, wd, self.c_size, self.c_size)
154 img1 = F.crop(img1, i, j, h, w)
155 img2 = F.crop(img2, i, j, h, w)
156
157 img1 = self.trans(img1)
158 img2 = self.trans(img2)
159
160 imgs = (img1, img2)
161
162 return imgs, label | 15 - refactor: consider-using-from-import
58 - refactor: useless-object-inheritance
61 - warning: dangerous-default-value
58 - refactor: too-few-public-methods
120 - refactor: too-many-arguments
120 - refactor: too-many-positional-arguments
122 - warning: unspecified-encoding
120 - warning: unused-argument
|
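divide_patches slices an image into a row x col grid of crops; a quick check on a blank PIL image, with the module path taken from the trainer's imports:

from PIL import Image
from datasets.spatial import divide_patches  # module path taken from the trainer

img = Image.new('RGB', (544, 416))        # width x height
patches = divide_patches(img, row=2, col=4)
print(len(patches), patches[0].size)      # 8 (136, 208)
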
1 import torch
2 import torch.nn as nn
3 import torch.nn.functional as F
4
5 class L2ContrastiveLoss(nn.Module):
6 """
7 Contrastive loss
8 Takes embeddings of two samples and a target label == 1 if samples are from the same class and label == 0 otherwise
9 Args :
10 output1 & output2 : [N, dim]
11 target : [N]
12 """
13
14 def __init__(self, margin=1.0):
15 super().__init__()
16 self.margin = margin
17 self.eps = 1e-9
18
19 def forward(self, output1, output2, target, size_average=True):
20 target = target.squeeze()
21 distances = (output2 - output1).pow(2).sum(1) # squared distances
22 losses = 0.5 * (target.float() * distances +
23 (1 + -1 * target).float() * F.relu(self.margin - (distances + self.eps).sqrt()).pow(2))
24 return losses.mean() if size_average else losses.sum() | 2 - refactor: consider-using-from-import
1 - warning: unused-import
|
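A toy sanity check of L2ContrastiveLoss (import path taken from the trainer): identical positives give zero loss, and negatives already farther apart than the margin also give zero loss:

import torch
from models.l2_contrastive_loss import L2ContrastiveLoss  # import path used by the trainer

criterion = L2ContrastiveLoss(margin=1.0)
a = torch.zeros(2, 4)
t_pos, t_neg = torch.ones(2), torch.zeros(2)
print(criterion(a, torch.zeros(2, 4), t_pos).item())        # 0.0: positives at distance 0
print(criterion(a, torch.full((2, 4), 2.0), t_neg).item())  # 0.0: negatives beyond the margin
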
1 import torch
2 import torch.nn as nn
3
4 class SiameseNetwork(nn.Module):
5 def __init__(self, model, pretrained=False, simple_model=False):
6 super(SiameseNetwork, self).__init__()
7 self.simple_model = simple_model
8 if simple_model:
9 self.features = nn.Sequential(nn.Conv2d(3, 32, 5), nn.PReLU(),
10 nn.MaxPool2d(2, stride=2),
11 nn.Conv2d(32, 64, 5), nn.PReLU(),
12 nn.MaxPool2d(2, stride=2),
13 nn.Conv2d(64, 64, 5), nn.PReLU(),
14 nn.MaxPool2d(2, stride=2))
15
16 self.classifier = nn.Sequential(nn.Linear(64 * 4 * 4, 256),
17 nn.PReLU(),
18 nn.Linear(256, 256),
19 nn.PReLU(),
20 nn.Linear(256, 2))
21
22 else:
23 if pretrained:
24 self.encoder = model(pretrained=True)
25 self.encoder.classifier = nn.Sequential(*[self.encoder.classifier[i] for i in range(6)])
26 self.encoder.classifier.add_module('out', nn.Linear(4096, 2))
27 else:
28 self.encoder = model(num_classes=2)
29
30 def forward_once(self, x):
31 if self.simple_model:
32 output = self.features(x)
33 output = output.view(output.size()[0], -1)
34 output = self.classifier(output)
35
36 else:
37 output = self.encoder(x)
38
39 return output
40
41 def forward(self, input1, input2):
42 output1 = self.forward_once(input1)
43 output2 = self.forward_once(input2)
44 return output1, output2 | 2 - refactor: consider-using-from-import
6 - refactor: super-with-arguments
1 - warning: unused-import
|
1 import os
2 import argparse
3 import logging
4 import numpy as np
5
6 import torch
7 import torch.nn as nn
8 import torch.nn.functional as F
9 import torch.optim as optim
10 from torch.optim import lr_scheduler
11 from torch.utils.data import DataLoader
12 import torchvision.models as models
13
14 from datasets.cifar10 import get_simsiam_dataset
15 from models.create_linear_eval_model import LinearEvalModel
16 from utils.visualizer import AccLossGraphPloter
17 from utils.logger import setlogger
18
19 args = None
20
21 def parse_args():
22 parser = argparse.ArgumentParser(description='Test ')
23 parser.add_argument('--save-dir', default='/mnt/hdd02/contrastive-learn/0113-193048',
24 help='model directory')
25 parser.add_argument('--device', default='0', help='assign device')
26 parser.add_argument('--arch', default='vgg19', help='model architecture')
27
28 parser.add_argument('--max-epoch', default=100, type=int, help='train epoch')
29 parser.add_argument('--crop-size', default=224, type=int, help='input size')
30 parser.add_argument('--batch-size', default=512, type=int, help='input size')
31 parser.add_argument('--lr', default=1e-1, type=float, help='learning rate')
32 parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
33
34 args = parser.parse_args()
35 return args
36
37 if __name__ == '__main__':
38 args = parse_args()
39 os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip() # set vis gpu
40 plotter = AccLossGraphPloter(args.save_dir)
41 setlogger(os.path.join(args.save_dir, 'eval.log')) # set logger
42
43 datasets = {x: get_simsiam_dataset(args, x) for x in ['linear_train', 'linear_val']}
44
45 dataloaders = {x: DataLoader(datasets[x],
46 batch_size=(args.batch_size),
47 shuffle=(True if x == 'linear_train' else False),
48 num_workers=8,
49 pin_memory=(True if x == 'linear_train' else False)) for x in ['linear_train', 'linear_val']}
50
51 device = torch.device('cuda')
52
53 model = LinearEvalModel(arch=args.arch)
54 model.weight_init(args.save_dir, device, args.arch) ## initialize & freeze
55
56 criterion = nn.CrossEntropyLoss()
57
58 optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
59 scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[40, 60, 80], gamma=0.1)
60
61     ## Training & Test Loop
62 model.to(device)
63 for epoch in range(args.max_epoch):
64 model.train()
65 losses, acc, step, total = 0., 0., 0., 0.
66 for data, target in dataloaders['linear_train']:
67 data, target = data.to(device), target.to(device)
68
69 logits = model(data)
70
71 optimizer.zero_grad()
72 loss = criterion(logits, target)
73 loss.backward()
74 losses += loss.item()
75 optimizer.step()
76 scheduler.step()
77
78 pred = F.softmax(logits, dim=-1).max(-1)[1]
79 acc += pred.eq(target).sum().item()
80
81 step += 1
82 total += target.size(0)
83
84 tr_loss = losses / step
85 tr_acc = acc / total * 100.
86 logging.info('[Train Epoch: {0:2d}], loss: {1:.3f}, acc: {2:.3f}'.format(epoch, tr_loss, tr_acc))
87
88 model.eval()
89 losses, acc, step, total = 0., 0., 0., 0.
90 with torch.no_grad():
91 for data, target in dataloaders['linear_val']:
92 data, target = data.to(device), target.to(device)
93
94 logits = model(data)
95 loss = criterion(logits, target)
96 losses += loss.item()
97
98 pred = F.softmax(logits, dim=-1).max(-1)[1]
99 acc += pred.eq(target).sum().item()
100
101 step += 1
102 total += target.size(0)
103
104 vl_loss = losses / step
105 vl_acc = acc / total * 100.
106 logging.info('[Test Epoch: {0:2d}], loss: {1:.3f} acc: {2:.2f}'.format(epoch, vl_loss, vl_acc))
107
108 plotter(epoch, tr_acc, vl_acc, tr_loss, vl_loss, args.arch) | 7 - refactor: consider-using-from-import
9 - refactor: consider-using-from-import
12 - refactor: consider-using-from-import
14 - error: no-name-in-module
34 - warning: redefined-outer-name
47 - refactor: simplifiable-if-expression
49 - refactor: simplifiable-if-expression
4 - warning: unused-import
12 - warning: unused-import
|
1 from utils.contrastive_trainer import CoTrainer
2 from utils.simsiam_trainer import SimSiamTrainer
3 import argparse
4 import os
5 import math
6 import torch
7 args = None
8
9 def parse_args():
10 parser = argparse.ArgumentParser(description='Train ')
11 parser.add_argument('--data-dir', default='/mnt/hdd02/process-ucf',
12 help='training data directory')
13 parser.add_argument('--save-dir', default='D:/exp_results',
14 help='directory to save models.')
15 parser.add_argument('--cifar10', action='store_true',
16 help='use cifar10 dataset')
17
18 parser.add_argument('--SimSiam', action='store_true',
19 help='try Simple Siamese Net')
20
21 parser.add_argument('--arch', type=str, default='vgg19',
22 help='the model architecture [vgg19, vgg19_bn, resnet18]')
23 parser.add_argument('--pattern-feature', type=str, default='conv-512x1x1',
24 help='the feature to contrast [conv-512x1x1, fc-4096]')
25 parser.add_argument('--projection', action='store_true',
26 help='use MLP projection')
27 parser.add_argument('--prediction', action='store_true',
28 help='use MLP prediction')
29 parser.add_argument('--mlp-bn', action='store_true',
30 help='use MLP Batch Normalization')
31
32 parser.add_argument('--lr', type=float, default=1e-2,
33 help='the initial learning rate')
34 parser.add_argument('--weight-decay', type=float, default=1e-4,
35 help='the weight decay')
36 parser.add_argument('--momentum', type=float, default=0.9,
37 help='the momentum')
38
39     parser.add_argument('--div-row', type=int, default=3,
40                         help='number of patch rows per image')
41     parser.add_argument('--div-col', type=int, default=3,
42                         help='number of patch columns per image')
43     parser.add_argument('--aug', action='store_true',
44                         help='apply data augmentation')
45 parser.add_argument('--margin', type=float, default=1.0,
46 help='the margin of loss function')
47
48 parser.add_argument('--resume', default='',
49 help='the path of resume training model')
50 parser.add_argument('--max-model-num', type=int, default=30,
51 help='max models num to save ')
52 parser.add_argument('--check_point', type=int, default=100,
53 help='milestone of save model checkpoint')
54
55 parser.add_argument('--max-epoch', type=int, default=300,
56 help='max training epoch')
57     parser.add_argument('--val-epoch', type=int, default=10,
58                         help='run validation every this many epochs')
59 parser.add_argument('--val-start', type=int, default=0,
60 help='the epoch start to val')
61
62 parser.add_argument('--batch-size', type=int, default=8,
63 help='train batch size')
64 parser.add_argument('--device', default='0', help='assign device')
65 parser.add_argument('--num-workers', type=int, default=8,
66 help='the num of training process')
67
68 parser.add_argument('--crop-size', type=int, default=224,
69 help='the crop size of the train image')
70
71 parser.add_argument('--visual-num', type=int, default=4,
72 help='the number of visualize images')
73
74 args = parser.parse_args()
75 return args
76
77 if __name__ == '__main__':
78 args = parse_args()
79 torch.backends.cudnn.benchmark = True
80 os.environ['CUDA_VISIBLE_DEVICES'] = args.device.strip('-') # set vis gpu
81 if args.SimSiam:
82 trainer = SimSiamTrainer(args)
83 else:
84 trainer = CoTrainer(args)
85 trainer.setup()
86 trainer.train()
| 74 - warning: redefined-outer-name
5 - warning: unused-import
|
1 import os
2 from glob import glob
3
4 import numpy as np
5 import argparse
6
7 def parse_args():
8 parser = argparse.ArgumentParser(description='Test ')
9 parser.add_argument('--data-dir', default='/mnt/hdd02/shibuya_scramble',
10 help='original data directory')
11 args = parser.parse_args()
12 return args
13
14 if __name__ == '__main__':
15 args = parse_args()
16
17 ## Random Train-Val split
18
19 im_list = sorted(glob(os.path.join(args.data_dir, '*.jpg')))
20 im_list = [im_name for im_name in im_list]
21
22 tr_im_list = list(np.random.choice(im_list, size=int(len(im_list)*0.8), replace=False))
23 vl_im_list = list(set(im_list) - set(tr_im_list))
24
25 for phase in ['train', 'val']:
26 with open(os.path.join(args.data_dir, './{}.txt'.format(phase)), mode='w') as f:
27 if phase == 'train':
28 f.write('\n'.join(tr_im_list))
29 elif phase == 'val':
30 f.write('\n'.join(vl_im_list))
| 11 - warning: redefined-outer-name
20 - refactor: unnecessary-comprehension
26 - warning: unspecified-encoding
|
1 import os
2 import sys
3 import time
4 import logging
5
6 import numpy as np
7
8 import torch
9 from torch import optim
10 from torch.optim import lr_scheduler
11 from torch.utils.data import DataLoader
12 import torchvision.models as models
13 import torchvision.datasets as datasets
14
15 from models.siamese_net import SiameseNetwork
16 from models.l2_contrastive_loss import L2ContrastiveLoss
17 from utils.trainer import Trainer
18 from utils.helper import Save_Handle, AverageMeter, worker_init_fn
19 from utils.visualizer import ImageDisplayer, EmbeddingDisplayer
20 from datasets.spatial import SpatialDataset
21 from datasets.cifar10 import PosNegCifar10
22
23 class CoTrainer(Trainer):
24 def setup(self):
25 """initialize the datasets, model, loss and optimizer"""
26 args = self.args
27 self.vis = ImageDisplayer(args, self.save_dir)
28 self.emb = EmbeddingDisplayer(args, self.save_dir)
29
30 if torch.cuda.is_available():
31 self.device = torch.device("cuda")
32 self.device_count = torch.cuda.device_count()
33 logging.info('using {} gpus'.format(self.device_count))
34 else:
35 raise Exception("gpu is not available")
36
37 if args.cifar10:
38 # Download and create datasets
39 or_train = datasets.CIFAR10(root="CIFAR10_Dataset", train=True, transform=None, download=True)
40 or_val = datasets.CIFAR10(root="CIFAR10_Dataset", train=False, transform=None, download=True)
41
42 # splits CIFAR10 into two streams
43 self.datasets = {x: PosNegCifar10((or_train if x == 'train' else or_val),
44 phase=x) for x in ['train', 'val']}
45 else:
46 self.datasets = {x: SpatialDataset(os.path.join(args.data_dir, x),
47 args.crop_size,
48 args.div_num,
49 args.aug) for x in ['train', 'val']}
50
51 self.dataloaders = {x: DataLoader(self.datasets[x],
52 batch_size=args.batch_size,
53 shuffle=(True if x == 'train' else False),
54 num_workers=args.num_workers*self.device_count,
55 pin_memory=(True if x == 'train' else False),
56 worker_init_fn=worker_init_fn) for x in ['train', 'val']}
57
58 # Define model, loss, optim
59 self.model = SiameseNetwork(models.__dict__[args.arch], pattern_feature = args.pattern_feature)
60 self.model.to(self.device)
61
62 self.criterion = L2ContrastiveLoss(args.margin)
63 self.criterion.to(self.device)
64
65 self.optimizer = optim.SGD(self.model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
66
67 self.scheduler = lr_scheduler.MultiStepLR(self.optimizer, milestones=[80, 120, 160, 200, 250], gamma=0.1)
68
69 self.start_epoch = 0
70 self.best_loss = np.inf
71 if args.resume:
72 suf = args.resume.rsplit('.', 1)[-1]
73 if suf == 'tar':
74 checkpoint = torch.load(args.resume, self.device)
75 self.model.load_state_dict(checkpoint['model_state_dict'])
76 self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
77 self.start_epoch = checkpoint['epoch'] + 1
78 elif suf == 'pth':
79 self.model.load_state_dict(torch.load(args.resume, self.device))
80
81 self.save_list = Save_Handle(max_num=args.max_model_num)
82
83 def train(self):
84 """training process"""
85 args = self.args
86 for epoch in range(self.start_epoch, args.max_epoch):
87 logging.info('-'*5 + 'Epoch {}/{}'.format(epoch, args.max_epoch - 1) + '-'*5)
88 self.epoch = epoch
89 self.train_epoch(epoch)
90 if epoch % args.val_epoch == 0 and epoch >= args.val_start:
91 self.val_epoch(epoch)
92
93 def train_epoch(self, epoch):
94 epoch_loss = AverageMeter()
95 epoch_start = time.time()
96 self.model.train() # Set model to training mode
97
98 for step, (input1, input2, target, label) in enumerate(self.dataloaders['train']):
99 input1 = input1.to(self.device)
100 input2 = input2.to(self.device)
101 target = target.to(self.device)
102
103 with torch.set_grad_enabled(True):
104 output1, output2 = self.model(input1, input2)
105 loss = self.criterion(output1, output2, target)
106 epoch_loss.update(loss.item(), input1.size(0))
107 self.optimizer.zero_grad()
108 loss.backward()
109 self.optimizer.step()
110 self.scheduler.step()
111
112 # visualize
113 if step == 0:
114 self.vis(epoch, 'train', input1, input2, target)
115 self.emb(output1, label, epoch, 'train')
116
117 logging.info('Epoch {} Train, Loss: {:.5f}, Cost {:.1f} sec'
118 .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
119
120 model_state_dic = self.model.state_dict()
121 save_path = os.path.join(self.save_dir, '{}_ckpt.tar'.format(self.epoch))
122 torch.save({
123 'epoch': self.epoch,
124 'optimizer_state_dict': self.optimizer.state_dict(),
125 'model_state_dict': model_state_dic
126 }, save_path)
127 self.save_list.append(save_path) # control the number of saved models
128
129 def val_epoch(self, epoch):
130 epoch_start = time.time()
131 self.model.eval() # Set model to evaluate mode
132 epoch_loss = AverageMeter()
133
134 for step, (input1, input2, target, label) in enumerate(self.dataloaders['val']):
135 input1 = input1.to(self.device)
136 input2 = input2.to(self.device)
137 target = target.to(self.device)
138 with torch.set_grad_enabled(False):
139 output1, output2 = self.model(input1, input2)
140 loss = self.criterion(output1, output2, target)
141 epoch_loss.update(loss.item(), input1.size(0))
142
143 # visualize
144 if step == 0:
145 self.vis(epoch, 'val', input1, input2, target)
146 self.emb(output1, label, epoch, 'val')
147
148 logging.info('Epoch {} Val, Loss: {:.5f}, Cost {:.1f} sec'
149 .format(self.epoch, epoch_loss.get_avg(), time.time()-epoch_start))
150
151 model_state_dic = self.model.state_dict()
152 if self.best_loss > epoch_loss.get_avg():
153 self.best_loss = epoch_loss.get_avg()
154 logging.info("save min loss {:.2f} model epoch {}".format(self.best_loss, self.epoch))
155 torch.save(model_state_dic, os.path.join(self.save_dir, 'best_model.pth')) | 12 - refactor: consider-using-from-import
13 - refactor: consider-using-from-import
20 - error: no-name-in-module
21 - error: no-name-in-module
23 - refactor: too-many-instance-attributes
33 - warning: logging-format-interpolation
35 - warning: broad-exception-raised
53 - refactor: simplifiable-if-expression
55 - refactor: simplifiable-if-expression
87 - warning: logging-not-lazy
27 - warning: attribute-defined-outside-init
28 - warning: attribute-defined-outside-init
31 - warning: attribute-defined-outside-init
32 - warning: attribute-defined-outside-init
43 - warning: attribute-defined-outside-init
46 - warning: attribute-defined-outside-init
51 - warning: attribute-defined-outside-init
59 - warning: attribute-defined-outside-init
62 - warning: attribute-defined-outside-init
65 - warning: attribute-defined-outside-init
67 - warning: attribute-defined-outside-init
69 - warning: attribute-defined-outside-init
77 - warning: attribute-defined-outside-init
70 - warning: attribute-defined-outside-init
153 - warning: attribute-defined-outside-init
81 - warning: attribute-defined-outside-init
88 - warning: attribute-defined-outside-init
2 - warning: unused-import
|
1 import os
2 import numpy as np
3 from PIL import Image
4
5 import torch
6
7 import matplotlib
8 matplotlib.use('Agg')
9 from matplotlib import pyplot as plt
10
11 ### Receives a torch tensor (batch) and draws it according to args.div_num
12
13 mean = np.array([0.485, 0.456, 0.406])
14 std = np.array([0.229, 0.224, 0.225])
15
16 def invnorm(img, N):
17 img = img[N,:,:,:].to('cpu').detach().numpy().copy()
18 img = img.transpose(1,2,0)
19 img = img*std+mean
20 return img
21
22 class ImageDisplayer:
23     def __init__(self, args, save_dir):
24         # N is the number of batch items to display
25         self.args = args
26         self.save_dir = save_dir
27 self.N = args.visual_num
28
29 @torch.no_grad()
30 def __call__(self, epoch, prefix, img1, img2, target):
31 imgs1 = []
32 imgs2 = []
33 targets = []
34 for n in range(self.N):
35 imgs1.append(invnorm(img1,n))
36 imgs2.append(invnorm(img2,n))
37 if target is not None:
38 targets.append(target[n].item())
39 else:
40 targets = None
41
42 self.display_images(epoch, prefix, imgs1, imgs2, targets)
43
44 def display_images(self, epoch, prefix, images1: [Image], images2: [Image], targets,
45 columns=2, width=8, height=8, label_wrap_length=50, label_font_size=8):
46
47 if not (images1 and images2):
48 print("No images to display.")
49 return
50
51 height = max(height, int(len(images1)/columns) * height)
52 plt.figure(figsize=(width, height))
53 i = 1
54 if targets is not None:
55 for (im1, im2, tar) in zip(images1, images2, targets):
56 im1 = Image.fromarray(np.uint8(im1*255))
57 im2 = Image.fromarray(np.uint8(im2*255))
58
59 plt.subplot(self.N, 2, i)
60 plt.title(tar, fontsize=20)
61 plt.imshow(im1)
62 i += 1
63 plt.subplot(self.N, 2, i)
64 plt.title(tar, fontsize=20)
65 plt.imshow(im2)
66 i += 1
67 else:
68 for (im1, im2) in zip(images1, images2):
69 im1 = Image.fromarray(np.uint8(im1*255))
70 im2 = Image.fromarray(np.uint8(im2*255))
71
72 plt.subplot(self.N, 2, i)
73 plt.imshow(im1)
74 i += 1
75 plt.subplot(self.N, 2, i)
76 plt.imshow(im2)
77 i += 1
78
79 plt.tight_layout()
80 output_img_name = 'imgs_{}_{}.png'.format(prefix, epoch)
81 plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
82 plt.close()
83
84 class EmbeddingDisplayer:
85     def __init__(self, args, save_dir):
86 
87         self.args = args
88         self.save_dir = save_dir
89 self.cifar10_classes = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
90 self.colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',
91 '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',
92 '#bcbd22', '#17becf']
93
94 @torch.no_grad()
95 def __call__(self, embeddings, targets, epoch, prefix, xlim=None, ylim=None):
96 embeddings = embeddings.to('cpu').detach().numpy().copy()
97 targets = targets.to('cpu').detach().numpy().copy()
98 plt.figure(figsize=(10,10))
99 for i in range(10):
100 inds = np.where(targets==i)[0]
101 plt.scatter(embeddings[inds,0], embeddings[inds,1], alpha=0.5, color=self.colors[i])
102 if xlim:
103 plt.xlim(xlim[0], xlim[1])
104 if ylim:
105 plt.ylim(ylim[0], ylim[1])
106 plt.legend(self.cifar10_classes)
107 output_img_name = 'emb_{}_{}.png'.format(prefix, epoch)
108 plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
109 plt.close()
110
111 class LossGraphPloter:
112     def __init__(self, save_dir):
113         self.save_dir = save_dir
114 self.epochs = []
115 self.losses = []
116
117 def __call__(self, epoch, loss, prefix):
118 self.epochs.append(epoch)
119 self.losses.append(loss)
120 output_img_name = '{}_loss.svg'.format(prefix)
121
122 plt.plot(self.epochs, self.losses)
123 plt.title('Loss')
124 plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
125 plt.close()
126
127 class AccLossGraphPloter:
128     def __init__(self, save_dir):
129         self.save_dir = save_dir
130 self.tr_accs = []
131 self.vl_accs = []
132 self.tr_losses = []
133 self.vl_losses = []
134 self.epochs = []
135
136 def __call__(self, epoch, tr_acc, vl_acc, tr_loss, vl_loss, prefix):
137 self.tr_accs.append(tr_acc)
138 self.vl_accs.append(vl_acc)
139 self.tr_losses.append(tr_loss)
140 self.vl_losses.append(vl_loss)
141
142 self.epochs.append(epoch)
143 output_img_name = '{}_eval.svg'.format(prefix)
144
145 fig, (axL, axR) = plt.subplots(ncols=2, figsize=(10,4))
146
147 axL.plot(self.epochs, self.tr_accs, label='train')
148 axL.plot(self.epochs, self.vl_accs, label='val')
149 axL.set_title('Top-1 Accuracy')
150 axL.set_xlabel('epoch')
151 axL.set_ylabel('acc [%]')
152 axL.legend(loc="lower right")
153
154 axR.plot(self.epochs, self.tr_losses, label='train')
155 axR.plot(self.epochs, self.vl_losses, label='val')
156 axR.set_title('Loss')
157 axR.set_xlabel('epoch')
158 axR.set_ylabel('loss')
159 axR.legend(loc="upper right")
160
161 plt.savefig(os.path.join(self.save_dir, 'images', output_img_name))
162 plt.close() | 30 - refactor: too-many-arguments
30 - refactor: too-many-positional-arguments
44 - refactor: too-many-arguments
44 - refactor: too-many-positional-arguments
44 - refactor: too-many-locals
45 - warning: unused-argument
45 - warning: unused-argument
95 - refactor: too-many-arguments
95 - refactor: too-many-positional-arguments
84 - refactor: too-few-public-methods
111 - refactor: too-few-public-methods
136 - refactor: too-many-arguments
136 - refactor: too-many-positional-arguments
145 - warning: unused-variable
127 - refactor: too-few-public-methods
|
1 import torch
2 import torch.nn as nn
3 import torch.nn.functional as F
4
5
6 def D(p, z, version='simplified'): # negative cosine similarity
7 if version == 'original':
8 z = z.detach() # stop gradient
9 p = F.normalize(p, dim=1) # l2-normalize
10 z = F.normalize(z, dim=1) # l2-normalize
11 return -(p*z).sum(dim=1).mean()
12
13 elif version == 'simplified':
14 return - F.cosine_similarity(p, z.detach(), dim=-1).mean()
15 else:
16 raise Exception
17
18 class CosineContrastiveLoss(nn.Module):
19 def __init__(self):
20 super().__init__()
21
22 def forward(self, z1, z2, p1, p2):
23 if z1.dim() != 2:
24 z1 = z1.squeeze()
25 if z2.dim() != 2:
26 z2 = z2.squeeze()
27
28 if p1 is not None or p2 is not None:
29 loss = D(p1, z2) / 2 + D(p2, z1) / 2
30 else:
31 loss = D(z1, z2)
32
33 return loss
| 2 - refactor: consider-using-from-import
7 - refactor: no-else-return
14 - error: not-callable
16 - warning: broad-exception-raised
1 - warning: unused-import
|
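A minimal usage sketch for the loss above, assuming `CosineContrastiveLoss` from the preceding snippet is in scope; the batch size and embedding width are arbitrary assumptions:

import torch

criterion = CosineContrastiveLoss()
z1, z2 = torch.randn(8, 128), torch.randn(8, 128)  # projector outputs, one per view
p1, p2 = torch.randn(8, 128), torch.randn(8, 128)  # predictor outputs, one per view

# SimSiam-style symmetric loss: D(p1, z2)/2 + D(p2, z1)/2 with z detached
loss = criterion(z1, z2, p1, p2)
print(loss.item())  # around 0 for random vectors, approaching -1 when aligned

# with both predictions set to None the forward falls back to plain D(z1, z2)
loss_plain = criterion(z1, z2, None, None)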
1 import torch
2 import torchvision
3 from PIL import Image
4 from matplotlib import pyplot as plt
5 import random
6
7 model = torchvision.models.__dict__['vgg19']()
8 print(model)
9
10 img = torch.rand(1,3,256,256)
11 out = model.features(img)
12 print(out.size())
13
14 import torchvision.transforms as trans
15
16 crop = trans.RandomCrop(224)
17 img = torch.rand(1,3,256,256)
18
19 out = crop(img)
20 print(out.size())
21
22 def divide_patches(img, row, col):
23 patche_size_w = int(img.size[0] / col)
24 patche_size_h = int(img.size[1] / row)
25
26 patches = []
27 for cnt_i, i in enumerate(range(0, img.size[1], patche_size_h)):
28 if cnt_i == row:
29 break
30 for cnt_j, j in enumerate(range(0, img.size[0], patche_size_w)):
31 if cnt_j == col:
32 break
33 box = (j, i, j+patche_size_w, i+patche_size_h)
34 patches.append(img.crop(box))
35
36 return patches
37
38 def display_images(
39 images: [Image],
40 row=3, col=3, width=10, height=4, max_images=15,
41 label_wrap_length=50, label_font_size=8):
42
43 if not images:
44 print("No images to display.")
45 return
46
47 if len(images) > max_images:
48 print(f"Showing {max_images} images of {len(images)}:")
49 images=images[0:max_images]
50
51 height = max(height, int(len(images)/col) * height)
52 plt.figure(figsize=(width, height))
53 for i, image in enumerate(images):
54
55 plt.subplot(row, col, i + 1)
56 plt.imshow(image)
57
58 plt.show()
59
60 image = Image.open("/mnt/hdd02/shibuya_scramble/image_000294.jpg").convert("RGB")
61
62 p = divide_patches(image, 2, 3)
63 print(len(p))
64
65 display_images(p, row=2, col=3)
66
67 def create_pos_pair(patches):
68 idx = random.randint(0, len(patches)-1)
69 img1 = patches[idx]
70 img2 = patches[idx]
71 label = 1
72 return img1, img2, label
73
74 def create_neg_pair(patches):
75     idx = random.sample(range(len(patches)), k=2)  # sample over all patch indices, including the last
76 img1 = patches[idx[0]]
77 img2 = patches[idx[1]]
78 label = 0
79 return img1, img2, label
80
81 def get_img(img):
82 patches = divide_patches(img, 3, 2)
83
84 if random.random() > 0.5:
85 img1, img2, label = create_pos_pair(patches)
86 else:
87 img1, img2, label = create_neg_pair(patches)
88
89 return img1, img2, label
90
91 res = []
92 for i in range(10):
93 img1, img2, label = get_img(image)
94 flag = False
95 if img1 == img2:
96 flag = True
97 res.append([flag, label])
98
99 print(res) | 22 - warning: redefined-outer-name
27 - warning: redefined-outer-name
38 - refactor: too-many-arguments
38 - refactor: too-many-positional-arguments
53 - warning: redefined-outer-name
53 - warning: redefined-outer-name
41 - warning: unused-argument
41 - warning: unused-argument
69 - warning: redefined-outer-name
70 - warning: redefined-outer-name
71 - warning: redefined-outer-name
76 - warning: redefined-outer-name
77 - warning: redefined-outer-name
78 - warning: redefined-outer-name
81 - warning: redefined-outer-name
85 - warning: redefined-outer-name
85 - warning: redefined-outer-name
85 - warning: redefined-outer-name
|
1 functionalities = {
2 'Login': 'Login page',
3 'Feedback': 'This feedback form',
4 'Todo': 'To do module',
5 'Projects': 'Anything related to projects',
6 'Code': 'Code editor',
7 'Forum': 'The forum',
8 'Profile': 'Your profile page',
9 }
| Clean Code: No Issues Detected
|
1 from flask_testing import TestCase
2
3 from models.shared import db
4 from models.model import User, Task, Project, Question, Response, Questionnaire
5 from turing import create_app
6
7 import unittest
8
9
10 class MyTest(TestCase):
11 def create_app(self):
12 config = {
13 'SQLALCHEMY_DATABASE_URI': 'sqlite:///test.db',
14 'TESTING': True,
15 'SECRET_KEY': 'secret',
16 'SQLALCHEMY_TRACK_MODIFICATIONS': True
17 }
18 return create_app(config)
19
20 def setUp(self):
21 db.create_all()
22
23 def tearDown(self):
24 db.session.remove()
25 db.drop_all()
26
27 def test_nothing(self):
28 assert True
29
30 def test_user(self):
31 user = User(email='em', name='us', password='pass')
32 db.session.add(user)
33 db.session.commit()
34 assert user in db.session
35
36 def test_project(self):
37 project = Project(name='n',description='desc')
38 db.session.add(project)
39 db.session.commit()
40 assert project in db.session
41
42 def test_task(self):
43 task = Task(name='n', description='desc')
44 db.session.add(task)
45 db.session.commit()
46 assert task in db.session
47
48 def test_usr_add_tsk2_prj(self):
49 user = User(email='em', name='us', password='pass')
50 db.session.add(user)
51 db.session.commit()
52
53 project = Project(name='n',description='desc')
54 db.session.add(project)
55 user.project.append(project)
56 db.session.commit()
57
58        project: Project = User.query.filter_by(email='em').first().project[0]
59
60 task = Task(name='n', description='desc')
61 db.session.add(task)
62
63 project.tasks.append(task)
64 db.session.commit()
65
66 assert user.project[0].tasks[0] == task
67
68
69 def test_sub_tasks(self):
70 task = Task(name='n', description='desc')
71 db.session.add(task)
72 assert task in db.session
73
74 s_task = Task(name='n', description='desc')
75 db.session.add(s_task)
76 assert task in db.session
77
78 db.session.commit()
79 task.tasks.append(s_task)
80
81
82 db.session.commit()
83 assert task.tasks[0] == s_task
84
85 def test_questionnaire(self):
86 questionnaire = Questionnaire(name='Questions')
87 db.session.add(questionnaire)
88
89 question0 = Question(text="ola ?", questionnaire=questionnaire)
90 question1 = Question(text="tudo bem ?", questionnaire=questionnaire)
91
92 questionnaire.questions.append(question0)
93 questionnaire.questions.append(question1)
94
95 for i in range(10):
96 question0.responses.append(Response(rating=5,question=question0))
97
98 for i in range(10):
99 question1.responses.append(Response(rating=5,question=question1))
100
101 rs = [x.rating for x in questionnaire.questions[0].responses]
102 assert sum(rs)/len(rs) == 5
103
104 rs = [x.rating for x in questionnaire.questions[1].responses]
105 assert sum(rs)/len(rs) == 5
106
107
108
109
110
111
112 if __name__ == '__main__':
113 unittest.main()
| 95 - warning: unused-variable
|
1 import requests
2 import json
3 import time
4 from sqlalchemy import create_engine
5 from sqlalchemy.orm import sessionmaker
6
7 from poolModels import pool, poolBase
8
9 engine = create_engine('sqlite:///poolData.db')
10 # Bind the engine to the metadata of the Base class so that the
11 # declaratives can be accessed through a DBSession instance
12 poolBase.metadata.bind = engine
13
14 DBSession = sessionmaker(bind=engine)
15 # A DBSession() instance establishes all conversations with the database
16 # and represents a "staging zone" for all the objects loaded into the
17 # database session object. Any change made against the objects in the
18 # session won't be persisted into the database until you call
19 # session.commit(). If you're not happy about the changes, you can
20 # revert all of them back to the last commit by calling
21 # session.rollback()
22 session = DBSession()
23
24 # Insert a Person in the person table
25 new_pool = pool(url='http://pool.conceal.network/api/live_stats', name='Official Pool', type="normal", poolurl='https://pool.conceal.network')
26 session.add(new_pool)
27 new_pool = pool(url='https://ccx.scecf.org:21001/live_stats', name='SCECF', type="normal", poolurl='https://ccx.scecf.org')
28 session.add(new_pool)
29 new_pool = pool(url='https://ccx.bluerockpools.net:8119/live_stats', name='Blue Rock Pool', type="normal", poolurl='https://ccx.bluerockpools.net')
30 session.add(new_pool)
31 new_pool = pool(url='http://minexmr24.ru:8124/live_stats', name='CCX Майнинг пул', type="normal", poolurl='http://ccx.minexmr24.ru')
32 session.add(new_pool)
33 new_pool = pool(url='https://ccx.go-mine.it/api/pool/stats', name='go mine it!', type="node", poolurl='https://ccx.go-mine.it')
34 session.add(new_pool)
35 new_pool = pool(url='https://api.ccx.heigh-ho.funkypenguin.co.nz/live_stats', name='Funky Penguin', type="normal", poolurl='https://ccx.heigh-ho.funkypenguin.co.nz')
36 session.add(new_pool)
37 new_pool = pool(url='https://conceal.herominers.com/api/stats', name='herominers', type="normal", poolurl='https://conceal.herominers.com')
38 session.add(new_pool)
39 new_pool = pool(url='https://ccx.thepiratemine.nl:2890/live_stats', name='ThePirateMine', type="normal", poolurl='https://ccx.thepiratemine.nl')
40 session.add(new_pool)
41 session.commit()
42
| 1 - warning: unused-import
2 - warning: unused-import
3 - warning: unused-import
|
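A small sketch of the staging-zone behaviour the comments above describe, reusing `session` and `pool` from the snippet; the URL and pool name here are hypothetical:

scratch = pool(url='http://example.invalid/live_stats', name='Scratch Pool',
               type='normal', poolurl='http://example.invalid')  # hypothetical row
session.add(scratch)
assert scratch in session   # staged in the session, but no INSERT issued yet
session.rollback()          # pending objects are expunged, nothing persisted
assert scratch not in session
session.commit()            # commits an empty transaction; the row never existed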
1 import random
2 import requests
3 import sys
4 import discord
5 import binascii
6 import json
7 from collections import deque
8 from jsonrpc_requests import Server
9
10 from models import Transaction, TipJar
11
12 config = json.load(open('config.json'))
13
14 class CCXServer(Server):
15 def dumps(self, data):
16 data['password'] = config['rpc_password']
17 return json.dumps(data)
18
19 rpc = CCXServer("http://{}:{}/json_rpc".format(config['rpc_host'], config['rpc_port']))
20 daemon = CCXServer("http://{}:{}/json_rpc".format(config['daemon_host'], config['daemon_port']))
21 CONFIRMED_TXS = []
22
23
24 def get_supply():
25 lastblock = daemon.getlastblockheader()
26 bo = daemon.f_block_json(hash=lastblock["block_header"]["hash"])
27 return float(bo["block"]["alreadyGeneratedCoins"])/1000000
28
29
30 def format_hash(hashrate):
31 i = 0
32 byteUnits = [" H", " KH", " MH", " GH", " TH", " PH"]
33 while (hashrate > 1000):
34 hashrate = hashrate / 1000
35 i = i+1
36 return "{0:,.2f} {1}".format(hashrate, byteUnits[i])
37
38
39 def gen_paymentid(address):
40 rng = random.Random(address+config['token'])
41 length = 32
42 chunk_size = 65535
43 chunks = []
44 while length >= chunk_size:
45 chunks.append(rng.getrandbits(chunk_size * 8).to_bytes(chunk_size, sys.byteorder))
46 length -= chunk_size
47 if length:
48 chunks.append(rng.getrandbits(length * 8).to_bytes(length, sys.byteorder))
49 result = b''.join(chunks)
50
51 return "".join(map(chr, binascii.hexlify(result)))
52
53
54 def get_deposits(session):
55 # get the current block height
56 # we only want to insert tx after 10 blocks from the tx
57 data = daemon.getlastblockheader()
58 height = int(data["block_header"]["height"])
59 print("INFO: Current blockchain height is {}".format(height))
60 # scan for deposits
61 print("scanning the blockchain for deposits")
62 print("getting list of payment id's in the tipjar database")
63 allPID = session.query(TipJar).all()
64 thePID = 0
65 totalPID = len(allPID)
66 for thePID in range(0,totalPID):
67 currentPID = allPID[thePID].paymentid
68 print("INFO: checking PID {}".format(currentPID))
69 params = {"payment_id": currentPID}
70 data = rpc.get_payments(params)
71        #go through each transaction and add them to the confirmed transactions array
72 for tx in data['payments']:
73 unlockWindow = int(tx["block_height"]) + 10
74 if tx['tx_hash'] in CONFIRMED_TXS: # if its already there, ignore it
75 continue
76 if unlockWindow < height: # its a confirmed and unlocked transaction
77 CONFIRMED_TXS.append({'transactionHash': tx['tx_hash'],'amount': tx['amount'], 'ready':True, 'pid':currentPID})
78 print("CONF: confirmed tx {} for {} ccx at block {}".format(tx['tx_hash'],tx['amount'],tx['block_height']))
79 else :
80 toUnlock = unlockWindow - height
81 print("UNCF: unconfirmed tx {} for {} ccx will unlock in {} blocks".format(tx['tx_hash'],tx['amount'],toUnlock))
82 for i,trs in enumerate(CONFIRMED_TXS): #now we go through the array of all transactions from our registered users
83 processed = session.query(Transaction).filter(Transaction.tx == trs['transactionHash']).first()
84 amount = 0
85 print("INFO: looking at tx: " + trs['transactionHash'])
86 if processed: # done already, lets ignore and remove it from the array
87 print("INFO: already processed: " + trs['transactionHash'])
88 CONFIRMED_TXS.pop(i)
89 continue
90 likestring = trs['pid']
91 balance = session.query(TipJar).filter(TipJar.paymentid.contains(likestring)).first() #get the balance from that PID
92 print("INFO: Balance for pid {} is: {}".format(likestring,balance))
93 if not balance:
94 print("user does not exist!")
95 continue
96 amount = trs['amount']
97 change = 0
98 if trs['pid']==balance.paymentid: # money entering tipjar, add to user balance
99 print("UPDATE: deposit of {} to PID {}".format(amount,balance.paymentid))
100 change += amount
101 try:
102 balance.amount += change
103 except:
104 print("no balance, setting balance to: {}".format(change))
105 balance.amount = change
106 print("new balance: {}".format(balance.amount))
107 session.commit()
108 if balance:
109 nt = Transaction(trs['transactionHash'], change, trs['pid'])
110 CONFIRMED_TXS.pop(i)
111 yield nt
112
113
114 def get_fee(amount):
115 return 100
116
117
118 def build_transfer(amount, transfers, balance):
119 print("SEND PID: {}".format(balance.paymentid[0:58] + balance.withdraw))
120 params = {
121 'fee': get_fee(amount),
122 'paymentId': balance.paymentid[0:58] + balance.withdraw,
123 'mixin': 3,
124 'destinations': transfers
125 }
126 return params
127
128
129 REACTION_AMP_CACHE = deque([], 500)
130
131
132 def reaction_tip_lookup(message):
133 for x in REACTION_AMP_CACHE:
134 if x['msg'] == message:
135 return x
136
137
138 def reaction_tip_register(message, user):
139 msg = reaction_tip_lookup(message)
140 if not msg:
141 msg = {'msg': message, 'tips': []}
142 REACTION_AMP_CACHE.append(msg)
143
144 msg['tips'].append(user)
145
146 return msg
147
148
149 def reaction_tipped_already(message, user):
150 msg = reaction_tip_lookup(message)
151 if msg:
152 return user in msg['tips']
| 12 - refactor: consider-using-with
12 - warning: unspecified-encoding
14 - refactor: too-few-public-methods
54 - refactor: too-many-locals
103 - warning: bare-except
54 - refactor: too-many-statements
114 - warning: unused-argument
132 - refactor: inconsistent-return-statements
149 - refactor: inconsistent-return-statements
2 - warning: unused-import
4 - warning: unused-import
|
1 #coding: utf-8
2 '''
3 HW2.py solves the eight queens puzzle;
4 you can change the size variable to resize the board.
5
6 changes:
7 1, draw pieces with the SOURCE compositing operator instead of XOR
8 2, long strings can be written as """...."""
9 3, str.format uses the {} placeholder, so use %d here instead
10
11 '''
12 import itertools,cairo,math
13
14 size=8 #the size of the board
15 cnt=0  #number of correct solutions
16
17 #check whether the next queen placement conflicts diagonally with the current state
18 def conflict(state, nextX):
19 nextY = len(state)
20 for i in range(nextY):
21 if abs(nextX-state[i])== nextY-i:
22 return True
23 return False
24
25 def drawqueen(solution):
26 surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,size*100,size*100)
27 ctx=cairo.Context(surface)
28
29 #draw the board
30 for i in range(0,size):
31 for j in range(0,size):
32 if (i+j)%2==1:
33 ctx.rectangle(i*100,j*100,100,100)
34 ctx.fill()
35
36 #draw the pieces
37 ctx.set_line_width(10)
38 ctx.set_source_rgb(1, 1, 0.2)
39 #change1
40 ctx.set_operator(cairo.OPERATOR_SOURCE)
41 for i in range(size):
42 ctx.arc(solution[i]*100+50,i*100+50,35,0,2*math.pi)
43 ctx.stroke()
44
45 filename="chess"+str(cnt)+".png"
46 surface.write_to_png(filename)
47
48
49
50 #iterating over permutations already rules out queens sharing a column
51 for solve in itertools.permutations(range(size)):
52 flag=0
53 for i in range(1,size):
54 if conflict(solve[0:i],solve[i]):
55 break
56 else:
57 flag+=1
58 if flag==size-1:
59 cnt+=1
60 drawqueen(solve)
61
62
63
64 #make a tex document to generate the pdf
65 f=open("mkreport.tex",'w')
66
67 #change2
68 f.write("""\documentclass[twocolumn]{article}
69 \usepackage{graphicx}
70 \\title{A Report About Eight Queens Puzzle}\n
71 \\begin{document}
72 \maketitle\n
73 """)
74
75 #change3
76 for i in range(1,cnt+1):
77 f.write("""
78 \\begin{figure}[t]
79 \centering
80 \includegraphics[width=0.3\\textwidth]{chess%d.png}
81 \caption{Solution %d of the Eight Queens Puzzle}
82 \end{figure}\n""" % (i,i) )
83
84 if i%6==0:
85 f.write('\n\clearpage\n')
86
87 f.write('\n\end{document}')
88 f.close()
| 73 - error: syntax-error
|
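The `73 - error: syntax-error` label is triggered by the LaTeX preamble string: in Python 3, `\u` inside a regular string literal begins a `\uXXXX` unicode escape, so `\usepackage` fails to parse. A minimal sketch of the usual fix is a raw triple-quoted string, which also removes the need for the doubled backslashes and literal `\n` escapes:

f.write(r"""\documentclass[twocolumn]{article}
\usepackage{graphicx}
\title{A Report About Eight Queens Puzzle}

\begin{document}
\maketitle

""")
# with the r prefix every backslash is kept literally, so \usepackage,
# \title and \begin all reach the .tex file unchanged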
1 #coding=utf-8
2 '''
3 use re to
4 count the number of words;
5 case-insensitive, so alice == ALICE
6
7 changes:
8 use these functions:
9 1, with open(...) as f:
10 2, content = f.read()
11 3, allwords = finditer( ... content ... )
12    finditer returns an iterator, findall returns a list
13 4, all_lower_words = imap(str.lower, allwords)
14 5, count = Counter(all_lower_words)
15    much better than building an empty dict by hand
16 '''
17 import re,math
18 import itertools
19 import collections
20 from operator import itemgetter
21
22
23 with open('alice.txt',"rt") as f:
24 content=f.read()
25
26 allwords=re.findall(r'[a-zA-Z]+',content)
27 # findall returns a list of strings here; finditer would yield match objects instead
28 all_lower_words = itertools.imap(str.lower, allwords)
29 count = collections.Counter(all_lower_words)
30
31
32 #dict sort method 1: swap key and value
33 #cntSorted=dict((v,k) for k,v in cnt.iteritems())
34 #cntSorted.sort()
35 #important and not to be neglected
36 #print list(cntSorted.iteritems())[-10:]
37
38 #dict sort method 2: use lambda
39 #cntSorted=sorted(count.iteritems(),key=lambda d:d[1],reverse=True)
40 #print cntSorted[0:10]
41
42 #dict sort method 3: use operator
43 cntSorted=sorted(count.iteritems(),key=itemgetter(1),reverse=True)
44 print cntSorted[0:10]
45
46
47
48 #draw a pic
49 import matplotlib.pyplot as plt
50
51 #plt.bar(range(20), [cntSorted[i][1] for i in range(20)])
52 #plt.xticks(range(20), [cntSorted[i][0] for i in range(20)],rotation=30)
53
54 length=len(cntSorted)
55 plt.plot(range(length), [math.log(cntSorted[i][1],10) for i in range(length)])
56 plt.title(u"WordFrequencyAnalysis-zipf")
57 plt.show() | 44 - error: syntax-error
|
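The `44 - error: syntax-error` label marks this as Python 2 code: `print cntSorted[0:10]` is a print statement, and `itertools.imap` no longer exists in Python 3 (the built-in `map` is already lazy). A minimal Python 3 sketch of the counting core, under those assumptions:

import re
from collections import Counter

with open('alice.txt', 'rt', encoding='utf-8') as f:
    content = f.read()

allwords = re.findall(r'[a-zA-Z]+', content)
count = Counter(map(str.lower, allwords))  # built-in map replaces itertools.imap
print(count.most_common(10))               # replaces the manual itemgetter sort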
1 # Generated by Django 3.0.3 on 2020-06-04 18:56
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('dimensoes', '0013_remove_dimensaomodel_profundidade_media'),
10 ]
11
12 operations = [
13 migrations.AddField(
14 model_name='dimensaomodel',
15 name='profundidade_media',
16 field=models.CharField(default=0, max_length=25),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.3 on 2020-06-18 18:20
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('dimensoes', '0018_auto_20200611_1905'),
10 ]
11
12 operations = [
13 migrations.AlterField(
14 model_name='clientemodel',
15 name='numero_casa',
16 field=models.CharField(blank=True, max_length=10),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.3 on 2020-06-04 20:10
2
3 from django.db import migrations
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('dimensoes', '0014_dimensaomodel_profundidade_media'),
10 ]
11
12 operations = [
13 migrations.RemoveField(
14 model_name='dimensaomodel',
15 name='construcao',
16 ),
17 migrations.RemoveField(
18 model_name='dimensaomodel',
19 name='contra_piso',
20 ),
21 migrations.RemoveField(
22 model_name='dimensaomodel',
23 name='escavacao',
24 ),
25 migrations.RemoveField(
26 model_name='dimensaomodel',
27 name='instalacao_vinil',
28 ),
29 migrations.RemoveField(
30 model_name='dimensaomodel',
31 name='isomanta_m2',
32 ),
33 migrations.RemoveField(
34 model_name='dimensaomodel',
35 name='perfil_fixo_m',
36 ),
37 migrations.RemoveField(
38 model_name='dimensaomodel',
39 name='preco',
40 ),
41 migrations.RemoveField(
42 model_name='dimensaomodel',
43 name='produto',
44 ),
45 migrations.RemoveField(
46 model_name='dimensaomodel',
47 name='remocao_terra',
48 ),
49 migrations.RemoveField(
50 model_name='dimensaomodel',
51 name='vinil_m2',
52 ),
53 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.3 on 2020-03-17 17:11
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('dimensoes', '0004_auto_20200317_0933'),
10 ]
11
12 operations = [
13 migrations.AddField(
14 model_name='dimensaomodel',
15 name='data',
16 field=models.DateTimeField(blank=True, null=True),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|
1 # Generated by Django 3.0.3 on 2020-05-16 18:18
2
3 from django.db import migrations, models
4
5
6 class Migration(migrations.Migration):
7
8 dependencies = [
9 ('dimensoes', '0010_auto_20200511_1521'),
10 ]
11
12 operations = [
13 migrations.AlterField(
14 model_name='clientemodel',
15 name='telefone',
16 field=models.IntegerField(blank=True, max_length=15),
17 ),
18 ]
| 6 - refactor: too-few-public-methods
|