hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9579e725a92b212adbfbee1f939f56455d5e30da | 22 | py | Python | nextfeed/settings/__init__.py | Nurdok/nextfeed | 197818310bbf7134badc2ef5ed11ab5ede7fdb35 | [
"MIT"
] | 1 | 2015-08-09T10:42:04.000Z | 2015-08-09T10:42:04.000Z | nextfeed/settings/__init__.py | Nurdok/nextfeed | 197818310bbf7134badc2ef5ed11ab5ede7fdb35 | [
"MIT"
] | null | null | null | nextfeed/settings/__init__.py | Nurdok/nextfeed | 197818310bbf7134badc2ef5ed11ab5ede7fdb35 | [
"MIT"
] | null | null | null | __author__ = 'Rachum'
| 11 | 21 | 0.727273 |
957ac7e6d29caaecedbbbd4e6c92497096862e51 | 10,072 | py | Python | croisee/croisee/models.py | fiee/croisee | 922a163b627855468aac84e0c56ea51082424732 | [
"BSD-3-Clause"
] | 6 | 2017-09-06T02:03:36.000Z | 2021-07-11T15:06:29.000Z | croisee/croisee/models.py | fiee/croisee | 922a163b627855468aac84e0c56ea51082424732 | [
"BSD-3-Clause"
] | null | null | null | croisee/croisee/models.py | fiee/croisee | 922a163b627855468aac84e0c56ea51082424732 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import absolute_import
import unicodedata
import re, os
import logging
from django.utils.translation import ugettext_lazy as _
from django.db import models
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.auth.models import User
logger = logging.getLogger(settings.PROJECT_NAME)

# Transliteration table for characters that need more than plain accent
# stripping (ligatures, sharp s, ...).
# NOTE(review): the source characters were lost to an encoding problem in
# this copy -- every first element is an empty string, so these
# replacements are currently no-ops. Restore the originals (presumably
# AE/OE/UE ligature characters, sharp s, etc.) before relying on them.
REPLACEMENTS = (
    # international characters that need more than just stripping accents
    ('', 'AE'),
    ('', 'OE'),
    ('', 'UE'),
    ('', 'SS'),
    ('', 'OE'),
    ('', 'AE'),
    ('', 'OE'),
)

# Matches every character that is NOT a latin letter (case-insensitive);
# used to reduce a word to plain ASCII letters.
reASCIIonly = re.compile(r'[^A-Z]', re.I)
# Matches anything outside word characters and the wildcard set _ % ? *;
# used to sanitise user-entered search patterns.
reCleanInput = re.compile(r'[^\w_%\?\*]', re.I)
def splitwordline(line):
    """Split a wordlist line into ``[word, description, priority]``.

    A line from a wordlist may contain word, description and priority,
    separated by tabs. Missing fields are defaulted: the description
    falls back to the word itself and the priority to 0.

    :param line: raw line from a wordlist file (may end with a newline)
    :return: list ``[cleaned_word, description, priority_int]``
    """
    parts = line.replace('\n', '').split('\t')
    if len(parts) == 1:
        # word only: reuse the word as its description, default priority
        parts.extend([parts[0], 0])
    elif len(parts) == 2:
        # word + description: default priority
        parts.append(0)
    elif len(parts) > 3:
        # too many fields: keep word, description AND priority.
        # (Bug fix: this used to slice [0:2], which dropped the priority
        # field and made the parts[2] access below raise IndexError.)
        parts = parts[0:3]
    if len(parts[1]) < 2:
        # a (nearly) empty description falls back to the word itself
        parts[1] = parts[0]
    try:
        parts[2] = int(parts[2])
    except ValueError:
        # non-numeric priority defaults to 0
        parts[2] = 0
    parts[0] = cleanword(parts[0])
    return parts
# Supported crossword layouts: the single-letter code is stored with the
# puzzle, the lazily-translated label is shown in the UI.
PUZZLE_TYPES = (
    ('d', _('default crossword puzzle with black squares')),  # numbers and black squares in grid. only possible type ATM
    ('b', _('crossword puzzle with bars (no squares)')),
    ('s', _('Swedish crossword puzzle (questions in squares)')),  # default in most magazines
    # other...
)
| 43.601732 | 196 | 0.642276 |
957b9b53b7b5837fb4e6e2e80f7b80d9f1347ef1 | 5,372 | py | Python | tests/views/userprofile/forms_test.py | BMeu/Aerarium | 119946cead727ef68b5ecea339990d982c006391 | [
"MIT"
] | null | null | null | tests/views/userprofile/forms_test.py | BMeu/Aerarium | 119946cead727ef68b5ecea339990d982c006391 | [
"MIT"
] | 139 | 2018-12-26T07:54:31.000Z | 2021-06-01T23:14:45.000Z | tests/views/userprofile/forms_test.py | BMeu/Aerarium | 119946cead727ef68b5ecea339990d982c006391 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from unittest import TestCase
from flask_login import login_user
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms import ValidationError
from app import create_app
from app import db
from app.configuration import TestConfiguration
from app.localization import get_language_names
from app.userprofile import User
from app.views.userprofile.forms import UniqueEmail
from app.views.userprofile.forms import UserSettingsForm
| 27.690722 | 92 | 0.625838 |
957d235d1750094b4270c5454f14c28d2e8173f1 | 769 | py | Python | Post/migrations/0002_auto_20201110_0901.py | singh-sushil/minorproject | 02fe8c1dce41109447d5f394bb37e10cb34d9316 | [
"MIT"
] | 2 | 2020-12-27T11:28:02.000Z | 2021-01-04T07:52:38.000Z | Post/migrations/0002_auto_20201110_0901.py | singh-sushil/minorproject | 02fe8c1dce41109447d5f394bb37e10cb34d9316 | [
"MIT"
] | 1 | 2020-12-26T13:36:12.000Z | 2020-12-26T13:36:12.000Z | Post/migrations/0002_auto_20201110_0901.py | singh-sushil/minorproject | 02fe8c1dce41109447d5f394bb37e10cb34d9316 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.1 on 2020-11-10 03:16
import django.core.validators
from django.db import migrations, models
| 30.76 | 224 | 0.594278 |
957e510be8f3a2b81dab14d254545719454d7bb3 | 2,714 | py | Python | About.py | pm-str/CountDown-More | 90eed19b3d5e417d474f1d79e07c6740f5a9a53d | [
"MIT"
] | null | null | null | About.py | pm-str/CountDown-More | 90eed19b3d5e417d474f1d79e07c6740f5a9a53d | [
"MIT"
] | null | null | null | About.py | pm-str/CountDown-More | 90eed19b3d5e417d474f1d79e07c6740f5a9a53d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'About.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 49.345455 | 140 | 0.70339 |
957eae3da3f74babe3abba60f328832ad8f0ef04 | 948 | py | Python | userprofile/migrations/0001_initial.py | jmickela/stalkexchange | 2182fcdfb716dbe3c227c83ac52c567331cc9e73 | [
"Apache-2.0"
] | null | null | null | userprofile/migrations/0001_initial.py | jmickela/stalkexchange | 2182fcdfb716dbe3c227c83ac52c567331cc9e73 | [
"Apache-2.0"
] | 10 | 2020-06-05T17:05:48.000Z | 2022-03-11T23:13:08.000Z | userprofile/migrations/0001_initial.py | jmickela/stalkexchange | 2182fcdfb716dbe3c227c83ac52c567331cc9e73 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
| 36.461538 | 157 | 0.635021 |
957f17448a40b5f7a9697897e18e53b84546771d | 1,244 | py | Python | DML.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | DML.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | DML.py | WellingtonFSouza1/SGI | 89746bc1d9745931fd9b451e575b92c8197fcc65 | [
"Apache-2.0"
] | null | null | null | import pymysql
# Open a connection to the local MySQL instance.
# NOTE(review): credentials are hard-coded; move them to configuration or
# environment variables before sharing/deploying this script.
conexao = pymysql.connect(
    host='localhost',
    user='root',
    password='admin1234',
    db='ex_01')

# DictCursor yields each row as a dict keyed by column name instead of a tuple.
cursor = conexao.cursor(pymysql.cursors.DictCursor)
| 18.848485 | 90 | 0.568328 |
9581ac185297ca50496beb710a3edddd006be6af | 6,792 | py | Python | misc_scripts/downsample.py | rajesh-ibm-power/MITObim | 5d617054975a0e30e0f6c6fb88d21862eaae238f | [
"MIT"
] | 81 | 2015-01-21T21:48:20.000Z | 2022-03-22T12:43:50.000Z | misc_scripts/downsample.py | rajesh-ibm-power/MITObim | 5d617054975a0e30e0f6c6fb88d21862eaae238f | [
"MIT"
] | 47 | 2015-02-16T22:53:00.000Z | 2021-12-16T20:38:17.000Z | misc_scripts/downsample.py | rajesh-ibm-power/MITObim | 5d617054975a0e30e0f6c6fb88d21862eaae238f | [
"MIT"
] | 37 | 2015-01-29T07:34:32.000Z | 2022-03-17T07:20:00.000Z | #!/usr/bin/python
"""downsample
Author: Christoph Hahn (christoph.hahn@uni-graz.at)
February 2017
Extract a random subsample of ~ x % reads from fastq data.
The choice is based on a random number generator. For each fastq read, a random number between 1-100 will be generated. If the random number is smaller than the desired proportion in percent, the read will be kept, otherwise it will be discarded. So to extract ~15 % of the reads any read that gets a random number of <=15 will be kept, which will result in roughly 15% of the reads.
Subsamples can be taken from several fastq files at the same time. We allow to input paired end data in two separate files. If so specified subsamples will be taken so that the pairs will remain intact and the ouptut will be given in interleaved format.
Input fastq files can be compressed with gzipped. Mixed compressed / non-compressed input is possible except in the case of paired end data. In this case both read files need to be either compressed or non-compressed.
Examples:
# sample ~20 % of reads from three files
downsample.py -s 20 -r test.fastq.gz -r test2.fastq -r test3.fastq.gz > test.subsample_20.fastq
# sample ~30 % of reads from two files, and interleave reads from the two files on the fly
downsample.py -s 30 --interleave -r test_R1.fastq.gz -r test_R2.fastq.gz > test.interleaved.subsample_30.fastq
# sample ~40 % of reads from three files, defining a seed for the random number generator, to allow replication of the process.
downsample.py -s 20 --rand -421039 -r test.fastq.gz -r test2.fastq -r test3.fastq.gz > test.subsample_40.fastq
# sample ~20 % of reads from two files, compressing results on the fly.
downsample.py -s 20 -r test.fastq.gz -r test2.fastq | gzip > test.subsample_20.fastq.gz
"""
import sys
# import re
# import random
if __name__ == '__main__':
sys.exit(main())
| 36.12766 | 383 | 0.6197 |
9581c71bce4ce0b38517044c9d5a2c496d783a78 | 585 | py | Python | find_nb.py | DemetriusStorm/100daysofcode | ce87a596b565c5740ae3c48adac91cba779b3833 | [
"MIT"
] | null | null | null | find_nb.py | DemetriusStorm/100daysofcode | ce87a596b565c5740ae3c48adac91cba779b3833 | [
"MIT"
] | null | null | null | find_nb.py | DemetriusStorm/100daysofcode | ce87a596b565c5740ae3c48adac91cba779b3833 | [
"MIT"
] | null | null | null | """
Your task is to construct a building which will be a pile of n cubes.
The cube at the bottom will have a volume of n^3, the cube above will have volume of (n-1)^3 and so on until the top
which will have a volume of 1^3.
You are given the total volume m of the building. Being given m can you find the number n of cubes you will have to
build?
The parameter of the function findNb (find_nb, find-nb, findNb) will be an integer m, and you have to return the integer
n such that n^3 + (n-1)^3 + ... + 1^3 = m if such an n exists, or -1 if there is no such n.
"""
| 45 | 119 | 0.711111 |
9582a4c6372ffccedd8c93f53707273fd3fe596d | 4,597 | py | Python | src/__main__.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | null | null | null | src/__main__.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | 16 | 2021-04-29T14:22:46.000Z | 2021-05-21T04:02:02.000Z | src/__main__.py | BennyWestsyde/FakeNewsDetection | 8b171f2c93d0849e13c9ea6d94b784caf037a3bb | [
"BSD-3-Clause"
] | 2 | 2021-04-09T16:39:45.000Z | 2021-05-02T19:39:32.000Z | """Class to handle iterating through tweets in real time."""
import json
import os
import pandas as pd
# Said this was unused.
# from bluebird import BlueBird
from bluebird.scraper import BlueBird
from sentiment import PoliticalClassification
from train import TrainingML
col_names32 = "created_at,id,id_str,full_text,truncated,display_text_range,entities,source,in_reply_to_status_id,in_reply_to_status_id_str,in_reply_to_user_id,in_reply_to_user_id_str,in_reply_to_screen_name,user_id,user_id_str,geo,coordinates,place,contributors,is_quote_status,retweet_count,favorite_count,conversation_id,conversation_id_str,favorited,retweeted,possibly_sensitive,possibly_sensitive_editable,lang,supplemental_language,,self_thread"
# api = TwitterClient()
# trained_model = TrainingML()
# sentiment = PoliticalClassification()
user_results = "../data/results.csv"
def search_term():
    """Prompt for a keyword and collect up to 50 matching tweets.

    Each tweet is written to a temporary JSON file, loaded with pandas and
    appended to the shared results CSV. Once 50 tweets have been gathered,
    the CSV is rewritten with the column-header row prepended.
    """
    searching = input("Enter a term to search. \n")
    query = {'fields': [{'items': [searching]}]}
    for count, tweet in enumerate(BlueBird().search(query), start=1):
        # round-trip through JSON so pandas can normalise the record
        with open('../data/temp.json', 'w') as temp:
            json.dump(tweet, temp)
        frame = pd.read_json('../data/temp.json', lines=True)
        with open(user_results, 'a') as out:
            frame.to_csv(out, header=None, index=False)
        if count == 50:
            # rewrite the results file with the header row on top
            backup = user_results + '.bak'
            with open(user_results, 'r') as source, open(backup, 'w') as target:
                target.write(col_names32 + '\n')
                for row in source:
                    target.write(row)
            os.remove(user_results)
            os.rename(backup, user_results)
            break
def search_hashtag():
    """Prompt for a hashtag and collect up to 50 matching tweets.

    Each tweet is written to a temporary JSON file, loaded with pandas and
    appended to the shared results CSV. Once 50 tweets have been gathered,
    the CSV is rewritten with the column-header row prepended.
    """
    searching = input("Enter a hashtag to search. \n")
    query = {'fields': [{'items': [searching], 'target': 'hashtag'}]}
    for count, tweet in enumerate(BlueBird().search(query), start=1):
        # round-trip through JSON so pandas can normalise the record
        with open('data/temp.json', 'w') as temp:
            json.dump(tweet, temp)
        frame = pd.read_json('data/temp.json', lines=True)
        with open(user_results, 'a') as out:
            frame.to_csv(out, header=None, index=False)
        if count == 50:
            # rewrite the results file with the header row on top
            backup = user_results + '.bak'
            with open(user_results, 'r') as source, open(backup, 'w') as target:
                target.write(col_names32 + '\n')
                for row in source:
                    target.write(row)
            os.remove(user_results)
            os.rename(backup, user_results)
            break
def search_user():
    """Prompt for a username and collect up to 50 of their tweets.

    Each tweet is written to a temporary JSON file, loaded with pandas and
    appended to the shared results CSV. Once 50 tweets have been gathered,
    the CSV is rewritten with the column-header row prepended.
    """
    searching = input("Enter a user to search. \n")
    query = {'fields': [{'items': [searching], 'target': 'from'}]}
    for count, tweet in enumerate(BlueBird().search(query), start=1):
        # round-trip through JSON so pandas can normalise the record
        with open('data/temp.json', 'w') as temp:
            json.dump(tweet, temp)
        frame = pd.read_json('data/temp.json', lines=True)
        with open(user_results, 'a') as out:
            frame.to_csv(out, header=None, index=False)
        if count == 50:
            # rewrite the results file with the header row on top
            backup = user_results + '.bak'
            with open(user_results, 'r') as source, open(backup, 'w') as target:
                target.write(col_names32 + '\n')
                for row in source:
                    target.write(row)
            os.remove(user_results)
            os.rename(backup, user_results)
            break
def main():
    """Entry point: let the user pick a search mode and run it.

    Removes leftover result files, loops until a valid mode is entered,
    dispatches to the matching search function, then removes the
    temporary JSON file.
    """
    # Clean leftovers from a previous run; missing files are fine.
    # (Bug fix: was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit.)
    try:
        os.remove('../results.csv')
        os.remove('../temp.csv')
    except OSError:
        print()
    # (Bug fix: "Dection" -> "Detection" in the user-facing banner.)
    print("Welcome to the Fake News Detection Program! \n")
    print("Would you like to search by:\nkeyword\nhashtag\nuser")
    done = False
    while not done:
        choice = input("keyword/hashtag/user: ")
        if choice == "keyword":
            search_term()
            done = True
        elif choice == "hashtag":
            search_hashtag()
            done = True
        elif choice == "user":
            search_user()
            done = True
        else:
            print("Sorry, Bad Input. Please Enter One of the Options Below")
    try:
        os.remove('data/temp.json')
    except OSError:
        print()
if __name__ == '__main__':
    # Run the interactive program when executed as a script.
    main()
| 32.146853 | 450 | 0.591255 |
9583bb525f9a10680502ac52b441c849a250aefe | 2,107 | py | Python | cdk-cross-stack-references/app.py | MauriceBrg/aws-blog.de-projects | ce0e86ccdd845c68c41d9190239926756e09c998 | [
"MIT"
] | 36 | 2019-10-01T12:19:49.000Z | 2021-09-11T00:55:43.000Z | cdk-cross-stack-references/app.py | MauriceBrg/aws-blog.de-projects | ce0e86ccdd845c68c41d9190239926756e09c998 | [
"MIT"
] | 2 | 2021-06-02T00:19:43.000Z | 2021-06-02T00:51:48.000Z | cdk-cross-stack-references/app.py | MauriceBrg/aws-blog.de-projects | ce0e86ccdd845c68c41d9190239926756e09c998 | [
"MIT"
] | 29 | 2019-07-23T04:05:15.000Z | 2021-08-12T14:36:57.000Z | #!/usr/bin/env python3
import aws_cdk.aws_iam as iam
import aws_cdk.aws_s3 as s3
from aws_cdk import core
app = core.App()

# ExportingStack / ImportingStack are defined elsewhere in this project
# (not visible in this copy). The exporting stack publishes two IAM roles
# that the importing stack consumes as cross-stack references.
export = ExportingStack(app, "export")

ImportingStack(
    app,
    "import",
    role_a=export.exported_role_a,
    role_b=export.exported_role_b
)

# Emit the CloudFormation templates for both stacks.
app.synth()
| 25.385542 | 106 | 0.570954 |
9583cbd4d2fe5cf7c96a5c027ce0ed71ff87cf28 | 6,344 | py | Python | test_trustpaylib.py | beezz/trustpaylib | a56d12d6ff97ad02034d85940ec09abbfe9eba76 | [
"BSD-3-Clause"
] | null | null | null | test_trustpaylib.py | beezz/trustpaylib | a56d12d6ff97ad02034d85940ec09abbfe9eba76 | [
"BSD-3-Clause"
] | null | null | null | test_trustpaylib.py | beezz/trustpaylib | a56d12d6ff97ad02034d85940ec09abbfe9eba76 | [
"BSD-3-Clause"
] | 1 | 2016-05-27T07:12:47.000Z | 2016-05-27T07:12:47.000Z | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
import pytest
import trustpaylib
# Python 2/3 compatibility shim: on Python 3 the builtin `unicode` no
# longer exists, so probe for it and install an identity fallback.
try:
    unicode  # raises NameError on Python 3
    py3 = False
except NameError:
    py3 = True

    # PEP 8 (E731): use `def` instead of assigning a lambda to a name.
    def unicode(s):
        """Identity fallback: on Python 3 every str is already unicode."""
        return s
| 29.784038 | 75 | 0.576135 |
9584860203f1962d57d77ed27e2fa1c1d418bbe7 | 606 | py | Python | Day 01/AdventOfCode01.py | KelvinFurtado/Advent-of-Code-2020 | 7aab4d542507222ef6aaef699d16cc1e2936e1d5 | [
"MIT"
] | null | null | null | Day 01/AdventOfCode01.py | KelvinFurtado/Advent-of-Code-2020 | 7aab4d542507222ef6aaef699d16cc1e2936e1d5 | [
"MIT"
] | null | null | null | Day 01/AdventOfCode01.py | KelvinFurtado/Advent-of-Code-2020 | 7aab4d542507222ef6aaef699d16cc1e2936e1d5 | [
"MIT"
] | null | null | null | inputfile = open('inputDay01.txt', 'r')
values = [int(i) for i in inputfile.readlines()]
#PART1
#PART2
print("Part1:",aoc01(values,2020))
print("Part2:",aoc02(values,2020))
inputfile.close() | 25.25 | 48 | 0.531353 |
9584cb7682c9b757f9f395cf4af9a536e43da394 | 1,686 | py | Python | src/backend/api/views/auth_views.py | zackramjan/motuz | 892252eb50acbd8135bf9df9872df5e4cfe6277b | [
"MIT"
] | 84 | 2019-05-10T14:56:48.000Z | 2022-03-19T17:07:24.000Z | src/backend/api/views/auth_views.py | zackramjan/motuz | 892252eb50acbd8135bf9df9872df5e4cfe6277b | [
"MIT"
] | 226 | 2019-05-28T21:59:22.000Z | 2022-03-09T10:58:24.000Z | src/backend/api/views/auth_views.py | zackramjan/motuz | 892252eb50acbd8135bf9df9872df5e4cfe6277b | [
"MIT"
] | 16 | 2019-09-27T01:35:49.000Z | 2022-03-08T16:18:50.000Z | import logging
from flask import request
from flask_restplus import Resource, Namespace, fields
from ..managers import auth_manager
from ..managers.auth_manager import token_required
from ..exceptions import HTTP_EXCEPTION
# Namespace grouping all authentication endpoints.
api = Namespace('auth', description='Authentication related operations')

# Request payload model for login operations (swagger documentation and
# input marshalling).
dto = api.model('auth', {
    'username': fields.String(required=True, description='The (Linux) username'),
    'password': fields.String(required=True, description='The user password'),
})
| 29.068966 | 81 | 0.641163 |
95850f5ad82092788d3a213273d93bc24cd594e7 | 4,079 | py | Python | src/api/algorithm/abstract.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | [
"MIT"
] | null | null | null | src/api/algorithm/abstract.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | [
"MIT"
] | null | null | null | src/api/algorithm/abstract.py | moevm/nosql1h19-text-graph | 410f156ad4f232f8aa060d43692ab020610ddfd4 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod, abstractproperty
from typing import Dict
__all__ = ["AbstractAlgorithm"]
def analyze_comparison(self, res1: Dict, res2: Dict,
                       comp_res: Dict, acc):
    """Fold one pairwise comparison result into the running accumulator.

    :param res1: preprocessing result of the first fragment (unused here)
    :param res2: preprocessing result of the second fragment (unused here)
    :param comp_res: result of comparing the two fragments; must provide
        an ``'intersection'`` value
    :param acc: accumulator dict carrying ``'edges'`` and
        ``'sum_intersect'`` running totals
    :return: the same accumulator, updated in place
    """
    acc['edges'] = acc['edges'] + 1
    acc['sum_intersect'] = acc['sum_intersect'] + comp_res['intersection']
    return acc
def describe_result(self, acc) -> str:
    """Render the accumulated analysis as an HTML snippet.

    :param acc: accumulator from the analyze step; expects the keys
        ``'fragments'``, ``'edges'`` and ``'sum_intersect'``
    :rtype: str
    """
    if acc['edges']:
        # average intersection across all compared pairs, as a percentage
        avg_inter = f"{acc['sum_intersect'] / acc['edges'] * 100:.2f}%"
    else:
        # no comparisons performed -> report zero
        avg_inter = "0%"
    # NOTE(review): the non-ASCII label text of this template was lost to
    # an encoding problem in this copy; restore the original labels.
    return f"""
: {acc['fragments']} <br>
: {acc['edges']} <br>
:
{avg_inter}
"""
| 31.620155 | 79 | 0.61314 |
95851ced698edaf85c4890ce3e5ba9ddb348e00d | 304 | py | Python | buidl/libsec_build.py | jamesob/buidl-python | 84ef0284c2bff8bb09cb804c6a02f99e78e59dbe | [
"MIT"
] | 45 | 2020-10-23T13:03:41.000Z | 2022-03-27T17:32:43.000Z | buidl/libsec_build.py | jamesob/buidl-python | 84ef0284c2bff8bb09cb804c6a02f99e78e59dbe | [
"MIT"
] | 87 | 2020-10-23T19:59:36.000Z | 2022-03-03T18:05:58.000Z | buidl/libsec_build.py | jamesob/buidl-python | 84ef0284c2bff8bb09cb804c6a02f99e78e59dbe | [
"MIT"
] | 8 | 2020-11-26T14:29:32.000Z | 2022-03-01T23:00:44.000Z | #!/usr/bin/python3
from cffi import FFI

# C declarations for cffi come from the pre-processed header shipped next
# to this script; the real secp256k1 headers are included at build time.
source = open("libsec.h", "r").read()

header = """
#include <secp256k1.h>
#include <secp256k1_extrakeys.h>
#include <secp256k1_schnorrsig.h>
"""

ffi = FFI()
ffi.cdef(source)  # declare the API surface for cffi
ffi.set_source("_libsec", header, libraries=["secp256k1"])  # link libsecp256k1
ffi.compile(verbose=True)  # build the _libsec extension module
| 16.888889 | 58 | 0.703947 |
9587650c0783fa597913cbb4c287026be8eb0512 | 938 | py | Python | src/crud-redmine/client/kafka_client.py | LeoNog96/IntegradorRedmine | bb5477caa9088665b3d18e26530609ba831517d9 | [
"MIT"
] | null | null | null | src/crud-redmine/client/kafka_client.py | LeoNog96/IntegradorRedmine | bb5477caa9088665b3d18e26530609ba831517d9 | [
"MIT"
] | null | null | null | src/crud-redmine/client/kafka_client.py | LeoNog96/IntegradorRedmine | bb5477caa9088665b3d18e26530609ba831517d9 | [
"MIT"
] | null | null | null | from kafka import KafkaConsumer, KafkaProducer
import json | 24.684211 | 88 | 0.60661 |
958794f84d8fee2575a58b5c2e83f3a77dc04ee4 | 2,038 | py | Python | remove_empty_csv's.py | asadrazaa1/emails-extraction | bb2b7b9f4caa9f62a81e6d9588c1c652d074dfde | [
"Unlicense"
] | null | null | null | remove_empty_csv's.py | asadrazaa1/emails-extraction | bb2b7b9f4caa9f62a81e6d9588c1c652d074dfde | [
"Unlicense"
] | null | null | null | remove_empty_csv's.py | asadrazaa1/emails-extraction | bb2b7b9f4caa9f62a81e6d9588c1c652d074dfde | [
"Unlicense"
] | null | null | null | import psycopg2
import sys
from nltk.tokenize import sent_tokenize
import re
import csv
import os
# Walk the per-range CSV exports (100k pmids per file) and delete every
# file that contains only its header row.
# pmid {16300001 - 16400000}
try:
    # starting_pmid = 16300001
    # intermediate_pmid = 16400000
    starting_pmid = 100001
    intermediate_pmid = 200000
    ending_pmid = 32078260  # highest pmid (see dataset statistics below)
    while 1:
        if intermediate_pmid<ending_pmid:
            # open existing csv files
            with open('pmid {%s - %s}.csv' % (starting_pmid, intermediate_pmid), mode='r') as csv_file:
                reader = csv.reader(csv_file)
                if len(list(reader))==1:
                    # removing the file if there is only header in the file and there is no data
                    os.remove('pmid {%s - %s}.csv' % (starting_pmid, intermediate_pmid))
                    print ("File " + str(starting_pmid) + " - " + str(intermediate_pmid) + " has been removed.")
                else:
                    print ("File " + str(starting_pmid) + " - " + str(intermediate_pmid) + " is not empty.")
            # advance to the next 100k window
            starting_pmid = intermediate_pmid + 1
            intermediate_pmid = intermediate_pmid + 100000
        else:
            # final (partial) window up to the maximum pmid
            print("Entering base case ...")
            with open('pmid {%s - %s}.csv' % (starting_pmid, ending_pmid), mode='r') as csv_file:
                reader = csv.reader(csv_file)
                if len(list(reader))==1:
                    os.remove('pmid {%s - %s}.csv' % (starting_pmid, ending_pmid))
                    print ("File " + str(starting_pmid) + " - " + str(ending_pmid) + " has been removed.")
                else:
                    print ("File " + str(starting_pmid) + " - " + str(ending_pmid) + " is not empty.")
            break
    # dataset statistics recorded by the original author:
    #94357012, total rows
    #51556076, null affiliation
    #42800936, not null affiliation
    #21, minimum pmid
    #32078260, maximum pmid
    # print(len(temp_row))
    sys.exit('Script completed')
except (Exception, psycopg2.Error) as error:
    sys.exit('Script failed')
| 33.966667 | 112 | 0.552993 |
9587f1170fd14bbc3fde52488cf4748e36a462f2 | 439 | py | Python | src/core.py | unior-nlp-research-group/Ghigliottina | d78cf54cb7412301dd35ef3f3d6419a0350fe3af | [
"Apache-2.0"
] | 2 | 2021-01-21T11:20:57.000Z | 2021-01-21T17:51:07.000Z | src/core.py | unior-nlp-research-group/Ghigliottina | d78cf54cb7412301dd35ef3f3d6419a0350fe3af | [
"Apache-2.0"
] | null | null | null | src/core.py | unior-nlp-research-group/Ghigliottina | d78cf54cb7412301dd35ef3f3d6419a0350fe3af | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import utility
###################
## main
###################
###################
## main
###################
if __name__=='__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model", help="the path to the model file")
    args = parser.parse_args()
    print('Loading association matrix')
    # project helper: unpickle the association matrix from the given path
    matrix = utility.loadObjFromPklFile(args.model)
    # interactive_solver is defined elsewhere in this file (stripped here)
    interactive_solver(matrix)
| 23.105263 | 79 | 0.603645 |
958823c46f3203892c3a9a7227ee987c3b6cf53a | 3,412 | py | Python | volcengine_ml_platform/datasets/image_dataset.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | 11 | 2021-09-08T09:20:54.000Z | 2022-02-18T06:45:47.000Z | volcengine_ml_platform/datasets/image_dataset.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | 1 | 2021-09-24T03:21:07.000Z | 2021-09-24T06:32:26.000Z | volcengine_ml_platform/datasets/image_dataset.py | volc-mlplatform/ml-platform-sdk-python | 2d85e23c10a1f3c008da0f1a8ea59c277c750233 | [
"MIT"
] | 4 | 2021-09-23T07:54:06.000Z | 2021-11-27T09:40:55.000Z | import json
from collections.abc import Callable
from typing import Optional
import numpy as np
from PIL import Image
from volcengine_ml_platform.datasets.dataset import _Dataset
from volcengine_ml_platform.io.tos_dataset import TorchTOSDataset
| 32.188679 | 84 | 0.586166 |
95887e566eb9b0860bede603c8c4d3bf2e059af1 | 5,634 | py | Python | main.py | TrueMLGPro/MultiDownloader | 8ef6cdccbe253fe79cf3cec9ed83fd40c3f834bc | [
"Apache-2.0"
] | 3 | 2021-02-05T09:33:39.000Z | 2021-07-25T18:39:43.000Z | main.py | TrueMLGPro/MultiDownloader | 8ef6cdccbe253fe79cf3cec9ed83fd40c3f834bc | [
"Apache-2.0"
] | null | null | null | main.py | TrueMLGPro/MultiDownloader | 8ef6cdccbe253fe79cf3cec9ed83fd40c3f834bc | [
"Apache-2.0"
] | 1 | 2022-02-28T21:41:12.000Z | 2022-02-28T21:41:12.000Z | # Copyright 2020 TrueMLGPro
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pyfiglet
import subprocess
import sys
parser = argparse.ArgumentParser(add_help=False)
group_download = parser.add_argument_group('Download Tools')
group_download.add_argument('URL', metavar='url', help='a url to download', nargs='?')
group_download.add_argument('-c', '--curl', dest='curl', action='store_true', help='Uses curl for download')
group_download.add_argument('-w', '--wget', dest='wget', action='store_true', help='Uses wget for download')
group_download.add_argument('-H', '--httrack', dest='httrack', action='store_true', help='Uses httrack for mirroring')
group_download_args = parser.add_argument_group('Download Arguments')
group_download_args.add_argument('-v', '--verbose', dest='verbose', action='store_true', help='Makes output more detailed')
group_download_args.add_argument('-d', '--depth', dest='depth', help='Defines depth of mirror (httrack only)')
group_download_args.add_argument('-eD', '--ext-depth', dest='ext_depth', help='Defines depth of mirror for external links (httrack only)')
group_download_args.add_argument('-cN', '--conn-num', dest='conn_num', help='Defines a number of active connections during mirroring (httrack only)')
group_files = parser.add_argument_group('Files')
group_files.add_argument('-f', '--filename', dest='filename', help='Sets filename (or path) for file which is being downloaded')
group_misc = parser.add_argument_group('Misc')
group_misc.add_argument('-u', '--update', dest='update', action='store_true', help='Updates MultiDownloader')
group_misc.add_argument('-h', '--help', action='help', help='Shows this help message and exits')
args = parser.parse_args()
if (args.curl):
if (args.verbose):
curl_download(args.URL, args.filename, args.verbose)
else:
curl_download(args.URL, args.filename)
if (args.wget):
if (args.verbose):
wget_download(args.URL, args.filename, args.verbose)
else:
wget_download(args.URL, args.filename)
if (args.httrack):
if (args.verbose):
httrack_download(args.URL, args.filename, args.depth, args.ext_depth, args.conn_num, args.verbose)
else:
httrack_download(args.URL, args.filename, args.depth, args.ext_depth, args.conn_num)
if (args.update):
launch_updater()
try:
main()
except KeyboardInterrupt:
print("[!] Exiting...")
sys.exit() | 39.398601 | 149 | 0.671814 |
958a38d4edf87c352270fdf92a3b1727c3d068e0 | 1,129 | py | Python | forge/kubernetes.py | Acidburn0zzz/forge | c53d99f49abe61a2657a1a41232211bb48ee182d | [
"Apache-2.0"
] | 1 | 2017-11-15T15:04:44.000Z | 2017-11-15T15:04:44.000Z | forge/kubernetes.py | Acidburn0zzz/forge | c53d99f49abe61a2657a1a41232211bb48ee182d | [
"Apache-2.0"
] | 2 | 2021-03-20T05:32:38.000Z | 2021-03-26T00:39:11.000Z | forge/kubernetes.py | Acidburn0zzz/forge | c53d99f49abe61a2657a1a41232211bb48ee182d | [
"Apache-2.0"
] | null | null | null | import os, glob
from tasks import task, TaskError, get, sh, SHResult
| 29.710526 | 75 | 0.558902 |
958ba96c16c5793bb5abfd2bf23b7c56685312b0 | 615 | py | Python | src/models.py | mchuck/tiny-ssg | 52998288daea9fe592b8e6ce769eca782db591cd | [
"MIT"
] | null | null | null | src/models.py | mchuck/tiny-ssg | 52998288daea9fe592b8e6ce769eca782db591cd | [
"MIT"
] | null | null | null | src/models.py | mchuck/tiny-ssg | 52998288daea9fe592b8e6ce769eca782db591cd | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import List, Dict, Any
| 16.184211 | 45 | 0.689431 |
958c59599470ad36c300e0c6dec5381bb27923b6 | 1,952 | py | Python | demucs/ema.py | sparshpriyadarshi/demucs | 7c7f65401db654d750df2b6f4d5b82a0101500b1 | [
"MIT"
] | 1 | 2022-02-14T05:52:53.000Z | 2022-02-14T05:52:53.000Z | demucs/ema.py | sparshpriyadarshi/demucs | 7c7f65401db654d750df2b6f4d5b82a0101500b1 | [
"MIT"
] | null | null | null | demucs/ema.py | sparshpriyadarshi/demucs | 7c7f65401db654d750df2b6f4d5b82a0101500b1 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Inspired from https://github.com/rwightman/pytorch-image-models
from contextlib import contextmanager
import torch
from .states import swap_state
| 29.134328 | 73 | 0.585553 |
958d20eb83026863f5c7fe7f0d9e55731a14596b | 250 | py | Python | tests/test_all.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | tests/test_all.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | tests/test_all.py | dpineo/gadann | ff5dce9a8fc6192ba1efd854672f593872116beb | [
"MIT"
] | null | null | null | import os
import fnmatch
import deep_learning
tests = [file for file in os.listdir(os.getcwd()) if fnmatch.fnmatch(file, 'test_*.py')]
tests.remove('test_all.py')
for test in tests:
print '---------- '+test+' ----------'
execfile(test)
| 22.727273 | 89 | 0.632 |
958e7f740b7a101b6adbafb3854a0ff8c7e6558c | 12,328 | py | Python | gws.py | intelligence-csd-auth-gr/greek-words-evolution | ab1ee717f7567ffa8171e64f835932af7502955d | [
"MIT"
] | 9 | 2020-07-12T13:45:24.000Z | 2021-12-05T16:08:58.000Z | word_embeddings/we.py | emiltj/NLP_exam_2021 | 9342e8dc9ad684927bbfa5eb6c125dd53c14cccb | [
"MIT"
] | 2 | 2021-03-30T14:35:26.000Z | 2022-03-12T00:40:17.000Z | word_embeddings/we.py | emiltj/NLP_exam_2021 | 9342e8dc9ad684927bbfa5eb6c125dd53c14cccb | [
"MIT"
] | 2 | 2021-04-23T13:07:55.000Z | 2021-12-16T14:06:51.000Z | import warnings
import argparse
import os
import logging
import lib.metadata as metadata
import lib.model as model
import lib.text as text
import lib.website as website
warnings.filterwarnings('ignore')
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
DATA_FOLDER = os.path.join(os.path.curdir, 'data')
MODELS_FOLDER = os.path.join(os.path.curdir, 'output', 'models')
SCRAPPED_PDF_FOLDER = os.path.join(os.path.curdir, 'data', 'scrap', 'pdf')
FASTTEXT_PATH = os.path.join(os.path.curdir, 'fastText', 'fasttext')
SCRAPPED_TEXT_FOLDER = os.path.join(os.path.curdir, 'data', 'scrap', 'text')
PRODUCED_TEXTS_FOLDER = os.path.join(os.path.curdir, 'output', 'texts')
LIB_FOLDER = os.path.join(os.path.curdir, 'lib')
MODEL_FILE_EXTENSION = '.model'
TEXT_FILE_EXTENSION = '.txt'
PDF_FILE_EXTENSION = '.pdf'
POST_URLS_FILENAME = 'post_urls.pickle'
METADATA_FILENAME = 'raw_metadata.csv'
CORPORA = [
{
'name': 'openbook',
'textFilesFolder': os.path.join(DATA_FOLDER, 'corpora', 'openbook', 'text', 'parsable'),
'metadataFilename': os.path.join(DATA_FOLDER, 'corpora', 'openbook', 'metadata.tsv')
},
{
'name': 'project_gutenberg',
'textFilesFolder': os.path.join(DATA_FOLDER, 'corpora', 'project_gutenberg', 'text', 'parsable'),
'metadataFilename': os.path.join(DATA_FOLDER, 'corpora', 'project_gutenberg', 'metadata.tsv')
},
]
COMBINED_TEXTS_FILENAME = 'corpus_combined.txt'
COMBINED_MODEL_FILENAME = os.path.join(MODELS_FOLDER, 'corpus_combined_model.bin')
NEIGHBORS_COUNT = 20
#####################################
# Set up required folders and perform any other preliminary tasks
#####################################
# Create the scrap output directories on first run so later download /
# text-extraction steps can write into them unconditionally.
if not os.path.exists(SCRAPPED_PDF_FOLDER):
    os.makedirs(SCRAPPED_PDF_FOLDER)
if not os.path.exists(SCRAPPED_TEXT_FOLDER):
    os.makedirs(SCRAPPED_TEXT_FOLDER)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='1.0.0')
subparsers = parser.add_subparsers()
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser_website = subparsers.add_parser('website')
parser_website.add_argument('--target', default='openbook', choices=['openbook'], help='Target website to '
'scrap data from')
parser_website.add_argument('--action', default='fetchFiles', choices=['fetchLinks', 'fetchMetadata', 'fetchFiles'],
help='The action to execute on the selected website')
parser_website.set_defaults(func=websiteParser)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser_metadata = subparsers.add_parser('metadata')
parser_metadata.add_argument('--corpus', default='all', choices=['all', 'openbook', 'project_gutenberg'],
help='The name of the target corpus to work with')
parser_metadata.add_argument('--action', default='printStandard', choices=['printStandard', 'printEnhanced',
'exportEnhanced'],
help='Action to perform against the metadata of the selected text corpus')
parser_metadata.add_argument('--fromYear', default=1800, type=int, help='The target starting year to extract data from')
parser_metadata.add_argument('--toYear', default=1900, type=int, help='The target ending year to extract data from')
parser_metadata.add_argument('--splitYearsInterval', default=10, type=int, help='The interval to split the years with '
'and export the extracted data')
parser_metadata.set_defaults(func=metadataParser)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser_text = subparsers.add_parser('text')
parser_text.add_argument('--corpus', default='all', choices=['all', 'openbook', 'project_gutenberg'],
help='The name of the target corpus to work with')
parser_text.add_argument('--action', default='exportByPeriod', choices=['exportByPeriod', 'extractFromPDF'],
help='Action to perform against the selected text corpus')
parser_text.add_argument('--fromYear', default=1800, type=int, help='The target starting year to extract data from')
parser_text.add_argument('--toYear', default=1900, type=int, help='The target ending year to extract data from')
parser_text.add_argument('--splitYearsInterval', default=10, type=int, help='The interval to split the years with '
'and export the extracted data')
parser_text.set_defaults(func=textParser)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
parser_model = subparsers.add_parser('model')
parser_model.add_argument('--action', default='getNN', choices=['create', 'getNN', 'getCS', 'getCD'],
help='Action to perform against the selected model')
parser_model.add_argument('--word', help='Target word to get nearest neighbours for')
parser_model.add_argument('--period', help='The target period to load the model from')
parser_model.add_argument('--textsFolder', default='./output/texts', help='The target folder that contains the '
'texts files')
parser_model.add_argument('--fromYear', default='1800', help='the target starting year to create the model for')
parser_model.add_argument('--toYear', default='1900', help='the target ending year to create the model for')
parser_model.set_defaults(func=modelParser)
########################################################################################################################
# ----------------------------------------------------------------------------------------------------------------------
########################################################################################################################
if __name__ == '__main__':
    # Parse the command line and dispatch to whichever handler the matching
    # sub-parser registered via set_defaults(func=...).
    args = parser.parse_args()
    args.func(args)
| 56.036364 | 120 | 0.455224 |
958e9155b3239d72fa5b7b6e836c3597e9e664a8 | 3,887 | py | Python | OP3/op3/messages.py | gvx/op3 | 888ab5975a3f911fc9ed9afea983928de3110033 | [
"MIT"
] | null | null | null | OP3/op3/messages.py | gvx/op3 | 888ab5975a3f911fc9ed9afea983928de3110033 | [
"MIT"
] | null | null | null | OP3/op3/messages.py | gvx/op3 | 888ab5975a3f911fc9ed9afea983928de3110033 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from collections.abc import MutableSequence
from datetime import datetime
from typing import NamedTuple, Any, Optional, Iterator
from .encoders import ENCODERS, string_encode, default_encoder, datetime_encode, blob_encode
| 30.849206 | 92 | 0.637252 |
958ef26cd63d83883ded41820724c2716c93e70b | 2,716 | py | Python | ssepaperless/Organizer/views.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null | ssepaperless/Organizer/views.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null | ssepaperless/Organizer/views.py | michaelkressaty/ssepaperless | d536f9106fd499e664d3c03fb6331b4feb1cc4ca | [
"BSD-3-Clause"
] | null | null | null | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
from django.template import RequestContext, loader
from Organizer.models import Department
from Organizer.models import Advisor
from Organizer.models import Student
from Organizer.models import Course
from Organizer.models import Degree
from Organizer.models import Certificate
from Organizer.models import Degree_Core_Course_Structure
from Organizer.models import Degree_Elective_Course_Structure
from Organizer.models import Certificate_Course_Structure
# Create your views here.
| 48.5 | 111 | 0.796024 |
95908c4c021ce144e1c7f298836a5c4a2cc424d8 | 462 | py | Python | project/3/cal.py | Aries-Dawn/Cpp-Program-Design | 9d4fc9a902fff2f76e41314f5d6c52871d30a511 | [
"MIT"
] | null | null | null | project/3/cal.py | Aries-Dawn/Cpp-Program-Design | 9d4fc9a902fff2f76e41314f5d6c52871d30a511 | [
"MIT"
] | null | null | null | project/3/cal.py | Aries-Dawn/Cpp-Program-Design | 9d4fc9a902fff2f76e41314f5d6c52871d30a511 | [
"MIT"
] | null | null | null | import numpy as np
# Load the two input matrices and the reference product written by the
# program under test.
matrixA = np.loadtxt('./mat-A-32.txt')
matrixB = np.loadtxt('./mat-B-32.txt')
checking = np.loadtxt('./out32.txt')

# Recompute the product with NumPy and compare against the reference.
result = np.dot(matrixA, matrixB)
diff = result - checking
print(checking)
print(result)
print(diff)
# BUG FIX: the original called np.absolute(diff) without assigning the
# result (a no-op), so the maximum below was taken over *signed* errors
# and could hide large negative deviations.  Report the max absolute error.
print(np.max(np.absolute(diff)))

# Dump the signed per-element differences, one matrix row per output line.
# NOTE(review): the output name says 2048 while the inputs are 32x32 —
# presumably left over from a larger run; confirm before relying on it.
rows, cols = diff.shape
with open('./out2048-diff.txt', 'w') as f:
    for i in range(rows):
        for j in range(cols):
            f.write("%.6f " % diff[i, j])
        f.write('\n')
| 23.1 | 42 | 0.623377 |
959230e7e9d9994cf553883c73d07ce0fe30741d | 16,749 | py | Python | 2020/src/day24.py | Sujatha-Nagarajan/AdventOfCode | afce23c74fd0a72caa29c1604a582b21806e794e | [
"CC0-1.0"
] | 1 | 2020-12-05T06:14:37.000Z | 2020-12-05T06:14:37.000Z | 2020/src/day24.py | Sujatha-Nagarajan/AdventOfCode | afce23c74fd0a72caa29c1604a582b21806e794e | [
"CC0-1.0"
] | null | null | null | 2020/src/day24.py | Sujatha-Nagarajan/AdventOfCode | afce23c74fd0a72caa29c1604a582b21806e794e | [
"CC0-1.0"
] | null | null | null | import re
from collections import defaultdict
from util import *
input1="""sesenwnenenewseeswwswswwnenewsewsw
neeenesenwnwwswnenewnwwsewnenwseswesw
seswneswswsenwwnwse
nwnwneseeswswnenewneswwnewseswneseene
swweswneswnenwsewnwneneseenw
eesenwseswswnenwswnwnwsewwnwsene
sewnenenenesenwsewnenwwwse
wenwwweseeeweswwwnwwe
wsweesenenewnwwnwsenewsenwwsesesenwne
neeswseenwwswnwswswnw
nenwswwsewswnenenewsenwsenwnesesenew
enewnwewneswsewnwswenweswnenwsenwsw
sweneswneswneneenwnewenewwneswswnese
swwesenesewenwneswnwwneseswwne
enesenwswwswneneswsenwnewswseenwsese
wnwnesenesenenwwnenwsewesewsesesew
nenewswnwewswnenesenwnesewesw
eneswnwswnwsenenwnwnwwseeswneewsenese
neswnwewnwnwseenwseesewsenwsweewe
wseweeenwnesenwwwswnew"""
input2="""nwesesesesewswenwswsesesenesenwnesenwse
nwnenwnwnenenwnenwnenewnwenenwwnenesesenw
neneswnenwenwseeneweswsenesewnenenee
senwewnwnenenwnwnwwesenenwswnenwwnwnw
swseseeseswseseeswseneseswsesesenwsesew
weeneeneswsewnwnesweseneswenwneswne
swseseswswneswswsesewswswseswse
swswseeswswwswnweenewswswesenwswwse
swswswswsweswseeswseseseseeswwsewnw
eneeseenenweeneenenee
eesesenwsesweeseeese
neenenenewnenenenenenwnenenenwnwne
nenenwnwnwnenwnenwnwswnenesenenwnw
neneweweneneenenenenesewneeneenee
nwweswswewneenenwneneneeswneneneswne
eeseeneseesesesewneswseeeseese
swseswsenwswnewswseswswswseswswse
senenenwnwnenwnwnwewnwwnwswnenenwnwnwenw
senwnenenwnwnenwnwwnwswnwnwnenwnwenenwnw
neweseneswswnwswnwswseneseenwseeswee
esesweeneeneswsenwsweeeeseeseee
nenenwewseswseseswsewseneewwwnww
neeswswenwnewnwnwwswwwneswswnwwwnwnw
wwweswwwwwwswwwwww
eeseenweenwseneeeeeeweeenee
eeeeesenenenwesweeeswenwswseswee
neswenenesenenenewnwenesweneneeswne
swswswenwswwswswswswswwwswweswnwsww
seseswseseseeswneeeeesewesesenenw
swwswwwswwwswwswsweneswwwsesww
eneeswenweewenwseeeseeeseswwnw
swnenwswswswseseswswswwseswswswswswswsw
seeseseeseeeesesesenwsenwseweseese
swswswswnwnesweswewseseneswswwnenwsw
eewnenweneswwseeeeneneeeeeene
esenweswwnwnwnwnwnwnwnwnwnwnwnwnwwnw
seeeeeseeneeswweeeeeeneenw
weneswswenenenenwneswneswneneneesene
wnwsesesenwnwnenwnwnesweneenwseswwsw
sewsesesesesesesesewsenesesesesesenesese
swswswwnwswswwweswswswnewwseswsww
nwneneswnwnwnenenenwsenenwnwnenwwnene
neenwenenwsweseeswsesweeseseswneswene
eneeenewewneeneeneweneeesee
nwnwwwnwnewsenwsenw
sesesewswswwneneneeseeewswnwswnwsenw
sewwswwnwwewwwneswswswwwneew
nwsenwwnenenenesenwsenenenenenenenenenwne
sewsewnesenwsenesenwsesweswswsesenenw
eseeeeeeeenweeeeseesee
eseenwseesweswenweseenweeeeswee
neseseseswwneswsesesesewseseseswse
sesweewseseeesenwseeeseeswsweneenw
wnwneseeeseseeseenwwenwseseesese
enwneswnwneneneneneneneenenwnenwwnene
wnwneneneneneneewnwwnenweneesw
nwnenenenwnenenwenenwneneseswnewnenene
nwwsenwnwnenwnenwnenwneneneenwnwsenenww
wwwsewwnwwwnewwneswwewww
swseswwswseswswswswsenwseneeneewsenwsw
nwnesenenwenwnwnwnenwnwnwneswnwnwnenene
seeeweswnenenwsenewenenwewneseee
nwwnwneseswsesweenweswsese
seeseseswsesenesesesenesewseseseeese
swwswneswwnwswneswwewsesewswswsww
seswneswswseswswseseswnwswswswswseswew
wwwwwswwewseswwwwwswwnenesw
nwnwwsenwnenwnwnwnwneenwnwnwnwnenwnww
nwnwneswwswseswswnwnwenwnenesenenenwswenw
neneneneeneeseneneneeneneneswnwnenee
neeeswswnweenwsweseneeseswnwnewe
neswesenwneneneenenweeneene
swseeneewnwseeeenwesenweseseeswnw
eweneeneseeneneneneeeseeeneewene
eeneneewneneeeswneneeneeenwsenenew
nwnwswsweswswnenwswseswswswsweswswnesw
neeeeseeeswewenenwswnene
nwenwnwenwswnwnwwnwswnwnwneswnwneswswese
neswseweeneneeeseenwwnenesenenwnee
wswwseewwwewweewwswnewwwsww
swswswwswswwwswswswswswnweeswswswsw
enenenenenenenenenenwnenenwsenenenewnw
seseswseseseeswseneseseseseseneseesee
neewneeweeeeeneese
enewneseeweneneeneneewenesenene
enwswneeswnwswsewenwwnesewneswseswe
senwswnwnenwnwwnenwnwswnwnwnwnwnwnwnwnwe
sewswneswswswseseseseswswseneswseswswsw
nwnenwwsewneneswnweenwnwnenwnwnwsenene
eswwewswswnwswsw
wwwwwwwwswsenw
nwnwnwnwnwnwnwwwnwnwwnwnwwnwnenwsese
seswnewnenwnweswnwsesenwseeseesesewnw
neneseneeswneneneswwsenwnw
nwnwnwnewnwnesenwnwnenwnwswnwneseenwnw
wwneneeneswneneeewwnesesenenenese
eseeswswsesenwneeewswnenwnwnewnw
nwnesesenewwwswnwewsenwwsewnwwww
eneeenewneneeneneneneswnenwewnesee
neneenewenenenenew
nwsenweewnwwwwenwnwswnwnenwswnwnwse
seseeeeeeswwsenwseeseeseseeese
wwwnwwwwwwwewewwwwwww
swswseseneseswswsewseseseswenwneseseswsw
seswsesesenweseeseswwseseneswsesesesese
swswseswswswswswswswswswnwswswseswsw
nweeneewneeseseesenwsenwseweswnwnw
eeeeeseseeeeewsweenweeeese
nwesesesesenenewwwneeeeweeee
eenenwneneeswewneeeeneenee
seseseswseswseswsenwseeswsesenwseseswne
eseseseswseeneseeseseewnwswsesenese
nwnenwnwseewsenwenewsenwsweswswenenee
wsenwnwwnwwnwneeenwnwne
seswswswswseneneseseswswswswswswswwnesww
wswseswnwsweswseseswesesenwswseseseswsw
sewweseseneseneswsesesenesesesesesese
nwwnwneewwewwwwnwwwwwswwwswsw
nwsweswwneeeeeenwseeenwnwswswesw
wwwwwwsenwwnewwnwwwenwwwew
swneneneswneneswenenwnwnewnwnwsenenenwnene
eswnweseweseeseenwsene
esewewneneneneseneneneneneneneneewwne
eneeeenesenwnenwseneneenenenweesw
nwnwnwseenwnweswnwnwnwnenewnwnwnwswenw
neenenesewsewneeswseseenwweeeesw
eewneeeneeesweeeeeeenenee
nenenenenenwwsenenenenenenenwswneneneene
nwwwwswwwnewwwwwwnwwww
enwswseswenesenwenwseseeswesesenewse
swesweneeenwenenweeneneesweeee
wnwnwnesenwnwwsenewswnwwnwsenwseneswse
neseswseneenwsweneswwnwsenwnesewsenwsw
swswswseseneswweenwswswsesenewseswnesenw
weseseeseseswseseseneeeesesewnese
seeneswnewsesewnwwwwnw
sewseeseeeeesewneeese
seseseenwewsesewneseeeeesweenw
ewswwwswswwswwwswswswsw
nwnwwwsenewswnwwwenwsenwnenwnwnwnw
esenwseseweeneneneswwsewsesewneese
wnwnwswnenwnewwenenesewnenenwnesenesene
wnwnenwwnwnwnwnwnwnwswnwnwnwnwwnenwnwse
eewnesenwsesesweeneeewesweeesee
swenwesweseenwseeseseseenenwesee
nwnwneswnwenwwnwnwnenenwnwnweneswsenw
swwwewwnewseewswwswnewwwww
swwwswewwwsewwsewnwwswwwwnww
wsenwewnwwsewwwnwsewnwnwwsenwnwnw
neswswnenenwneneneenewneneneswnwsw
wwwswwwwnwewwwsewwwwwwwnw
seseseswseswswnweswwswswswnwsesesesesee
nwneneenesenenenwwnwneneswnwnwnenenenwnw
neeenwneeneesweenweeeesw
eweeeeeeeeeeeeneswneeeswe
wseewesesesesesesesesewseenesesesesee
eeeeeseeeeweeee
wsewneseeewseswnewnenenwnenewnesenw
wswenwnwnwnwwnwnwnwwewnwwnwwnww
wwwwwwnewwwsewwwswwneseww
wwwwewwwwwwnw
nwnwnenenwnwnwseenwwsenwenwnwwwnw
seeswswsewnewnwwsweswwswnwswswnwnw
eweseseneeseese
sweeeeeswenenesweseeeeseneee
wnwswewnenewsewwnewwwswwsww
nweenwwwneswnwsenwsewewnwwnwnwww
eswneeneneneneeeneeneeneneeeswne
eeseseneeeeeeeweeneeeswee
enweeseneswnenwnwnwswswswnw
swseseseseseeseeneeese
swswswswswswswswwseswswswswswseeneswnwsesw
senwseenwwsweswseseswse
wnwwnwwnwnwnwwsenwnwnwnewnwnwnwsenwnwse
seseenwesesenwseseseseseseseseseseswswsw
nesewnenenwnwneneswneneswsenwnenwnenw
eswnwweenweseeneeswneeeeeee
seesweneewenenweswseseweseneswsenwse
wsesesesenesesesesesesesesesesese
neneneneenenenenenewneneneneneneswne
eseseeseeseeenweeeswseesenwse
neneseeeeweseewwseseeenweseee
senewnwwwswwewnwwnwwsewenewse
seseseeweseseseneseeeseweseseseee
wswseswseenwwswneswswnwswsww
nwnwnwnwwswnwnwnwnwenenwenwnwnwnwne
sewnwsenweswswswneenwwsenewnwnewnwnw
swseeseseswseneswwsesewwwswnenesesese
eeeneneesweeeeeeneee
nwnweswnwnwenwnwnwnwnwswnwsenwnwnwsenw
swswswswnwenwswswswswweswswesw
nenwnwneswnwneswnenwnenenenwnwnenenenene
wneneneneneneneneseswneneesenewnenwe
enwsenenweneeswswsesesweseseseseswsese
swwseseseeseewnesewnewswseseswseswse
enwneeneneswneneneenenenene
eswweeeeseeneeeeeesesenweee
nenwsenwnwnenwneswnwnwnwnwenwnwnenwnenw
esenwswwnwnwenwsenwnwseseenwswnwwew
nwswnewwwnwswnwwnenwnenwswnwwwwnwnw
eeweseeseseeeeeeesesesewese
nwseeeeenwseeseeeeseeeseeeew
senenwswnweswnwwwwwnwnenwwseswwnwe
ewswnwnewewsenwswseneswswswswseswsw
nwseswnenwwenwwswsesenwnwneewwnwse
seeneseseneweseseseeseseswseseseese
wwsewwswswwswswneswweswswswswwsww
swswswsweswswswswswnwwswswsesesweswsw
seeeeweweeweeeseneenewene
nwseseesewseesewnwneewseesesenenwee
swewwnwnwswswwwweswswswswswneswe
eeneeeneneneneeweneneenesenenenew
swsewwwsewnewwwnwwwwwwnewww
seneswwweswswswwsweswswswswswwswswwnw
seneseseseswsesesewsesesesesw
seswswseswswwewswswswswseswswswswnenw
eseseseesesenenweseesweseeewseseese
swesenenwswnesesenwwwnwse
nenewswnenenenenenesenenenenenenenenesw
senwneseneeneenenw
wseseseeseseeseseseeseseseeenesewe
neeeneweenenee
nwsenenwnenwneeneeeneneneneeswnesene
nwswseeneseenwswnweseneswswnweesesese
nwseseseswsesesesewseeesesesese
eswenesewnenwnwwwnwnwnwneswesenwswsene
sewswwswwswswwswwswswwwneneswnwsww
nenwswenenenenesesesenwwneswnenenewew
senenwswseswsewwsewseseseneeswneswswsw
nwwwnwswswseswseswswwnwweswwwew
eswswswswseseseswswseswswnwswsweswwswse
nenesenenewnenenwnenenenesenenenenenenesw
wnwnwnwwwwwewwwswwwnewnwsenwsw
enwnewnwneswewnewwswwneeseswesew
nwnwnenenwsenwewnenwnenenenwnenwnwnwnwsw
nesenewneenwnwnwnwnwneneneswneswnewnee
ewenewswwsewenwwsenenwwswnwsenwnw
nesenwsenwseseeswswnwese
wnwsenwnwsenwnwswwnwwnenwnwseswnwnwne
newnenwneneenwesenesenenwseseweswswe
senwsesesenwsweseswswsenwnesesesww
sweswseswswwseseseswswsesesenwneseseswnw
nwwenwnwnwsenwnweswnwswnwwswnwnwnenw
enesenenwsewesewsweeneeeeweeee
nwnwnwnwwnwnwnwnwnwwwewnwenwnwnwnw
wseswseseswneeseeseenwseenwseswnwse
seesenwnwwwewseswswnwnwnwe
sewwwwwwwweswwswwwwnewww
neneeswnenwneneswsenweneswneseswseeww
nwnwswenwnwnwnwnwneseswnwsweneswenwnwsw
wwwswwneswwwwwnewswww
senwnwnenwenenenenwnwnewswnwnwnwesw
wswwwwnwswwswwewswnweswswwswew
swseseseswseseswswseseswsesesesenesenwse
nwwnenenenwswsweneenenenenesweneeene
wwnwswwswwswewwwsewwwswswswe
ewwnwwnenesweseenwswswseeswwneenww
eseswsesenwwnwseseseseseseseseswesesesw
wwwwwsewwwwnenwsewnwnenwwwww
nenenenwneneeneswnenenenenwwnenwneenw
seeeeeeeswwseeeeeweneeeenw
senweeneeneswwneeneesweeeswenenesw
nwwnenwnwnwenwnenwnwnwswswnwnwnwenwse
sesenwseseseeseseseseseseese
swswneswsewseseswseswseswseswseeswsewse
seseeseseseeeesesesesewee
seneneeswnenenenenenenenwsenwnenenwnenenew
eeeneeneneweeewneeneneneeseene
swneneneneeneneneenenenenwneenewnene
seswnwseswnwnewneswswnesenewswwwswswsw
enweseeweewewesweeenw
wwwswewneswwwsesewwwnwwswswww
nwwwswwneswwsewswse
swwneseswswnewwswnewwwwse
nwwnwwwnwewwnwswwwwnwwnwwewnw
seseseswneseswseeseswswseswseswwesesenwsw
nweseseseseseseswewseseswsesesesesesese
seseswesesenwseenwsenwseseseseseseseswse
swsenwwnwnwnwnwneswewnenwnweweeswne
eeeenweeeweeeeeeeeesee
nwwwwswnewswewwenwnwwwewswwnw
nwnwnwnwwnwswenwnwnwnwnwnwnwnwnwnwnw
nwwsewewnesewswewnwswwnwwneewse
wnwnwwnwwwnwwnwnewwswwswwwne
enenesweeeeeeneenweeneeneeesw
neeeenweneeneneneneeeeeswsweee
sweswwewseswwwseneneswsewnwwsww
neesesenweweseneseeesesewseeseenwe
sweeeneeeswnene
nwwwswwnwseeweswwwnw
ewswswswwwwneswswnwswswwswswswwww
neewseenwneeswseeneweneweenwesw
seneweseeseseseewseseswweeeese
eneswswswnwswwwswswswswswswswswneswsw
sewswswswswnwswseswswswswnewswwwsww
wwsesenenwnewwwsenw
swnewweswwenenwneseenenenenenenewne
nwwnwnwnwswnwnenwwwnwnwnw
sesesesenwseswnwseseseseseeseswswswswse
swnwswenenwswswneweswwsewsw
nwnwnwnwnenenewewnenenenesenwnwswnwnw
seseswsewswswsenwseseesesenenwesenesww
neneeneeseeeewwwneeenweeeswe
enenwewwswswsenewswsenwewseeneenee
nwwenwswwwnwwnwnwwnwwwwewenwww
sweswnwswesenwsweswseswswnwnwswsweswnwsw
seseseneseseweseewseseswsesewsesese
eweneeneeeeeeseeeeeeeeesw
ewwwnwwwwswwwswswwwwwswwnesw
swnwnenwnwnwwnwnewnwswnwenwnwnwsenwnw
swneeswseseneswwnesesenwsesesenwswnww
seswneseseesesewseseene
wnwwsewnenwnwwsesesenwnwsesesewwwne
eswswwwswseswewwswwswnwswswwwnw
enwnwnenenwnwnwswnwnenwnenwnwswnwnw
newsesenwnenenwnwenwnewnwwnwnwswnwnwnwnw
swneneenesenwwsenwnewnesesenenenwnenw
neneenwnwswswswweeeeeeeenenee
swswseswnwswswswenwswsesenwswseswnewswse
newnenwnenewsenewnesenewneesenwnene
neseseseseswsesewseswneseseswsesesesese
ewseeeeseeesesesesenwseeeswse
wseeeseeeseseesewseenwswseneeee
neseneseswswsesenesewswsesenewsesesenwse
swneneneneneenwneeswneneneeneneneswne
eenewnenenesweeenenenenene
nenenesenenenwneeneeneewseeeene
nwsenenwnenewnwnwnwnwnwnenwnenwnwsenenenw
swnwenwnwwnwnwnwswswewnwnwnwnenwnwnwnww
wwswnwwewwwwsewnwwwww
wseseswnenewwwwwswwwsw
swswwswswwnewswwwseeswwwswwswsw
enesesewewsesweeeseeseseseseesese
nwwnewenwnenwnwnweneswnwneswnenwneenw
eweeeeeseeeweweneeeesesese
wswswswswswswnwseswneswswswswwwswsww
swsenwnwwnwseseseswweeneenwnesenwnee
neeswneneneneewneneneneneneneneneene
nwnewnwnwnwwnwnenwwswnwnwnwwsenwsenwnenw
nwnwnwnwnwnwnwnwsenwnwnwnw
nwnewnwswwwswneewsewnewwswwwww
eeswwesesesenwseeeeeeseeenwe
nenwneswswenwsweneeswneneneneneeswnenw
neneseeneneesewneswnenenwnw
nwnwwwenwenwnwnwwnwnwnwnwwnwswnwnwnw
seseseswswseswsenwswnenwseswwweseswnese
wwewwwseswwswsenwwnwweswnwnee
neenwnwseeneewwneneenenenesewseenese
nwwswswenesewwwwswwswenwneswnewse
seswwnenwnwnenwwneeswsewewsewsesw
eseeseseeeesweneee
eenesesweeeeenwswnwneenenwswnenene
seneseseeeseeswswseswsenwsenwnwsesesese
seseseseseeeeseseeenw
wnwwwweewwwwwswnewwswwww
swswnewnwswseswswswswswwwswswswswwsw
nwnwenwnwnwnwewnwnwnwnwnwnwnwswnwnwnw
seswswswseseseswseswswnwneseneswswseswne
swseswseswswswsewseswsesenenwsesenwseseese
swswsweseswneneswwnewswswswswswswswnew
nwwnwnwnwswnwnwnwnwsenwnwneeeswnwnwnenw
wneneneneseneneew
weeeseweneenewseesesewesesesese
swnweswswseeseswswswseswswswswnwseswew
eneseenweeeswneeeenweswneeee
neneenewneweneneeneseneneswneenenwnene
nesenewneneneeseswneneeneenenwenewnw
neneneweeneneewneeneeneneneene
senenewnesewwwwswswneswwneswsenwse
eenweneseeswnenweswnwsee
nwnwnwsenwnwnwnenwneneneswnwew
sweswneseenwesweeswnwewseneneneeenw
swnwneswwswseswswswswseswseseswswsesw
nwnwnwnwnewnwnwnwsewewsenw
swseseseseswseswswswsenwse
nenwnwesenwnwsenwwnwsenwneswneeneswnw
ewwwewnwwsewnw
nwwnwwnwnwswwnwnwnwnwwnenw
wneeneneneenwswswwneneeneneenesene
nenwnenwnenwnwnenwnwneesenwnenenenwwnew
eenewnenwswwseeenwsenwweneneswne
nwnwnenenwnwwnwnwnwnwswnwswsenw
eeseneenenenenwwseswneneewneenenenee
nenwnenewnwswnenewnwseswneenwnenesene
wnwswwnwwenwenwnwwnwswnwnwnwnwnwnww
neeneneenesewneseenenewnwenwswenese
nwewneeswnwnwseseneswneneswnenwswnwnw
nwnesesewseswsewsewnenenesesenewsesese
seneseswswswswswsenwseseseswseswswswsee
nwwswnwsewwwnewsewnwewesewnwnwnw
eswwenwnenwnwnenwswnwnwnwnwsenenwwne
senweesenwwsewseeneeeenesewseee
nenwnenwnwnwswnwswnenwnenweeswnenwnenene
wnwnwwewsewnewwswwwnwnwwnwwwww
nwwnwswnwnwwnwnenwnwswseewnwnwnwnwe
nenwnenenenwnwnwnenwnenenweenenenwwnesw
wnwnenweseneswwswnwneeseswnenenwswwe
eeeweeeeeeweeeseeee
wwwswwnwwnweswweneswnenwwwnwww
swswwswswseswswswsweseswswseneswswse
seswnwewswwwwswwsewnwneswswewww
seswwwwnewwnwwewwnewwwwww
seneswnenwsweewnwnwenwswswswnesenew
eswnweeesweeseneeeeeeeeeee
wnwswswswswwswswswswwswwwewewsww
nwnwnenwnwnweeswsenweesewswswnwnwswnw
seswwnwsewwwwswsenenw
wwswwwwswswseswwwwewswwswnww
seswseseswseseswseseseswswseseswswnw
swnesewwnwwneswne
wswswsesweswswswseswwswswsweswswnwnwe
seenwsenweseseseesesewseseseseesese
esenwnwnwneswnwnenwwsenwnenwwsenenww
eeeeeneseswseseeseenwseeesw
swseseeneseneseeswwnwese
eeeenweswseeeesee
seseswweenwswnewwwwnew
wswswswnwswswwswswswseneneswseseseeswse
nwswwnwsewewswwswwwenenwwnwww
seneseweseseeneesesesesesenweseseswse
nwnenwnwnwnwsewwenwnenwsenesenwnwnenwne
senwenewsesesewnwwseeweseswsesesenwe
wenwewnwnwnwwnwewnwwwnwwwwnw
seeeeseseeseseesenwseenweesesese
swswswwnwswwwwswnewswswwwswswwew
nenwnenenwnwnwsenenwneneneswnwnwnwsesene
wnewswsenesewswwwswnwwswswnewwseew
wsesenwenwseswsenwwseeseenesenenwwnw
senewewswwswwewwwwnewswwwswsw
swneenwseweseeenwweseseeesenwnwse"""
lines = input1.split('\n')
tiles = defaultdict(bool)  # absent/False = white side up, True = black

# Step offsets on a "doubled-width" hex grid: pure east/west moves change
# x by 2, the four diagonal moves change both x and y by 1.
_DIR_OFFSET = {
    'se': (1, 1),
    'sw': (-1, 1),
    'ne': (1, -1),
    'nw': (-1, -1),
    'e': (2, 0),
    'w': (-2, 0),
}

# main
start_profiling()
for line in lines:
    # Tokenise the direction string; the two-letter directions come first
    # in the alternation so they are matched before bare 'e'/'w'.
    x = y = 0
    for step in re.findall('(se|sw|ne|nw|e|w)', line):
        dx, dy = _DIR_OFFSET[step]
        x += dx
        y += dy
    # Each full path flips the tile it ends on.
    tiles[x, y] = not tiles[x, y]
print('a)', sum(tiles.values()))
end_profiling()

start_profiling()
# day() comes from `util` via star import — presumably one step of the
# part-b cellular automaton; confirm in util.  Applied 100 times.
for _ in range(100):
    tiles = day()
print('b)', sum(tiles.values()))
end_profiling()
| 31.661626 | 81 | 0.920294 |
9594993f4525fce4f5b648804a7994f70f4ed262 | 4,773 | py | Python | ci/check-documentation.py | FredrikBlomgren/aff3ct | fa616bd923b2dcf03a4cf119cceca51cf810d483 | [
"MIT"
] | 315 | 2016-06-21T13:32:14.000Z | 2022-03-28T09:33:59.000Z | ci/check-documentation.py | a-panella/aff3ct | 61509eb756ae3725b8a67c2d26a5af5ba95186fb | [
"MIT"
] | 153 | 2017-01-17T03:51:06.000Z | 2022-03-24T15:39:26.000Z | ci/check-documentation.py | a-panella/aff3ct | 61509eb756ae3725b8a67c2d26a5af5ba95186fb | [
"MIT"
] | 119 | 2017-01-04T14:31:58.000Z | 2022-03-21T08:34:16.000Z | #!/usr/bin/env python3
import argparse
import sys
import re
import subprocess
import os
import glob
import copy
import aff3ct_help_parser as ahp
# read all the lines from the given file and set them in a list of string lines with striped \n \r
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--keys', action='store', dest='keys_file', type=str, default='doc/strings.rst')
parser.add_argument('--aff3ct', action='store', dest='aff3ct_path', type=str, default='build/bin/aff3ct')
parser.add_argument('--doc', action='store', dest='doc_path', type=str, default='doc/source/user/simulation/parameters/')
args = parser.parse_args()
nDiff = check_keys(args.keys_file, args.aff3ct_path, args.doc_path)
sys.exit(nDiff); | 27.431034 | 128 | 0.707521 |
9595a509a88acc24d2199e14d5a84b03b3fb5415 | 677 | py | Python | todoster/list_projects.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
] | 5 | 2020-08-05T21:02:35.000Z | 2021-11-11T14:31:35.000Z | todoster/list_projects.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
] | 1 | 2020-09-24T04:41:20.000Z | 2020-09-28T04:37:50.000Z | todoster/list_projects.py | SophieAu/todoster | 6f69f7b254683d63f60f934eafa8971e78df7eb2 | [
"MIT"
] | 1 | 2021-08-09T19:23:24.000Z | 2021-08-09T19:23:24.000Z | from todoster.file_operations import load_projects
from todoster.output_formatter import format_string
| 33.85 | 85 | 0.669129 |
95988a5a0c747ad5cc792f45a029f70fc328bc8e | 621 | py | Python | src/game_test.py | TomNo/tictactoe-mcts | 5d5db97f54fe5a3bf7c9afaaa4d74984fdb30ec4 | [
"MIT"
] | null | null | null | src/game_test.py | TomNo/tictactoe-mcts | 5d5db97f54fe5a3bf7c9afaaa4d74984fdb30ec4 | [
"MIT"
] | null | null | null | src/game_test.py | TomNo/tictactoe-mcts | 5d5db97f54fe5a3bf7c9afaaa4d74984fdb30ec4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
__author__ = 'Tomas Novacik'
import unittest2
from game import Game
from board import Board, PlayerType, Move
# eof
| 18.264706 | 75 | 0.613527 |
95993548b5a77661a71dcd96b3ee1f6f35d686ce | 1,911 | py | Python | skills_taxonomy_v2/pipeline/skills_extraction/get_sentence_embeddings_utils.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
] | 3 | 2021-11-21T17:21:12.000Z | 2021-12-10T21:19:57.000Z | skills_taxonomy_v2/pipeline/skills_extraction/get_sentence_embeddings_utils.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
] | 16 | 2021-10-06T11:20:35.000Z | 2022-02-02T11:44:28.000Z | skills_taxonomy_v2/pipeline/skills_extraction/get_sentence_embeddings_utils.py | india-kerle/skills-taxonomy-v2 | a71366dfea3c35580dbafddba9470f83795805ae | [
"MIT"
] | 1 | 2021-10-04T12:27:20.000Z | 2021-10-04T12:27:20.000Z | """
Functions to mask sentences of undesirable words (stopwords, punctuation etc).
Used in get_sentence_embeddings.py to process sentences before finding embeddings.
"""
import re
from skills_taxonomy_v2.pipeline.skills_extraction.cleaning_sentences import (
separate_camel_case,
)
def is_token_word(token, token_len_threshold, stopwords, custom_stopwords):
    """
    Return True if a spaCy token looks like a meaningful word worth keeping.

    A token is rejected when any of the following holds:
    - its text contains "www" (URL fragments);
    - it is token_len_threshold characters or longer (usually garbage);
    - its part-of-speech tag is in the deny-list below (proper nouns,
      numbers, punctuation and most function-word categories);
    - its text contains a digit (these are always garbage);
    - its text or its lemma, lowercased, appears in stopwords or
      custom_stopwords.

    FIX: the digit check used "\\d" in a non-raw string (an invalid string
    escape, a SyntaxWarning on modern Python); now a raw string.
    """
    unwanted = stopwords + custom_stopwords
    if "www" in token.text:
        return False
    if len(token) >= token_len_threshold:
        return False
    if token.pos_ in [
        "PROPN",
        "NUM",
        "SPACE",
        "X",
        "PUNCT",
        "ADP",
        "AUX",
        "CONJ",
        "DET",
        "PART",
        "PRON",
        "SCONJ",
    ]:
        return False
    if re.search(r"\d", token.text):
        return False
    if token.text.lower() in unwanted or token.lemma_.lower() in unwanted:
        return False
    return True
def process_sentence_mask(
    sentence, nlp, bert_vectorizer, token_len_threshold, stopwords, custom_stopwords
):
    """Replace undesirable tokens in ``sentence`` with "[MASK]".

    The sentence is first split out of camelCase, then tokenised with the
    given spaCy pipeline; every token that fails ``is_token_word`` (stopword,
    punctuation, number, too long, ...) is masked.  One leading space is kept
    before every token/mask, mirroring the original accumulation behaviour,
    so a non-empty result always starts with a space.

    :param sentence: raw sentence text.
    :param nlp: spaCy language pipeline used for tokenisation.
    :param bert_vectorizer: unused here; accepted for interface compatibility
        with callers that pass the embedding model alongside the pipeline.
    :param token_len_threshold: maximum token length for is_token_word.
    :param stopwords: list of standard stopwords.
    :param custom_stopwords: list of extra, corpus-specific stopwords.
    :return: the masked sentence string.
    """
    sentence = separate_camel_case(sentence)
    doc = nlp(sentence)
    # Collect pieces and join once instead of repeated string concatenation
    # (the original also carried an unused enumerate() index).
    pieces = []
    for token in doc:
        if is_token_word(token, token_len_threshold, stopwords, custom_stopwords):
            pieces.append(" " + token.text)
        else:
            pieces.append(" [MASK]")
    return "".join(pieces)
| 29.4 | 85 | 0.553114 |
959a854d76fcee93383a4561465ab39d08da02e1 | 1,000 | py | Python | migrations/versions/033809bcaf32_destinations.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 11 | 2017-08-23T17:41:43.000Z | 2018-10-24T03:00:38.000Z | migrations/versions/033809bcaf32_destinations.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 480 | 2017-07-14T00:29:11.000Z | 2020-01-06T19:04:51.000Z | migrations/versions/033809bcaf32_destinations.py | RagtagOpen/carpools | 56b8f6491a2d347b637b345fbad7bc744130ec7f | [
"Apache-2.0"
] | 22 | 2017-07-07T00:07:32.000Z | 2020-02-27T19:43:14.000Z | """destinations
Revision ID: 033809bcaf32
Revises: 4a77b8fb792a
Create Date: 2017-08-24 05:56:45.166590
"""
from alembic import op
import sqlalchemy as sa
import geoalchemy2
# revision identifiers, used by Alembic.
revision = '033809bcaf32'
down_revision = '4a77b8fb792a'
branch_labels = None
depends_on = None
| 27.027027 | 89 | 0.698 |
959ac1baff7cea9daabf593760b72f74cd08cb19 | 778 | py | Python | porcupine/plugins/gotoline.py | rscales02/porcupine | 91b3c90d19d2291c0a60ddb9dffac931147cde3c | [
"MIT"
] | null | null | null | porcupine/plugins/gotoline.py | rscales02/porcupine | 91b3c90d19d2291c0a60ddb9dffac931147cde3c | [
"MIT"
] | null | null | null | porcupine/plugins/gotoline.py | rscales02/porcupine | 91b3c90d19d2291c0a60ddb9dffac931147cde3c | [
"MIT"
] | null | null | null | from tkinter import simpledialog
from porcupine import actions, get_tab_manager, tabs
| 31.12 | 79 | 0.638817 |
959aea6673bc315fd2a49870629b49b87e1b393a | 4,634 | py | Python | preprocessing.py | JackAndCole/Detection-of-sleep-apnea-from-single-lead-ECG-signal-using-a-time-window-artificial-neural-network | 692bb7d969b7eb4a0ad9b221660901a863bc76e2 | [
"Apache-2.0"
] | 7 | 2020-01-22T03:23:39.000Z | 2021-12-26T05:02:10.000Z | preprocessing.py | JackAndCole/Detection-of-sleep-apnea-from-single-lead-ECG-signal-using-a-time-window-artificial-neural-network | 692bb7d969b7eb4a0ad9b221660901a863bc76e2 | [
"Apache-2.0"
] | null | null | null | preprocessing.py | JackAndCole/Detection-of-sleep-apnea-from-single-lead-ECG-signal-using-a-time-window-artificial-neural-network | 692bb7d969b7eb4a0ad9b221660901a863bc76e2 | [
"Apache-2.0"
] | 1 | 2020-05-29T06:32:24.000Z | 2020-05-29T06:32:24.000Z | import os
import pickle
import sys
import warnings
from collections import OrderedDict
import biosppy.signals.tools as st
import numpy as np
import wfdb
from biosppy.signals.ecg import correct_rpeaks, hamilton_segmenter
from hrv.classical import frequency_domain, time_domain
from scipy.signal import medfilt
from tqdm import tqdm
warnings.filterwarnings(action="ignore")
base_dir = "dataset"
fs = 100 # ECG sample frequency
hr_min = 20
hr_max = 300
if __name__ == "__main__":
    # Build a {record name: features} mapping for the Apnea-ECG database
    # (record names a01-c10 train, x01-x35 test) and pickle it for later use.
    # NOTE(review): feature_extraction is not defined in this view;
    # presumably defined earlier in this file. TODO confirm.
    apnea_ecg = OrderedDict()
    # train data
    recordings = [
        "a01", "a02", "a03", "a04", "a05", "a06", "a07", "a08", "a09", "a10",
        "a11", "a12", "a13", "a14", "a15", "a16", "a17", "a18", "a19", "a20",
        "b01", "b02", "b03", "b04", "b05",
        "c01", "c02", "c03", "c04", "c05", "c06", "c07", "c08", "c09", "c10"
    ]
    for recording in recordings:
        # First ECG channel of the record, and its "apn" annotation symbols.
        signal = wfdb.rdrecord(os.path.join(base_dir, recording), channels=[0]).p_signal[:, 0]
        labels = wfdb.rdann(os.path.join(base_dir, recording), extension="apn").symbol
        apnea_ecg[recording] = feature_extraction(recording, signal, labels)
    print()
    # test data
    recordings = [
        "x01", "x02", "x03", "x04", "x05", "x06", "x07", "x08", "x09", "x10",
        "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
        "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x29", "x30",
        "x31", "x32", "x33", "x34", "x35"
    ]
    answers = {}
    filename = os.path.join(base_dir, "event-2-answers")
    # Test-set labels come from the released answer file: records are
    # separated by blank lines; the record name is the first 3 characters,
    # and every other whitespace-separated field (from index 2 onward) is a
    # run of label characters.
    with open(filename, "r") as f:
        for answer in f.read().split("\n\n"):
            answers[answer[:3]] = list("".join(answer.split()[2::2]))
    for recording in recordings:
        signal = wfdb.rdrecord(os.path.join(base_dir, recording), channels=[0]).p_signal[:, 0]
        labels = answers[recording]
        apnea_ecg[recording] = feature_extraction(recording, signal, labels)
    # protocol=2 keeps the pickle readable from Python 2 as well.
    with open(os.path.join(base_dir, "apnea-ecg.pkl"), "wb") as f:
        pickle.dump(apnea_ecg, f, protocol=2)
    print("ok")
| 44.990291 | 120 | 0.579197 |
959b3935838082e9b39f90f0dbe7ce84722264d7 | 3,904 | py | Python | tiddlywebplugins/tiddlyspace/openid.py | FND/tiddlyspace | 7b26e5b4e0b0a817b3ea0a357613c59705d016d4 | [
"BSD-3-Clause"
] | 2 | 2015-12-15T00:40:36.000Z | 2019-04-22T16:54:41.000Z | tiddlywebplugins/tiddlyspace/openid.py | jdlrobson/tiddlyspace | 70f500687fcd26e3fa4ef144297a05203ccf0f35 | [
"BSD-3-Clause"
] | null | null | null | tiddlywebplugins/tiddlyspace/openid.py | jdlrobson/tiddlyspace | 70f500687fcd26e3fa4ef144297a05203ccf0f35 | [
"BSD-3-Clause"
] | null | null | null | """
Subclass of tiddlywebplugins.openid2 to support
tiddlyweb_secondary_user cookie.
"""
import urlparse
from tiddlyweb.web.util import server_host_url, make_cookie
from tiddlywebplugins.openid2 import Challenger as OpenID
FRAGMENT_PREFIX = 'auth:OpenID:'
| 36.148148 | 72 | 0.615523 |
959b55108828b137a9e2c7ce659d11e247c56fff | 226 | py | Python | tests/__init__.py | tltx/iommi | a0ca5e261040cc0452d7452e9320a88af5222b30 | [
"BSD-3-Clause"
] | 192 | 2020-01-30T14:29:56.000Z | 2022-03-28T19:55:30.000Z | tests/__init__.py | tltx/iommi | a0ca5e261040cc0452d7452e9320a88af5222b30 | [
"BSD-3-Clause"
] | 105 | 2020-03-29T21:59:01.000Z | 2022-03-24T12:29:09.000Z | tests/__init__.py | tltx/iommi | a0ca5e261040cc0452d7452e9320a88af5222b30 | [
"BSD-3-Clause"
] | 28 | 2020-02-02T20:51:09.000Z | 2022-03-08T16:23:42.000Z | from datetime import datetime
import freezegun
# Create a single module-level freezegun freezer so all tests share one
# instance instead of paying freezegun's expensive re-initialisation cost.
initialize_freezegun = freezegun.freeze_time(datetime(2021, 1, 1))
# Time is frozen as a side effect of importing this package.
# NOTE(review): .stop() is never called here -- presumably handled by test
# teardown elsewhere. TODO confirm.
initialize_freezegun.start()
| 28.25 | 80 | 0.836283 |
959bcca51833c2423f463ff10fb943bd7f71b93f | 9,047 | py | Python | pyacoustics/morph/intensity_morph.py | UNIST-Interactions/pyAcoustics | f22d19d258b4e359fec365b30f11af261dee1b5c | [
"MIT"
] | 72 | 2015-12-10T20:00:04.000Z | 2022-03-31T05:42:17.000Z | pyacoustics/morph/intensity_morph.py | alivalehi/pyAcoustics | ab446681d7a2267063afb6a386334dcaefd0d93b | [
"MIT"
] | 5 | 2017-08-08T05:13:15.000Z | 2020-11-26T00:58:04.000Z | pyacoustics/morph/intensity_morph.py | alivalehi/pyAcoustics | ab446681d7a2267063afb6a386334dcaefd0d93b | [
"MIT"
] | 16 | 2016-05-09T07:36:15.000Z | 2021-08-30T14:23:25.000Z | '''
Created on Apr 2, 2015
@author: tmahrt
'''
import os
from os.path import join
import math
import copy
from pyacoustics.morph.morph_utils import common
from pyacoustics.morph.morph_utils import plot_morphed_data
from pyacoustics.utilities import utils
from pyacoustics.utilities import sequences
from pyacoustics.signals import audio_scripts
from pyacoustics.utilities import my_math
def getNormalizationFactor(lst, refLst=None):
    '''
    Compute the gain that maps the peaks of lst onto a reference range.

    Duplicate values are collapsed and zeros are ignored before taking the
    extrema.  When refLst is omitted, the reference range is the 16-bit PCM
    amplitude range [-32767, 32767]; otherwise the extrema of refLst (also
    deduplicated, zeros ignored) are used.  The smaller of the positive-side
    and negative-side ratios is returned so scaling clips neither side.
    '''
    source = set(lst)
    source.discard(0)
    srcMaxV = float(max(source))
    srcMinV = float(min(source))

    if refLst is None:
        refMaxV, refMinV = 32767.0, -32767.0
    else:
        reference = set(refLst)
        reference.discard(0)
        refMaxV = float(max(reference))
        refMinV = float(min(reference))

    return min(refMaxV / srcMaxV, abs(refMinV) / abs(srcMinV))
def getRelativeNormalizedFactors(fromDataList, toDataList, chunkSize):
    '''
    Determines the factors to be used to normalize sourceWav from targetWav

    This can be used to relatively normalize the source based on the target
    on an iterative basis (small chunks are normalized rather than the entire
    wav).

    Returns a tuple (normFactorList, newTargetRawDataList) where
    normFactorList holds (normalizationFactor, distToNextControlPoint)
    pairs -- one per chunk -- and newTargetRawDataList is the target signal
    resampled to the source signal's length.
    '''
    # Sample proportionately from the targetWav so both lists line up:
    # - if the two lists are the same length, there is no change
    # - if /target/ is shorter, it will be lengthened with some repeated values
    # - if /target/ is longer, it will be shortened with some values dropped
    # NOTE(review): assumes sequences.interp(0, n, fromDataList) yields
    # len(fromDataList) index values spread over [0, n] -- confirm against
    # the sequences module.
    tmpIndexList = sequences.interp(0, len(toDataList) - 1,
                                    fromDataList)
    newTargetRawDataList = [toDataList[int(round(i))]
                            for i in tmpIndexList]
    assert(len(fromDataList) == len(newTargetRawDataList))
    # Walk both signals in lockstep chunks with the same size and
    # sampling policy.
    fromGen = sequences.subsequenceGenerator(fromDataList,
                                             chunkSize,
                                             sequences.sampleMiddle,
                                             sequences.DO_SAMPLE_GATED)
    toGen = sequences.subsequenceGenerator(newTargetRawDataList,
                                           chunkSize,
                                           sequences.sampleMiddle,
                                           sequences.DO_SAMPLE_GATED)
    normFactorList = []
    i = 0  # vestigial debug counter; nothing below reads it
    for fromTuple, toTuple in zip(fromGen, toGen):
        fromDataChunk = fromTuple[0]
        toDataChunk = toTuple[0]
        distToNextControlPoint = fromTuple[2]
        # Factor that scales this source chunk toward the matching
        # target chunk.
        normFactor = getNormalizationFactor(fromDataChunk, toDataChunk)
        normFactorList.append((normFactor, distToNextControlPoint))
    return normFactorList, newTargetRawDataList
def expandNormalizationFactors(normFactorList):
    '''
    Expands the normFactorList from being chunk-based to sample-based

    E.g. A wav with 1000 samples may be represented by a factorList of 5
    chunks (5 factor values). This function will expand that to 1000.

    normFactorList -- sequence of (factor, chunkSize) pairs.
    Returns (normFactorsFull, controlPoints): the per-sample interpolated
    factors and, in parallel, each chunk's control-point factor repeated
    for every sample of that chunk.
    '''
    normFactorsFull = []
    controlPoints = []
    # Interpolate between each control point and the next one.
    for i in range(len(normFactorList) - 1):
        startVal, chunkSize = normFactorList[i]
        endVal = normFactorList[i + 1][0]
        normFactorsFull.extend(my_math.linspace(startVal, endVal, chunkSize))
        controlPoints.append(startVal)
        controlPoints.extend(my_math.linspace(startVal, startVal,
                                              chunkSize - 1))
    # We have no more data, so just repeat the final norm factor at the tail
    # of the file.
    value, finalChunkSize = normFactorList[-1]
    controlPoints.append(value)
    # Bug fix: the original extended controlPoints with the *previous*
    # chunk's startVal here (and raised NameError when normFactorList had
    # a single entry); the tail must repeat this chunk's own value.
    controlPoints.extend(my_math.linspace(value, value,
                                          finalChunkSize - 1))
    normFactorsFull.extend(my_math.linspace(value, value, finalChunkSize))
    print('Norm factors full: %d' % len(normFactorsFull))
    return normFactorsFull, controlPoints
| 36.776423 | 79 | 0.606831 |
959ca1652d25eeda188d0626465d82a0647c2777 | 1,886 | py | Python | algorithms/library/metricscontroller.py | heitor57/poi-rss | 12990af118f19595be01bf80e26a7ee93f9d05d8 | [
"MIT"
] | 1 | 2021-09-01T23:55:27.000Z | 2021-09-01T23:55:27.000Z | algorithms/library/metricscontroller.py | heitor57/poi-rss | 12990af118f19595be01bf80e26a7ee93f9d05d8 | [
"MIT"
] | 1 | 2021-09-09T06:21:48.000Z | 2021-09-14T02:08:33.000Z | algorithms/library/metricscontroller.py | heitor57/poi-rss | 12990af118f19595be01bf80e26a7ee93f9d05d8 | [
"MIT"
] | null | null | null | import numpy as np | 37.72 | 158 | 0.559915 |
959cbddc7a775bd66392c574ba57d0e444a033d9 | 736 | py | Python | backend-service/users-service/app/app/models/user.py | abhishek70/python-petclinic-microservices | e15a41a668958f35f1b962487cd2360c5c150f0b | [
"MIT"
] | 2 | 2021-05-19T07:21:59.000Z | 2021-09-15T17:30:08.000Z | backend-service/users-service/app/app/models/user.py | abhishek70/python-petclinic-microservices | e15a41a668958f35f1b962487cd2360c5c150f0b | [
"MIT"
] | null | null | null | backend-service/users-service/app/app/models/user.py | abhishek70/python-petclinic-microservices | e15a41a668958f35f1b962487cd2360c5c150f0b | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING
from sqlalchemy import Boolean, Column, Integer, String
from sqlalchemy.orm import relationship
from app.db.base_class import Base
if TYPE_CHECKING:
from .pet import Pet # noqa: F401
| 38.736842 | 90 | 0.744565 |
959f88de24a529a6005e19e9f3a68842519cdb55 | 930 | py | Python | slackbot/admin.py | surface-security/django-slackbot | 8d22fb922cf5365284d7a4836bb095eeeb8c7e90 | [
"MIT"
] | 1 | 2022-01-24T10:29:09.000Z | 2022-01-24T10:29:09.000Z | slackbot/admin.py | surface-security/django-slack-processor | 8d22fb922cf5365284d7a4836bb095eeeb8c7e90 | [
"MIT"
] | 4 | 2022-02-21T15:59:08.000Z | 2022-03-26T00:33:13.000Z | slackbot/admin.py | surface-security/django-slack-processor | 8d22fb922cf5365284d7a4836bb095eeeb8c7e90 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.utils.html import format_html
from . import get_user_model
| 34.444444 | 119 | 0.634409 |
95a0896392ae42746732acf467a7a7dc9ad52617 | 1,476 | py | Python | touroute/tourouteapp/migrations/0001_initial.py | oscarlamasrios/toroute | 5b00c0f606f438229e7857f25a23c4d51ff34293 | [
"Apache-2.0"
] | null | null | null | touroute/tourouteapp/migrations/0001_initial.py | oscarlamasrios/toroute | 5b00c0f606f438229e7857f25a23c4d51ff34293 | [
"Apache-2.0"
] | null | null | null | touroute/tourouteapp/migrations/0001_initial.py | oscarlamasrios/toroute | 5b00c0f606f438229e7857f25a23c4d51ff34293 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-04-24 14:53
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 34.325581 | 117 | 0.571816 |
95a1633d9ce1bb6f212d67d9111c6397f243ba02 | 19,691 | py | Python | catalog/application.py | gevannmullins/catalog-category-items | 850c77e17d5123511c954e3705f522228c6574ea | [
"MIT"
] | null | null | null | catalog/application.py | gevannmullins/catalog-category-items | 850c77e17d5123511c954e3705f522228c6574ea | [
"MIT"
] | null | null | null | catalog/application.py | gevannmullins/catalog-category-items | 850c77e17d5123511c954e3705f522228c6574ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from flask import Flask, render_template, request, redirect, jsonify, url_for, flash
from sqlalchemy import create_engine, asc
from sqlalchemy.orm import sessionmaker
from database_setup import Base, Category, Item, User
from flask import session as login_session
import random
import string
import collections
import json
import requests
from flask import make_response
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
import httplib2
# from dict2xml import dict2xml
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
import psycopg2
# from page_views import *
app = Flask(__name__)
CLIENT_ID = json.loads(open('/vagrant/catalog/client_secret.json', 'r').read())['web']['client_id']
APPLICATION_NAME = "Catalog Category Items Application"
engine = create_engine('sqlite:///catalog.db')
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
session = DBSession()
# User Helper Functions
# login page
# display home page / categories page
# Categories
# Show Category Items
# Create a new category
# Edit a categories
# Delete a category
# Item Services
# Create a new item
# Edit a item
# Delete a item
# Disconnect based on provider
##### JSON APIs to view Category Information
##### Social media routes #####
# DISCONNECT - Revoke a current user's token and reset their login_session
if __name__ == '__main__':
    # SECURITY(review): the session secret key is hard-coded in source;
    # move it to an environment variable or config file before deploying.
    app.secret_key = "lRYRXEimZGfbt3Q2TpD_6_Kj"
    # Debug mode enables the interactive debugger -- development only.
    app.debug = True
    # Listen on all interfaces, port 8002.
    app.run(host='0.0.0.0', port=8002)
| 35.867031 | 174 | 0.632218 |
95a2f6f31ddcda8bf982507b3035c6d82bfe1d80 | 723 | py | Python | selfdrive/visiond/tensorflow_autodetect.py | jeroenbbb/openpilot | 4a2ff784f85ac87a4aa9ba8a345c2403102f960a | [
"MIT"
] | 4 | 2019-05-29T19:44:56.000Z | 2021-09-10T18:36:57.000Z | selfdrive/visiond/tensorflow_autodetect.py | jeroenbbb/openpilot | 4a2ff784f85ac87a4aa9ba8a345c2403102f960a | [
"MIT"
] | null | null | null | selfdrive/visiond/tensorflow_autodetect.py | jeroenbbb/openpilot | 4a2ff784f85ac87a4aa9ba8a345c2403102f960a | [
"MIT"
] | 5 | 2019-08-09T07:49:28.000Z | 2020-10-11T03:19:04.000Z | import os
from setuptools import setup
# Package version; overridable at build time via the VERSION env var.
version = os.getenv('VERSION', '1.10.1')
setup(
    name='tensorflow-autodetect',
    version=version,
    url='https://github.com/commaai/tensorflow-autodetect',
    author='comma.ai',
    author_email='',
    license='MIT',
    long_description='Auto-detect tensorflow or tensorflow-gpu package based on nvidia driver being installed',
    keywords='tensorflow tensorflow-gpu',
    install_requires=[
        # Depend on the GPU build only when an NVIDIA kernel driver is
        # visible on the machine running the install, and pin the chosen
        # package to this wrapper's own version number.
        ('tensorflow-gpu' if os.path.exists('/proc/driver/nvidia/version') else 'tensorflow') + '==' + version,
    ],
    classifiers=[
        'Natural Language :: English',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
)
| 30.125 | 111 | 0.656985 |
95a308d03af24087015385e9c1aa146e859dc63c | 1,639 | py | Python | intask_api/projects/permissions.py | KirovVerst/intask | 4bdec6f49fa2873cca1354d7d3967973f5bcadc3 | [
"MIT"
] | null | null | null | intask_api/projects/permissions.py | KirovVerst/intask | 4bdec6f49fa2873cca1354d7d3967973f5bcadc3 | [
"MIT"
] | 7 | 2016-08-17T23:08:31.000Z | 2022-03-02T02:23:08.000Z | intask_api/projects/permissions.py | KirovVerst/intask | 4bdec6f49fa2873cca1354d7d3967973f5bcadc3 | [
"MIT"
] | null | null | null | from rest_framework import permissions
from django.shortcuts import get_object_or_404
from django.contrib.auth.models import User
from intask_api.projects.models import Project
| 34.87234 | 74 | 0.748627 |
95a3853b501cce7a1c286e558ccff9a6692b3e3f | 171 | py | Python | Ekeopara_Praise/Phase 2/LIST/Day43 Tasks/Task3.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Ekeopara_Praise/Phase 2/LIST/Day43 Tasks/Task3.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Ekeopara_Praise/Phase 2/LIST/Day43 Tasks/Task3.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | '''3. Write a Python program to split a list into different variables. '''
universalList = [(1, 2, 3), ('w', 'e', 's')]
lst1, lst2 = universalList
print(lst1)
print(lst2) | 28.5 | 74 | 0.654971 |
95a3fd394b5e1d1a390370d7caef0aefa5912c98 | 576 | py | Python | Codefights/arcade/python-arcade/level-9/62.Check-Participants/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | 7 | 2017-09-20T16:40:39.000Z | 2021-08-31T18:15:08.000Z | Codefights/arcade/python-arcade/level-9/62.Check-Participants/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | Codefights/arcade/python-arcade/level-9/62.Check-Participants/Python/test.py | RevansChen/online-judge | ad1b07fee7bd3c49418becccda904e17505f3018 | [
"MIT"
] | null | null | null | # Python3
from solution1 import checkParticipants as f

# Each entry pairs the argument(s) for one call with the expected result.
qa = [
    ([0, 1, 1, 5, 4, 8], [2]),
    ([0, 1, 2, 3, 4, 5], []),
    ([6], []),
    ([3, 3, 3, 3, 3, 3, 3, 3], [4, 5, 6, 7]),
    ([0, 0, 1, 5, 5, 4, 5, 4, 10, 8], [1, 2, 5, 6, 7, 9]),
]

# Run every case: echo its inputs, then report [ok]/[failed] per case.
for case in qa:
    args, expected = case[:-1], case[-1]
    for idx, value in enumerate(args, start=1):
        print('input{0}: {1}'.format(idx, value))
    result = f(*args)
    if result == expected:
        print(' [ok]')
        print('  output:', result)
    else:
        print(' [failed]')
        print('  output:', result)
        print('expected:', expected)
    print()
| 19.2 | 47 | 0.378472 |
95a45f4832007319ba41671ba4a21dd2a62ab0fc | 202 | py | Python | models/__init__.py | mikuh/bert-tf2-keras | e361a0e7dc9fa0d64c48ac41320d302599dba025 | [
"MIT"
] | 4 | 2020-06-21T15:48:40.000Z | 2022-01-24T05:10:59.000Z | models/__init__.py | mikuh/bert-tf2-keras | e361a0e7dc9fa0d64c48ac41320d302599dba025 | [
"MIT"
] | null | null | null | models/__init__.py | mikuh/bert-tf2-keras | e361a0e7dc9fa0d64c48ac41320d302599dba025 | [
"MIT"
] | 3 | 2020-07-20T07:11:27.000Z | 2022-01-24T05:11:21.000Z | from models.base_model import BaseModel
from models.classifier import BertClassifier
from models.sequence_labeling import BertSequenceLabeling
from models.sequence_embedding import BertSequenceEmbedding | 50.5 | 59 | 0.905941 |
95a49255a761f17a3cc35cbf97bc73b1442eaf32 | 7,563 | py | Python | plex_import_watched_history.py | chazlarson/plex-watched-tools | ef3e34e733ec9555353d695ced582395bdc73480 | [
"MIT"
] | null | null | null | plex_import_watched_history.py | chazlarson/plex-watched-tools | ef3e34e733ec9555353d695ced582395bdc73480 | [
"MIT"
] | null | null | null | plex_import_watched_history.py | chazlarson/plex-watched-tools | ef3e34e733ec9555353d695ced582395bdc73480 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python3 -m pip install --force -U --user PlexAPI
import json
import time
import logging
import plexapi
import plexapi.video
import plexapi.myplex
import plexapi.server
import plexapi.library
import plexapi.exceptions
PLEX_URL = ""
PLEX_TOKEN = ""
WATCHED_HISTORY = ""
LOG_FILE = ""
BATCH_SIZE = 10000
PLEX_REQUESTS_SLEEP = 0
CHECK_USERS = [
]
LOG_FORMAT = \
"[%(name)s][%(process)05d][%(asctime)s][%(levelname)-8s][%(funcName)-15s]" \
" %(message)s"
LOG_DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG_LEVEL = logging.INFO
plexapi.server.TIMEOUT = 3600
plexapi.server.X_PLEX_CONTAINER_SIZE = 2500
_SHOW_GUID_RATING_KEY_MAPPING = {}
_MOVIE_GUID_RATING_KEY_MAPPING = {}
_EPISODE_GUID_RATING_KEY_MAPPING = {}
logger = logging.getLogger("PlexWatchedHistoryImporter")
if __name__ == "__main__":
main()
| 36.713592 | 114 | 0.672352 |
95a5e5403994144db82f320da6b9ae78fdfacc78 | 3,556 | py | Python | django_thermostat/pypelib/Rule.py | jpardobl/django-thermostat | 184e398134f289eb0337ec2af33c650f9ee26a13 | [
"BSD-3-Clause"
] | null | null | null | django_thermostat/pypelib/Rule.py | jpardobl/django-thermostat | 184e398134f289eb0337ec2af33c650f9ee26a13 | [
"BSD-3-Clause"
] | null | null | null | django_thermostat/pypelib/Rule.py | jpardobl/django-thermostat | 184e398134f289eb0337ec2af33c650f9ee26a13 | [
"BSD-3-Clause"
] | null | null | null | import os
import sys
import time
import exceptions
import uuid
import logging
'''
@author: msune,lbergesio,omoya,CarolinaFernandez
@organization: i2CAT, OFELIA FP7
PolicyEngine Rule class
Encapsulates logic of a simple Rule
'''
from django_thermostat.pypelib.Condition import Condition
from django_thermostat.pypelib.persistence.PersistenceEngine import PersistenceEngine
from django_thermostat.pypelib.utils.Logger import Logger
| 27.353846 | 105 | 0.719629 |
95aa9b2ab7c302c981b157247e84659b7c3d8105 | 709 | py | Python | test/test_integration.py | gaborfodor/wave-bird-recognition | 6feafdbae82746e3e7b0f6588a9158aa8336309a | [
"MIT"
] | 17 | 2021-06-02T12:26:30.000Z | 2022-03-27T18:35:02.000Z | test/test_integration.py | gaborfodor/wave-bird-recognition | 6feafdbae82746e3e7b0f6588a9158aa8336309a | [
"MIT"
] | null | null | null | test/test_integration.py | gaborfodor/wave-bird-recognition | 6feafdbae82746e3e7b0f6588a9158aa8336309a | [
"MIT"
] | 3 | 2021-06-02T12:26:51.000Z | 2021-06-06T05:56:45.000Z | from birds.display_utils import geo_plot
from birds.pann import load_pretrained_model, read_audio_fast, get_model_predictions_for_clip, BIRDS
| 27.269231 | 100 | 0.71086 |
95abecff3908d6331f655cf91a24b321277dc4f4 | 12,306 | py | Python | For_Cluster/letshpc_folder_backtracking_2/main_script_without_perf.py | yatin2410/HPC_N_QUEENS | df629ac4ebc678815953370c8ae97c6d276819ff | [
"MIT"
] | 2 | 2019-05-10T09:09:07.000Z | 2022-02-07T05:46:57.000Z | For_Cluster/letshpc_folder_bitmasking/main_script_without_perf.py | yatin2410/HPC_N_QUEENS | df629ac4ebc678815953370c8ae97c6d276819ff | [
"MIT"
] | null | null | null | For_Cluster/letshpc_folder_bitmasking/main_script_without_perf.py | yatin2410/HPC_N_QUEENS | df629ac4ebc678815953370c8ae97c6d276819ff | [
"MIT"
] | null | null | null | #!/bin/python
import subprocess
import os
import sys
import maps
import time
import logging
logging.basicConfig(filename = "LetsHPC_Team_CodeRunner.log", level = logging.INFO)
logger = logging.getLogger(__name__)
########################################################################################################
USAGE = """
Usage:
run.py problem_name approach_name serial_executable parallel_executable runs log_directory output_directory input_directory base_directory
'problem_name' is the name of the problem assigned to you.
'approach_name' is the name of the appraoch assigned to you.
'serial_executable' must be the name of the compiled executable file for the serial code.
'parallel_executable' must be the name of the compiled executable file for the parallel code.
'runs' is the number of times to run the codes. Run at least thrice and ideally 10 times.
'log_directory' is the directory where you want to store the log files
'output_directory' is the directory where you want to store the output files
'input_directory' is the directory where you take the input from
"""
#######################################################################
# --- Interactive configuration (Python 2: raw_input / print statements) ---
base = os.getcwd()
all_files = os.listdir(base)
inp = None
# Ask whether the bookkeeping file from a previous run should be reused;
# answering 'n' deletes it so every code becomes eligible to run again.
while True:
    if 'codes_run_file' in all_files:
        inp = raw_input("Do you want to reuse the results of previous run? (y/n): ").lower()
        if inp == 'y':
            break
        elif inp == 'n':
            os.remove(base + '/codes_run_file')
            break
        else:
            print "Invalid input. Try again."
    else:
        # No bookkeeping file exists yet; nothing to reuse.
        break
# Select the parallel framework; this decides the compiler used later on.
while True:
    compiler_to_use = raw_input("Which parallel framework would you be using? (openmp/mpi): ").lower()
    if compiler_to_use == 'mpi' or compiler_to_use == 'openmp':
        break
    else:
        print("Incorrect input. Try again.")
# Ask for a positive number of benchmark repetitions.
while True:
    try:
        runs = int(raw_input("Enter the number of times you want the code to run (recommended: at least 10 runs): "))
        if runs <= 0: # if not a positive int print message and ask for input again
            print("Input must be a positive integer, try again!")
            continue
    except ValueError as ve:
        print("That's not an int! Try again!")
        continue
    else:
        print('the number of runs is ' + str(runs))
        break
# Directory layout: benchmark inputs under ./all_input/, one submission per
# sub-directory of ./all_codes/.
all_inputs = os.getcwd() + '/all_input/'
base = os.getcwd() + '/all_codes/'
starting_point = os.getcwd()
all_codes = os.listdir(base)
count = 0
# Drop any stale progress file from an earlier run (best-effort).
try:
    os.remove(base + "progress.txt")
except Exception as e:
    print "File already deleted"
print(all_codes)
code_to_run = None
codes_already_run = None
# Load the list of already-executed codes; create the bookkeeping file if
# it does not exist yet.
try:
    uber = open(os.getcwd() + "/codes_run_file", "r")
    codes_already_run = uber.readlines()
    uber.close()
except Exception as e:
    command = "touch %s" % (starting_point + "/codes_run_file")
    subprocess.call(command, shell = True)
# Pick the first submission directory that has not been run yet.
if codes_already_run is None:
    code_to_run = all_codes[0]
else:
    for each in all_codes:
        if each+"\n" not in codes_already_run:
            code_to_run = each
            break
print "The following code will be run now", code_to_run
if code_to_run is None:
    print "All the codes have already been executed."# + " You can run the collect data script now"
    sys.exit(1)
# Compile and execute the selected submission.  The loop shape is kept so
# the body could iterate over several codes if the list were extended.
for each_code in [code_to_run]:
    if each_code == "progress.txt" or "log" in each_code:
        continue
    # Remove stale outputs/logs from a previous run of this submission.
    subprocess.call("rm -rf "
                    + base + each_code + "/output"
                    , shell=True)
    subprocess.call("rm -rf "
                    + base + each_code + "/logs"
                    , shell=True)
    # Directory names are dash-separated with the problem and approach in
    # fields 2 and 3.  NOTE(review): assumes at least four fields -- confirm
    # against the naming convention used under all_codes/.
    division = each_code.split("-")
    problem = division[2]
    approach = division[3]
    print "-"*80
    print problem, approach
    # Locate the serial and parallel source files by filename heuristics.
    all_files = os.listdir(base+each_code+"/")
    serial = None
    parallel = None
    for each_file in all_files:
        if 'clean' not in each_file.lower() and 'logs'!=each_file.lower() and 'output'!=each_file.lower():
            if 'par' not in each_file.lower() and each_file!="ser":
                serial = each_file
            elif 'parallel' in each_file.lower():
                parallel = each_file
    # Choose the C compiler for the requested framework...
    if compiler_to_use == 'mpi':
        compiler = "mpicc "
    elif compiler_to_use == 'openmp':
        compiler = "gcc "
    # ...and switch to the C++ compiler when the parallel source is C++.
    if ".cpp" in parallel:
        if compiler_to_use == "mpi":
            compiler = "mpiCC "
        elif compiler_to_use == "openmp":
            compiler = "g++ "
    print serial, parallel
    if 'logs' not in all_files:
        os.mkdir(base + each_code + "/logs")
        os.mkdir(base + each_code + "/output")
    # Build the parallel ("parr") and serial ("ser") executables; OpenMP
    # builds need the -fopenmp flag, MPI builds do not.
    if compiler_to_use == 'openmp':
        subprocess.call(compiler
                        + base + each_code + "/" + parallel
                        + " -fopenmp -lm -w -o "
                        + base + each_code + "/parr", shell=True)
        subprocess.call(compiler
                        + base + each_code + "/" + serial
                        + " -fopenmp -lm -w -o "
                        + base + each_code + "/ser", shell=True)
    elif compiler_to_use == 'mpi':
        subprocess.call(compiler
                        + base + each_code + "/" + parallel
                        + " -lm -w -o "
                        + base + each_code + "/parr", shell=True)
        subprocess.call(compiler
                        + base + each_code + "/" + serial
                        + " -lm -w -o "
                        + base + each_code + "/ser", shell=True)
    print serial,parallel
    #raw_input()
    # Hand off to the benchmark driver.  NOTE(review): foobar is not
    # defined in this view; presumably defined or imported elsewhere in
    # the file (e.g. via maps). TODO confirm.
    foobar(['run.py', problem, approach, base + each_code + "/ser", base + each_code + "/parr", int(runs), base + each_code + "/logs/", \
        base + each_code + "/output/", all_inputs, base + each_code + "/", compiler_to_use])
    # Append a timestamped progress record for this submission.
    f = open(base + "progress.txt", "a")
    f.write(str(time.time()) + " " + str(count) + " " + str(each_code)+"\n")
    f.close()
    count +=1
# Mark the executed code as done so the next invocation skips it.
print "Reached Here:", code_to_run, type(code_to_run)
w2f = open(starting_point + "/codes_run_file", "a")
string_to_write = code_to_run + "\n"
w2f.write(string_to_write)
w2f.close()
print "Written To file"
| 34.664789 | 138 | 0.491549 |
95ae2e3a04b5bb9553c2d275221aaaba3d17f40e | 1,236 | py | Python | 0205.Isomorphic Strings/solution.py | zhlinh/leetcode | 6dfa0a4df9ec07b2c746a13c8257780880ea04af | [
"Apache-2.0"
] | null | null | null | 0205.Isomorphic Strings/solution.py | zhlinh/leetcode | 6dfa0a4df9ec07b2c746a13c8257780880ea04af | [
"Apache-2.0"
] | null | null | null | 0205.Isomorphic Strings/solution.py | zhlinh/leetcode | 6dfa0a4df9ec07b2c746a13c8257780880ea04af | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-03-24
Last_modify: 2016-03-24
******************************************
'''
'''
Given two strings s and t, determine if they are isomorphic.
Two strings are isomorphic if the characters in s can be replaced to get t.
All occurrences of a character must be replaced with another character
while preserving the order of characters.
No two characters may map to the same character
but a character may map to itself.
For example,
Given "egg", "add", return true.
Given "foo", "bar", return false.
Given "paper", "title", return true.
Note:
You may assume both s and t have the same length.
'''
| 24.235294 | 75 | 0.536408 |
95b11eb96aa3d734016e0fceb804be347a3066c5 | 1,860 | py | Python | testModules/migration.py | mannamman/newsCrawl | 8779c1ee06ef51d2affbd9b8a80e688c6ed056e7 | [
"MIT"
] | null | null | null | testModules/migration.py | mannamman/newsCrawl | 8779c1ee06ef51d2affbd9b8a80e688c6ed056e7 | [
"MIT"
] | 14 | 2021-12-20T03:44:08.000Z | 2022-02-24T06:04:06.000Z | testModules/migration.py | mannamman/newsCrawl | 8779c1ee06ef51d2affbd9b8a80e688c6ed056e7 | [
"MIT"
] | null | null | null | import pymongo
## local ##
from dotenv import load_dotenv
import os
import pytz
import datetime
import itertools
from uuid import uuid4
from collections import defaultdict
# ObjectId
from bson.objectid import ObjectId
"""
RDBMS Mongo DB
Database Database
Table Collection
Row Document
Index Index
DB server Mongod
DB client mongo
"""
if(__name__ == "__main__"):
new_db_worker = newWorker()
new_db_worker.migration_for_mistyping() | 28.181818 | 111 | 0.638172 |
95b233e62bad224b765ef9f8b1c2e67cce2b24ad | 1,659 | py | Python | YOLOv2.py | scain40/OpenCVCVImageComparisson | 368d901233111606fb2f0ecbce4447dd9c149fd0 | [
"MIT"
] | null | null | null | YOLOv2.py | scain40/OpenCVCVImageComparisson | 368d901233111606fb2f0ecbce4447dd9c149fd0 | [
"MIT"
] | null | null | null | YOLOv2.py | scain40/OpenCVCVImageComparisson | 368d901233111606fb2f0ecbce4447dd9c149fd0 | [
"MIT"
] | null | null | null | import numpy as np
import cv2 as cv
import os
import sys
| 36.866667 | 112 | 0.722122 |
95b3747c398cbe76bc2e8c76655c81e2a5cd82bc | 115 | py | Python | closuredag/apps.py | farmlab/django-closuredag | 19bacabea5e922613a18d21048866dceb44d0afe | [
"MIT"
] | null | null | null | closuredag/apps.py | farmlab/django-closuredag | 19bacabea5e922613a18d21048866dceb44d0afe | [
"MIT"
] | 93 | 2017-11-16T13:58:45.000Z | 2022-03-27T22:01:19.000Z | closuredag/apps.py | farmlab/django-closuredag | 19bacabea5e922613a18d21048866dceb44d0afe | [
"MIT"
] | null | null | null | # -*- coding: utf-8
from django.apps import AppConfig
| 16.428571 | 34 | 0.721739 |
95b3dfb14ba48f34faa00abbd1780bd7ac43862d | 499 | py | Python | experiments/reversed_string_stack.py | shruti-bt/data-structure-python | 0729f486f516ce05acdd92b28b108f43b67f656f | [
"MIT"
] | 1 | 2022-01-10T17:17:35.000Z | 2022-01-10T17:17:35.000Z | experiments/reversed_string_stack.py | shruti-bt/data-structure-python | 0729f486f516ce05acdd92b28b108f43b67f656f | [
"MIT"
] | null | null | null | experiments/reversed_string_stack.py | shruti-bt/data-structure-python | 0729f486f516ce05acdd92b28b108f43b67f656f | [
"MIT"
] | null | null | null |
if __name__ == '__main__':
str_ = input()
stack = Stack()
for i in str_:
stack.push(i)
for j in range(len(stack)):
print(stack.pop(), end='')
print()
| 19.192308 | 35 | 0.519038 |
95b40e4094e935db9b4e39bc3de9c67b55114bbe | 484 | py | Python | app/run.py | dudikbender/geocoder | af8c0839d3d73c7825a0488763d053b5e6bc8257 | [
"Unlicense"
] | null | null | null | app/run.py | dudikbender/geocoder | af8c0839d3d73c7825a0488763d053b5e6bc8257 | [
"Unlicense"
] | null | null | null | app/run.py | dudikbender/geocoder | af8c0839d3d73c7825a0488763d053b5e6bc8257 | [
"Unlicense"
] | null | null | null | from utils.db import connection, print_version
import pandas as pd
table = 'data/tables/postcode_coordinates.csv'
add_table(table, 'Postcode_coordinates', connection)
cur = connection.cursor()
cur.execute('''SELECT *
FROM Postcode_coordinates''')
data = cur.fetchmany(5)
print(data) | 25.473684 | 76 | 0.727273 |
95b525d705b0f34eba83af30d5fc61bd4affc2f0 | 48 | pyw | Python | seemee.pyw | gaming32/SeeMee | a99655efdd9e1aea218474bcdbd1370954a366d2 | [
"MIT"
] | null | null | null | seemee.pyw | gaming32/SeeMee | a99655efdd9e1aea218474bcdbd1370954a366d2 | [
"MIT"
] | null | null | null | seemee.pyw | gaming32/SeeMee | a99655efdd9e1aea218474bcdbd1370954a366d2 | [
"MIT"
] | null | null | null | import runpy
runpy._run_module_as_main('SeeMee') | 24 | 35 | 0.854167 |
95b591115eff8da9eaed281f3f62bddae8faefca | 755 | py | Python | model/param_const.py | tototo617/Biomodel-Raia2011 | a06d531e3d9f18ddee1d85a19d8c57363be3da8e | [
"MIT"
] | null | null | null | model/param_const.py | tototo617/Biomodel-Raia2011 | a06d531e3d9f18ddee1d85a19d8c57363be3da8e | [
"MIT"
] | null | null | null | model/param_const.py | tototo617/Biomodel-Raia2011 | a06d531e3d9f18ddee1d85a19d8c57363be3da8e | [
"MIT"
] | null | null | null | from .name2idx import parameters as C | 31.458333 | 46 | 0.682119 |
95b6aab732ea16915f09231a8049e60f6f242ea6 | 593 | py | Python | flaskr/commands.py | aicioara-old/flask_tutorial2 | acb5c6fa2743f2f060ad6a3a26cc7eef56b6490b | [
"MIT"
] | null | null | null | flaskr/commands.py | aicioara-old/flask_tutorial2 | acb5c6fa2743f2f060ad6a3a26cc7eef56b6490b | [
"MIT"
] | null | null | null | flaskr/commands.py | aicioara-old/flask_tutorial2 | acb5c6fa2743f2f060ad6a3a26cc7eef56b6490b | [
"MIT"
] | null | null | null | import os
import datetime
import click
from flask.cli import with_appcontext
from werkzeug.security import generate_password_hash
| 20.448276 | 82 | 0.735245 |
95b6e78900559f4f960f26e452c446bb79f637e4 | 191 | py | Python | intel_bot_sentenca_rj_civel/test.py | slarda/Web-Scrapping-Bots-For-Crawling-Docs | aa8ce3c72bfbe2111d16655ffc3a6759a825946e | [
"Apache-2.0"
] | 1 | 2020-12-17T11:21:01.000Z | 2020-12-17T11:21:01.000Z | intel_bot_sentenca_rj_civel/test.py | soft-super/Web-Scrapping-Bots-For-Crawling-Docs | aa8ce3c72bfbe2111d16655ffc3a6759a825946e | [
"Apache-2.0"
] | 5 | 2021-03-19T01:48:07.000Z | 2021-06-09T18:26:31.000Z | intel_bot_sentenca_rj_civel/test.py | tiny-1996/Web-Scrapping-Bots-For-Crawling-Docs | aa8ce3c72bfbe2111d16655ffc3a6759a825946e | [
"Apache-2.0"
] | null | null | null | with open('./logs/test.log', 'r') as f1:
data = f1.readlines()
formatted = [x.replace('.pdf', '') for x in data]
with open('./logs/test2.log', 'r') as f1:
f1.writelines(formatted)
| 21.222222 | 49 | 0.602094 |
95b771302ac3436f68366f36390ccc4ddba021fd | 2,206 | py | Python | validator_rewards/validator_rewards.py | harmony-one/monitor-ops | 0a379655ff26bff5821cd7cb6f619a15a308441b | [
"MIT"
] | 1 | 2020-04-11T16:46:56.000Z | 2020-04-11T16:46:56.000Z | validator_rewards/validator_rewards.py | harmony-one/monitor-ops | 0a379655ff26bff5821cd7cb6f619a15a308441b | [
"MIT"
] | 3 | 2020-04-13T10:42:59.000Z | 2020-07-10T06:26:23.000Z | validator_rewards/validator_rewards.py | harmony-one/monitor-ops | 0a379655ff26bff5821cd7cb6f619a15a308441b | [
"MIT"
] | 2 | 2020-04-22T10:36:25.000Z | 2020-05-20T15:58:02.000Z | import argparse
import json
from pyhmy import (
get_all_validator_addresses,
get_validator_information
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--start", required=True, type=int, help="First block")
parser.add_argument("--end", required=True, type=int, help="Last block")
parser.add_argument("--endpoint", default="http://localhost:9500", help="Endpoint to query")
parser.add_argument("--verbose", action='store_true', help="Verbose print for debug")
args = parser.parse_args()
if args.verbose:
else:
block_timestamps = []
block_tx = []
block_stx = []
for block_num in range(args.start, args.end):
v_print(f'Block {block_num}/{args.end}', end="\r")
reply = get_block_by_num(block_num, args.endpoint)
try:
block_timestamps.append(int(reply['result']['timestamp'], 0))
block_tx.append(len(reply['result']['transactions']))
block_stx.append(len(reply['result']['stakingTransactions']))
except Exception as e:
v_print(f'{e.__class__}: {e}')
pass
block_times = [y - x for x, y in zip(block_timestamps, block_timestamps[1:])]
avg = sum(block_times) / len(block_times)
print(f'Average Block Time: {avg}')
unique_times = Counter(block_times)
print(f'Unique block times: {unique_times.most_common()}')
# offset = [0].extend(block_times)
| 31.514286 | 96 | 0.609248 |
95b980c29bfb10b077998e38727075e9d4e823a6 | 2,271 | py | Python | day4/day4.py | UncleTed/adventOfCode2020 | 382560f7aee89f6b04b2ee60882d3801425ea46c | [
"MIT"
] | null | null | null | day4/day4.py | UncleTed/adventOfCode2020 | 382560f7aee89f6b04b2ee60882d3801425ea46c | [
"MIT"
] | null | null | null | day4/day4.py | UncleTed/adventOfCode2020 | 382560f7aee89f6b04b2ee60882d3801425ea46c | [
"MIT"
] | null | null | null | import re
valid = ['hcl', 'iyr', 'pid', 'ecl', 'hgt','eyr', 'byr' ]
#part1()
part2() | 27.695122 | 66 | 0.483928 |
95bb338ca37179ca6d20e80795bb6cc5417559db | 535 | py | Python | app/shared/models.py | prapeller/blackemployer_api | ae9232773e6e164b22ffccf0b39dd9a4c2a036cf | [
"MIT"
] | null | null | null | app/shared/models.py | prapeller/blackemployer_api | ae9232773e6e164b22ffccf0b39dd9a4c2a036cf | [
"MIT"
] | null | null | null | app/shared/models.py | prapeller/blackemployer_api | ae9232773e6e164b22ffccf0b39dd9a4c2a036cf | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import ArrayField
from utils.model_utils import default_1d_array_of_strings
| 35.666667 | 96 | 0.773832 |
95bbb3583a2750d5735e9244fe93a6a446fb803f | 8,314 | py | Python | dataset/data_load.py | clovaai/symmetrical-synthesis | 207953b1ae3d2e0a96fb676db3669bdc88cc18e8 | [
"MIT"
] | 76 | 2020-02-08T03:15:54.000Z | 2022-03-04T16:14:52.000Z | dataset/data_load.py | clovaai/symmetrical-synthesis | 207953b1ae3d2e0a96fb676db3669bdc88cc18e8 | [
"MIT"
] | 5 | 2020-02-07T14:00:58.000Z | 2021-05-31T01:37:55.000Z | dataset/data_load.py | clovaai/symmetrical-synthesis | 207953b1ae3d2e0a96fb676db3669bdc88cc18e8 | [
"MIT"
] | 13 | 2020-02-10T02:56:51.000Z | 2021-05-28T06:56:30.000Z | '''
symmetrical-synthesis
Copyright (c) 2020-present NAVER Corp.
MIT license
'''
import os
import time
import glob
import cv2
import random
import numpy as np
import tensorflow as tf
import random
try:
import data_util
except ImportError:
from dataset import data_util
tf.app.flags.DEFINE_boolean('random_resize', False, 'True or False')
tf.app.flags.DEFINE_boolean('past_dataset', False, 'True or False')
tf.app.flags.DEFINE_string('google_path', None, '')
tf.app.flags.DEFINE_integer('min_train3', 2, '')
tf.app.flags.DEFINE_string('match_info', None, '')
tf.app.flags.DEFINE_float('match_prob', 0.0, '')
tf.app.flags.DEFINE_boolean('mnist_mode', False, '')
FLAGS = tf.app.flags.FLAGS
'''
image_path = '/where/your/images/*.jpg'
'''
def get_images_dict(image_folder):
'''
image_folder = '/data/IR/DB/sid_images'
folder structure
sid_images - sid0 - image00.png, image01.png, ...
- sid1 - ...
- sid2 - ...
'''
if FLAGS.match_info is not None:
match_dict = {}
f_match = open(FLAGS.match_info, 'r')
match_lines = f_match.readlines()
cnt = 0
for match_line in match_lines:
ver1_cls, ver2_cls, prob = match_line.split()
prob = float(prob)
if prob >= FLAGS.match_prob:
match_dict[ver2_cls] = 1
possible_image_type = ['jpg', 'JPG', 'png', 'JPEG', 'jpeg']
sid_list = glob.glob(os.path.join(image_folder, '*'))
images_dict = {}
images_list = []
images_cnt = 0
sid_idx = 0
for sid_folder in sid_list:
ext_folder = sid_folder
#ext_folder = os.path.join(sid_folder, 'exterior')
images_path = [image_path for image_paths in [glob.glob(os.path.join(ext_folder, '*.%s' % ext)) for ext in possible_image_type] for image_path in image_paths]
n_instance = 2
if len(images_path) < n_instance:
continue
for image_path in images_path:
images_list.append([image_path, sid_idx])
images_dict[sid_idx] = images_path
images_cnt += len(images_path)
sid_idx += 1
#print(images_dict)
stat_db = {}
stat_db['num_sid'] = len(images_dict)
stat_db['images_cnt'] = images_cnt
return images_dict, stat_db, images_list
def get_generator(image_folder, **kwargs):
return generator(image_folder, **kwargs)
## image_path = '/where/is/your/images/'
if __name__ == '__main__':
image_path = '/data/IR/DB/data_refinement/place_exterior'
num_workers = 4
batch_size = 128
input_size = 224
data_generator = get_batch(image_path=image_path,
num_workers=num_workers,
batch_size=batch_size,
input_size=224)
_ = 0
while True:
_ += 1
#break
start_time = time.time()
data = next(data_generator)
anchor_images = np.asarray(data[0])
pos_images = np.asarray(data[1])
gts = np.asarray(data[2])
print('%d done!!! %f' % (_, time.time() - start_time), anchor_images.shape, pos_images.shape, gts.shape)
#for sub_idx, (loaded_image, gt) in enumerate(zip(loaded_images, gts)):
# save_path = '/data/IR/DB/naver_place/test/%03d_%03d_gt_%d_image.jpg' % (_, sub_idx, gt)
# cv2.imwrite(save_path, loaded_image[:,:,::-1])
| 35.228814 | 170 | 0.615227 |
95bc1cbdca2faf1169e04427ea20b03a36f4f201 | 1,678 | py | Python | python_parikshith21/Day39.py | 01coders/50-Days-Of-Code | 98928cf0e186ee295bc90a4da0aa9554e2918659 | [
"MIT"
] | null | null | null | python_parikshith21/Day39.py | 01coders/50-Days-Of-Code | 98928cf0e186ee295bc90a4da0aa9554e2918659 | [
"MIT"
] | null | null | null | python_parikshith21/Day39.py | 01coders/50-Days-Of-Code | 98928cf0e186ee295bc90a4da0aa9554e2918659 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 17 20:55:53 2019
@author: Parikshith.H
"""
import sqlite3
conn=sqlite3.connect('music.sqlite')
cur=conn.cursor()
cur.execute('DROP TABLE IF EXISTS Tracks')
cur.execute('CREATE TABLE Tracks(title TEXT,plays INTEGER)')
cur.execute('''INSERT INTO Tracks(title,plays) VALUES ('Thunder2',100)''')
cur.execute('''INSERT INTO Tracks VALUES ('Thunder3',100)''')
cur.execute('INSERT INTO Tracks(title,plays) VALUES (?,?)',('Thunderstuck',200))
cur.execute('INSERT INTO Tracks(title,plays) VALUES (?,?)',('Dangerous',20))
cur.execute('INSERT INTO Tracks(title,plays) VALUES (?,?)',('Myway',150))
cur.execute('INSERT INTO Tracks(title,plays) VALUES (?,?)',('Newway',30))
cur.execute('SELECT * FROM Tracks')
for row in cur:
print(row)
print('****************************')
cur.execute('''UPDATE Tracks SET plays=50 WHERE title='Myway' ''')
cur.execute('SELECT * FROM Tracks')
for row in cur:
print(row)
print('****************************')
cur.execute('''DELETE FROM Tracks WHERE plays<100 ''')
cur.execute('SELECT * FROM Tracks')
for row in cur:
print(row)
cur.close()
conn.close()
# =============================================================================
# #output:
# ('Thunder2', 100)
# ('Thunder3', 100)
# ('Thunderstuck', 200)
# ('Dangerous', 20)
# ('Myway', 150)
# ('Newway', 30)
# ****************************
# ('Thunder2', 100)
# ('Thunder3', 100)
# ('Thunderstuck', 200)
# ('Dangerous', 20)
# ('Myway', 50)
# ('Newway', 30)
# ****************************
# ('Thunder2', 100)
# ('Thunder3', 100)
# ('Thunderstuck', 200)
# ============================================================================= | 28.440678 | 80 | 0.544696 |
95bd0c7bd55d7d49e38f428fd858ef62fbc90459 | 269 | py | Python | tests/ansible/lib/modules/custom_python_external_pkg.py | webcoast-dk/mitogen | a5fe4a9fac5561511b676fe61ed127b732be5b12 | [
"BSD-3-Clause"
] | 1,526 | 2017-09-15T18:49:40.000Z | 2021-01-17T16:04:12.000Z | tests/ansible/lib/modules/custom_python_external_pkg.py | webcoast-dk/mitogen | a5fe4a9fac5561511b676fe61ed127b732be5b12 | [
"BSD-3-Clause"
] | 682 | 2017-09-11T17:43:12.000Z | 2021-01-17T05:26:26.000Z | tests/ansible/lib/modules/custom_python_external_pkg.py | webcoast-dk/mitogen | a5fe4a9fac5561511b676fe61ed127b732be5b12 | [
"BSD-3-Clause"
] | 111 | 2017-09-15T23:21:37.000Z | 2021-01-01T14:45:35.000Z | #!/usr/bin/python
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.externalpkg import extmod
if __name__ == '__main__':
main()
| 22.416667 | 52 | 0.750929 |
95bd18d246cfb63e62a2a8d0384166889102ed92 | 1,869 | py | Python | mlangpy/metalanguages/EBNF.py | rium9/mlangpy | 75821306b15d72278220d2a1a403daa36f60cc4a | [
"MIT"
] | 1 | 2020-04-20T20:23:31.000Z | 2020-04-20T20:23:31.000Z | mlangpy/metalanguages/EBNF.py | rium9/mlangpy | 75821306b15d72278220d2a1a403daa36f60cc4a | [
"MIT"
] | null | null | null | mlangpy/metalanguages/EBNF.py | rium9/mlangpy | 75821306b15d72278220d2a1a403daa36f60cc4a | [
"MIT"
] | null | null | null | from ..grammar import *
from .Metalanguage import Metalanguage
| 29.203125 | 83 | 0.652755 |
95bd8914d357d073cde74eb4ec195a84ebfe2b04 | 560 | py | Python | app/tests/test_db/test_jobs_crud.py | JvitorS23/jobboard_fastAPI | 5abcc69f19417ad99352c0434db96407e2d7da76 | [
"MIT"
] | 1 | 2021-10-01T16:40:33.000Z | 2021-10-01T16:40:33.000Z | app/tests/test_db/test_jobs_crud.py | JvitorS23/jobboard_fastAPI | 5abcc69f19417ad99352c0434db96407e2d7da76 | [
"MIT"
] | null | null | null | app/tests/test_db/test_jobs_crud.py | JvitorS23/jobboard_fastAPI | 5abcc69f19417ad99352c0434db96407e2d7da76 | [
"MIT"
] | null | null | null | from sqlalchemy.orm import Session
from db.crud.jobs import create_new_job, retrieve_job
from schemas.jobs import JobCreate
from tests.utils.users import create_random_owner
from tests.utils.jobs import create_sample_job
def test_retrieve_job_by_id(db_session: Session):
"""Test retrieving job from db"""
owner = create_random_owner(session=db_session)
job = create_sample_job(owner, db_session)
retrieved_job = retrieve_job(job_id=job.id, session=db_session)
assert retrieved_job.id == job.id
assert retrieved_job.title == job.title
| 37.333333 | 67 | 0.792857 |
95c0ec3bbf5dfcbc14218087f1c41fdd10c1b36f | 5,135 | py | Python | spacy/tests/website/test_home.py | moyogo/spacy | ddf5c5bb61864320189ebc70dac3bc10e4ecde82 | [
"MIT"
] | null | null | null | spacy/tests/website/test_home.py | moyogo/spacy | ddf5c5bb61864320189ebc70dac3bc10e4ecde82 | [
"MIT"
] | null | null | null | spacy/tests/website/test_home.py | moyogo/spacy | ddf5c5bb61864320189ebc70dac3bc10e4ecde82 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import pytest
import spacy
import os
try:
xrange
except NameError:
xrange = range
def test_get_and_set_string_views_and_flags(nlp, token):
assert token.shape_ == 'Xxxxx'
for lexeme in nlp.vocab:
if lexeme.is_alpha:
lexeme.shape_ = 'W'
elif lexeme.is_digit:
lexeme.shape_ = 'D'
elif lexeme.is_punct:
lexeme.shape_ = 'P'
else:
lexeme.shape_ = 'M'
assert token.shape_ == 'W'
def test_export_to_numpy_arrays(nlp, doc):
from spacy.attrs import ORTH, LIKE_URL, IS_OOV
attr_ids = [ORTH, LIKE_URL, IS_OOV]
doc_array = doc.to_array(attr_ids)
assert doc_array.shape == (len(doc), len(attr_ids))
assert doc[0].orth == doc_array[0, 0]
assert doc[1].orth == doc_array[1, 0]
assert doc[0].like_url == doc_array[0, 1]
assert list(doc_array[:, 1]) == [t.like_url for t in doc]
def test_calculate_inline_mark_up_on_original_string():
def put_spans_around_tokens(doc, get_classes):
'''Given some function to compute class names, put each token in a
span element, with the appropriate classes computed.
All whitespace is preserved, outside of the spans. (Yes, I know HTML
won't display it. But the point is no information is lost, so you can
calculate what you need, e.g. <br /> tags, <p> tags, etc.)
'''
output = []
template = '<span classes="{classes}">{word}</span>{space}'
for token in doc:
if token.is_space:
output.append(token.orth_)
else:
output.append(
template.format(
classes=' '.join(get_classes(token)),
word=token.orth_,
space=token.whitespace_))
string = ''.join(output)
string = string.replace('\n', '')
string = string.replace('\t', ' ')
return string
| 28.370166 | 78 | 0.631353 |
95c1052429e03206d9d42e4ca673e5f48a3f3906 | 35,774 | py | Python | bridge_sim/internal/make/ps_question.py | jerbaroo/bridge-sim | c4ec1c18a07a78462ccf3b970a99a1bd7efcc2af | [
"MIT"
] | 2 | 2020-05-12T11:41:49.000Z | 2020-08-10T15:00:58.000Z | bridge_sim/internal/make/ps_question.py | barischrooneyj/bridge-sim | c4ec1c18a07a78462ccf3b970a99a1bd7efcc2af | [
"MIT"
] | 48 | 2020-05-11T23:58:22.000Z | 2020-09-18T20:28:52.000Z | bridge_sim/internal/make/ps_question.py | jerbaroo/bridge-sim | c4ec1c18a07a78462ccf3b970a99a1bd7efcc2af | [
"MIT"
] | 1 | 2020-05-27T12:43:37.000Z | 2020-05-27T12:43:37.000Z | import os
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
from bridge_sim import model, sim, temperature, traffic, plot, util
from bridge_sim.model import Config, Point, Bridge
from bridge_sim.plot.util import equal_lims
from bridge_sim.sim.responses import without
from bridge_sim.util import print_i, print_w
from bridge_sim.internal.plot import axis_cmap_r
def plot_year_effects(config: Config, x: float, z: float, num_years: int):
"""Plot all effects over a single year and 100 years at a point."""
install_day = 37
year = 2018
weather = temperature.load("holly-springs-18")
_0, _1, traffic_array = traffic.load_traffic(
config, traffic.normal_traffic(config), 60 * 10
)
(
ll_responses,
ps_responses,
temp_responses,
shrinkage_responses,
creep_responses,
) = np.repeat(None, 5)
start_day, end_day = None, None
# from sklearn.decomposition import FastICA, PCA
# ica = FastICA(n_components=3)
# try_ = ica.fit_transform((ll_responses + temp_responses + creep_responses + shrinkage_responses).T)
# plt.plot(try_)
# plt.show()
plt.landscape()
lw = 2
plt.subplot(1, 2, 1)
set_responses(1)
xax = np.interp(
np.arange(len(traffic_array)), [0, len(traffic_array) - 1], [start_day, end_day]
)
plt.plot(xax, ll_responses[0] * 1e3, c="green", label="traffic", lw=lw)
plt.plot(xax, temp_responses[0] * 1e3, c="red", label="temperature")
plt.plot(xax, shrinkage_responses[0] * 1e3, c="blue", label="shrinkage", lw=lw)
plt.plot(xax, creep_responses[0] * 1e3, c="black", label="creep", lw=lw)
legend()
plt.ylabel("Y translation (mm)")
plt.xlabel("Time (days)")
plt.subplot(1, 2, 2)
end_day = 365 * num_years
set_responses(num_years)
xax = (
np.interp(
np.arange(len(traffic_array)),
[0, len(traffic_array) - 1],
[start_day, end_day],
)
/ 365
)
plt.plot(xax, ll_responses[0] * 1e3, c="green", label="traffic", lw=lw)
plt.plot(xax, temp_responses[0] * 1e3, c="red", label="temperature")
plt.plot(xax, shrinkage_responses[0] * 1e3, c="blue", label="shrinkage", lw=lw)
plt.plot(xax, creep_responses[0] * 1e3, c="black", label="creep", lw=lw)
legend()
plt.ylabel("Y translation (mm)")
plt.xlabel("Time (years)")
equal_lims("y", 1, 2)
plt.suptitle(f"Y translation at X = {x} m, Z = {z} m")
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
plt.savefig(config.get_image_path("classify/ps", f"year-effect-{x}-{z}.png"))
| 38.138593 | 148 | 0.587494 |
95c1db49e8979342f440e2ee5e1a48186d51308c | 936 | py | Python | parsers/download_data.py | bioinf-mcb/polish-microbiome-project | 0fc15b1a5afe4edf63b6be6b945ac4053e3a24f9 | [
"BSD-3-Clause"
] | null | null | null | parsers/download_data.py | bioinf-mcb/polish-microbiome-project | 0fc15b1a5afe4edf63b6be6b945ac4053e3a24f9 | [
"BSD-3-Clause"
] | null | null | null | parsers/download_data.py | bioinf-mcb/polish-microbiome-project | 0fc15b1a5afe4edf63b6be6b945ac4053e3a24f9 | [
"BSD-3-Clause"
] | null | null | null | #%%
import json
import requests
from io import StringIO
import pandas as pd
# %%
with open("../db_pass", "r") as f:
token = json.load(f)['token']
# %%
data = {
'token': token,
'content': 'record',
'format': 'csv',
'type': 'flat',
'csvDelimiter': '',
'rawOrLabel': 'raw',
'rawOrLabelHeaders': 'raw',
'exportCheckboxLabel': 'false',
'exportSurveyFields': 'false',
'exportDataAccessGroups': 'false',
'returnFormat': 'csv',
'fields': 'patient_id,age,bmi,covid_test_date,date_of_test,weight,height,admission_date,final_date,death,sex'
}
r = requests.post('http://192.168.45.244/api/',data=data)
print('HTTP Status: ' + str(r.status_code))
data = StringIO(r.text)
# %%
df = pd.read_csv(data)
df = df[df["height"].apply(lambda x: not pd.isna(x))]
df = df.dropna(axis=1, how='all')
df["bmi"] = df["bmi"].apply(lambda x: round(x, 1))
df.to_csv("metadata.csv", index=False)
print(df)
# %%
| 23.4 | 113 | 0.63141 |
95c256321ed64a1e2f22ab370936dbb097ea26b8 | 2,622 | py | Python | preprocess/sequence_stats.py | ashish-roopan/fsgan | 1582e112d0f59cd32920ac5953baec783e088cad | [
"CC0-1.0"
] | 599 | 2020-04-14T19:28:58.000Z | 2022-03-26T11:29:37.000Z | preprocess/sequence_stats.py | ashish-roopan/fsgan | 1582e112d0f59cd32920ac5953baec783e088cad | [
"CC0-1.0"
] | 157 | 2020-04-14T21:13:43.000Z | 2022-02-07T06:30:16.000Z | preprocess/sequence_stats.py | ashish-roopan/fsgan | 1582e112d0f59cd32920ac5953baec783e088cad | [
"CC0-1.0"
] | 150 | 2020-04-14T20:40:41.000Z | 2022-03-30T10:50:21.000Z | """
Sequence statistics: Count, length, bounding boxes size.
"""
import os
from glob import glob
import pickle
from tqdm import tqdm
if __name__ == "__main__":
# Parse program arguments
import argparse
parser = argparse.ArgumentParser('detections2sequences')
parser.add_argument('input', metavar='DIR',
help='input directory')
parser.add_argument('-o', '--output', default=None, metavar='PATH',
help='output directory')
parser.add_argument('-p', '--postfix', metavar='POSTFIX', default='_dsfd_seq.pkl',
help='the files postfix to search the input directory for')
args = parser.parse_args()
main(args.input, args.output, args.postfix)
| 35.432432 | 114 | 0.622426 |
95c285b58cd596c463e5846360384f8f0b80a4d5 | 352 | py | Python | app/migrations/0004_auto_20200704_0405.py | duorah/GRanDpa-Family-Tree | 613df3fb61a8dd5eba7416ad6f8fda80e350bbe1 | [
"MIT"
] | 1 | 2020-07-13T21:03:17.000Z | 2020-07-13T21:03:17.000Z | app/migrations/0004_auto_20200704_0405.py | duorah/grandpa-family-tree | 613df3fb61a8dd5eba7416ad6f8fda80e350bbe1 | [
"MIT"
] | null | null | null | app/migrations/0004_auto_20200704_0405.py | duorah/grandpa-family-tree | 613df3fb61a8dd5eba7416ad6f8fda80e350bbe1 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.7 on 2020-07-04 04:05
from django.db import migrations
| 19.555556 | 54 | 0.602273 |
95c5e262b4da5f7adb2dec6d61c74e3194680b9a | 7,735 | py | Python | tests/test_dossier.py | openkamer/tk-api-python | 907b98ccc7602ad7e3e74f1e06f9544fbe66aba3 | [
"MIT"
] | 9 | 2017-11-16T12:39:11.000Z | 2021-10-16T19:30:52.000Z | tests/test_dossier.py | openkamer/tk-api-python | 907b98ccc7602ad7e3e74f1e06f9544fbe66aba3 | [
"MIT"
] | 1 | 2017-11-16T14:20:20.000Z | 2017-11-20T18:49:14.000Z | tests/test_dossier.py | openkamer/tk-api-python | 907b98ccc7602ad7e3e74f1e06f9544fbe66aba3 | [
"MIT"
] | 3 | 2018-09-10T18:57:39.000Z | 2020-06-09T14:13:10.000Z | import datetime
from tkapi.util import queries
from tkapi.zaak import Zaak, ZaakSoort
from tkapi.dossier import Dossier, DossierWetsvoorstel
from tkapi.document import Document
from .core import TKApiTestCase
| 39.065657 | 118 | 0.648869 |
95c7b536f4cc90da867d02e9f53e889cad554b21 | 27,649 | py | Python | Manuscript files/modflow_reference/auxfile_hexaplot.py | MaxRamgraber/Simple-AEM-Toolbox | 27751103f5e504dd675ba6225f2aee9f85d7c85d | [
"MIT"
] | 3 | 2021-06-16T12:27:22.000Z | 2022-01-04T11:21:35.000Z | Manuscript files/modflow_reference/auxfile_hexaplot.py | MaxRamgraber/Simple-AEM-Toolbox | 27751103f5e504dd675ba6225f2aee9f85d7c85d | [
"MIT"
] | null | null | null | Manuscript files/modflow_reference/auxfile_hexaplot.py | MaxRamgraber/Simple-AEM-Toolbox | 27751103f5e504dd675ba6225f2aee9f85d7c85d | [
"MIT"
] | 3 | 2021-06-17T11:20:20.000Z | 2022-01-12T09:56:56.000Z | """
This library contains several functions designed to help with the illustration of hexagonal grids
Functions:
plot_hexagaons : plots a specified data vector over a 2-D hexagon grid.
create_alpha_mask : creates an alpha shape (a concave hull), which is required for plotting contours; without it, the contour function extrapolates outside of the model area.
plot_scattered_contour : plots contour lines over an irregular grid, such as a hexagonal one.
plot_hexagons_3d : plots a 2-dimensional hexagon grid with specified z-dimensions
"""
def plot_hexagons (data, hexagon_grid_cores, hexagon_radius, hexagon_orientation = 0, colormap = 'steel', color = None, vmin = None, vmax = None, vincr = None, xlabel = None, ylabel = None, clabel = None, hide_colorbar = False, **kwargs):
"""
Call to plot a specified vector (positions relative to node IDs) in a hexagonal grid
@params:
data - Required : vector of values for hexagonal plot, positions corresponding to cell IDs (counting from zero)
hexagon_grid_cores - Required : tessellated polygons over area of interest
hexagon_radius - Required : radius of hexagons used for tessellation
hexagon_orientation - Optional : orientation of hexagon in clock-wise degrees [0 = flat top]
colormap - Optional : specify a colormap as string
vmin - Optional : externally specified min value for colorbar
vmax - Optional : externally specified max value for colorbar
vincr - Optional : specified value increment for colorbar
xlabel - Optional : string for xlabel
ylabel - Optional : string for ylabel
clabel - Optional : string for colorbar label
**kwargs - Optional : keyword arguments for matplotlib.patches.RegularPolygon
"""
import matplotlib
import numpy as np
import math
#--------------------------------------------------------------------------
# Prepare data for plotting
#--------------------------------------------------------------------------
# If not specified, define range of values
if vmin == None or vmax == None:
vmin = np.min(data)
vmax = np.max(data)
vrange = vmax-vmin
if vincr == None:
vincr = vrange/100
# Snap value range to integers
vmin = int(vmin/vincr)*vincr # minimum value for colorbar
vmax = (int(vmax/vincr)+1)*vincr # maximum value for colorbar
if color is None:
# Retrieve colormap
if colormap == 'steel':
# Create colormap 'steel'
from matplotlib.colors import LinearSegmentedColormap
cmap_steel = [(0.007843137,0.305882353,0.443137255), (0.301960784,0.592156863,0.784313725),(0.623529412,0.776470588,0.882352941)]
cm = LinearSegmentedColormap.from_list('steel', cmap_steel, N=100)
cmaps = cm
else:
cmaps = colormap
# Correct orientation
orientation = math.radians(-hexagon_orientation+30)
# Hexagon radius only goes to normal of sides
edgepoint_distance = hexagon_radius/np.cos(np.deg2rad(30))
# Retrieve colormap information
if color is None:
cmap = matplotlib.cm.get_cmap(cmaps)
#--------------------------------------------------------------------------
# Start plotting
#--------------------------------------------------------------------------
# Create empty figure
ax1 = matplotlib.pyplot.gca()
# Plot hexagons
for hex in range(len(hexagon_grid_cores[:,0])):
# Retrieve color value
if color is None:
rgba = cmap((data[hex]-vmin)/(vrange))
rgba = matplotlib.colors.rgb2hex(rgba)
else:
rgba = color
# Add the patch
ax1.add_patch(
matplotlib.patches.RegularPolygon(
(hexagon_grid_cores[hex,0], hexagon_grid_cores[hex,1]), # x and y
6, # edges
edgepoint_distance,
orientation=orientation,
facecolor = rgba,
**kwargs)
)
# Determine meaningful colorbar steps
if color is None:
colorbar_increment = vincr
colorbar_min = int(vmin/colorbar_increment)*colorbar_increment # minimum value for colorbar
colorbar_max = (int(vmax/colorbar_increment)+1)*colorbar_increment # maximum value for colorbar
colorbar_increment_numbers = int((colorbar_max-colorbar_min)/colorbar_increment+1)
colorbar_steps = []
for num in range(colorbar_increment_numbers):
colorbar_steps = colorbar_steps + [colorbar_min+num*colorbar_increment]
# Recompute the ax.dataLim
ax1.relim()
# Update ax.viewLim using the new dataLim
ax1.autoscale_view()
# Create colorbar
if hide_colorbar == False and color is None:
norm = matplotlib.colors.Normalize(vmin=vmin,vmax=vmax)
sm = matplotlib.pyplot.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
cbar = matplotlib.pyplot.colorbar(sm)
# Label plot
if xlabel != None:
matplotlib.pyplot.xlabel(xlabel)
if ylabel != None:
matplotlib.pyplot.ylabel(ylabel)
if clabel != None and not hide_colorbar and color is None:
cbar.set_label(clabel, rotation=270, labelpad=20)
def create_alpha_mask(points, distance_limit, resolution_x = 1000, resolution_y = 1000, visualization = True):
    """
    Creates interpolation grid, then masks over the alpha shape spanned up by points and defined by distance_limit.
    @params:
        points          - Required  : points spanning up alpha shape, array of form [nx2]
        distance_limit  - Required  : distance threshold for removing Delaunay simplices
        resolution_x    - Optional  : resolution for grid in x, default is 1000
        resolution_y    - Optional  : resolution for grid in y, default is 1000
        visualization   - Optional  : boolean for visualizing result, default is True
    Returns:
        grid_mask : A [resolution_y x resolution_x] array containing 1 for cells inside the alpha shape, 0 outside
    """
    import numpy as np
    from scipy.spatial import Delaunay
    import matplotlib.path as mplPath
    #----------------------------------------------------------------------
    # Create Grid
    #----------------------------------------------------------------------
    # Candidate points cover the bounding box of the input point cloud
    xi = np.linspace(min(points[:, 0]), max(points[:, 0]), resolution_x)
    yi = np.linspace(min(points[:, 1]), max(points[:, 1]), resolution_y)
    X, Y = np.meshgrid(xi, yi)
    # Combine into a [resolution_x*resolution_y x 2] array of grid points
    gridpoints = np.column_stack((X.ravel(), Y.ravel()))
    #----------------------------------------------------------------------
    # Create Alpha Shape
    #----------------------------------------------------------------------
    # Start Delaunay triangulation
    tri = Delaunay(points)
    # NOTE: use tri.simplices here; the 'tri.vertices' alias was deprecated
    # and removed in SciPy >= 1.11, and mutating the Delaunay object is not
    # supported -- filter into a local array instead.
    simplices = tri.simplices
    # Keep only simplices whose three edges are all shorter than distance_limit
    keep = np.ones(len(simplices), dtype=bool)
    for counter, (ia, ib, ic) in enumerate(simplices):
        # ia, ib, ic = indices of corner points of the triangle
        d_ab = np.hypot(points[ia, 0] - points[ib, 0], points[ia, 1] - points[ib, 1])
        d_ac = np.hypot(points[ia, 0] - points[ic, 0], points[ia, 1] - points[ic, 1])
        d_bc = np.hypot(points[ib, 0] - points[ic, 0], points[ib, 1] - points[ic, 1])
        keep[counter] = (d_ab < distance_limit and d_ac < distance_limit and d_bc < distance_limit)
    simplices = simplices[keep]
    # Visualize the retained triangulation, if requested
    if visualization == True:
        import matplotlib.pyplot as plt
        from matplotlib.collections import LineCollection
        edges = set()
        edge_points = []
        def add_edge(i, j):
            """Add a line between the i-th and j-th points, if not in the list already"""
            if (i, j) in edges or (j, i) in edges:
                # already added
                return
            edges.add((i, j))
            edge_points.append(points[[i, j]])
        # Mark all remaining simplices
        for ia, ib, ic in simplices:
            add_edge(ia, ib)
            add_edge(ib, ic)
            add_edge(ic, ia)
        # Draw them
        lines = LineCollection(edge_points)
        plt.figure()
        plt.gca().add_collection(lines)
        plt.plot(points[:, 0], points[:, 1], 'o')
    #----------------------------------------------------------------------
    # Mask over Alpha Shape
    #----------------------------------------------------------------------
    # 1 = grid point lies inside the alpha shape.  NOTE: plain `int` here --
    # the `np.int` alias was removed in NumPy >= 1.24.
    flag_gridpoints = np.zeros(len(gridpoints), dtype=int)
    n_simplices = len(simplices)
    for sim in range(n_simplices):
        # Print progress bar (mv guarded against division by zero when only
        # a single simplex survived the filtering)
        cv = sim
        mv = max(n_simplices - 1, 1)
        print('\r%s |%s| %s%% %s' % ('Masking: ', '\033[33m'+'' * int(50 * cv // mv) + '-' * (50 - int(50 * cv // mv))+'\033[0m', ("{0:." + str(1) + "f}").format(100 * (cv / float(mv))), ' Complete'), end = '\r')
        # Create simplex path (closed triangle)
        bbPath = mplPath.Path(np.array([points[simplices[sim, 0], :],
                                        points[simplices[sim, 1], :],
                                        points[simplices[sim, 2], :],
                                        points[simplices[sim, 0], :]]))
        # Flag all not-yet-assigned grid points inside this simplex with one
        # vectorized containment query instead of a Python loop per point
        unassigned = np.flatnonzero(flag_gridpoints == 0)
        if len(unassigned) > 0:
            inside = bbPath.contains_points(gridpoints[unassigned])
            flag_gridpoints[unassigned[inside]] = 1
    # Plot classified grid points, if required
    if visualization == True:
        plt.scatter(gridpoints[flag_gridpoints == 1, 0], gridpoints[flag_gridpoints == 1, 1], color = 'g')
        plt.scatter(gridpoints[flag_gridpoints == 0, 0], gridpoints[flag_gridpoints == 0, 1], color = 'r')
    # Reshape flag vector into a 2D [resolution_y x resolution_x] mask
    # (no need for the original `global grid_mask` -- the mask is returned)
    grid_mask = np.reshape(flag_gridpoints, (resolution_y, resolution_x))
    # Return result
    return grid_mask
def plot_scattered_contour(x, y, data, resolution_x=1000, resolution_y=1000,
                           grid_mask = None, vmin = None, vmax = None, vincr = None, suppress_clabel = False,
                           **kwargs):
    """
    Call to plot contour of scattered data
    @params:
        x                - Required  : x-coordinate
        y                - Required  : y-coordinate
        data             - Required  : data for the contours
        resolution_x     - Optional  : resolution of auxiliary grid in x
        resolution_y     - Optional  : resolution of auxiliary grid in y
        grid_mask        - Optional  : mask array of dimension [resolution_y, resolution_x];
                                       None (or a length-1 dummy) disables masking
        vmin             - Optional  : min value for contour
        vmax             - Optional  : max value for contour
        vincr            - Optional  : increment for contour
        suppress_clabel  - Optional  : flag whether contours should be labeled, False by default
        **kwargs         - Optional  : keyword arguments for matplotlib.pyplot.contour
    """
    import numpy as np
    # Importing the submodules explicitly: a bare `import matplotlib` /
    # `import scipy` does not make .pyplot / .interpolate accessible.
    import matplotlib.pyplot
    import scipy.interpolate
    #--------------------------------------------------------------------------
    # Integrity checks
    #--------------------------------------------------------------------------
    # The mask is only active when provided and not the length-1 dummy
    # sentinel; the original called len(grid_mask) unconditionally and
    # raised TypeError for the default grid_mask=None.
    use_mask = grid_mask is not None and len(grid_mask) != 1
    if use_mask:
        # Check if grid_mask matches meshgrid dimensions
        if len(grid_mask[:, 0]) != resolution_y or len(grid_mask[0, :]) != resolution_x:
            raise Exception('Grid mask dimensions must match resolution in x and y!')
    # Extract vmin and vmax from the data, if not specified.  This must
    # happen BEFORE the dried-cell check below: the original compared
    # `vmin < -1000` first, which raises TypeError when vmin is None.
    if vmin == None or vmax == None:
        vmin = np.min(data)
        vmax = np.max(data)
    # Set vincr, if not specified
    if vincr == None:
        vincr = (vmax - vmin) / 10
    # Guard against a zero increment when the data is constant
    if vincr == 0:
        vincr = 1.0
    # Check if one of the cells has dried; this algorithm can't handle that yet
    if vmin < -1000:
        print('\033[31m'+'WARNING:'+'\033[0m'+' Dried cells detected. Contour not printed.')
        return
    # Snap value range to integers
    vmin = int(vmin/vincr)*vincr        # minimum value for colorbar
    vmax = (int(vmax/vincr)+1)*vincr    # maximum value for colorbar
    #--------------------------------------------------------------------------
    # Prepare data for plotting
    #--------------------------------------------------------------------------
    # Convert source material into required format: [n x 2] sample locations
    source = np.transpose(np.asarray([x, y]))
    # Create and convert target material (regular interpolation grid)
    xi = np.transpose(np.linspace(min(x), max(x), resolution_x))
    yi = np.transpose(np.linspace(min(y), max(y), resolution_y))
    X, Y = np.meshgrid(xi, yi)
    target = np.transpose(np.asarray([X, Y]))
    # Interpolate the scattered samples onto the grid and transpose so that
    # Z has shape [resolution_y x resolution_x] as expected by contour()
    Z = scipy.interpolate.griddata(source, data, target)
    Z = np.transpose(Z)
    # Mask values, if an active grid_mask was specified
    if use_mask:
        Z[grid_mask == 0] = float('NaN')
    # Contour levels
    levels = np.arange(vmin, vmax, vincr)
    #--------------------------------------------------------------------------
    # Plot
    #--------------------------------------------------------------------------
    CS = matplotlib.pyplot.contour(xi, yi, Z, levels=levels, **kwargs)
    if not suppress_clabel:
        matplotlib.pyplot.clabel(CS, inline=1, inline_spacing = 0)
    return
def plot_hexagons_3d(grid, zdim, hexagon_radius, hexagon_orientation = 0, xlabel = 'x', ylabel = 'y', zlabel = 'z', clabel = 'depth', depth_colormap = 'steel', alpha = 1, **kwargs):
    """
    Call to tessellate a given polygon with hexagons
    @params:
    grid - Required : x-y-coordinates of center of hexagons, array of form [nx2]
    zdim - Required : bottom and top elevation of hexagon cells, array of form [nx2]
    hexagon_radius - Required : radius of hexagons used for tessellation
    hexagon_orientation - Required : orientation of hexagon in clock-wise degrees [0 = flat top]
    xlabel - Optional : label for x-axis
    ylabel - Optional : label for y-axis
    zlabel - Optional : label for z-axis
    clabel - Optional : label for colorbar
    depth_colormap - Optional : string of colormap, if requested
    alpha - Optional : alpha value for transparency of polygons, default is 1
    **kwargs - Optional : keyword arguments for Poly3DCollection
    """
    # PLOT 3D
    import numpy as np
    import matplotlib.pyplot as plt
    # NOTE(review): Axes3D and Line3DCollection are not referenced directly
    # below; the Axes3D import presumably registers the '3d' projection on
    # older matplotlib versions -- confirm before removing.
    from mpl_toolkits.mplot3d import Axes3D
    from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
    import math
    if depth_colormap == 'steel':
        # Create colormap 'steel' (three blue-ish RGB anchor colors)
        from matplotlib.colors import LinearSegmentedColormap
        cmap_steel = [(0.007843137,0.305882353,0.443137255), (0.301960784,0.592156863,0.784313725),(0.623529412,0.776470588,0.882352941)]
        cm = LinearSegmentedColormap.from_list('steel', cmap_steel, N=100)
        cmaps = cm
    else:
        cmaps = depth_colormap
    # Initialize figure
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # Hexagon radius only goes to normal of sides; convert to the
    # center-to-corner distance (radius / cos(30 deg))
    edgepoint_distance = hexagon_radius/np.cos(np.deg2rad(30))
    # Determine depth range (cell thickness ztop - zbot), if colorbar is requested
    # NOTE(review): if all cells have identical thickness, c_range is 0 and
    # the division below raises ZeroDivisionError -- confirm inputs.
    vmin = np.min(zdim[:,1]-zdim[:,0])
    vmax = np.max(zdim[:,1]-zdim[:,0])
    c_range = vmax-vmin
    # Plot hexagons
    # NOTE(review): the loop variable `hex` shadows the Python builtin hex()
    for hex in range(len(grid[:,0])):
        # Reset coordinate variables
        x = []
        y = []
        # Read top and bottom elevation
        zbot = zdim[hex,0]
        ztop = zdim[hex,1]
        # Pre-allocate memory for coordinate matrix (6 top + 6 bottom corners)
        Z = np.zeros((12,3))
        # Determine cell color, if requested
        # NOTE(review): when depth_colormap == 'None', rgba is never assigned,
        # yet the `else` branches below still call face.set_facecolor(rgba),
        # which would raise NameError -- confirm intended behavior.
        if depth_colormap != 'None':
            import matplotlib
            # Retrieve colormap information; color encodes cell thickness
            cmap = matplotlib.cm.get_cmap(cmaps)
            rgba = cmap((ztop-zbot-vmin)/c_range) #cmap((zbot-vmin)/(vmax-vmin))
            rgba = list(rgba)
            rgba[3] = alpha
            # rgba = matplotlib.colors.rgb2hex(rgba)
        # Plot grid: walk the 6 corner angles (60 degree steps)
        counter = 0
        for angle in range(0-hexagon_orientation, 420-hexagon_orientation, 60):
            # Coordinates of edge point
            x = np.append(x,grid[hex,0]+math.cos(math.radians(angle)) * edgepoint_distance)
            y = np.append(y,grid[hex,1]+math.sin(math.radians(angle)) * edgepoint_distance)
            # Write into coordinate matrix (rows 0-5 top ring, rows 6-11 bottom ring)
            if counter < 6:
                Z[counter,0] = grid[hex,0]+math.cos(math.radians(angle)) * edgepoint_distance
                Z[counter,1] = grid[hex,1]+math.sin(math.radians(angle)) * edgepoint_distance
                Z[counter,2] = ztop
                Z[6+counter,0] = grid[hex,0]+math.cos(math.radians(angle)) * edgepoint_distance
                Z[6+counter,1] = grid[hex,1]+math.sin(math.radians(angle)) * edgepoint_distance
                Z[6+counter,2] = zbot
            counter += 1
        # Vertices of hexagon sides (each quad joins a top edge to a bottom edge)
        verts = [[Z[0],Z[1],Z[7],Z[6]],
                 [Z[1],Z[2],Z[8],Z[7]],
                 [Z[2],Z[3],Z[9],Z[8]],
                 [Z[3],Z[4],Z[10],Z[9]],
                 [Z[4],Z[5],Z[11],Z[10]],
                 [Z[5],Z[0],Z[6],Z[11]]]
        if depth_colormap != 'None':
            # Plot hexagon side
            face = Poly3DCollection(verts,
                                    **kwargs)
            face.set_facecolor(rgba)
            ax.add_collection3d(face)
        else:
            # NOTE(review): branch body is identical to the one above and
            # still references rgba (see NameError note above)
            face = Poly3DCollection(verts,
                                    **kwargs)
            face.set_facecolor(rgba)
            ax.add_collection3d(face)
        # Vertices of hexagon top
        verts = [[Z[0],Z[1],Z[2],Z[3],Z[4],Z[5]]]
        # Plot hexagon top
        if depth_colormap != 'None':
            # Plot hexagon side
            face = Poly3DCollection(verts,
                                    **kwargs)
            face.set_facecolor(rgba)
            ax.add_collection3d(face)
        else:
            face = Poly3DCollection(verts,
                                    **kwargs)
            face.set_facecolor(rgba)
            ax.add_collection3d(face)
        # Vertices of hexagon bot
        verts = [[Z[6],Z[7],Z[8],Z[9],Z[10],Z[11]]]
        # Plot hexagon bot
        if depth_colormap != 'None':
            # Plot hexagon side
            face = Poly3DCollection(verts,
                                    **kwargs)
            face.set_facecolor(rgba)
            ax.add_collection3d(face)
        else:
            face = Poly3DCollection(verts,
                                    **kwargs)
            face.set_facecolor(rgba)
            ax.add_collection3d(face)
    # Determine meaningful colorbar steps, if colorbar was requested
    # NOTE(review): the `matplotlib` name below relies on the import that only
    # happens inside the loop above (and only when depth_colormap != 'None').
    if depth_colormap != 'None':
        colorbar_increment = 0.1
        colorbar_min = int(vmin/colorbar_increment)*colorbar_increment # minimum value for colorbar
        colorbar_max = (int(vmax/colorbar_increment)+1)*colorbar_increment # maximum value for colorbar
        colorbar_increment_numbers = int((colorbar_max-colorbar_min)/colorbar_increment+1)
        colorbar_steps = []
        for num in range(colorbar_increment_numbers):
            colorbar_steps = colorbar_steps + [colorbar_min+num*colorbar_increment]
        # Create colorbar
        norm = matplotlib.colors.Normalize(vmin=vmin,vmax=vmax)
        sm = matplotlib.pyplot.cm.ScalarMappable(cmap=cmap, norm=norm)
        sm.set_array([])
        cbar = matplotlib.pyplot.colorbar(sm)
        cbar.set_label(clabel, rotation=270, labelpad=20)
    # Label axes
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    ax.set_zlabel(zlabel)
    # Equal aspect scaling doesn't work yet, manual workaround
    # Designate array of edges
    xyzlims = np.zeros((3,2))
    xyzlims[0,0] = np.min(grid[:,0])
    xyzlims[0,1] = np.max(grid[:,0])
    xyzlims[1,0] = np.min(grid[:,1])
    xyzlims[1,1] = np.max(grid[:,1])
    xyzlims[2,0] = np.min(zdim)
    xyzlims[2,1] = np.max(zdim)
    # Determine maximal range
    maxrange = np.max([xyzlims[0,1]-xyzlims[0,0],xyzlims[1,1]-xyzlims[1,0],xyzlims[2,1]-xyzlims[2,0]])
    # Determine difference to maximal range
    xdif = maxrange - (xyzlims[0,1]-xyzlims[0,0])
    ydif = maxrange - (xyzlims[1,1]-xyzlims[1,0])
    zdif = maxrange - (xyzlims[2,1]-xyzlims[2,0])
    # Set axis limits -> equal aspect
    ax.set_xlim3d(xyzlims[0,0]-xdif/2,xyzlims[0,1]+xdif/2)
    ax.set_ylim3d(xyzlims[1,0]-ydif/2,xyzlims[1,1]+ydif/2)
    ax.set_zlim3d(xyzlims[2,0]-zdif/2,xyzlims[2,1]+zdif/2)
    # Show result
    plt.show()
def vulture_plot(incr = 1, elev = 40., fps = 50):
    """
    Creates a short animated .gif providing a flight around the 3-D model, requiring an open, compatible 3D figure
    @params:
        incr  - Optional  : degree increment for rotation frames; defines temporal resolution of .gif (default = 1)
        elev  - Optional  : elevation angle for camera (default = 40)
        fps   - Optional  : frames per second for resulting .gif; defines speed of .gif display (default 50)
    """
    # Import libraries
    import imageio
    import os
    import matplotlib.pyplot as plt
    # Retrieve the current (already opened) 3-D axis
    ax = plt.gca()
    # Rotate, save and compile vulture plot
    images = []
    # Last azimuth actually visited; used so the progress bar reaches 100%
    # for any increment (the original hard-coded mv = 359, which only fits
    # incr = 1, and also ignored the `elev` parameter entirely)
    mv = max(((360 - 1) // incr) * incr, 1)
    for cv in range(0, 360, incr):
        # Rotate image, honoring the caller-supplied elevation angle
        ax.view_init(elev=elev, azim=cv)
        plt.show()
        # Save it as temporary file
        plt.savefig("dummy.png")
        # Append it to saved movie
        images.append(imageio.imread("dummy.png"))
        # Remove temporary file
        os.remove("dummy.png")
        # Print progress bar
        print('\r%s |%s| %s%% %s' % ('Printing: ', '\033[33m'+'' * int(50 * cv // mv) + '-' * (50 - int(50 * cv // mv))+'\033[0m', ("{0:." + str(1) + "f}").format(100 * (cv / float(mv))), ' Complete'), end = '\r')
    # Compile .gif
    imageio.mimsave('output_quick.gif', images, fps=fps)
def visualize_genealogy(genealogy, weights = None, rejuvenation = None, colormap = 'jet'):
    """
    Creates an inline figure visualizing the particle genealogy over one resampling step.
    @params:
        genealogy    - Required  : vector describing genealogy of resampled particles, referring to indices
        weights      - Optional  : weight of particles prior to resampling; defaults to uniform weights
        rejuvenation - Optional  : vector of booleans describing whether particles were rejuvenated
                                   (currently unused; kept for interface compatibility)
        colormap     - Optional  : colormap string for visualization
    """
    import numpy as np
    from IPython import get_ipython
    import matplotlib
    import matplotlib.pyplot as plt
    # Determine number of particles
    n_particles = len(genealogy)
    # Default to uniform weights.  The original condition was
    # `if weights is None == True`, which Python chains to
    # `(weights is None) and (None == True)` -- always False -- so the
    # default was never applied and weights=None crashed at weights[n].
    if weights is None:
        weights = np.ones(n_particles)
    # Switch to inline printing
    get_ipython().run_line_magic('matplotlib', 'inline')
    # Create dummy features for the legend
    full_line = plt.Line2D([], [], color='black', label='inherited')
    dashed_line = plt.Line2D([], [], linestyle = '--', color='black', label='rejuvenated')
    particle = plt.Line2D([], [], linestyle = 'None', marker ='.', color='black', label='particle')
    # Plot legend
    plt.legend(handles=[dashed_line, full_line, particle], bbox_to_anchor=(0., -0.05, 1., .102), loc=3,
               ncol=3, mode="expand", borderaxespad=0.)
    # Determine colormap for particles
    cmap = matplotlib.cm.get_cmap(colormap)
    # Extract one hex color per particle; the denominator is guarded so a
    # single-particle call does not divide by zero
    denom = max(n_particles - 1, 1)
    rgba = [None] * n_particles
    for n in range(n_particles):
        rgba[n] = matplotlib.colors.rgb2hex(cmap(n / denom))
    # Create plot
    for n in range(n_particles):
        # Dashed line from the parent index (row 1) to the child index (row 2)
        plt.plot([genealogy[n], n], [1, 2], '--', c=rgba[genealogy[n]])
        # Scatter previous and current particle index, sizing the parent
        # marker by its normalized weight
        if weights[n] == 0:  # Particle weight is zero - print as greyscale
            plt.scatter(n, 1, s = weights[n]/np.max(weights)*55+5, c='xkcd:medium grey')
        else:
            plt.scatter(n, 1, s = weights[n]/np.max(weights)*55+5, c=rgba[n])
        plt.scatter(n, 2, s=20, c=rgba[n])
    # Deactivate axes
    plt.axis('off')
    # Show, and revert to automatic printing
    plt.show()
    get_ipython().run_line_magic('matplotlib', 'qt5')
95c8f1ad4e81caf4b83710c865b7efb620f7466e | 58,889 | py | Python | tests/python/self_concepts_test.py | JulianAL-01/self-concepts | d4a5ebfdadc472535777349602c775a67aaa3823 | [
"MIT"
] | 14 | 2020-07-21T21:09:25.000Z | 2022-01-30T11:00:35.000Z | tests/python/self_concepts_test.py | JulianAL-01/self-concepts | d4a5ebfdadc472535777349602c775a67aaa3823 | [
"MIT"
] | 2 | 2020-07-28T14:46:11.000Z | 2020-07-28T14:52:23.000Z | tests/python/self_concepts_test.py | JulianAL-01/self-concepts | d4a5ebfdadc472535777349602c775a67aaa3823 | [
"MIT"
] | 5 | 2020-07-28T13:50:20.000Z | 2021-07-12T22:56:11.000Z | '''
self_concepts_test
This module serves as the unit test for self_concepts
'''
import argparse, sys
sys.path.append('../../source/python')
from self_concepts import Concept
from self_concepts import Property
from self_concepts import Relationship
from self_concepts import Ontology
from self_concepts import Blackboard
from self_concepts import Agent
from self_concepts import SelfException
# Helper functions in support of concise and verbose reporting
def parseArguments():
    '''Collect and return the test's arguments.'''
    arg_parser = argparse.ArgumentParser(description='Test ')
    arg_parser.add_argument(
        '-c', '--concise', action='store_true',
        help='test self_concept with concise results')
    return arg_parser.parse_args()
def reportHeader(message):
    '''Print a report header.'''
    # Concise mode emits a single '#' marker; verbose mode prints the text.
    if arguments.concise == True:
        print('#', end='')
    else:
        print(message)
def reportSection(message):
    '''Print a section header.'''
    # Concise mode emits a single '*' marker; verbose mode prints indented text.
    if arguments.concise == True:
        print('*', end='')
    else:
        print('    ' + message)
def reportDetail(message):
    '''Print a report detail.'''
    # Concise mode emits a single '.' marker; verbose mode prints indented text.
    if arguments.concise == True:
        print('.', end='')
    else:
        print('        ' + message)
def reportDetailFailure(message):
    '''Print a report failure.'''
    # Print the failure marker/message, then abort the whole test run.
    if arguments.concise == True:
        print('!')
    else:
        print('!!!!!!! ' + message)
    exit()
def reportConceptName(concept: 'Concept'):
    '''Print the name of the concept.'''
    cls_name = concept.__class__.__name__
    reportDetail('    Function applied to ' + cls_name + ' (' + concept.name + ')')
# Various functions, classes, and instances used for testing
# NOTE(review): AnotherConcept, AnotherProperty, AnotherRelationship and
# AnotherAgent (subclasses used below), as well as the test* functions called
# at the bottom, are defined elsewhere in the full test module; their
# definitions are not visible in this excerpt.
CONCEPT_NAME_1 = 'A well-formed concept'
CONCEPT_NAME_2 = 'A well-formed concept'
CONCEPT_NAME_3 = 'Another well-formed concept'
CONCEPT_NAME_4 = 'A well-formed concept'
c1 = Concept(CONCEPT_NAME_1)
c2 = Concept(CONCEPT_NAME_2)
c3 = AnotherConcept(CONCEPT_NAME_3)
c4 = Concept(CONCEPT_NAME_4)
PROPERTY_NAME_1 = 'A well-formed property'
PROPERTY_NAME_2 = 'A well-formed property'
PROPERTY_NAME_3 = 'Another well-formed property'
PROPERTY_NAME_4 = 'A well-formed property'
# Property values deliberately cover an int, strings and a Concept instance
PROPERTY_VALUE_1 = 42
PROPERTY_VALUE_2 = 'A value'
PROPERTY_VALUE_3 = c1
PROPERTY_VALUE_4 = 'A value'
p1 = Property(PROPERTY_NAME_1, PROPERTY_VALUE_1)
p2 = Property(PROPERTY_NAME_2, PROPERTY_VALUE_2)
p3 = AnotherProperty(PROPERTY_NAME_3, PROPERTY_VALUE_3)
p4 = Property(PROPERTY_NAME_4, PROPERTY_VALUE_4)
RELATIONSHIP_NAME_1 = 'A well-formed relationship'
RELATIONSHIP_NAME_2 = 'A well-formed relationship'
RELATIONSHIP_NAME_3 = 'Another well-formed relationship'
RELATIONSHIP_NAME_4 = 'A well-formed relationship'
r1 = Relationship(RELATIONSHIP_NAME_1, c1, c2)
r2 = Relationship(RELATIONSHIP_NAME_2, c2, c3)
r3 = AnotherRelationship(RELATIONSHIP_NAME_3, c3, c1)
r4 = Relationship(RELATIONSHIP_NAME_4, c1, c4)
ONTOLOGY_NAME_1 = 'A well-formed ontology'
o1 = Ontology(ONTOLOGY_NAME_1)
BLACKBOARD_NAME_1 = 'A well-formed blackboard'
b1 = Blackboard(BLACKBOARD_NAME_1)
AGENT_NAME_1 = 'A well-formed agent'
AGENT_NAME_2 = 'Another well-formed agent'
AGENT_NAME_3 = 'Yet another well-formed agent'
a1 = AnotherAgent(AGENT_NAME_1)
a2 = AnotherAgent(AGENT_NAME_2)
a3 = AnotherAgent(AGENT_NAME_3)
# Concept unit test
# Property unit test
# Relationship unit test
# Ontology unit test
# Blackboard unit test
# Agent unit test
# Test all of Self's foundational classes
# Module-level: the report* helpers above read this global Namespace
arguments = parseArguments()
testConcept()
testProperty()
testRelationship()
testOntology()
testBlackboard()
testAgent()
# Clean up the output stream if reporting concisely
if arguments.concise == True:
    print()
| 40.252221 | 120 | 0.69558 |
95c9bf8a576fcba5f592caf1b205652fbf6c6df7 | 1,042 | py | Python | 100-200q/123.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 990 | 2018-06-05T11:49:22.000Z | 2022-03-31T08:59:17.000Z | 100-200q/123.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 1 | 2021-11-01T01:29:38.000Z | 2021-11-01T01:29:38.000Z | 100-200q/123.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 482 | 2018-06-12T22:16:53.000Z | 2022-03-29T00:23:29.000Z | '''
Say you have an array for which the ith element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most two transactions.
Note: You may not engage in multiple transactions at the same time (i.e., you must sell the stock before you buy again).
Example 1:
Input: [3,3,5,0,0,3,1,4]
Output: 6
Explanation: Buy on day 4 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
Then buy on day 7 (price = 1) and sell on day 8 (price = 4), profit = 4-1 = 3.
'''
| 30.647059 | 121 | 0.579655 |
95ca4ff47bbf69d356929cfddbfe83070e5ea793 | 2,077 | py | Python | lambdas/verify_admin.py | charvi-a/320-S20-Track1 | ac97504fc1fdedb1c311773b015570eeea8a8663 | [
"BSD-3-Clause"
] | 9 | 2019-12-30T16:32:22.000Z | 2020-03-03T20:14:47.000Z | lambdas/verify_admin.py | charvi-a/320-S20-Track1 | ac97504fc1fdedb1c311773b015570eeea8a8663 | [
"BSD-3-Clause"
] | 283 | 2020-02-03T15:16:03.000Z | 2020-05-05T03:18:59.000Z | lambdas/verify_admin.py | charvi-a/320-S20-Track1 | ac97504fc1fdedb1c311773b015570eeea8a8663 | [
"BSD-3-Clause"
] | 3 | 2020-04-16T15:23:29.000Z | 2020-05-12T00:38:41.000Z | import json
from package.query_db import query
from package.dictionary_to_list import dictionary_to_list
from package.lambda_exception import LambdaException
from boto3 import client as boto3_client
| 42.387755 | 148 | 0.639384 |
95cadfb3b8d6c3a18abd5334655fd77acc7c9759 | 4,821 | py | Python | run.py | Galaxy-SynBioCAD/rp2paths | f87ea0f64556be44af1ae717cd4246159253d029 | [
"MIT"
] | null | null | null | run.py | Galaxy-SynBioCAD/rp2paths | f87ea0f64556be44af1ae717cd4246159253d029 | [
"MIT"
] | null | null | null | run.py | Galaxy-SynBioCAD/rp2paths | f87ea0f64556be44af1ae717cd4246159253d029 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Created on September 21 2019
@author: Melchior du Lac
@description: Wrap rp2paths into a docker
"""
import argparse
import tempfile
import os
import logging
import shutil
import docker
import glob
def main(rp_pathways, rp2paths_pathways, rp2paths_compounds, timeout=30, max_steps=0, max_paths=150, unfold_compounds=False):
    """Call the docker to run rp2paths
    :param rp_pathways: The path to the results RetroPath2.0 scope file
    :param rp2paths_pathways: The path to the results rp2paths out_paths file
    :param rp2paths_compounds: The path to the results rp2paths compounds file
    :param timeout: The timeout of the function in minutes (Default: 30)
    :param max_steps: The maximal number of steps WARNING: not used (Default: 0, ie. infinite)
    :param max_paths: The maximal number of pathways to return WARNING: not used (Default: 150)
    :param unfold_compounds: not sure WARNING: not used (Default: False)
    :type rp_pathways: str
    :type rp2paths_pathways: str
    :type rp2paths_compounds: str
    :type timeout: int
    :type max_steps: int
    :type max_paths: int
    :type unfold_compounds: bool
    :rtype: None
    :return: None
    """
    docker_client = docker.from_env()
    image_str = 'brsynth/rp2paths-standalone'
    # Make sure the image is available locally, pulling it once if missing;
    # aborts the process if the pull also fails.
    try:
        image = docker_client.images.get(image_str)
    except docker.errors.ImageNotFound:
        logging.warning('Could not find the image, trying to pull it')
        try:
            docker_client.images.pull(image_str)
            image = docker_client.images.get(image_str)
        except docker.errors.ImageNotFound:
            logging.error('Cannot pull image: '+str(image_str))
            exit(1)
    # NOTE(review): `image` is assigned but never used afterwards; the get()
    # call only serves as an existence check.
    with tempfile.TemporaryDirectory() as tmpOutputFolder:
        if os.path.exists(rp_pathways):
            # Stage the input under a fixed name inside the shared temp folder
            shutil.copy(rp_pathways, tmpOutputFolder+'/rp_pathways.csv')
            # Command executed inside the container; paths refer to the
            # /home/tmp_output mount defined below
            command = ['python',
                       '/home/tool_rp2paths.py',
                       '-rp_pathways',
                       '/home/tmp_output/rp_pathways.csv',
                       '-rp2paths_compounds',
                       '/home/tmp_output/rp2paths_compounds.csv',
                       '-rp2paths_pathways',
                       '/home/tmp_output/rp2paths_pathways.csv',
                       '-timeout',
                       str(timeout),
                       '-max_steps',
                       str(max_steps),
                       '-max_paths',
                       str(max_paths),
                       '-unfold_compounds',
                       str(unfold_compounds)]
            container = docker_client.containers.run(image_str,
                                                     command,
                                                     detach=True,
                                                     stderr=True,
                                                     volumes={tmpOutputFolder+'/': {'bind': '/home/tmp_output', 'mode': 'rw'}})
            # Block until the container finishes, then surface its stderr
            container.wait()
            err = container.logs(stdout=False, stderr=True)
            err_str = err.decode('utf-8')
            # Scan the container's stderr for the tool's own ERROR/WARNING markers
            if 'ERROR' in err_str:
                print(err_str)
            elif 'WARNING' in err_str:
                print(err_str)
            # Copy the two result files back to the caller-specified paths
            if not os.path.exists(tmpOutputFolder+'/rp2paths_compounds.csv') or not os.path.exists(tmpOutputFolder+'/rp2paths_pathways.csv'):
                print('ERROR: Cannot find the output file: '+str(tmpOutputFolder+'/rp2paths_compounds.csv'))
                print('ERROR: Cannot find the output file: '+str(tmpOutputFolder+'/rp2paths_pathways.csv'))
            else:
                shutil.copy(tmpOutputFolder+'/rp2paths_pathways.csv', rp2paths_pathways)
                shutil.copy(tmpOutputFolder+'/rp2paths_compounds.csv', rp2paths_compounds)
            container.remove()
        else:
            logging.error('Cannot find one or more of the input files: '+str(rp_pathways))
            exit(1)
if __name__ == "__main__":
    # CLI wrapper around main(); argument names mirror main()'s parameters
    parser = argparse.ArgumentParser('Enumerate the individual pathways from the results of Retropath2')
    parser.add_argument('-rp_pathways', type=str)
    parser.add_argument('-rp2paths_pathways', type=str)
    parser.add_argument('-rp2paths_compounds', type=str)
    parser.add_argument('-max_steps', type=int, default=0)
    parser.add_argument('-timeout', type=int, default=30)
    parser.add_argument('-max_paths', type=int, default=150)
    # NOTE(review): parsed as a string ('False'), not a bool; main() only
    # passes it through str(), so this works but is type-inconsistent with
    # main()'s bool default -- confirm intent.
    parser.add_argument('-unfold_compounds', type=str, default='False')
    params = parser.parse_args()
    # Reject negative timeouts before touching docker
    if params.timeout<0:
        logging.error('Timeout cannot be <0 :'+str(params.timeout))
        exit(1)
    main(params.rp_pathways, params.rp2paths_pathways, params.rp2paths_compounds, params.timeout, params.max_steps, params.max_paths, params.unfold_compounds)
| 43.827273 | 158 | 0.611077 |
95cae2c1de14d040a592e9ed57f23f978ae86e71 | 150 | py | Python | test_cases/conftest.py | majdukovic/pybooker | b9a373d556be0481c93a528f731407ca7a47b11f | [
"MIT"
] | null | null | null | test_cases/conftest.py | majdukovic/pybooker | b9a373d556be0481c93a528f731407ca7a47b11f | [
"MIT"
] | null | null | null | test_cases/conftest.py | majdukovic/pybooker | b9a373d556be0481c93a528f731407ca7a47b11f | [
"MIT"
] | null | null | null | import pytest
from framework.services.booker_client import BookerClient
# Single module-level BookerClient instance (imported above from the
# project's framework package), shared by anything importing this module.
booker_client = BookerClient()
| 15 | 57 | 0.786667 |
95cb8a34cde724ada03c12bdaeb21669317ed997 | 402 | py | Python | verilator/scripts/concat_up5k.py | micro-FPGA/engine-V | 00a8f924e10fc69874d9c179f788bf037fe9c407 | [
"Apache-2.0"
] | 44 | 2018-11-19T16:49:10.000Z | 2021-12-05T10:16:24.000Z | verilator/scripts/concat_up5k.py | micro-FPGA/engine-V | 00a8f924e10fc69874d9c179f788bf037fe9c407 | [
"Apache-2.0"
] | null | null | null | verilator/scripts/concat_up5k.py | micro-FPGA/engine-V | 00a8f924e10fc69874d9c179f788bf037fe9c407 | [
"Apache-2.0"
] | 5 | 2018-12-05T23:43:21.000Z | 2020-09-03T04:36:34.000Z |
# Build the SPI flash image:
#   offset 0x00000 : FPGA bitstream (128KB is reserved for bitstream)
#   offset 0x20000 : RISC-V firmware (up to 32768 bytes)
#   offset 0x27fff : a single zero byte, forcing the image length to 0x28000
# The original used `f.close` without parentheses (a no-op attribute access),
# so no file was ever explicitly closed, and riscv.bin was never closed at
# all; context managers now guarantee closing.  Inputs are read before the
# output is created, so a missing input no longer truncates spiflash.bin.
with open('../bitstream/mf8a18_rv32i.bin', 'rb') as bitFile:
    bitData = bitFile.read(0x20000)
with open('riscv.bin', 'rb') as riscvFile:
    riscvData = riscvFile.read(32768)
with open('spiflash.bin', 'wb') as spiFile:
    spiFile.write(bitData)
    spiFile.seek(0x20000)
    spiFile.write(riscvData)
    spiFile.seek(0x27fff)
    spiFile.write(bytearray([0]))
| 17.478261 | 52 | 0.748756 |
95cda288d497faae566e114db4bdc1e1b83b2b52 | 753 | py | Python | pyvista_gui/options.py | akaszynski/pyvista-gui | 4ed7e3a52026dfeab4e82a300b92a92f43060dda | [
"MIT"
] | 6 | 2019-11-20T20:08:42.000Z | 2022-02-24T12:24:20.000Z | pyvista_gui/options.py | akaszynski/pyvista-gui | 4ed7e3a52026dfeab4e82a300b92a92f43060dda | [
"MIT"
] | 6 | 2020-01-27T16:15:11.000Z | 2021-04-12T11:42:11.000Z | pyvista_gui/options.py | akaszynski/pyvista-gui | 4ed7e3a52026dfeab4e82a300b92a92f43060dda | [
"MIT"
] | null | null | null | """Options for saving user prefences, etc.
"""
import json
import os
import pyvista
# The options
# NOTE(review): RcParams is defined elsewhere in this package (its definition
# is not visible in this excerpt); from usage it persists preferences via
# load()/save() -- confirm.
rcParams = RcParams(
    dark_mode=False,
)
# Load user prefences from last session if none exist, save defaults
# NOTE(review): the bare `except:` swallows every exception type (including
# KeyboardInterrupt); consider `except Exception:` -- confirm intent.
try:
    rcParams.load()
except:
    rcParams.save()
| 19.815789 | 68 | 0.629482 |
95cdaf4dfa1b6e4f1d482661c80dff3aa859d8b1 | 11,978 | py | Python | validatearcgisenterprisedeployment.py | pheede/ArcGIS-Server-Stuff | 9b491d2f4edebec3f613182981f4e50dcc7641a3 | [
"Apache-2.0"
] | 6 | 2017-05-31T10:44:09.000Z | 2020-12-18T18:12:15.000Z | validatearcgisenterprisedeployment.py | pheede/ArcGIS-Server-Stuff | 9b491d2f4edebec3f613182981f4e50dcc7641a3 | [
"Apache-2.0"
] | 1 | 2021-09-30T21:20:59.000Z | 2021-09-30T23:55:48.000Z | validatearcgisenterprisedeployment.py | pheede/ArcGIS-Server-Stuff | 9b491d2f4edebec3f613182981f4e50dcc7641a3 | [
"Apache-2.0"
] | 2 | 2017-12-28T19:30:23.000Z | 2019-10-04T20:34:27.000Z | """This script validates an ArcGIS Enterprise deployment to ensure it is
configured properly with all the required components such as Portal for ArcGIS,
ArcGIS Server, ArcGIS Data Store and the associated configuration.
Designed for ArcGIS Enterprise 10.5 and higher."""
# Author: Philip Heede <pheede@esri.com>
# Last modified: 2017-02-18
import os
import sys
import ssl
import socket
import urllib.request
import getopt
import getpass
import json
import traceback
# NOTE(review): this check only prints a warning and falls through -- it
# presumably should terminate (e.g. sys.exit) on unsupported interpreters;
# confirm against the full script.
if not sys.version_info >= (3, 4):
    print('This script requires Python 3.4 or higher: found Python %s.%s' % sys.version_info[:2])
if __name__ == "__main__":
    # main() is defined elsewhere in the full script (not visible in this excerpt)
    sys.exit(main(sys.argv[1:]))
| 46.972549 | 153 | 0.655034 |
95ce4cab43e2034234aed87a60cc3f00447f9524 | 4,445 | py | Python | 2020/aoc/__init__.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | 1 | 2019-12-27T22:36:30.000Z | 2019-12-27T22:36:30.000Z | 2020/aoc/__init__.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | null | null | null | 2020/aoc/__init__.py | amochtar/adventofcode | 292e7f00a1e19d2149d00246b0a77fedfcd3bd08 | [
"MIT"
] | null | null | null | import itertools
import re
import math
from typing import List, Tuple
def ints(text: str) -> Tuple[int, ...]:
"Return a tuple of all ints in a string"
return tuple(map(int, re.findall(r'-?\b\d+\b', text)))
def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    elems = list(iterable)
    subsets = (itertools.combinations(elems, k) for k in range(len(elems) + 1))
    return itertools.chain.from_iterable(subsets)
def manhattan(p: Tuple[int, ...], q=itertools.repeat(0)) -> int:
    """Return the Manhattan distance between two (multi-dimensional) points.

    If *q* is omitted, the distance from *p* to the origin is returned
    (the infinite ``repeat(0)`` default is truncated by ``zip``).
    The original annotated the return as ``Tuple[int, ...]``, but the
    function returns a scalar sum.
    """
    return sum(abs(a - b) for a, b in zip(p, q))
def king_distance(p: Tuple[int, ...], q=itertools.repeat(0)) -> int:
    """Return the number of chess King moves between two points (Chebyshev distance).

    If *q* is omitted, the distance from *p* to the origin is returned.
    The original annotated the return as ``Tuple[int, ...]``, but the
    function returns a scalar max.
    """
    return max(abs(a - b) for a, b in zip(p, q))
def neighbors4(p: Tuple[int, int]) -> List[Tuple[int, int]]:
    "Return the 4 neighboring cells for a given position"
    px, py = p
    # Order: up, down, left, right (y-1, y+1, x-1, x+1)
    return [(px, py - 1), (px, py + 1), (px - 1, py), (px + 1, py)]
def neighbors8(p: Tuple[int, int]) -> List[Tuple[int, int]]:
    "Return the 8 neighboring cells for a given position"
    px, py = p
    # Row-major scan of the 3x3 block around p, skipping p itself
    return [(px + dx, py + dy)
            for dy in (-1, 0, 1)
            for dx in (-1, 0, 1)
            if (dx, dy) != (0, 0)]
def neighbors_cube(p: Tuple[int, int, int]) -> List[Tuple[int, int, int]]:
    "Return the 26 neighboring cells for a given position in a 3d cube"
    x, y, z = p
    # itertools.product iterates in the same order as three nested loops
    return [(x + dx, y + dy, z + dz)
            for dx, dy, dz in itertools.product((-1, 0, 1), repeat=3)
            if (dx, dy, dz) != (0, 0, 0)]
def neighbors_cube4(p: Tuple[int, int, int, int]) -> List[Tuple[int, int, int, int]]:
    "Return the 80 neighboring cells for a given position in a 4-d cube"
    x, y, z, w = p
    # itertools.product iterates in the same order as four nested loops
    return [(x + di, y + dj, z + dk, w + dl)
            for di, dj, dk, dl in itertools.product((-1, 0, 1), repeat=4)
            if (di, dj, dk, dl) != (0, 0, 0, 0)]
# Movement helpers for 2-D grids.  Coordinates are (x, y); 'n' decreases y,
# i.e. y grows downward (screen-style rows).
moves = {
    'n': lambda p: (p[0], p[1]-1),
    's': lambda p: (p[0], p[1]+1),
    'e': lambda p: (p[0]+1, p[1]),
    'w': lambda p: (p[0]-1, p[1]),
}
# Heading after a 90-degree left turn
left_turn = {
    'n': 'w',
    's': 'e',
    'e': 'n',
    'w': 's',
}
# Heading after a 90-degree right turn
right_turn = {
    'n': 'e',
    's': 'w',
    'e': 's',
    'w': 'n',
}
# Reversed heading
opposite = {
    'n': 's',
    's': 'n',
    'e': 'w',
    'w': 'e',
}
# Unit step vector for each heading (consistent with `moves` above)
facing_dir = {
    'n': (0, -1),
    's': (0, 1),
    'e': (1, 0),
    'w': (-1, 0),
}
origin = (0, 0)
hex_origin = (0, 0, 0)
# Hexagonal-grid moves; every step keeps the coordinate sum constant
# (cube coordinates for hexagonal grids)
hex_moves = {
    'ne': lambda p: (p[0]+1, p[1], p[2]-1),
    'nw': lambda p: (p[0], p[1]+1, p[2]-1),
    'se': lambda p: (p[0], p[1]-1, p[2]+1),
    'sw': lambda p: (p[0]-1, p[1], p[2]+1),
    'w': lambda p: (p[0]-1, p[1]+1, p[2]),
    'e': lambda p: (p[0]+1, p[1]-1, p[2]),
}
def add_pos(a: Tuple[int, int], b: Tuple[int, int], factor: int = 1) -> Tuple[int, int]:
    "Adds two position tuples"
    # b is scaled by factor before being added to a
    ax, ay = a
    bx, by = b
    return (ax + bx * factor, ay + by * factor)
def sub_pos(a: Tuple[int, int], b: Tuple[int, int]) -> Tuple[int, int]:
    "Subtracts the position tuple b from a"
    ax, ay = a
    bx, by = b
    return (ax - bx, ay - by)
def mult_pos(a: Tuple[int, int], factor: int) -> Tuple[int, int]:
"Multiplies a position tuple with a given factor"
return (a[0]*factor, a[1]*factor)
def rot_left(pos: Tuple[int, int], rel: Tuple[int, int] = origin) -> Tuple[int, int]:
"Rotates a position 90 degrees left (counter clock-wise) relative to the given location (default origin)"
rel_pos = sub_pos(pos, rel)
new_pos = (rel_pos[1], -rel_pos[0])
return add_pos(new_pos, rel)
def rot_right(pos: Tuple[int, int], rel: Tuple[int, int] = origin) -> Tuple[int, int]:
"Rotates a position 90 degrees right (clock-wise) relative to the given location (default origin)"
rel_pos = sub_pos(pos, rel)
new_pos = (-rel_pos[1], rel_pos[0])
return add_pos(new_pos, rel)
def min_max(lst: List[Tuple[int, ...]]) -> Tuple[Tuple[int, int], ...]:
    """Return a (min, max) pair for every index of the given tuples.

    For a list of equally-long tuples, component i of the result is
    (min, max) over the i-th elements of all tuples.  Returns an empty
    tuple for an empty list.

    Fix: the return annotation was ``Tuple[int, ...]`` but the function
    actually yields a tuple of (min, max) pairs.
    """
    return tuple((min(component), max(component)) for component in zip(*lst))
def mod1(a: int, b: int) -> int:
    """Return *a* modulo *b* mapped onto the 1-based range 1..b."""
    # Shift to 0-based, wrap, shift back: equivalent to 1 + (a-1) % b.
    return (a - 1) % b + 1
| 26.939394 | 109 | 0.526659 |
95ce971f5a305cd3a19578c204fef92020757f3c | 4,431 | py | Python | pi_source_code.py | cjkuhlmann/CCHack2019 | fb6eb505ac350c2dda0c36e1f33254fbeef049bf | [
"MIT"
] | null | null | null | pi_source_code.py | cjkuhlmann/CCHack2019 | fb6eb505ac350c2dda0c36e1f33254fbeef049bf | [
"MIT"
] | null | null | null | pi_source_code.py | cjkuhlmann/CCHack2019 | fb6eb505ac350c2dda0c36e1f33254fbeef049bf | [
"MIT"
] | null | null | null | import math
import time
from max30105 import MAX30105, HeartRate
import smbus
from bme280 import BME280
import socket
#from matplotlib import pyplot as plt
dev = Device()
dev.setup_sensors()
dev.setup_network()
for i in range(2):
dev.update()
while True:
try:
dev.update()
dev.upload_data()
print("sending_data")
except:
dev.setup_network()
| 28.403846 | 83 | 0.558565 |
95cead6bce011703374b48a18d5379f241d0c282 | 1,417 | py | Python | butter/mas/clients/client_factory.py | bennymeg/Butter.MAS.PythonAPI | 9641293436d989ae9c5324c2b8129f232822b248 | [
"Apache-2.0"
] | 2 | 2019-08-22T08:57:42.000Z | 2019-11-28T14:01:49.000Z | butter/mas/clients/client_factory.py | bennymeg/Butter.MAS.PythonAPI | 9641293436d989ae9c5324c2b8129f232822b248 | [
"Apache-2.0"
] | null | null | null | butter/mas/clients/client_factory.py | bennymeg/Butter.MAS.PythonAPI | 9641293436d989ae9c5324c2b8129f232822b248 | [
"Apache-2.0"
] | null | null | null | from .client_http import HttpClient
from .client_tcp import TcpClient
from .client_udp import UdpClient
from .client import Client
| 30.148936 | 81 | 0.56669 |
95ceaebae16674be2fef2960c47326152d1eb461 | 1,569 | py | Python | scrapytest/spiders/ScrapyDemo5.py | liang1024/Scrapy | bfa7ea5b2174bf91c49f4da9dadc5471acc43092 | [
"Apache-2.0"
] | null | null | null | scrapytest/spiders/ScrapyDemo5.py | liang1024/Scrapy | bfa7ea5b2174bf91c49f4da9dadc5471acc43092 | [
"Apache-2.0"
] | null | null | null | scrapytest/spiders/ScrapyDemo5.py | liang1024/Scrapy | bfa7ea5b2174bf91c49f4da9dadc5471acc43092 | [
"Apache-2.0"
] | null | null | null | import scrapy
'''
<ul class="pager">
<li class="next">
<a href="/page/2/">Next <span aria-hidden="true">→</span></a>
</li>
</ul>
shell
>>> response.css('li.next a').extract_first()
'<a href="/page/2/">Next <span aria-hidden="true"></span></a>'
hrefScrapyCSS
>>> response.css('li.next a::attr(href)').extract_first()
'/page/2/'
'''
import scrapy
'''
parse()urljoin()URL
ScrapyScrapy
'''
'''
scrapy crawl demo5
''' | 24.138462 | 116 | 0.66348 |
95cf45edd5e367889b2e72c5aaae8636bfca5ddc | 909 | py | Python | tests/test_objectives.py | theislab/AutoGeneS | 22bde0d5eba013e90edb85341e0bd9c28b82e7fd | [
"MIT"
] | 46 | 2020-02-25T14:09:21.000Z | 2022-01-20T16:42:40.000Z | tests/test_objectives.py | theislab/AutoGeneS | 22bde0d5eba013e90edb85341e0bd9c28b82e7fd | [
"MIT"
] | 16 | 2020-03-18T15:08:42.000Z | 2022-01-29T20:00:10.000Z | tests/test_objectives.py | theislab/AutoGeneS | 22bde0d5eba013e90edb85341e0bd9c28b82e7fd | [
"MIT"
] | 6 | 2020-02-13T14:23:46.000Z | 2021-12-28T16:50:50.000Z | import pytest
import numpy as np
import pandas as pd
from scipy.special import binom
import os
import sys
sys.path.insert(0, "..")
from autogenes import objectives as ga_objectives
| 23.307692 | 84 | 0.630363 |
95cf9c3a1a9e3db6fb75803b4f3891c4c503d528 | 15,563 | py | Python | digits/model/forms.py | Linda-liugongzi/DIGITS-digits-py3 | 6df5eb6972574a628b9544934518ec8dfa9c7439 | [
"BSD-3-Clause"
] | null | null | null | digits/model/forms.py | Linda-liugongzi/DIGITS-digits-py3 | 6df5eb6972574a628b9544934518ec8dfa9c7439 | [
"BSD-3-Clause"
] | null | null | null | digits/model/forms.py | Linda-liugongzi/DIGITS-digits-py3 | 6df5eb6972574a628b9544934518ec8dfa9c7439 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2014-2017, NVIDIA CORPORATION. All rights reserved.
import os
import flask
from flask_wtf import FlaskForm
import wtforms
from wtforms import validators
from digits.config import config_value
from digits.device_query import get_device, get_nvml_info
from digits import utils
from digits.utils import sizeof_fmt
from digits.utils.forms import validate_required_iff
from digits import frameworks
from flask_babel import lazy_gettext as _
| 36.791962 | 122 | 0.556512 |
95d02019dda244ece2c09a15f8673c55536ad4de | 1,155 | py | Python | 004 Sons/afinacao.py | yamadathamine/300ideiasparaprogramarPython | 331a063bbf8bcd117ae5a34324b8176a6014fc98 | [
"MIT"
] | null | null | null | 004 Sons/afinacao.py | yamadathamine/300ideiasparaprogramarPython | 331a063bbf8bcd117ae5a34324b8176a6014fc98 | [
"MIT"
] | 4 | 2020-06-09T19:10:04.000Z | 2020-06-17T18:23:47.000Z | 004 Sons/afinacao.py | yamadathamine/300ideiasparaprogramarPython | 331a063bbf8bcd117ae5a34324b8176a6014fc98 | [
"MIT"
] | null | null | null | # encoding: utf-8
# usando python 3
# Afinao - Alberto toca violo e programador.
# Precisando afinar o violo e sem diapaso por perto,
# resolveu fazer um programa para ajud-lo.
# O que ele queria era a nota L soando sem parar at que ele conseguisse afinar a
# respectiva corda do violo; as demais cordas ele poderia afinar com base na primeira.
# Escreva um programa que faz soar no alto-falante do computador a nota L (440 Hz)
# e s para quando for pressionada alguma tecla.
import numpy as np
import simpleaudio as sa
frequency = 440 # Our played note will be 440 Hz
fs = 44100 # 44100 samples per second
seconds = 3 # Note duration of 3 seconds
# Generate array with seconds*sample_rate steps, ranging between 0 and seconds
t = np.linspace(0, seconds, seconds * fs, False)
# Generate a 440 Hz sine wave
note = np.sin(frequency * t * 2 * np.pi)
# Ensure that highest value is in 16-bit range
audio = note * (2**15 - 1) / np.max(np.abs(note))
# Convert to 16-bit data
audio = audio.astype(np.int16)
# Start playback
play_obj = sa.play_buffer(audio, 1, 2, fs)
# Wait for playback to finish before exiting
play_obj.wait_done() | 35 | 88 | 0.735931 |
95d0529ff78fe4e15217221008da8dabb874d847 | 138 | py | Python | python/flask-app/data.py | zkan/100DaysOfCode | 3c713ead94a9928e2d0f8d794e49ec202dc64ba3 | [
"MIT"
] | 2 | 2019-05-01T00:32:30.000Z | 2019-11-20T05:23:05.000Z | python/flask-app/data.py | zkan/100DaysOfCode | 3c713ead94a9928e2d0f8d794e49ec202dc64ba3 | [
"MIT"
] | 15 | 2020-09-05T18:35:04.000Z | 2022-03-11T23:44:47.000Z | python/flask-app/data.py | zkan/100DaysOfCode | 3c713ead94a9928e2d0f8d794e49ec202dc64ba3 | [
"MIT"
] | null | null | null | fav_beer = {'Julian': 'White Rabbit Dark Ale',
'Bob': 'Some sort of light beer I assume',
'Mike': 'Oregano Beer'}
| 34.5 | 54 | 0.550725 |
95d185b829b29c3736cdbb9908672dc12ffef154 | 548 | py | Python | appengine/chrome_infra_packages/apps.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | 1 | 2018-01-02T05:47:07.000Z | 2018-01-02T05:47:07.000Z | appengine/chrome_infra_packages/apps.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | appengine/chrome_infra_packages/apps.py | mithro/chromium-infra | d27ac0b230bedae4bc968515b02927cf9e17c2b7 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Actual WSGI app instantiations used from app.yaml.
Extracted to a separate module to avoid calling 'initialize' in unit tests
during module loading time.
"""
import gae_ts_mon
import main
endpoints_app, frontend_app, backend_app = main.initialize()
gae_ts_mon.initialize()
gae_ts_mon.instrument_wsgi_application(frontend_app)
gae_ts_mon.instrument_wsgi_application(backend_app)
| 28.842105 | 74 | 0.810219 |
95d4bf219897990197feea13feb7cf1258d214c8 | 6,298 | py | Python | yadlt/core/layers.py | Perfect-SoftwareEngineer/Deep-Learning-Tensorflow | b191cd2c8ff9d8cb6e2c6dedcac4483fa7548366 | [
"MIT"
] | null | null | null | yadlt/core/layers.py | Perfect-SoftwareEngineer/Deep-Learning-Tensorflow | b191cd2c8ff9d8cb6e2c6dedcac4483fa7548366 | [
"MIT"
] | null | null | null | yadlt/core/layers.py | Perfect-SoftwareEngineer/Deep-Learning-Tensorflow | b191cd2c8ff9d8cb6e2c6dedcac4483fa7548366 | [
"MIT"
] | null | null | null | """Layer classes."""
from __future__ import absolute_import
import abc
import six
import tensorflow as tf
| 28.497738 | 77 | 0.563671 |
95d7f54672f221417081565b033268249f18412b | 835 | py | Python | tests/test_modules/test_builtin/test_grouppart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | tests/test_modules/test_builtin/test_grouppart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | tests/test_modules/test_builtin/test_grouppart.py | MattTaylorDLS/pymalcolm | 995a8e4729bd745f8f617969111cc5a34ce1ac14 | [
"Apache-2.0"
] | null | null | null | import unittest
from malcolm.core import call_with_params
from malcolm.modules.builtin.parts import GroupPart
| 32.115385 | 70 | 0.653892 |
95d8eae1e421c5a5d85e31ca5953813a5295d371 | 512 | py | Python | ok2_backend/common/utils.py | Mipsters/ok2-backend | 50ddbb44262749d731f4e923add205541254223d | [
"MIT"
] | 1 | 2020-02-10T17:53:58.000Z | 2020-02-10T17:53:58.000Z | ok2_backend/common/utils.py | Mipsters/ok2-backend | 50ddbb44262749d731f4e923add205541254223d | [
"MIT"
] | 6 | 2020-01-06T19:37:12.000Z | 2021-09-22T18:03:31.000Z | ok2_backend/common/utils.py | Mipsters/ok2-backend | 50ddbb44262749d731f4e923add205541254223d | [
"MIT"
] | 5 | 2019-11-18T17:39:29.000Z | 2020-07-31T16:00:21.000Z | import os
from jose import jwt
from datetime import datetime, timedelta
JWT_SECRET = 'secret'
JWT_ALGORITHM = 'HS256'
JWT_EXP_DELTA_SECONDS = 31556952 # year
| 23.272727 | 85 | 0.722656 |
95da8c78112cb6f44e754d89ffd5c8e26c67e104 | 1,238 | py | Python | backend/ai4all_api/models.py | kevromster/ai4all | 39da1a95c4e06780f5712bb6e6ecb1f570e5d639 | [
"Apache-2.0"
] | null | null | null | backend/ai4all_api/models.py | kevromster/ai4all | 39da1a95c4e06780f5712bb6e6ecb1f570e5d639 | [
"Apache-2.0"
] | null | null | null | backend/ai4all_api/models.py | kevromster/ai4all | 39da1a95c4e06780f5712bb6e6ecb1f570e5d639 | [
"Apache-2.0"
] | null | null | null | import os
from django.db import models
from ai4all_api.detection_items import DETECTION_ITEMS
from ai4all_api.notification_types import NOTIFICATION_TYPES
| 35.371429 | 86 | 0.757674 |