hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
08804de9d3324b167c6447b69cc226552d4b7bbe
| 282
|
py
|
Python
|
Mundo-1/exercicio-05.py
|
FRafaelPA/Praticando-Python
|
d8a46beceeae2ac20acf4c63f86a32cba537c896
|
[
"MIT"
] | null | null | null |
Mundo-1/exercicio-05.py
|
FRafaelPA/Praticando-Python
|
d8a46beceeae2ac20acf4c63f86a32cba537c896
|
[
"MIT"
] | null | null | null |
Mundo-1/exercicio-05.py
|
FRafaelPA/Praticando-Python
|
d8a46beceeae2ac20acf4c63f86a32cba537c896
|
[
"MIT"
] | null | null | null |
'''
Faa um programa que leia um nmero inteiro e mostre na tela o seu sucessor e seu antecessor.
'''
n = int(input('Entre com um valor: '))
antecessor = n - 1
sucessor = n + 1
msg = 'o antecessor do nmero {} {} e seu sucessor {}'.format(n, antecessor, sucessor)
print(msg)
| 23.5
| 93
| 0.673759
|
0880a4f7dffdc5894d94be459ed45b4d22287a7c
| 3,505
|
py
|
Python
|
tests/sql_parser/ast/test_insert_statement_is_parsed.py
|
vladbalmos/mitzasql
|
06c2a96eb4494095b2b72bc1454199a4940b0700
|
[
"MIT"
] | 69
|
2019-05-16T06:40:18.000Z
|
2022-03-24T06:23:49.000Z
|
tests/sql_parser/ast/test_insert_statement_is_parsed.py
|
vladbalmos/mitzasql
|
06c2a96eb4494095b2b72bc1454199a4940b0700
|
[
"MIT"
] | 36
|
2019-05-15T19:55:24.000Z
|
2021-07-22T07:07:14.000Z
|
tests/sql_parser/ast/test_insert_statement_is_parsed.py
|
vladbalmos/mitzasql
|
06c2a96eb4494095b2b72bc1454199a4940b0700
|
[
"MIT"
] | 8
|
2019-05-16T06:56:28.000Z
|
2022-02-11T02:24:12.000Z
|
import pytest
from mitzasql.sql_parser.parser import parse
from mitzasql.utils import dfs
| 26.353383
| 77
| 0.650499
|
088195a1ba4520a7f098f9cb7902e66481e7e187
| 21
|
py
|
Python
|
checkov/version.py
|
pmalkki/checkov
|
b6cdf386dd976fe27c16fed6d550756a678a5d7b
|
[
"Apache-2.0"
] | null | null | null |
checkov/version.py
|
pmalkki/checkov
|
b6cdf386dd976fe27c16fed6d550756a678a5d7b
|
[
"Apache-2.0"
] | null | null | null |
checkov/version.py
|
pmalkki/checkov
|
b6cdf386dd976fe27c16fed6d550756a678a5d7b
|
[
"Apache-2.0"
] | null | null | null |
version = '2.0.1048'
| 10.5
| 20
| 0.619048
|
0881a63860af8a2c6c4f14401c7170b38015ba3a
| 775
|
py
|
Python
|
ArraysAndSorting/MakeItAnagram.py
|
tejasnikumbh/Algorithms
|
2a2983a522be295ce95bd970a0ee8a617866992f
|
[
"BSD-2-Clause"
] | 8
|
2015-04-16T03:43:49.000Z
|
2018-08-14T22:47:03.000Z
|
ArraysAndSorting/MakeItAnagram.py
|
tejasnikumbh/Algorithms
|
2a2983a522be295ce95bd970a0ee8a617866992f
|
[
"BSD-2-Clause"
] | null | null | null |
ArraysAndSorting/MakeItAnagram.py
|
tejasnikumbh/Algorithms
|
2a2983a522be295ce95bd970a0ee8a617866992f
|
[
"BSD-2-Clause"
] | 7
|
2016-03-22T20:29:27.000Z
|
2018-09-29T18:55:47.000Z
|
# Importing standard libraires
import sys
'''
Main Function for the program. Logic is as follows
Make two frequency tables for two strings
Take overlap of both and add up the non overlapping regions (absolute values)
'''
if __name__ == "__main__":
# Parsing in the input
s1 = list(sys.stdin.readline().rstrip())
s2 = list(sys.stdin.readline().rstrip())
# Initialize the character array as a hashtable
charFreqs1 = [0]*26
charFreqs2 = [0]*26
anagram = [0]*26
# Record frequencies of characters in s1 and s2
for i in s1:
charFreqs1[ord(i) - ord('a')] += 1
for i in s2:
charFreqs2[ord(i) - ord('a')] += 1
for i in range(26):
anagram[i] = abs(charFreqs1[i] - charFreqs2[i])
print sum(anagram)
| 31
| 81
| 0.636129
|
08826307649d95fd1f1fa357479507c8385245c7
| 1,181
|
py
|
Python
|
tests/test_get_schedule.py
|
j-muller/pypuregym
|
396862047f8b5c0b1138b5c562ddb6958aaa6817
|
[
"MIT"
] | 1
|
2020-12-31T01:42:14.000Z
|
2020-12-31T01:42:14.000Z
|
tests/test_get_schedule.py
|
j-muller/pypuregym
|
396862047f8b5c0b1138b5c562ddb6958aaa6817
|
[
"MIT"
] | null | null | null |
tests/test_get_schedule.py
|
j-muller/pypuregym
|
396862047f8b5c0b1138b5c562ddb6958aaa6817
|
[
"MIT"
] | null | null | null |
from .utilities import Response
SCHEDULE_RESPONSE = b"""
{"error":{"code":200,"message":"Success"},"data":{"classes":[{
"id":113209,"sector":"F","class_type_id":48,"start_date":"2020-06-07",
"end_date":"2020-06-07","start_time":"09:00:00","end_time":"09:45:00",
"duration":"2700000","teacher_id":782,"location_id":10,"level_id":9,
"pillar_id":6,"button_status":0,"booking_id":0,
"start_datetime":"2020-06-07T09:00:00+08:00","is_free":false,
"color_code":"","is_filmed":false,"is_online":0,"is_cycling":false,
"free_class_type":0,"special_flag":null,"duration_min":45,
"class_type":{"id":48,"name":"TRX Blast",
"description":"","is_fuze":false,"pillar":{"name":"Strength",
"color":"#ed1c24","code":"strength_and_conditioning"},"level":"All Levels"},
"teacher":{"id":782,"name":"","full_name":"","image_link":"",
"type":"teacher"}}]}}
"""
| 36.90625
| 76
| 0.663844
|
0883af2fe80ecab9fbfc1b7be524e037979d920a
| 518
|
py
|
Python
|
testing/examples/talib-macd.py
|
pchaos/quanttesting
|
98331670547e8a45ba93b49f3e9c660495645114
|
[
"MIT"
] | 5
|
2020-04-08T14:14:05.000Z
|
2021-06-29T03:42:01.000Z
|
testing/examples/talib-macd.py
|
pchaos/quanttesting
|
98331670547e8a45ba93b49f3e9c660495645114
|
[
"MIT"
] | null | null | null |
testing/examples/talib-macd.py
|
pchaos/quanttesting
|
98331670547e8a45ba93b49f3e9c660495645114
|
[
"MIT"
] | 7
|
2020-04-15T15:07:39.000Z
|
2022-03-23T05:44:02.000Z
|
'''
Ta-libMACD
'''
import pandas as pd
import numpy as np
import talib as ta
import tushare as ts
from matplotlib import rc
import matplotlib.pyplot as plt
import seaborn as sns
rc('mathtext', default='regular')
sns.set_style('white')
# %matplotlib
plt.rcParams["figure.figsize"] = (20, 10)
dw = ts.get_k_data("600600")
close = dw.close.values
dw['macd'], dw['macdsignal'], dw['macdhist'] = ta.MACD(close, fastperiod=12, slowperiod=26, signalperiod=9)
dw[['close','macd','macdsignal','macdhist']].plot()
plt.show()
| 24.666667
| 107
| 0.722008
|
0883de75e3222b2bd0245697a6613a014446c4c7
| 252
|
py
|
Python
|
packages/regression_model/regression_model/__init__.py
|
abdurrehman11/deploying-machine-learning-models
|
93872e4c197df2543e492af3df718bdad1817752
|
[
"BSD-3-Clause"
] | null | null | null |
packages/regression_model/regression_model/__init__.py
|
abdurrehman11/deploying-machine-learning-models
|
93872e4c197df2543e492af3df718bdad1817752
|
[
"BSD-3-Clause"
] | null | null | null |
packages/regression_model/regression_model/__init__.py
|
abdurrehman11/deploying-machine-learning-models
|
93872e4c197df2543e492af3df718bdad1817752
|
[
"BSD-3-Clause"
] | null | null | null |
import logging
from regression_model.config import config
from regression_model.config import logging_config
VERSION_PATH = config.PACKAGE_ROOT / 'VERSION'
with open(VERSION_PATH, 'r') as version_file:
__version__ = version_file.read().strip()
| 25.2
| 50
| 0.797619
|
0886c3adb37d4bb2d284b34954bef308daf23bd3
| 508
|
py
|
Python
|
001-Python-basico/008-desafio-pratico.py
|
clebertonf/Python-course
|
a57f405cbd27f96e0cb61128df31e9249c79a962
|
[
"MIT"
] | null | null | null |
001-Python-basico/008-desafio-pratico.py
|
clebertonf/Python-course
|
a57f405cbd27f96e0cb61128df31e9249c79a962
|
[
"MIT"
] | null | null | null |
001-Python-basico/008-desafio-pratico.py
|
clebertonf/Python-course
|
a57f405cbd27f96e0cb61128df31e9249c79a962
|
[
"MIT"
] | null | null | null |
from datetime import date
year_current_date = date.today().year
get_info("Cleberton", 28, 1.69, 75)
# Funo recebe algumas informaoes por parametro, e retorna ano de nascimento, imc
# com algumas frases customizadas
| 28.222222
| 83
| 0.687008
|
0887199a887a1fbf59285a7c42522a561d36fdf6
| 160
|
py
|
Python
|
jsons.py
|
tebeka/py2go-cheatsheet
|
14c83850876ef80c36af326ab4fc6f56344781c7
|
[
"BSD-3-Clause"
] | 13
|
2017-09-09T08:32:34.000Z
|
2022-02-28T04:32:43.000Z
|
jsons.py
|
tebeka/py2go-cheatsheet
|
14c83850876ef80c36af326ab4fc6f56344781c7
|
[
"BSD-3-Clause"
] | 3
|
2017-11-25T18:48:11.000Z
|
2017-12-30T13:00:04.000Z
|
jsons.py
|
tebeka/py2go-cheatsheet
|
14c83850876ef80c36af326ab4fc6f56344781c7
|
[
"BSD-3-Clause"
] | 2
|
2019-11-03T19:58:17.000Z
|
2020-04-28T01:14:17.000Z
|
import json
from sys import stdout
# START
data = '''{
"name": "bugs",
"age": 76
}'''
obj = json.loads(data)
json.dump(obj, stdout)
# END
print(obj)
| 10.666667
| 22
| 0.59375
|
08873554c1a8d8174ca6425485bfe2a0d0880e6a
| 2,306
|
py
|
Python
|
tests/components/speedtestdotnet/test_init.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 1
|
2021-07-08T20:09:55.000Z
|
2021-07-08T20:09:55.000Z
|
tests/components/speedtestdotnet/test_init.py
|
pcaston/core
|
e74d946cef7a9d4e232ae9e0ba150d18018cfe33
|
[
"Apache-2.0"
] | 47
|
2021-02-21T23:43:07.000Z
|
2022-03-31T06:07:10.000Z
|
tests/components/speedtestdotnet/test_init.py
|
OpenPeerPower/core
|
f673dfac9f2d0c48fa30af37b0a99df9dd6640ee
|
[
"Apache-2.0"
] | null | null | null |
"""Tests for SpeedTest integration."""
from unittest.mock import patch
import speedtest
from openpeerpower import config_entries
from openpeerpower.components import speedtestdotnet
from openpeerpower.setup import async_setup_component
from tests.common import MockConfigEntry
| 28.825
| 82
| 0.717259
|
088834b65e8fc3335e7c944aeb1e307017ece6c9
| 1,258
|
py
|
Python
|
opetuskoodi/2021_10_18/2_kerta_kertaus.py
|
mikkokotola/pythonkoodaus
|
5415b3d87dfcb65b72edb916967824304d155d9a
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
opetuskoodi/2021_10_18/2_kerta_kertaus.py
|
mikkokotola/pythonkoodaus
|
5415b3d87dfcb65b72edb916967824304d155d9a
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
opetuskoodi/2021_10_18/2_kerta_kertaus.py
|
mikkokotola/pythonkoodaus
|
5415b3d87dfcb65b72edb916967824304d155d9a
|
[
"CC-BY-4.0",
"MIT"
] | null | null | null |
# Kertaus, kerta 3
# Muuttujat ja sytteen lukeminen kyttjlt
nimi = input("Anna nimesi: ")
kengnnumero = input("Mik on kengnnumerosi: ")
print("Moi vaan, " + nimi + "! Kengnnumerosi on " + kengnnumero + ".")
# F-merkkijono
print(f"Moi vaan, {nimi}! Kengnnumerosi on {kengnnumero}.")
# Numerot
# Iklaskuri
syntymvuosi = input("Mik on syntymvuotesi? ")
syntymvuosi = int(syntymvuosi) # Muunnetaan merkkijono kokonaisluvuksi, jotta voimme laskea sill
ik = 2021 - syntymvuosi
print(f"Iksi vuoden 2021 lopussa on {ik}")
# Laskin, joka osaa kertoa lukuja
luku1 = int(input("Anna luku: "))
luku2 = int(input("Anna toinen luku: "))
tulos = luku1 * luku2
print(f"{luku1} * {luku2} = {tulos}")
# Laskin, joka laskee kolmen luvun summan
summa = 0
luku = int(input("Ensimminen luku: "))
summa = summa + luku
luku = int(input("Toinen luku: "))
summa = summa + luku
luku = int(input("kolmas luku: "))
summa = summa + luku
print(f"Lukujen summa: {summa}")
# Minklaisia laskuja voi laskea
print(5+2)
print(5-2)
print(5*2)
print(5/2)
print(5//2)
print(5%2)
print(2 + 2 * 3)
print((2 + 2) * 3)
# Liukuluvut = desimaaliluvut
luku1 = 4.0
luku2 = 1.5
tulos = luku1 - luku2
print(f"Tulos on {tulos}")
print(f"{luku1} - {luku2} = {tulos}")
| 21.689655
| 99
| 0.683625
|
0888b580bb9eb1968da656fe5efb329d6602a748
| 616
|
py
|
Python
|
case/xpath.py
|
xierensong/learnPython
|
33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455
|
[
"MIT"
] | null | null | null |
case/xpath.py
|
xierensong/learnPython
|
33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455
|
[
"MIT"
] | null | null | null |
case/xpath.py
|
xierensong/learnPython
|
33f9891d8a8ed39772ff9bcbeb1e5cff6f3b5455
|
[
"MIT"
] | 1
|
2018-10-11T08:20:44.000Z
|
2018-10-11T08:20:44.000Z
|
import requests
from lxml import etree
if __name__ == '__main__':
headers = {"User-Agent":'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
url = 'https://www.apache.org/dist/ant/'
sourceHTML = requests.get(url, headers = headers)
selector = etree.HTML(sourceHTML.text)
folder_list = selector.xpath('//pre[position()=1]/a[@href]')
for elmt in folder_list:
#
href_TT = elmt.get('href')
print('href_TT ', href_TT)
if href_TT[len(href_TT)-1] == '/':
print('folder_list', elmt.attrib)
| 41.066667
| 144
| 0.63474
|
088c4b3ec59271d9af6031e07d4cb3e300f061c4
| 62
|
py
|
Python
|
hermes1d/__init__.py
|
certik/hermes1d-llnl
|
8e3b76fd3022af90e5c4c3923337a422d79604d5
|
[
"BSD-3-Clause"
] | 1
|
2016-08-18T23:21:55.000Z
|
2016-08-18T23:21:55.000Z
|
hermes1d/__init__.py
|
certik/hermes1d-llnl
|
8e3b76fd3022af90e5c4c3923337a422d79604d5
|
[
"BSD-3-Clause"
] | null | null | null |
hermes1d/__init__.py
|
certik/hermes1d-llnl
|
8e3b76fd3022af90e5c4c3923337a422d79604d5
|
[
"BSD-3-Clause"
] | null | null | null |
from h1d_wrapper.h1d_wrapper import Element, Mesh, Linearizer
| 31
| 61
| 0.854839
|
088ddea79b72540b919336ee600c90b0505ded86
| 5,132
|
py
|
Python
|
jelm/tests/unit/test_jelm_class.py
|
endremborza/jelm
|
6916bbd4ceb909ad3350c56d3a149bdb97671489
|
[
"MIT"
] | null | null | null |
jelm/tests/unit/test_jelm_class.py
|
endremborza/jelm
|
6916bbd4ceb909ad3350c56d3a149bdb97671489
|
[
"MIT"
] | null | null | null |
jelm/tests/unit/test_jelm_class.py
|
endremborza/jelm
|
6916bbd4ceb909ad3350c56d3a149bdb97671489
|
[
"MIT"
] | null | null | null |
import pytest
from jelm import Jelm, Node, Edge
from jelm.tests.network_case_set_class import NetwokCaseTemplate
| 25.034146
| 77
| 0.639517
|
088f0e150b58a95dbcc3bacf169c6bdc57e4eedc
| 6,582
|
py
|
Python
|
trajectory_prediction/evaluation.py
|
libai2019/dataset-api
|
2f793821864f32bd210c17060a70682488bb74e0
|
[
"Apache-2.0"
] | 385
|
2018-07-02T22:21:25.000Z
|
2022-03-28T13:12:47.000Z
|
trajectory_prediction/evaluation.py
|
libai2019/dataset-api
|
2f793821864f32bd210c17060a70682488bb74e0
|
[
"Apache-2.0"
] | 102
|
2018-08-01T10:40:40.000Z
|
2022-03-16T10:32:44.000Z
|
trajectory_prediction/evaluation.py
|
libai2019/dataset-api
|
2f793821864f32bd210c17060a70682488bb74e0
|
[
"Apache-2.0"
] | 98
|
2018-07-12T18:36:42.000Z
|
2022-03-20T04:38:03.000Z
|
'''
Evaluation code for trajectory prediction.
We record the objects in the last frame of every sequence in test dataset as considered objects, which is stored in considered_objects.txt.
We compare the error between your predicted locations in the next 3s(six positions) and the ground truth for these considered objects.
To run this script, make sure that your results are in required format.
'''
import os
import argparse
import numpy as np
if __name__ == '__main__':
main()
| 36.364641
| 139
| 0.628836
|
0891695cf058c07ea805662895cf40325fd7ce37
| 2,561
|
py
|
Python
|
shellfoundry/commands/install_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | null | null | null |
shellfoundry/commands/install_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | 1
|
2021-03-25T23:21:02.000Z
|
2021-03-25T23:21:02.000Z
|
shellfoundry/commands/install_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | null | null | null |
# !/usr/bin/python
# -*- coding: utf-8 -*-
import click
import os
try:
# Python 2.x version
from urllib2 import HTTPError, URLError
except:
# Python 3.x version
from urllib.error import HTTPError, URLError
from shellfoundry.exceptions import FatalError
from shellfoundry.utilities.config_reader import Configuration, CloudShellConfigReader
from shellfoundry.utilities.installer import ShellInstaller
from shellfoundry.utilities.shell_config_reader import ShellConfigReader
from shellfoundry.utilities.shell_package import ShellPackage
from shellfoundry.utilities.shell_package_installer import ShellPackageInstaller
| 43.40678
| 119
| 0.705193
|
0893025cc2e6d02ad0cc2a38ee4b17db36c8a68d
| 9,064
|
py
|
Python
|
sam_actions/scripts/gps_fix_server.py
|
Jollerprutt/sam_common
|
dd8b43b3c69eee76fe0c35a98db9dfb67f2b79f2
|
[
"BSD-3-Clause"
] | 1
|
2020-06-09T18:23:53.000Z
|
2020-06-09T18:23:53.000Z
|
sam_actions/scripts/gps_fix_server.py
|
Jollerprutt/sam_common
|
dd8b43b3c69eee76fe0c35a98db9dfb67f2b79f2
|
[
"BSD-3-Clause"
] | 3
|
2020-10-06T09:46:03.000Z
|
2021-03-10T13:40:44.000Z
|
sam_actions/scripts/gps_fix_server.py
|
Jollerprutt/sam_common
|
dd8b43b3c69eee76fe0c35a98db9dfb67f2b79f2
|
[
"BSD-3-Clause"
] | 5
|
2020-01-20T18:33:55.000Z
|
2020-12-29T12:34:22.000Z
|
#!/usr/bin/python
import rospy
from rospy import ROSException
from std_msgs.msg import Header, Bool
from std_srvs.srv import SetBool
from geometry_msgs.msg import PoseWithCovarianceStamped, Point, Quaternion
from sensor_msgs.msg import NavSatFix, NavSatStatus
from sam_msgs.msg import GetGPSFixAction, GetGPSFixFeedback, GetGPSFixResult
from sam_msgs.msg import PercentStamped
import actionlib
import tf_conversions
import tf
from tf.transformations import quaternion_from_euler, quaternion_multiply
from geodesy import utm
import math
import numpy as np
if __name__ == "__main__":
rospy.init_node('gps_fix_server', anonymous=False) #True)
check_server = GPSFixServer(rospy.get_name())
rospy.spin()
| 42.553991
| 143
| 0.645521
|
08967bfbf25d6987de9933fc65d4f932dbcd6e60
| 1,307
|
py
|
Python
|
src/model/RoleProxy.py
|
JulienGrv/puremvc-python-demo-PySide-employeeadmin
|
b076493ac34254e665b485259b0a7122fa9cfde4
|
[
"BSD-3-Clause"
] | 4
|
2017-08-26T10:18:10.000Z
|
2020-07-28T19:50:54.000Z
|
src/model/RoleProxy.py
|
JulienGrv/puremvc-python-demo-PySide-employeeadmin
|
b076493ac34254e665b485259b0a7122fa9cfde4
|
[
"BSD-3-Clause"
] | null | null | null |
src/model/RoleProxy.py
|
JulienGrv/puremvc-python-demo-PySide-employeeadmin
|
b076493ac34254e665b485259b0a7122fa9cfde4
|
[
"BSD-3-Clause"
] | 3
|
2020-09-22T12:17:14.000Z
|
2021-07-16T12:28:18.000Z
|
# -*- coding: utf-8 -*-
from puremvc.patterns.proxy import Proxy
from .. import ApplicationFacade
| 27.808511
| 72
| 0.61974
|
0896a00f400830a8eb41593559f65d607a6a09c6
| 1,358
|
py
|
Python
|
flappy-brird/utils/radio.py
|
victorathanasio/Personal-projects
|
94c870179cec32aa733a612a6faeb047df16d977
|
[
"MIT"
] | null | null | null |
flappy-brird/utils/radio.py
|
victorathanasio/Personal-projects
|
94c870179cec32aa733a612a6faeb047df16d977
|
[
"MIT"
] | null | null | null |
flappy-brird/utils/radio.py
|
victorathanasio/Personal-projects
|
94c870179cec32aa733a612a6faeb047df16d977
|
[
"MIT"
] | null | null | null |
import pygame
import os
| 25.148148
| 77
| 0.611193
|
0896e29401ea1989cb26ef01107f5729035c11a7
| 4,405
|
py
|
Python
|
app/__main__.py
|
pablohawz/tfg-Scan-Paint-clone
|
056cd50d9e4274620cf085a41ed9d326e16dd47b
|
[
"MIT"
] | null | null | null |
app/__main__.py
|
pablohawz/tfg-Scan-Paint-clone
|
056cd50d9e4274620cf085a41ed9d326e16dd47b
|
[
"MIT"
] | null | null | null |
app/__main__.py
|
pablohawz/tfg-Scan-Paint-clone
|
056cd50d9e4274620cf085a41ed9d326e16dd47b
|
[
"MIT"
] | null | null | null |
# This Python file uses the following encoding: utf-8
from app.package.views.Calibrate_view import CalibrateView
from app.package.controllers.Calibrate_controller import CalibrateController
from app.package.models.Calibrate_model import CalibrateModel
import sys
import matplotlib
from PySide2.QtWidgets import QApplication
from PySide2 import QtCore
from .package.models.NewProjectModel import NewProjectModel
from .package.models.DataAcquisitionModel import DataAcquisitionModel
from .package.models.DisplayResultsModel import DisplayResultsModel
from .package.controllers.Navigator import Navigator
from .package.controllers.NewProjectController import NewProjectController
from .package.controllers.DataAcquisitionController import (
DataAcquisitionController)
from .package.controllers.DisplayResultsController import (
DisplayResultsController)
from .package.views.MainWindow import MainWindow
from .package.views.NewProjectView import NewProjectView
from .package.views.DataAcquisitionView import DataAcquisitionView
from .package.views.DisplayResultsView import DisplayResultsView
sys._excepthook = sys.excepthook
sys.excepthook = exception_hook
matplotlib.use('tkagg')
if __name__ == "__main__":
main()
# if __name__ == "__main__":
# import cProfile
# cProfile.run('main()', 'output.dat')
# import pstats
# from pstats import SortKey
# with open("output_time.dat", "w") as f:
# p = pstats.Stats("output.dat", stream=f)
# p.sort_stats("time").print_stats()
# with open("output_calls.dat", "w") as f:
# p = pstats.Stats("output.dat", stream=f)
# p.sort_stats("calls").print_stats()
| 33.120301
| 76
| 0.711691
|
0897118d6f2834e2b8c74ba12247412406dbd2c7
| 557
|
py
|
Python
|
Trakttv.bundle/Contents/Libraries/Shared/playhouse/berkeleydb.py
|
disrupted/Trakttv.bundle
|
24712216c71f3b22fd58cb5dd89dad5bb798ed60
|
[
"RSA-MD"
] | 1,346
|
2015-01-01T14:52:24.000Z
|
2022-03-28T12:50:48.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/playhouse/berkeleydb.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 474
|
2015-01-01T10:27:46.000Z
|
2022-03-21T12:26:16.000Z
|
Trakttv.bundle/Contents/Libraries/Shared/playhouse/berkeleydb.py
|
alcroito/Plex-Trakt-Scrobbler
|
4f83fb0860dcb91f860d7c11bc7df568913c82a6
|
[
"RSA-MD"
] | 191
|
2015-01-02T18:27:22.000Z
|
2022-03-29T10:49:48.000Z
|
import datetime
import decimal
from playhouse.sqlite_ext import *
# Peewee assumes that the `pysqlite2` module was compiled against the
# BerkeleyDB SQLite libraries.
from pysqlite2 import dbapi2 as berkeleydb
berkeleydb.register_adapter(decimal.Decimal, str)
berkeleydb.register_adapter(datetime.date, str)
berkeleydb.register_adapter(datetime.time, str)
| 27.85
| 69
| 0.779174
|
089a04fda175104b7a74e5689381760d2e0c8310
| 1,513
|
py
|
Python
|
PyEEA/analysis/SimulationAnalysisEngine.py
|
ThomasJFR/PyEEA
|
7927ee5ff1de8d3cf9e9654899ea4c2c0284519c
|
[
"MIT"
] | 1
|
2020-06-15T03:16:06.000Z
|
2020-06-15T03:16:06.000Z
|
PyEEA/analysis/SimulationAnalysisEngine.py
|
ThomasJFR/PyEEA
|
7927ee5ff1de8d3cf9e9654899ea4c2c0284519c
|
[
"MIT"
] | 1
|
2020-06-19T04:56:21.000Z
|
2020-06-19T04:56:21.000Z
|
PyEEA/analysis/SimulationAnalysisEngine.py
|
ThomasJFR/PyEEA
|
7927ee5ff1de8d3cf9e9654899ea4c2c0284519c
|
[
"MIT"
] | null | null | null |
from numpy.random import standard_normal
from numbers import Number
def simulation_analysis(project, sim_dict, iterations=250, valuator=None):
"""
Purpose:
Analyses the effects of uncertainty of a system by performing a Monte Carlo simulation.
Args:
project: An instance of Project to perform the simulation on
sim_dict: A dict where the key is the name of the cashflow to simulate and the value
is either a number defining the standard deviation for the cashflow as a percentage, or a
function defining some way to modify the cashflow by an amount
"""
# Make every sim_fun value a callable, converting numbers to stdev functions
for key in sim_dict:
if isinstance(sim_dict[key], Number):
stdev = sim_dict[key]
sim_dict[key] = std_dist
valuator = valuator or project.npw
if not callable(valuator):
return TypeError("Valuator must be a callable construct!")
# Perform the simulation
valuations = []
for _ in range(iterations):
with project as p:
for key in sim_dict:
sim_fun = sim_dict[key]
n_cashflows = len(p[key])
for n in range(n_cashflows):
cf = p[key][n]
cf.amount += sim_fun(cf.amount)
valuations.append(valuator())
return valuations
| 35.186047
| 110
| 0.61996
|
089bce5935bf86ce3e21aaf4f16ec72196bcf521
| 433
|
py
|
Python
|
bot/handlers/common.py
|
slawiko/remindmelater_bot
|
76e46f7a42ee2eb02d0b06eea1eb2b8c6f76cb9e
|
[
"Apache-2.0"
] | null | null | null |
bot/handlers/common.py
|
slawiko/remindmelater_bot
|
76e46f7a42ee2eb02d0b06eea1eb2b8c6f76cb9e
|
[
"Apache-2.0"
] | 2
|
2017-06-11T11:07:30.000Z
|
2017-06-24T05:37:34.000Z
|
bot/handlers/common.py
|
slawiko/remindmelater_bot
|
76e46f7a42ee2eb02d0b06eea1eb2b8c6f76cb9e
|
[
"Apache-2.0"
] | null | null | null |
import logging
from telegram.ext import CommandHandler
logger = logging.getLogger(__name__)
| 18.041667
| 69
| 0.699769
|
089c7e0aea70b01bb6426d16f39e585a80906239
| 307
|
py
|
Python
|
src/papierstat/datasets/data_helper.py
|
sdpython/papierstat
|
f69de884c59ada30b58224dca39f2a44d92122c1
|
[
"MIT"
] | 7
|
2019-03-21T09:52:31.000Z
|
2021-01-17T16:56:27.000Z
|
src/papierstat/datasets/data_helper.py
|
sdpython/papierstat
|
f69de884c59ada30b58224dca39f2a44d92122c1
|
[
"MIT"
] | 33
|
2018-02-08T23:56:57.000Z
|
2021-02-10T23:55:43.000Z
|
src/papierstat/datasets/data_helper.py
|
sdpython/papierstat
|
f69de884c59ada30b58224dca39f2a44d92122c1
|
[
"MIT"
] | 1
|
2021-02-11T09:16:33.000Z
|
2021-02-11T09:16:33.000Z
|
# -*- coding: utf-8 -*-
"""
@file
@brief Fonctions retournant des jeux de donnes.
"""
import os
def get_data_folder():
"""
Retourne le rpertoire de donnes inclus dans ce module.
"""
this = os.path.dirname(__file__)
data = os.path.join(this, "data")
return os.path.abspath(data)
| 19.1875
| 60
| 0.641694
|
089f58dcbc654f5aa5c0245f1cc3a918e10ba168
| 464
|
py
|
Python
|
ollivanders/offer/models.py
|
paradox-particle/diagon-alley
|
184a58da34046319c96b6d5535646497d80c4970
|
[
"MIT"
] | null | null | null |
ollivanders/offer/models.py
|
paradox-particle/diagon-alley
|
184a58da34046319c96b6d5535646497d80c4970
|
[
"MIT"
] | null | null | null |
ollivanders/offer/models.py
|
paradox-particle/diagon-alley
|
184a58da34046319c96b6d5535646497d80c4970
|
[
"MIT"
] | null | null | null |
from django.db import models
| 24.421053
| 46
| 0.661638
|
089fb8ac7ea2e682cd5404a50c0fbd9b93f873d7
| 6,470
|
py
|
Python
|
qa/advancedloggingplugin/advancedlogging.py
|
robertstarmer/aurora
|
ae98329c8dc466dea2c8040203624f0bfc1c7cfe
|
[
"Apache-2.0"
] | 23
|
2015-01-22T22:23:35.000Z
|
2021-10-21T23:08:29.000Z
|
qa/advancedloggingplugin/advancedlogging.py
|
sysbot/aurora
|
9e319d4eb8c760cf84cb80ed2959cd52976af11c
|
[
"Apache-2.0"
] | 2
|
2015-10-13T00:47:45.000Z
|
2019-05-06T14:52:33.000Z
|
qa/advancedloggingplugin/advancedlogging.py
|
sysbot/aurora
|
9e319d4eb8c760cf84cb80ed2959cd52976af11c
|
[
"Apache-2.0"
] | 27
|
2015-03-18T19:39:30.000Z
|
2022-03-11T00:58:09.000Z
|
import os
from os.path import join
import traceback
from bs4 import BeautifulSoup
from nose.plugins import Plugin
| 30.956938
| 75
| 0.52813
|
08a0d32c04f17aef2a43162c106a80b10c85518c
| 999
|
py
|
Python
|
setup.py
|
sunghyunzz/aiohttp-toolbox
|
1948a1962b3bd4071f234719b6683b55cd03d6f0
|
[
"MIT"
] | 6
|
2016-11-21T08:38:34.000Z
|
2019-02-20T12:56:16.000Z
|
setup.py
|
sunghyunzz/aiohttp-toolbox
|
1948a1962b3bd4071f234719b6683b55cd03d6f0
|
[
"MIT"
] | 1
|
2017-07-20T02:20:03.000Z
|
2017-07-20T02:20:03.000Z
|
setup.py
|
sunghyunzz/aiohttp-toolbox
|
1948a1962b3bd4071f234719b6683b55cd03d6f0
|
[
"MIT"
] | 2
|
2017-07-20T02:20:44.000Z
|
2019-02-21T13:37:37.000Z
|
"""
aiohttp-ultrajson
-----------------
Integrates UltraJSON with your aiohttp application.
"""
from setuptools import setup
setup(
name='aiohttp-ultrajson',
version='0.1.0',
url='https://github.com/sunghyunzz/aiohttp-ultrajson',
license='MIT',
author='sunghyunzz',
author_email='me@sunghyunzz.com',
description='Integrates UltraJSON with your aiohttp application.',
long_description=__doc__,
py_modules=['aiohttp_ultrajson'],
zip_safe=False,
platforms='any',
install_requires=[
'aiohttp>2',
'ujson>=1.34'
],
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Internet :: WWW/HTTP',
'Framework :: AsyncIO'
]
)
| 27
| 70
| 0.608609
|
08a23fb05230d43cddbfc2a0b66eda446175157f
| 322
|
py
|
Python
|
PTA/PAT_B/Python3/B1061_AC.py
|
StrayDragon/OJ-Solutions
|
b31b11c01507544aded2302923da080b39cf2ba8
|
[
"MIT"
] | 1
|
2019-05-13T10:09:55.000Z
|
2019-05-13T10:09:55.000Z
|
PTA/PAT_B/Python3/B1061_AC.py
|
StrayDragon/OJ-Solutions
|
b31b11c01507544aded2302923da080b39cf2ba8
|
[
"MIT"
] | null | null | null |
PTA/PAT_B/Python3/B1061_AC.py
|
StrayDragon/OJ-Solutions
|
b31b11c01507544aded2302923da080b39cf2ba8
|
[
"MIT"
] | null | null | null |
n, m = map(int, input().split())
scores = list(map(int, input().split()))
answers = list(map(int, input().split()))
for i in range(n):
actuals = list(map(int, input().split()))
result = 0
for i, score in enumerate(scores):
if actuals[i] == answers[i]:
result += score
print(result)
| 21.466667
| 45
| 0.568323
|
08a37f1f4c2faa26bde495db95f37f4816d7caf0
| 12,652
|
py
|
Python
|
dh/network/__init__.py
|
dhaase-de/dh-python-dh
|
40b04407e5f67ec261f559263718ec2b2588dabb
|
[
"MIT"
] | null | null | null |
dh/network/__init__.py
|
dhaase-de/dh-python-dh
|
40b04407e5f67ec261f559263718ec2b2588dabb
|
[
"MIT"
] | null | null | null |
dh/network/__init__.py
|
dhaase-de/dh-python-dh
|
40b04407e5f67ec261f559263718ec2b2588dabb
|
[
"MIT"
] | null | null | null |
"""
Tools for network communication.
"""
import abc
import io
import json
import socket
import struct
import sys
import time
import zlib
import dh.ejson
import dh.utils
# NumPy is only needed for some parts and is optional
try:
import numpy as np
except ImportError as e:
_NUMPY_ERROR = e
else:
_NUMPY_ERROR = None
###
#%% socket message types
###
###
#%% extended socket with support for multiple message types
###
###
#%% socket servers/clients
###
| 30.858537
| 163
| 0.641084
|
08a4afc96f7c56b3ec32526d5ad975c5272d1d27
| 1,225
|
py
|
Python
|
setup.py
|
danizen/apache-replay
|
5e5cc8d0df693f2367d188d71099041c6a65317f
|
[
"MIT"
] | null | null | null |
setup.py
|
danizen/apache-replay
|
5e5cc8d0df693f2367d188d71099041c6a65317f
|
[
"MIT"
] | null | null | null |
setup.py
|
danizen/apache-replay
|
5e5cc8d0df693f2367d188d71099041c6a65317f
|
[
"MIT"
] | null | null | null |
from setuptools import setup
setup(
name = 'apache-replay',
version = '0.0.3',
url = 'https://github.com/danizen/apache-replay.git',
author = 'Daniel Davis',
author_email = 'dan@danizen.net',
description = 'Facilitates replaying of Apache files in Common Log and Combined Log format',
long_description = get_readme(),
long_description_content_type='text/markdown; charset=UTF-8; variant=CommonMark',
packages = ['apache_replay'],
entry_points={
'console_scripts': [
'apache-replay=apache_replay.script:main',
]
},
install_requires = ['attrs', 'requests'],
tests_require = ['attrs', 'requests', 'pytest', 'pytest-pythonpath', 'pytest-cov', 'tox'],
classifiers = [
'Development Status :: 3 - Alpha',
'Programming Language :: Python :: 3',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Software Development :: Testing :: Traffic Generation',
]
)
| 33.108108
| 96
| 0.625306
|
08a6713846bc912e38363c64df0ddb98d1d40470
| 464
|
py
|
Python
|
setup.py
|
duytintruong/do_more
|
3a306da78ca302d2963cc7bae5f17e668168b595
|
[
"MIT"
] | null | null | null |
setup.py
|
duytintruong/do_more
|
3a306da78ca302d2963cc7bae5f17e668168b595
|
[
"MIT"
] | null | null | null |
setup.py
|
duytintruong/do_more
|
3a306da78ca302d2963cc7bae5f17e668168b595
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
setup(
name='do_more',
packages=['do_more'],
version='0.1.0',
description='A library enhancing pydoit features.',
author='Duy Tin Truong',
author_email='',
url='https://github.com/duytintruong/do_more',
download_url='https://github.com/duytintruong/do_more/archive/0.1.0.tar.gz',
keywords=['pipeline', 'data', 'doit'],
classifiers=[],
install_requires=[
'doit>=0.31.1',
],
)
| 27.294118
| 80
| 0.637931
|
08a7afeb8a1abc10ec91968f8b8eddea6a7e071a
| 16,361
|
py
|
Python
|
qtable/engine.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
qtable/engine.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
qtable/engine.py
|
ihgazni2/qtable
|
269bb1052d7c7aeeae4d0b1024746fae38870c40
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import elist.elist as elel
import edict.edict as eded
import tlist.tlist as tltl
import copy
__all__ = [
'_append_col',
'_append_cols',
'_append_row',
'_append_rows',
'_cn2clocs',
'_col',
'_cols',
'_columns_map',
'_crop',
'_get_clocs',
'_get_rlocs',
'_getitem',
'_index_map',
'_insert_col',
'_insert_cols',
'_insert_row',
'_insert_rows',
'_ltd_index_first',
'_ltd_index_last',
'_name2ilocs',
'_prepend_col',
'_prepend_cols',
'_prepend_row',
'_prepend_rows',
'_reindex_cols',
'_reindex_rows',
'_rename_cols',
'_rename_rows',
'_repl_col',
'_repl_cols',
'_repl_row',
'_repl_rows',
'_rmcol',
'_rmcols',
'_rmrow',
'_rmrows',
'_rn2rlocs',
'_row',
'_rows',
'_setitem',
'_subtb',
'_swapcol',
'_swaprow',
'_transpose',
'_fliplr',
'_flipud'
]
#all operations will generate a new Qtable(copy.deepcopy), and will not change the original Qtable
#columns col-names-list no-duplicate-names-permitted
#index rowname-names-list no-duplicate-names-permitted
#df pd.DataFrame
# index_map = _index_map(df)
# columns_map = _columns_map(df)
# _getitem(df,rowname,colname,rloc=0,cloc=0)
# rloc relative-row-position
# cloc relative-col-position
#rn ---------------------rowname
#cn ---------------------colname
| 27.40536
| 110
| 0.603753
|
08a840735e2065bf6687b54ab836fe21b29363da
| 901
|
py
|
Python
|
src/npgru/predictor/tensorflow_predictor.py
|
grainpowder/gru-forward-numpy-app
|
efd24f9f397d51e7e18bdad5cba12451ad69d3de
|
[
"MIT"
] | null | null | null |
src/npgru/predictor/tensorflow_predictor.py
|
grainpowder/gru-forward-numpy-app
|
efd24f9f397d51e7e18bdad5cba12451ad69d3de
|
[
"MIT"
] | null | null | null |
src/npgru/predictor/tensorflow_predictor.py
|
grainpowder/gru-forward-numpy-app
|
efd24f9f397d51e7e18bdad5cba12451ad69d3de
|
[
"MIT"
] | null | null | null |
from typing import List, Tuple
import sentencepiece as spm
import tensorflow as tf
import tensorflow.keras as keras
from npgru.predictor.category_predictor import CategoryPredictor
from npgru.preprocessor.model_file import get_model_dir
| 39.173913
| 116
| 0.751387
|
08a9c217d15d92822c4d608156162c3fb67806ee
| 358
|
py
|
Python
|
eo_sensors/migrations/0005_auto_20210425_1946.py
|
dymaxionlabs/satlomas-back
|
f4568f6535755fd4a2432ecc661a264872206c6c
|
[
"Apache-2.0"
] | 1
|
2021-02-18T20:11:25.000Z
|
2021-02-18T20:11:25.000Z
|
eo_sensors/migrations/0005_auto_20210425_1946.py
|
dymaxionlabs/satlomas-back
|
f4568f6535755fd4a2432ecc661a264872206c6c
|
[
"Apache-2.0"
] | 7
|
2020-06-09T14:54:43.000Z
|
2021-09-22T21:00:13.000Z
|
eo_sensors/migrations/0005_auto_20210425_1946.py
|
dymaxionlabs/satlomas-back
|
f4568f6535755fd4a2432ecc661a264872206c6c
|
[
"Apache-2.0"
] | 1
|
2020-05-08T20:42:49.000Z
|
2020-05-08T20:42:49.000Z
|
# Generated by Django 3.1.6 on 2021-04-25 19:46
from django.db import migrations
| 19.888889
| 57
| 0.592179
|
08ab2b8e0ae8691b40dba63be074cd70a395c8c1
| 503
|
py
|
Python
|
algorithms/course_1/assignments/frac_knapsack/code.py
|
ideahitme/coursera
|
af44c8d817481d4f9025205284f109d95a9bb45d
|
[
"MIT"
] | null | null | null |
algorithms/course_1/assignments/frac_knapsack/code.py
|
ideahitme/coursera
|
af44c8d817481d4f9025205284f109d95a9bb45d
|
[
"MIT"
] | null | null | null |
algorithms/course_1/assignments/frac_knapsack/code.py
|
ideahitme/coursera
|
af44c8d817481d4f9025205284f109d95a9bb45d
|
[
"MIT"
] | null | null | null |
import math
line = raw_input().strip().split()
N = int(line[0])
cap = float(line[1])
items = []
for _ in xrange(N):
items.append(map(float, raw_input().split()))
items = sorted(items, cmp=custcmp)
answer = 0.0
index = 0
while cap > 0 and index < N:
cur = items[index]
to_add = min(cur[1], cap)
answer += to_add*(cur[0]/cur[1])
cap -= to_add
index+=1
print answer
| 14.371429
| 46
| 0.600398
|
08ab2c42e46cc085323887951f27802509bc2c01
| 1,131
|
py
|
Python
|
pythonDesafios/desafio058.py
|
mateusdev7/desafios-python
|
6160ddc84548c7af7f5775f9acabe58238f83008
|
[
"MIT"
] | null | null | null |
pythonDesafios/desafio058.py
|
mateusdev7/desafios-python
|
6160ddc84548c7af7f5775f9acabe58238f83008
|
[
"MIT"
] | null | null | null |
pythonDesafios/desafio058.py
|
mateusdev7/desafios-python
|
6160ddc84548c7af7f5775f9acabe58238f83008
|
[
"MIT"
] | null | null | null |
from random import randint
from time import sleep
opcao = 123
cont = 0
while opcao != 0:
print('-=-' * 20)
print('Vou pensar em um nmero entre 0 e 10, quer tentar adivinhar?')
print('-=-' * 20)
print('\n[ 1 ] Sim [ 0 ] No')
opcao = int(input('Escolha uma das opes acima\n>'))
if opcao == 1:
computador = randint(0, 10) # O computador sorteia um nmero de 0 a 10
usuario = int(input('\nEscolha um nmero entre 0 e 10: ').strip())
cont += 1
while usuario != computador:
if usuario < computador:
print('Mais... Tente novamente')
else:
print('Menos... Tente novamente')
usuario = int(input('Insira outro nmero: '))
cont += 1
if usuario == computador:
print('\nPARABNS. Voc ACERTOU!!!')
print('Calculando a quantide de tentivas necessrias...')
sleep(1)
print('-=-' * 15)
print(f'Voc precisou de {cont} tentativa(s) para acertar.')
print('-=-'* 15)
elif opcao == 0:
print('Voc saiu do jogo.')
| 35.34375
| 78
| 0.535809
|
08ab8c8ec2777c51be6f0455ab77ed9f159c8995
| 1,896
|
py
|
Python
|
FeatureEngineeringPy_DataScience/demo153_rarecategories.py
|
mahnooranjum/Programming_DataScience
|
f7a4215d4615b3f8460c3a1944a585628cf6930d
|
[
"MIT"
] | null | null | null |
FeatureEngineeringPy_DataScience/demo153_rarecategories.py
|
mahnooranjum/Programming_DataScience
|
f7a4215d4615b3f8460c3a1944a585628cf6930d
|
[
"MIT"
] | null | null | null |
FeatureEngineeringPy_DataScience/demo153_rarecategories.py
|
mahnooranjum/Programming_DataScience
|
f7a4215d4615b3f8460c3a1944a585628cf6930d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Demo153_RareCategories.ipynb
## Rare Categories
- Labels
- The number of labels in the dataset are different
- __high cardinality__ refers to uniqueness of data values
- The lower the cardinality, the more duplicated elements in a column
- A column with the lowest possible cardinality would have the same value for every row
- Highly cardinal variables dominate tree based algorithms
- Labels may only be present in the training data set, but not in the test data set
- Labels may appear in the test set that were not present in the training set
__Tree methods are biased towards variables with many labels__
"""
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from google.colab import drive
drive.mount('/content/gdrive')
data = pd.read_csv("gdrive/My Drive/Colab Notebooks/FeatureEngineering/train.csv")
cat_cols = ['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked']
for i in cat_cols:
print('Number of categories in the variable {}: {}'.format(i,len(data[i].unique())))
print('Total rows: {}'.format(len(data)))
data['Sex'].value_counts()
data['Cabin_processed'] = data['Cabin'].astype(str).str[0]
data['Cabin_processed_X'] = data['Cabin'].astype(str).str[1]
cat_cols = [ 'Sex', 'Embarked', 'Cabin_processed']
for i in cat_cols:
sns.catplot(x=i, kind='count', data=data)
data['Cabin_processed'].value_counts() / len(data)
for i in cat_cols:
sns.catplot(x=i,data=data, hue='Survived', kind='count', palette="ch:.25")
"""### Transform Rare Labels"""
_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp.sort_values(ascending=False)
_temp
_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp
for i in _labels:
data['Cabin_processed'].replace(i, 'rare', inplace=True)
_temp = pd.Series(data['Cabin_processed'].value_counts() / len(data))
_temp
| 26.704225
| 88
| 0.728903
|
08ac25798ceabd59a9de5ca3ae55d5d23549ad85
| 6,640
|
py
|
Python
|
adios-1.9.0/utils/skel/lib/skel_params.py
|
swatisgupta/Adaptive-compression
|
b97a1d3d3e0e968f59c7023c7367a7efa9f672d0
|
[
"BSD-2-Clause"
] | null | null | null |
adios-1.9.0/utils/skel/lib/skel_params.py
|
swatisgupta/Adaptive-compression
|
b97a1d3d3e0e968f59c7023c7367a7efa9f672d0
|
[
"BSD-2-Clause"
] | null | null | null |
adios-1.9.0/utils/skel/lib/skel_params.py
|
swatisgupta/Adaptive-compression
|
b97a1d3d3e0e968f59c7023c7367a7efa9f672d0
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
import sys
import os
import argparse
import adios
import skel_settings
import skel_bpls
# Command line parsing is chained together. This is stage two. The first stage happens in ../bin/skel
# TODO: Get rid of this in favor of chained version, above.
if __name__ == "__main__":
main()
| 37.514124
| 181
| 0.613253
|
08ac337e36cbf17a299188a90d4c593630ec7136
| 786
|
py
|
Python
|
centraldogma/util.py
|
line/centraldogma-python
|
2248e8d7d660c0535aa747a70742ddd2bb0a5268
|
[
"Apache-2.0"
] | 8
|
2021-12-02T00:51:35.000Z
|
2022-01-07T09:49:08.000Z
|
centraldogma/util.py
|
line/centraldogma-python
|
2248e8d7d660c0535aa747a70742ddd2bb0a5268
|
[
"Apache-2.0"
] | 8
|
2021-11-22T03:37:17.000Z
|
2022-02-14T10:02:31.000Z
|
centraldogma/util.py
|
line/centraldogma-python
|
2248e8d7d660c0535aa747a70742ddd2bb0a5268
|
[
"Apache-2.0"
] | 4
|
2021-11-22T03:48:39.000Z
|
2021-12-31T05:42:43.000Z
|
# Copyright 2021 LINE Corporation
#
# LINE Corporation licenses this file to you under the Apache License,
# version 2.0 (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
| 39.3
| 78
| 0.720102
|
08ad2e5befe9beab57f5cfbb4752e8b8f6f82193
| 3,834
|
py
|
Python
|
Build/site_scons/msvs_preprocessed.py
|
Syeberman/nohtyP
|
59d7214a5a5474a03c54f45d79ad4fd037989a79
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Build/site_scons/msvs_preprocessed.py
|
Syeberman/nohtyP
|
59d7214a5a5474a03c54f45d79ad4fd037989a79
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
Build/site_scons/msvs_preprocessed.py
|
Syeberman/nohtyP
|
59d7214a5a5474a03c54f45d79ad4fd037989a79
|
[
"CNRI-Python-GPL-Compatible"
] | null | null | null |
"""Provides a Preprocessed action for the Microsoft Visual Studio compilers.
"""
import os
import SCons.Action
import SCons.Util
import preprocessed_builder
# XXX These are internal to SCons and may change in the future...but it's unlikely
from SCons.Tool.msvc import CSuffixes, CXXSuffixes, msvc_batch_key
# TODO Contribute this back to SCons
# XXX Adapted from SCons' msvc_output_flag
def msvc_pp_output_flag(target, source, env, for_signature):
"""
Returns the correct /Fi flag for batching.
If batching is disabled or there's only one source file, then we
return an /Fi string that specifies the target explicitly. Otherwise,
we return an /Fi string that just specifies the first target's
directory (where the Visual C/C++ compiler will put the .i files).
"""
# TODO /Fi is not supported on Visual Studio 9.00 (2008) and earlier
# https://msdn.microsoft.com/en-us/library/8z9z0bx6(v=vs.90).aspx
# Fixing MSVC_BATCH mode. Previous if did not work when MSVC_BATCH
# was set to False. This new version should work better. Removed
# len(source)==1 as batch mode can compile only one file
# (and it also fixed problem with compiling only one changed file
# with batch mode enabled)
if not 'MSVC_BATCH' in env or env.subst('$MSVC_BATCH') in ('0', 'False', '', None):
return '/Fi$TARGET'
else:
# The Visual C/C++ compiler requires a \ at the end of the /Fi
# option to indicate an output directory. We use os.sep here so
# that the test(s) for this can be run on non-Windows systems
# without having a hard-coded backslash mess up command-line
# argument parsing.
return '/Fi${TARGET.dir}' + os.sep
CPreprocessedAction = SCons.Action.Action("$PPCCCOM", "$PPCCCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
CXXPreprocessedAction = SCons.Action.Action("$PPCXXCOM", "$PPCXXCOMSTR",
batch_key=msvc_batch_key,
targets='$CHANGED_TARGETS')
| 41.673913
| 139
| 0.684142
|
08af1415b9293340224b8360402e471dbf0548c7
| 5,716
|
py
|
Python
|
django_geoip/south_migrations/0001_initial.py
|
mandalay-rp/django-geoip
|
2608cb15cdd7678c2ff923aff2437b1a861b8e6b
|
[
"MIT"
] | 38
|
2015-01-10T06:44:12.000Z
|
2021-11-16T10:53:43.000Z
|
django_geoip/south_migrations/0001_initial.py
|
mandalay-rp/django-geoip
|
2608cb15cdd7678c2ff923aff2437b1a861b8e6b
|
[
"MIT"
] | 28
|
2015-01-11T08:44:06.000Z
|
2019-07-25T19:04:10.000Z
|
django_geoip/south_migrations/0001_initial.py
|
mandalay-rp/django-geoip
|
2608cb15cdd7678c2ff923aff2437b1a861b8e6b
|
[
"MIT"
] | 56
|
2015-01-11T08:30:57.000Z
|
2021-10-01T05:57:00.000Z
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| 53.420561
| 147
| 0.601645
|
08af904e7f82a923beed7c2fa65793eb9bf02793
| 878
|
py
|
Python
|
popbl_servicesapp/flask_app/order/application/api_client.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | null | null | null |
popbl_servicesapp/flask_app/order/application/api_client.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | 1
|
2021-06-02T00:57:11.000Z
|
2021-06-02T00:57:11.000Z
|
popbl_servicesapp/flask_app/order/application/api_client.py
|
xetxezarreta/master-popbl1
|
253880b9ba358f63f666893cdbbffe7391fcd096
|
[
"MIT"
] | null | null | null |
import requests
import json
from os import environ
from .models import Order, Piece
from .BLConsul import BLConsul
GATEWAY_PORT = environ.get("HAPROXY_PORT")
GATEWAY_ADDRESS = environ.get("HAPROXY_IP")
MACHINE_SERVICE = "machine"
PAYMENT_SERVICE = "payment"
DELIVERY_SERVICE = "delivery"
AUTH_SERVICE = "auth"
CA_CERT = environ.get("RABBITMQ_CA_CERT")
consul = BLConsul.get_instance()
| 29.266667
| 100
| 0.693622
|
08b0009d58869628b97762ea4dfa9d97bd3f4777
| 429
|
py
|
Python
|
envdsys/envdaq/migrations/0006_controllerdef_component_map.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 1
|
2021-11-06T19:22:53.000Z
|
2021-11-06T19:22:53.000Z
|
envdsys/envdaq/migrations/0006_controllerdef_component_map.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 25
|
2019-06-18T20:40:36.000Z
|
2021-07-23T20:56:48.000Z
|
envdsys/envdaq/migrations/0006_controllerdef_component_map.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | null | null | null |
# Generated by Django 3.1.7 on 2021-02-26 21:54
from django.db import migrations, models
| 22.578947
| 79
| 0.615385
|
08b08e4c091db6970d8bd9b3e8f858f92dfeb9ac
| 2,569
|
py
|
Python
|
polya/modules/congruence_closure_module.py
|
holtzermann17/polya
|
6d611bf47185249a96f4cf7ee9b3884bc70a15ac
|
[
"Apache-2.0"
] | 24
|
2015-01-01T18:21:40.000Z
|
2021-08-29T01:56:14.000Z
|
polya/modules/congruence_closure_module.py
|
holtzermann17/polya
|
6d611bf47185249a96f4cf7ee9b3884bc70a15ac
|
[
"Apache-2.0"
] | 1
|
2018-09-06T17:53:13.000Z
|
2018-09-07T13:57:39.000Z
|
polya/modules/congruence_closure_module.py
|
holtzermann17/polya
|
6d611bf47185249a96f4cf7ee9b3884bc70a15ac
|
[
"Apache-2.0"
] | 4
|
2017-02-08T15:04:09.000Z
|
2021-05-02T15:13:05.000Z
|
####################################################################################################
#
# congruence_closure_module.py
#
# Authors:
# Jeremy Avigad
# Rob Lewis
#
# This module maintains a union-find structure for terms in Blackboard, which is currently only used
# for congruence closure. It should perhaps be integrated differently into Blackboard.
#
# Contains a set for each equality class (up to constant multiples) of terms, and tracks which terms
# appear as arguments to which function terms.
#
####################################################################################################
import polya.main.terms as terms
import polya.main.messages as messages
import polya.util.timer as timer
import fractions
import itertools
| 36.7
| 100
| 0.54963
|
08b3ea49c776eba1ca9a6e036f7a93721ad3e46b
| 3,280
|
py
|
Python
|
build.py
|
Jackcava/mappingToFHIR
|
3189b55121a50ee1c4734227cde6da58ed6cb576
|
[
"MIT"
] | null | null | null |
build.py
|
Jackcava/mappingToFHIR
|
3189b55121a50ee1c4734227cde6da58ed6cb576
|
[
"MIT"
] | null | null | null |
build.py
|
Jackcava/mappingToFHIR
|
3189b55121a50ee1c4734227cde6da58ed6cb576
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import csv
| 34.893617
| 78
| 0.576524
|
08b53292c5c752e44fcf8b466dc5d84fa3ed0ec7
| 231
|
py
|
Python
|
server.py
|
LuisAlbizo/luisalbizo.github.io
|
823cac2c184686eb5056f9e1d3d0790f9a2233e1
|
[
"MIT"
] | null | null | null |
server.py
|
LuisAlbizo/luisalbizo.github.io
|
823cac2c184686eb5056f9e1d3d0790f9a2233e1
|
[
"MIT"
] | null | null | null |
server.py
|
LuisAlbizo/luisalbizo.github.io
|
823cac2c184686eb5056f9e1d3d0790f9a2233e1
|
[
"MIT"
] | null | null | null |
import http.server
import os
import socketserver
Handler = http.server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("127.0.0.1", 8080), Handler)
print("server:\thttp://127.0.0.1:8080\n\nlog:")
httpd.serve_forever()
| 19.25
| 60
| 0.757576
|
08b53ae263a1ae583483ba9e1d84efca2906ad4a
| 2,109
|
py
|
Python
|
sources-filter-list.py
|
kerberizer/wikimedia-scripts
|
18b78d5cc0042d5efcb355a65f4309fb4ae97eaf
|
[
"CC0-1.0"
] | null | null | null |
sources-filter-list.py
|
kerberizer/wikimedia-scripts
|
18b78d5cc0042d5efcb355a65f4309fb4ae97eaf
|
[
"CC0-1.0"
] | null | null | null |
sources-filter-list.py
|
kerberizer/wikimedia-scripts
|
18b78d5cc0042d5efcb355a65f4309fb4ae97eaf
|
[
"CC0-1.0"
] | 1
|
2016-07-31T07:26:33.000Z
|
2016-07-31T07:26:33.000Z
|
#!/usr/bin/env python3
import locale
import sys
from datetime import datetime as dt
import pywikibot as pwb
if __name__ == '__main__':
main(sys.argv)
# vim: set ts=4 sts=4 sw=4 tw=100 et:
| 31.477612
| 98
| 0.543385
|
08b87f9a1caf36c7bc295170c1ae9c29a566dd2b
| 1,494
|
py
|
Python
|
Python scripts/Mail/search-and-modify-inbox-mail.py
|
shartrooper/My-python-scripts
|
5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c
|
[
"MIT"
] | null | null | null |
Python scripts/Mail/search-and-modify-inbox-mail.py
|
shartrooper/My-python-scripts
|
5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c
|
[
"MIT"
] | null | null | null |
Python scripts/Mail/search-and-modify-inbox-mail.py
|
shartrooper/My-python-scripts
|
5c3a8db4ed9a75bd9ab4b29153a788d9e6c5d28c
|
[
"MIT"
] | null | null | null |
Python 3.8.3 (tags/v3.8.3:6f8c832, May 13 2020, 22:20:19) [MSC v.1925 32 bit (Intel)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # pip install imapclient // pip install pyzmail
>>> import imapclient
>>> conn= imapclient.IMAPClient('imap.gmail.com', ssl=True) #True to use SSL encryption
>>> conn.login('example2@mail.com','whatever')
>>> conn.select_folter('INBOX',readonly= True)
>>> UIDs = conn.search(['SINCE 20-Aug-2015']) #return a list of unique IDs for mails
>>> rawMessage=conn.fetch(['mail int UID number to fetch'],['BODY[]','FLAGS'])
>>> import pyzmail
>>> pyzmail.PyzMessage.factory(rawMessage['same UID Number passed to rawMessage'][b'BODY'])
>>> message=pyzmail.PyzMessage.factory(rawMessage['same UID Number passed to rawMessage'][b'BODY'])
T
>>> message.get_subject() #mail's subject
>>> message.get_addresses('from')
>>> message.get_addresses('to')
>>> message.get_addresses('bcc')
>>> message.text_part # return len and type
>>> message.text_part #None if doesn't have html
>>> message.html_part == None # True
>>> message.text_part.get_payload().decode('UTF-8')
>>> message.text_part.charset
>>> conn.list_folders()
>>> conn.select_folder('INBOX',readonly=False) #to modify the inbox
>>> UIDS= conn.search(['ON 24-Aug-2015'])
>>> conn.delete_messages(['UIDs to delete'])
>>> ''' Full documentation ar: https://imapclient.readthedocs.org http://www.magiksys.net/pyzmail '''
| 33.954545
| 101
| 0.680054
|
08bc79ee80be4534b8a825bbd1af9247d2662a71
| 2,633
|
py
|
Python
|
diagnosis/system_diagnosis/inference.py
|
opengauss-mirror/openGauss-AI
|
449ce3cac81ced74dd56edf76709553411b0814a
|
[
"MulanPSL-1.0"
] | 1
|
2021-12-22T08:31:07.000Z
|
2021-12-22T08:31:07.000Z
|
diagnosis/system_diagnosis/inference.py
|
opengauss-mirror/openGauss-AI
|
449ce3cac81ced74dd56edf76709553411b0814a
|
[
"MulanPSL-1.0"
] | null | null | null |
diagnosis/system_diagnosis/inference.py
|
opengauss-mirror/openGauss-AI
|
449ce3cac81ced74dd56edf76709553411b0814a
|
[
"MulanPSL-1.0"
] | 3
|
2021-12-16T13:55:57.000Z
|
2022-02-24T09:53:49.000Z
|
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
*/
import pickle, os, json
import numpy as np
import train
from argparse import ArgumentParser
from pprint import pprint
anomaly_type_num = 10
n_neighbors = 5
# data_explore()
# 0"cpu_saturation",
# 1"io_saturation",
# 2"database_backup",
# 3"table_restore",
# 4"poorly_physical_design",
# 5"poorly_written_query",
# 6"workload_spike",
# 7"flush_log",
# 8"vacuum_analyze",
# 9"lock_contention",
X_train_path = "./model/X_train.npy"
y_train_path = "./model/y_train.npy"
alpha_vec_path = "./model/anomaly_vec.npy"
if __name__ == "__main__":
parser = ArgumentParser(description="")
parser.add_argument("--vec_path")
args = parser.parse_args()
X_train, y_train, alpha_vec = np.array([]), np.array([]), np.array([])
if os.path.isfile(X_train_path)==False or os.path.isfile(y_train_path)==False:
train.generate_X_y()
if os.path.isfile(alpha_vec_path)==False:
train.generate_anomaly_alpha()
X_train = np.load(X_train_path)
y_train = np.load(y_train_path)
alpha_vec = np.load(alpha_vec_path)
new_vec = np.load(args.vec_path)
root_cause_id = kNN(alpha_vec, X_train, y_train, new_vec)
build_description(root_cause_id)
| 28.010638
| 87
| 0.689708
|
08bd8918199d2e5006f69cc8ccd6b3fde0ba16d8
| 1,850
|
py
|
Python
|
python/test_golden_master.py
|
AEGISoft/GildedRose-Refactoring-Kata
|
a81452de5b6831fa6c4f42b15f827ecf6ef29807
|
[
"MIT"
] | null | null | null |
python/test_golden_master.py
|
AEGISoft/GildedRose-Refactoring-Kata
|
a81452de5b6831fa6c4f42b15f827ecf6ef29807
|
[
"MIT"
] | null | null | null |
python/test_golden_master.py
|
AEGISoft/GildedRose-Refactoring-Kata
|
a81452de5b6831fa6c4f42b15f827ecf6ef29807
|
[
"MIT"
] | null | null | null |
import unittest
from gilded_rose import Item, GildedRose
if __name__ == '__main__':
unittest.main()
| 31.355932
| 87
| 0.617838
|
08bee3076eef6096e40c84d40b43d0ef450a6e30
| 895
|
py
|
Python
|
utils/randomdata.py
|
M1d0r1/py_mantis
|
8d2b05601b9240e76e2e07b50770e39df5bcade9
|
[
"Apache-2.0"
] | null | null | null |
utils/randomdata.py
|
M1d0r1/py_mantis
|
8d2b05601b9240e76e2e07b50770e39df5bcade9
|
[
"Apache-2.0"
] | null | null | null |
utils/randomdata.py
|
M1d0r1/py_mantis
|
8d2b05601b9240e76e2e07b50770e39df5bcade9
|
[
"Apache-2.0"
] | null | null | null |
import random
import string
| 22.948718
| 107
| 0.61676
|
08c039eccfb3500006401f61d37873f932777364
| 1,120
|
py
|
Python
|
douyin/hot/trend.py
|
miaotiaotech/DouYin
|
e996ad99ce27e0d13f2856c497fd4b4f05f95b56
|
[
"MIT"
] | 657
|
2018-10-24T16:58:04.000Z
|
2022-03-15T03:58:04.000Z
|
douyin/hot/trend.py
|
1997lw/DouYin
|
5859f4db5258ad10926fddaa2b4074c85581d419
|
[
"MIT"
] | 15
|
2018-10-30T09:40:11.000Z
|
2020-08-09T13:58:31.000Z
|
douyin/hot/trend.py
|
1997lw/DouYin
|
5859f4db5258ad10926fddaa2b4074c85581d419
|
[
"MIT"
] | 249
|
2018-10-25T07:12:14.000Z
|
2022-02-21T07:49:58.000Z
|
from douyin.utils import fetch
from douyin.config import hot_trend_url, common_headers
from douyin.utils.tranform import data_to_music, data_to_topic
from douyin.structures.hot import HotTrend
from douyin.utils.common import parse_datetime
# define trend query params
query = {
'version_code': '2.9.1',
'count': '10',
}
| 32
| 99
| 0.633036
|
08c1a85992031481a6829f933c45c2206c709fa4
| 288
|
py
|
Python
|
hashing/hashing.py
|
subhamsagar524/Learn-Blockchain
|
316f30ed9d43f6ab806ca87b9b83c0237ef69828
|
[
"MIT"
] | null | null | null |
hashing/hashing.py
|
subhamsagar524/Learn-Blockchain
|
316f30ed9d43f6ab806ca87b9b83c0237ef69828
|
[
"MIT"
] | null | null | null |
hashing/hashing.py
|
subhamsagar524/Learn-Blockchain
|
316f30ed9d43f6ab806ca87b9b83c0237ef69828
|
[
"MIT"
] | 1
|
2020-03-13T06:32:46.000Z
|
2020-03-13T06:32:46.000Z
|
# Import the hashing Library
import hashlib
# Get the string as input
word = input("Enter the word for Hashing: ")
# Get the hashing
hashed_code = hashlib.sha256(word.encode())
final = hashed_code.hexdigest()
# Print the result
print("Hashed with 256 bit: ")
print(final)
| 20.571429
| 45
| 0.704861
|
08c3c73fb071c563aa6c6cb9106af9a4e78d2bdf
| 1,416
|
py
|
Python
|
bookmarks/bookmarks/models.py
|
tom-henderson/bookmarks
|
5515bedf1008da3e97caf0ed5867bcf983b375b1
|
[
"MIT"
] | 6
|
2017-01-09T22:59:31.000Z
|
2022-01-06T01:40:57.000Z
|
bookmarks/bookmarks/models.py
|
tom-henderson/bookmarks
|
5515bedf1008da3e97caf0ed5867bcf983b375b1
|
[
"MIT"
] | 30
|
2016-09-13T07:30:26.000Z
|
2022-02-07T22:49:03.000Z
|
bookmarks/bookmarks/models.py
|
tom-henderson/bookmarks
|
5515bedf1008da3e97caf0ed5867bcf983b375b1
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
from django.dispatch import receiver
from django.conf import settings
from taggit.managers import TaggableManager
import requests
| 28.897959
| 74
| 0.631356
|
08c3ea3ed3c0d6241f479fa852ed05c431f46706
| 797
|
py
|
Python
|
vernam cipher.py
|
BenMiller3/Vernam-Cipher
|
19f7a447bc8080c8e275b96a85d359f4e187a4d3
|
[
"MIT"
] | null | null | null |
vernam cipher.py
|
BenMiller3/Vernam-Cipher
|
19f7a447bc8080c8e275b96a85d359f4e187a4d3
|
[
"MIT"
] | null | null | null |
vernam cipher.py
|
BenMiller3/Vernam-Cipher
|
19f7a447bc8080c8e275b96a85d359f4e187a4d3
|
[
"MIT"
] | null | null | null |
"""
Vernam Cipher
Benjamin D. Miller
Takes a key, and a message
Encripts the message using the key
"""
""" * TEST CASES * """
vernam(9,"hello world")
vernam(14,"TEST_CASE 34!")
vernam("test","test")
| 27.482759
| 91
| 0.567127
|
08c47e02acc3cf4c516e8edc1336ab1be1430cd8
| 421
|
py
|
Python
|
utils.py
|
c0derabbit/talk
|
26673fde934ef51e76002ea6ddc65bdb42720865
|
[
"MIT"
] | null | null | null |
utils.py
|
c0derabbit/talk
|
26673fde934ef51e76002ea6ddc65bdb42720865
|
[
"MIT"
] | 1
|
2017-05-25T20:37:54.000Z
|
2017-05-26T07:33:00.000Z
|
utils.py
|
c0derabbit/talk
|
26673fde934ef51e76002ea6ddc65bdb42720865
|
[
"MIT"
] | null | null | null |
from datetime import datetime as d
| 30.071429
| 100
| 0.64133
|
08c693a49ad9f776684155a7c2f26843f0a00070
| 3,694
|
py
|
Python
|
fineract/objects/org.py
|
mobidevke/py-fineract
|
712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd
|
[
"Apache-2.0"
] | 7
|
2019-03-11T16:17:33.000Z
|
2020-10-22T21:57:51.000Z
|
fineract/objects/org.py
|
mobidevke/py-fineract
|
712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd
|
[
"Apache-2.0"
] | 3
|
2019-11-05T20:22:16.000Z
|
2019-12-11T17:09:04.000Z
|
fineract/objects/org.py
|
mobidevke/py-fineract
|
712b0c20686accd7d7e0a2356ccaf59c5fe4f7dd
|
[
"Apache-2.0"
] | 2
|
2020-11-19T16:00:36.000Z
|
2021-11-19T09:36:13.000Z
|
from fineract.objects.currency import Currency
from fineract.objects.fineract_object import FineractObject
from fineract.objects.types import ChargeTimeType, ChargeAppliesTo, ChargeCalculationType, ChargePaymentMode
| 36.574257
| 117
| 0.647266
|
08c6e61cafacb0416494f10178b2d50c3d4b7ef8
| 1,736
|
py
|
Python
|
Heap/PathWithMinEffort.py
|
karan2808/Python-Data-Structures-and-Algorithms
|
a4b39ddf7297541d90dc4efcaab883f928281abd
|
[
"MIT"
] | 2
|
2021-01-31T03:42:01.000Z
|
2021-01-31T03:43:08.000Z
|
Heap/PathWithMinEffort.py
|
karan2808/Python-Data-Structures-and-Algorithms
|
a4b39ddf7297541d90dc4efcaab883f928281abd
|
[
"MIT"
] | null | null | null |
Heap/PathWithMinEffort.py
|
karan2808/Python-Data-Structures-and-Algorithms
|
a4b39ddf7297541d90dc4efcaab883f928281abd
|
[
"MIT"
] | 1
|
2021-01-31T03:42:02.000Z
|
2021-01-31T03:42:02.000Z
|
from heapq import heapify, heappop, heappush
if __name__ == "__main__":
main()
| 36.166667
| 123
| 0.506336
|
08c9e9c176a984ea5d15821ab3616cd2313fc432
| 1,427
|
wsgi
|
Python
|
vagrant/catalog/StuffMart.wsgi
|
cpwhidden/StuffMart
|
a192b8cad8942d0bfddb3af861f1e48c460e28cf
|
[
"MIT"
] | null | null | null |
vagrant/catalog/StuffMart.wsgi
|
cpwhidden/StuffMart
|
a192b8cad8942d0bfddb3af861f1e48c460e28cf
|
[
"MIT"
] | null | null | null |
vagrant/catalog/StuffMart.wsgi
|
cpwhidden/StuffMart
|
a192b8cad8942d0bfddb3af861f1e48c460e28cf
|
[
"MIT"
] | null | null | null |
activate_this = '/var/www/html/venv/bin/activate_this.py'
execfile(activate_this, dict(__file__=activate_this))
import sys, os, logging
from flask_apscheduler import APScheduler
sys.path.insert(0, 'var/www/html/StuffMart/vagrant/catalog')
logging.basicConfig(stream=sys.stderr)
from server import flask as application
application.secret_key = 'qPHE[Cht}*kSCVango3i'
application.config['APP_DIR'] = os.path.abspath(os.path.dirname(__file__))
application.config['WHOOSH_BASE'] = 'server/whoosh'
application.config['PRODUCT_IMAGES_FOLDER'] = 'vagrant/catalog/server/static/product_images/'
application.config['JOBS'] = [
{
'id': 'buildNewlyAddedRSSFeed',
'func': 'server.views:buildNewlyAddedRSSFeed',
'trigger': 'interval',
'seconds': (60*60)
},
{
'id': 'buildNewlyAddedAtomFeed',
'func': 'server.views:buildNewlyAddedAtomFeed',
'trigger': 'interval',
'seconds': (60*60)
},
{
'id': 'buildNewlyAddedRSSFeedAtStartup',
'func': 'server.views:buildNewlyAddedRSSFeed'
},
{
'id': 'buildNewlyAddedAtomFeedAtStartup',
'func': 'server.views:buildNewlyAddedAtomFeed'
}
]
application.config['SCHEDULER_VIEWS_ENABLED'] = True
application.debug = True
scheduler = APScheduler()
scheduler.init_app(application)
scheduler.start()
| 34.804878
| 93
| 0.658725
|
08cc589cc9423942aa94cc3bb343109a1f7cba67
| 18,161
|
py
|
Python
|
tests/strategies/test_horizontal.py
|
rohith-bs/dgraphpandas
|
29e91e2e7bb1d5d991ab94709a2d7e27f7dd7316
|
[
"MIT"
] | 1
|
2022-02-28T17:34:11.000Z
|
2022-02-28T17:34:11.000Z
|
tests/strategies/test_horizontal.py
|
rohith-bs/dgraphpandas
|
29e91e2e7bb1d5d991ab94709a2d7e27f7dd7316
|
[
"MIT"
] | null | null | null |
tests/strategies/test_horizontal.py
|
rohith-bs/dgraphpandas
|
29e91e2e7bb1d5d991ab94709a2d7e27f7dd7316
|
[
"MIT"
] | 1
|
2021-04-10T19:57:05.000Z
|
2021-04-10T19:57:05.000Z
|
import unittest
from unittest.mock import patch, Mock
import pandas as pd
from pandas.testing import assert_frame_equal
from parameterized import parameterized
from dgraphpandas.strategies.horizontal import horizontal_transform
| 34.395833
| 125
| 0.472
|
08cedf482cda63c943ec43e8d04a65c278427e19
| 378
|
py
|
Python
|
clienteTCP.py
|
planetacomputer/pythonsecurity
|
5b808512afae5bc221715f37f91a0294f4800f19
|
[
"MIT"
] | null | null | null |
clienteTCP.py
|
planetacomputer/pythonsecurity
|
5b808512afae5bc221715f37f91a0294f4800f19
|
[
"MIT"
] | null | null | null |
clienteTCP.py
|
planetacomputer/pythonsecurity
|
5b808512afae5bc221715f37f91a0294f4800f19
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python # This is client.py file
import socket # Import socket module
s = socket.socket() # Create a socket object
#host = socket.gethostname() # Get local machine name
host = socket.gethostbyname("localhost")
print host
port = 53 # Reserve a port for your service.
s.connect((host, port))
print s.recv(1024)
s.close
| 29.076923
| 59
| 0.637566
|
08ceeff12c2a6ee62212a18498cd6880997296e3
| 1,759
|
py
|
Python
|
application/routes.py
|
N-A-Podgornov/CFT-MLC
|
ded9267c5b8053a15bdcc67be9f83097749cfb13
|
[
"Apache-2.0"
] | null | null | null |
application/routes.py
|
N-A-Podgornov/CFT-MLC
|
ded9267c5b8053a15bdcc67be9f83097749cfb13
|
[
"Apache-2.0"
] | null | null | null |
application/routes.py
|
N-A-Podgornov/CFT-MLC
|
ded9267c5b8053a15bdcc67be9f83097749cfb13
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
from flask import render_template, redirect, url_for, request
from werkzeug.utils import secure_filename
from config import Config
from application import app
from application.model import Model
| 30.859649
| 106
| 0.704377
|
08cfc63dc9bcf57b5303ab14c053f28fd612cafc
| 4,095
|
py
|
Python
|
tests/test_onnxml_imputer_converter.py
|
vumichien/hummingbird
|
8981e11ce2536167c329a5d9d20e81125a792fe4
|
[
"MIT"
] | 2,772
|
2020-05-04T21:03:40.000Z
|
2022-03-30T11:00:03.000Z
|
tests/test_onnxml_imputer_converter.py
|
vumichien/hummingbird
|
8981e11ce2536167c329a5d9d20e81125a792fe4
|
[
"MIT"
] | 486
|
2020-05-05T00:45:44.000Z
|
2022-03-15T01:02:31.000Z
|
tests/test_onnxml_imputer_converter.py
|
vumichien/hummingbird
|
8981e11ce2536167c329a5d9d20e81125a792fe4
|
[
"MIT"
] | 232
|
2019-11-02T22:06:38.000Z
|
2022-03-25T07:36:17.000Z
|
"""
Tests onnxml Imputer converter
"""
import unittest
import warnings
import numpy as np
import torch
from sklearn.impute import SimpleImputer
from hummingbird.ml._utils import onnx_ml_tools_installed, onnx_runtime_installed, lightgbm_installed
from hummingbird.ml import convert
# Import the optional ONNX toolchain pieces only when they are installed so
# this test module can still be imported (and its tests skipped) without them.
if onnx_runtime_installed():
    import onnxruntime as ort
if onnx_ml_tools_installed():
    from onnxmltools import convert_sklearn
    from onnxmltools.convert.common.data_types import FloatTensorType as FloatTensorType_onnx

if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| 40.147059
| 125
| 0.70696
|
08d1b0407331ee4e1921fc4b74a0794639337160
| 7,520
|
py
|
Python
|
rs_etl.py
|
jlauman/data_engineering_project_03
|
722c0f5226ed29c00d6b33e64da5982fe0be69e0
|
[
"MIT"
] | null | null | null |
rs_etl.py
|
jlauman/data_engineering_project_03
|
722c0f5226ed29c00d6b33e64da5982fe0be69e0
|
[
"MIT"
] | null | null | null |
rs_etl.py
|
jlauman/data_engineering_project_03
|
722c0f5226ed29c00d6b33e64da5982fe0be69e0
|
[
"MIT"
] | null | null | null |
import configparser, os, glob, csv, json, hashlib, time
import pandas as pd
import psycopg2
from pprint import pprint
from rs_sql_queries import staging_events_insert, staging_songs_insert
from rs_sql_queries import insert_table_queries
import boto3
from botocore import UNSIGNED
from botocore.config import Config
# Public Udacity S3 bucket holding the log_data/ and song_data/ prefixes.
DEND_BUCKET='udacity-dend'

# global lookup table
# Maps first name -> gender code, filled by load_gender_lookup() from the
# ./data/names files (presumably census-style 'F'/'M' codes -- TODO confirm).
NAME_TO_GENDER = {}
def load_gender_lookup():
    """Populate the global NAME_TO_GENDER table from the name/gender CSV files.

    Walks ./data/names under the current working directory and reads every
    *.txt file as comma-separated rows, mapping column 0 (first name) to
    column 1 (gender code). Entries from later files overwrite earlier ones
    for duplicate names.
    """
    base_path = os.getcwd() + '/data/names'
    for root, dirs, files in os.walk(base_path):
        file_paths = glob.glob(os.path.join(root, '*.txt'))
        for file_path in file_paths:
            print('names: %s' % file_path)
            with open(file_path) as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                for row in csv_reader:
                    NAME_TO_GENDER[row[0]] = row[1]
    # Cleanup: removed the stray no-op `True` statement and dead debug lines
    # that ended the original function body.
def get_object_paths(s3, bucket, prefix):
    """List objects in S3 bucket with given prefix.

    Uses paginator to ensure a complete list of object paths is returned.

    :param s3: boto3 S3 client
    :param bucket: bucket name to list from
    :param prefix: key prefix to list under
    :return: list of object keys under the prefix that end in '.json'
    """
    object_paths = []
    paginator = s3.get_paginator('list_objects')
    pages = paginator.paginate(Bucket=bucket, Prefix=prefix)
    for page in pages:
        # Keep only the JSON object keys from this page of results.
        r1 = list(map(lambda obj: obj['Key'], page['Contents']))
        r2 = list(filter(lambda str: str.endswith('.json'), r1))
        object_paths.extend(r2)
    print('%s/%s total object paths = %d' % (bucket, prefix, len(object_paths)))
    # NOTE(review): fixed 2s pause after listing -- presumably to let the
    # operator read the count or to throttle API calls; confirm it is needed.
    time.sleep(2)
    return object_paths
def load_staging_log_data(cur, conn):
    """Load song-play event records into s_songplay_event table.

    Reads every log_data JSON object from S3, keeps only 'NextSong' page
    events, derives calendar fields from the millisecond timestamp, and
    issues one multi-row INSERT per source file.

    :param cur: open psycopg2 cursor
    :param conn: open psycopg2 connection
    """
    s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
    file_paths = get_object_paths(s3, DEND_BUCKET, 'log_data')
    pprint(file_paths)
    for file_path in file_paths:
        # Each file starts a fresh batch from the INSERT statement prefix.
        sql = str(staging_events_insert)
        print('log_data: %s' % file_path)
        obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)
        str1 = obj1['Body'].read().decode('utf-8').strip()
        df = pd.read_json(str1, lines=True)
        # Keep only actual song plays.
        df = df[df.page == 'NextSong']
        # Derive calendar columns from the epoch-milliseconds 'ts' field.
        df['timestamp'] = pd.to_datetime(df['ts'], unit='ms')
        df['year'] = df['timestamp'].dt.year
        df['week'] = df['timestamp'].dt.weekofyear
        df['month'] = df['timestamp'].dt.month
        df['day'] = df['timestamp'].dt.day
        df['hour'] = df['timestamp'].dt.hour
        df['weekday'] = df['timestamp'].dt.weekday
        for index, row in df.iterrows():
            # create a sha256 hash for event's unique id
            event_id = hashlib.sha256((str(row.userId) + ' ' + str(row.sessionId) + ' ' + row.timestamp.strftime('%Y%m%d%H%M') + ' ' + row.song).encode('utf-8')).hexdigest()
            # Append one "(...)," values tuple; single quotes inside text
            # fields are escaped by doubling them for SQL.
            str1 = ("(" +
                "'" + event_id + "', " +
                "'" + row.artist.replace("'", "''") + "', " +
                "'" + row.auth + "', " +
                "'" + row.firstName.replace("'", "''") + "', " +
                "" + str(row.itemInSession) + ", " +
                "'" + row.lastName.replace("'", "''") + "', " +
                "'" + NAME_TO_GENDER[row.firstName] + "', " +
                "" + str(row.length) + ", " +
                "'" + row.level + "', " +
                "'" + row.location.replace("'", "''") + "', " +
                "'" + row.method + "', " +
                "'" + row.page + "', " +
                "'" + str(row.registration) + "', " +
                "'" + str(row.sessionId) + "', " +
                "'" + row.song.replace("'", "''") + "', " +
                "'" + str(row.status) + "', " +
                "'" + row.timestamp.strftime('%Y-%m-%d %H') + "', " +
                "" + str(row.year) + ", " +
                "" + str(row.week) + ", " +
                "" + str(row.month) + ", " +
                "" + str(row.day) + ", " +
                "" + str(row.hour) + ", " +
                "" + str(row.weekday) + ", " +
                "'" + row.userAgent.replace("'", "''") + "', " +
                "'" + str(row.userId) + "'" +
                "),\n")
            sql += str1
        # Swap the trailing ",\n" for ";" and execute the whole batch.
        sql = ''.join(sql).strip()[:-1] + ';'
        cur.execute(sql)
        conn.commit()
def load_staging_song_data(cur, conn):
    """Load song records into s_song staging table.

    Reads every song_data JSON object from S3 and accumulates multi-row
    INSERT batches, flushing whenever the statement grows past 8 KB, with a
    final flush for the last partial batch.

    :param cur: open psycopg2 cursor
    :param conn: open psycopg2 connection
    """
    sql = str(staging_songs_insert)
    s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
    file_paths = get_object_paths(s3, DEND_BUCKET, 'song_data')
    pprint(file_paths)
    for file_path in file_paths:
        print('song_data: %s' % file_path)
        obj1 = s3.get_object(Bucket='udacity-dend', Key=file_path)
        str1 = obj1['Body'].read().decode('utf-8').strip()
        data = json.loads(str1)
        # Year 0 in the source data means "unknown".
        if data['year'] == 0: data['year'] = None
        # fix link string... (some artist_location fields contain HTML anchors)
        if str(data['artist_location']).startswith('<a'): data['artist_location'] = None
        # One "(...)," values tuple; None becomes SQL null, quotes are doubled.
        str2 = ("(" +
            "'" + data['artist_id'] + "', " +
            "" + (str(data['artist_latitude']) if not data['artist_latitude'] == None else 'null') + ", " +
            "'" + str(data['artist_location']).replace("'", "''") + "', " +
            "" + (str(data['artist_longitude']) if not data['artist_longitude'] == None else 'null') + ", " +
            "'" + str(data['artist_name']).replace("'", "''") + "', " +
            "" + str(data['duration']) + ", " +
            "" + str(data['num_songs']) + ", " +
            "'" + data['song_id'] + "', " +
            "'" + str(data['title']).replace("'", "''") + "', " +
            "" + (str(data['year']) if not data['year'] == None else 'null') + "" +
            "),\n")
        sql += str2
        # batch inserts at 8k character threshold
        if len(sql) > 8192:
            print(' 8k insert...')
            sql = ''.join(sql).strip()[:-1] + ';'
            cur.execute(sql)
            conn.commit()
            sql = str(staging_songs_insert)
    # Flush whatever remains in the final partial batch.
    print('last insert...')
    sql = ''.join(sql).strip()[:-1] + ';'
    cur.execute(sql)
    conn.commit()
def insert_tables(cur, conn):
    """Populate staging, dimension and fact tables.

    The fact table must be the last item in the query list.

    :param cur: open database cursor
    :param conn: open database connection
    """
    for statement in insert_table_queries:
        if not statement.strip():
            continue  # skip blank placeholder entries
        pprint(statement)
        cur.execute(statement)
        conn.commit()
def main():
    """Run Redshift ETL for staging, dimension and fact tables.

    Reads cluster credentials from rs_dwh.cfg, loads the name->gender lookup,
    stages the S3 log and song data, then populates the warehouse tables.
    """
    config = configparser.ConfigParser()
    config.read('rs_dwh.cfg')
    conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(*config['CLUSTER'].values()))
    cur = conn.cursor()
    load_gender_lookup()
    # BUG FIX: the original called load_staging_tables(), which is not defined
    # anywhere in this module (NameError at runtime). Call the two staging
    # loaders that actually exist.
    load_staging_log_data(cur, conn)
    load_staging_song_data(cur, conn)
    insert_tables(cur, conn)
    conn.close()


if __name__ == "__main__":
    main()
| 38.367347
| 173
| 0.529255
|
08d269d1ebc51a6ac75c04bc8fcc26f6ea8bd98e
| 1,002
|
py
|
Python
|
cron/weather.py
|
joedanz/flask-weather
|
fe35aa359da6f5d7f942d97837403e153b5c5ede
|
[
"Apache-2.0"
] | 1
|
2017-08-25T18:55:11.000Z
|
2017-08-25T18:55:11.000Z
|
cron/weather.py
|
joedanz/flask-weather
|
fe35aa359da6f5d7f942d97837403e153b5c5ede
|
[
"Apache-2.0"
] | null | null | null |
cron/weather.py
|
joedanz/flask-weather
|
fe35aa359da6f5d7f942d97837403e153b5c5ede
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Cron job: fetch current weather for a fixed list of ZIP codes from the
# OpenWeatherMap API and append the readings to a local SQLite database.
# NOTE(review): Python 2 only (urllib2, print statements).
import json, urllib2, datetime
from sqlite3 import dbapi2 as sqlite3

# zip codes to log
zipcodes = ['07740','11210','33139','90210']

# configuration
DATABASE = '../db/weather.db'
# NOTE(review): SECRET_KEY/DEBUG are unused in this script and the key is
# checked into source -- confirm whether they belong here.
SECRET_KEY = 'hackerati'
DEBUG = True

# open database
db = sqlite3.connect(DATABASE)
for zipcode in zipcodes:
    # pull weather from API
    weather_api = urllib2.urlopen('http://api.openweathermap.org/data/2.5/weather?zip='+zipcode+',us')
    weather_data = weather_api.read()
    weather_api.close()
    weather = json.loads(weather_data)
    # convert from kelvin to fahrenheit
    temp_val = (((weather['main']['temp']-273.15)*9)/5)+32
    humidity_val = weather['main']['humidity']
    print zipcode,
    print temp_val,
    print humidity_val
    # insert db entry
    db.execute('insert into weather (zipcode, temp, humidity, stamp) values (?, ?, ?, ?)',
        [zipcode, int(temp_val), int(humidity_val), datetime.datetime.utcnow()])
    db.commit()
# close database
db.close()
| 26.368421
| 102
| 0.673653
|
08d4af28a19751f94bb0827b79075eb4b7ae0ea7
| 234
|
py
|
Python
|
tests/test_print_as_discovered.py
|
acardos/git_inspector
|
ee194a62606ddb882ce0736618bae053e6b8521d
|
[
"MIT"
] | 4
|
2021-12-06T15:35:19.000Z
|
2022-01-23T23:17:38.000Z
|
tests/test_print_as_discovered.py
|
acardos/git_inspector
|
ee194a62606ddb882ce0736618bae053e6b8521d
|
[
"MIT"
] | 12
|
2021-03-31T09:14:40.000Z
|
2022-01-31T10:01:25.000Z
|
tests/test_print_as_discovered.py
|
acardos/git_inspector
|
ee194a62606ddb882ce0736618bae053e6b8521d
|
[
"MIT"
] | 1
|
2022-01-22T11:37:08.000Z
|
2022-01-22T11:37:08.000Z
|
from git import Repo
from git_inspector import find_git_directories
| 19.5
| 69
| 0.794872
|
08d50632dbe42cde10ed75ee126dd035ddf3804a
| 3,480
|
py
|
Python
|
src/frontend/function_transforms/pass_div_zero.py
|
mfeliu/gelpia
|
30c6c1030165b26bf5f84613316f6fc2ce3ebe8b
|
[
"MIT"
] | null | null | null |
src/frontend/function_transforms/pass_div_zero.py
|
mfeliu/gelpia
|
30c6c1030165b26bf5f84613316f6fc2ce3ebe8b
|
[
"MIT"
] | null | null | null |
src/frontend/function_transforms/pass_div_zero.py
|
mfeliu/gelpia
|
30c6c1030165b26bf5f84613316f6fc2ce3ebe8b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Resolve gelpia's bin directory if the package is installed; otherwise fall
# back to an empty path and rely on gaol_repl being found on $PATH.
try:
    from gelpia import bin_dir
except ImportError:
    # Narrowed from a bare `except:` -- only an import failure should trigger
    # the fallback (a bare except also swallows KeyboardInterrupt/SystemExit).
    print("gelpia not found, gaol_repl must be in your PATH\n")
    bin_dir = ""
from pass_utils import *
from output_flatten import flatten
import re
import sys
import subprocess
import os.path as path
if __name__ == "__main__":
try:
runmain()
except KeyboardInterrupt:
print("\nGoodbye")
| 23.355705
| 67
| 0.611207
|
08d52a54cf446718a15b7b80b28b2ccd05586869
| 2,150
|
py
|
Python
|
setup.py
|
bearroast/django-estimators
|
5dd72694dab6725335214543a59104c4de504037
|
[
"MIT"
] | 46
|
2016-09-13T06:33:30.000Z
|
2022-01-08T00:55:37.000Z
|
setup.py
|
bearroast/django-estimators
|
5dd72694dab6725335214543a59104c4de504037
|
[
"MIT"
] | 14
|
2016-09-10T04:56:30.000Z
|
2017-11-28T04:12:43.000Z
|
setup.py
|
bearroast/django-estimators
|
5dd72694dab6725335214543a59104c4de504037
|
[
"MIT"
] | 19
|
2016-09-20T23:53:26.000Z
|
2022-01-08T00:55:39.000Z
|
import os

from pip.req import parse_requirements
from setuptools import find_packages, setup

# Long description is taken verbatim from the README.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# parse_requirements() returns generator of pip.req.InstallRequirement objects
# NOTE(review): pip.req was removed in pip 10 -- this import only works with
# old pip versions; confirm the supported toolchain.
install_reqs = parse_requirements(
    os.path.join(os.path.dirname(__file__), 'requirements.txt'), session=False)
reqs = [str(ir.req) for ir in install_reqs]

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-estimators',
    version='0.2.1',
    packages=find_packages(),
    include_package_data=True,
    install_requires=reqs,
    license='MIT License',  # example license
    description='A django model to persist and track machine learning models',
    long_description=README,
    url='https://github.com/fridiculous/django-estimators',
    author='Simon Frid',
    author_email='simon.frid@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Framework :: Django :: 1.9',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        # Replace these appropriately if you are stuck on Python 2.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Version Control',
    ],
    keywords='''scikit-learn, sklearn, machine learning, artificial intelligence, ml,
    ai, estimators, version, versioning, benchmark, persist, storage, track, models,
    repository, evaluation, workflow'''
)
| 40.566038
| 88
| 0.670698
|
08d5314ae1e6b39701c18dfc2466ee45cde74ef6
| 7,517
|
py
|
Python
|
ip_group.py
|
vectranetworks/csv-to-ip-group
|
f8f53f979c62c3db161fcb7fdc3b7ebb26842055
|
[
"MIT"
] | null | null | null |
ip_group.py
|
vectranetworks/csv-to-ip-group
|
f8f53f979c62c3db161fcb7fdc3b7ebb26842055
|
[
"MIT"
] | null | null | null |
ip_group.py
|
vectranetworks/csv-to-ip-group
|
f8f53f979c62c3db161fcb7fdc3b7ebb26842055
|
[
"MIT"
] | null | null | null |
import csv
import ipaddress
import logging.handlers
import sys
import argparse
# Fail fast with a readable message when third-party dependencies are missing.
# NOTE(review): sys.exit(0) reports success; a non-zero exit code would be
# more conventional for a failed startup -- confirm before changing.
try:
    import vat.vectra as vectra
    import requests
except Exception as error:
    print('\nMissing import requirements: {}\n'.format(str(error)))
    sys.exit(0)

LOG = logging.getLogger(__name__)

# Characters not allowed in group names/descriptions, and their replacement.
INVALID_CHARS = ['~', '#', '$', '^', '+', '=', '<', '>', '?', ';']
SUB_CHAR = '_'

# Suppress Detect certificate warning
requests.packages.urllib3.disable_warnings()
def ip_subnet(subnet_string):
    """
    Called with string that represents an IP subnet with CIDR or netmask in dotted decimal format
    Validates string represents valid subnet and removes host bits
    Returns string representation of subnet in CIDR format

    :param subnet_string: string representing subnet in CIDR w.x.y.z/n or netmask w.x.y.z/aa.bb.cc.dd format
    :return: string representation of subnet in CIDR format, or None when invalid
    """
    try:
        # BUG FIX: the original discarded this result, so an already-valid
        # subnet fell through to `return str(subnet)` with `subnet` unbound
        # and raised NameError. Bind it here.
        subnet = ipaddress.IPv4Network(subnet_string)
    except (ipaddress.AddressValueError, ipaddress.NetmaskValueError) as error:
        LOG.info('Subnet {} format error, {}'.format(subnet_string, error))
        return None
    except ValueError as error:
        # Valid network but host bits set (e.g. 10.1.1.5/24): strip them.
        LOG.info('{}, removing host bits'.format(error))
        subnet = ipaddress.IPv4Network(subnet_string, strict=False)
    return str(subnet)
def sub_bad_chars(string, sub=SUB_CHAR):
    """Replace every unsupported character in a group name/description.

    :param string: original string
    :param sub: replacement character, default defined in SUB_CHAR
    :return: the original string with each INVALID_CHARS member replaced
    """
    cleaned = string
    for illegal in INVALID_CHARS:
        cleaned = cleaned.replace(illegal, sub)
    return cleaned
def group_exists(group_name, brain):
    """Look up an IP group by exact name.

    :param group_name: group name to search for
    :param brain: initialized Vectra Client object
    :return: {'name': ..., 'id': ...} when an exact match exists, else False
    """
    pages = brain.get_all_groups(name=group_name)
    for page in pages:
        # The name filter is a substring search; skip empty pages and keep
        # only an exact name match.
        if page.json()['count'] <= 0:
            continue
        for candidate in page.json()['results']:
            if candidate['name'] == group_name:
                return {'name': candidate['name'], 'id': candidate['id']}
    return False
def create_group(name, subnet, brain, descr=''):
    """Create an IP group containing the supplied members.

    :param name: group name
    :param subnet: iterable of CIDR subnet strings
    :param brain: initialized Vectra Client object
    :param descr: group description, optional
    """
    kwargs = {'name': name, 'type': 'ip', 'members': list(subnet)}
    if descr:
        kwargs['description'] = descr
    brain.create_group(**kwargs)
def update_group(grp_id, subnet, brain, descr=''):
    """Append members (and optionally a description) to an existing group.

    :param grp_id: group ID
    :param subnet: list of CIDR subnet strings to append
    :param brain: initialized Vectra Client object
    :param descr: group description, optional
    """
    kwargs = {'group_id': grp_id, 'members': subnet, 'append': True}
    if descr:
        kwargs['description'] = descr
    brain.update_group(**kwargs)
def main():
    """
    Supplied with valid CSV file containing 3 or 4 columns of data, iterates over rows and creates or updates groups
    Supports CSV files with following format examples with or without header row
    group 1,192.168.1.0/255.255.255.0,group1 description
    group 2,10.1.1.0/24,group2 description
    """
    # NOTE(review): obtain_args() is not defined in this file -- presumably an
    # argparse builder defined elsewhere in the project; confirm before running.
    args = obtain_args()
    sub_char = args.sub_char if args.sub_char else SUB_CHAR
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(level=log_level, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    if len(sys.argv) == 1:
        print('Run python3 ip_group.py -h for help.')
        sys.exit()
    file = args.file
    with open(file, newline='') as csvfile:
        vc = vectra.VectraClientV2_1(url='https://' + args.brain, token=args.token, verify=False)
        reader = csv.reader(csvfile)
        for row in reader:
            # Valid rows: 3 columns (name, cidr, descr) or
            # 4 columns (name, network, netmask, descr).
            if len(row) < 3 or len(row) > 4:
                LOG.info('Invalid number of columns in row, skipping')
                continue
            if len(row) == 4:
                LOG.debug('Number of rows 4: {}'.format(len(row)))
                subnet = ip_subnet('{}/{}'.format(row[1], row[2]))
                description = sub_bad_chars(row[3], sub_char)
            elif len(row) == 3:
                LOG.debug('Number of rows 3: {}'.format(len(row)))
                subnet = ip_subnet(row[1])
                description = sub_bad_chars(row[2], sub_char)
            group_name = sub_bad_chars(row[0], sub_char)
            if subnet is not None:
                """group_obj False or {'name': 'somename', 'id':'123'}"""
                group_obj = group_exists(group_name, vc)
                if not group_obj:
                    # Group does not exist, creating
                    LOG.info('Group does not exist, creating. group:{}, subnet:{}, description:{}'.format(
                        group_name, subnet, description))
                    create_group(group_name, [str(subnet)], vc, description)
                else:
                    LOG.info('Group exists, updating. group:{}, subnet:{}, description:{}'.format(
                        group_name, subnet, description))
                    update_group(group_obj['id'], [str(subnet)], vc, description)
            else:
                LOG.info('Invalid subnet, skipping')


if __name__ == '__main__':
    main()
| 39.151042
| 118
| 0.596648
|
08d5fc45e5a26919b46ae56fd9e3cb2d53ede3e7
| 512
|
py
|
Python
|
BasicSyntax/DataType.py
|
Fjaxzhy/top.kagurayayoi.learn.Python
|
af2ad3b7da85fb0af1668d3751c0342b16d0966f
|
[
"MIT"
] | null | null | null |
BasicSyntax/DataType.py
|
Fjaxzhy/top.kagurayayoi.learn.Python
|
af2ad3b7da85fb0af1668d3751c0342b16d0966f
|
[
"MIT"
] | 11
|
2021-03-29T08:50:16.000Z
|
2021-03-31T08:46:55.000Z
|
BasicSyntax/DataType.py
|
Fjaxzhy/top.kagurayayoi.learn.Python
|
af2ad3b7da85fb0af1668d3751c0342b16d0966f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Python basic data-type demo.
# NOTE(review): the original non-ASCII comment text was lost in transit;
# the groupings below restate what the headings listed.
#
# Number / String / Tuple
# List / Dictionary / Set

# Number
# Int
IntNum = 100
# Float
FloatNum = 100.10
# Boolean // True:1 False:0
BoolNum = True
# Complex
ComplexNum = 1.00j

# String (original literal content was non-ASCII and is now empty)
Str = ""

# List
List = ['a', 'b', 1, 2]

# Tuple
Tup = ('a', 'b', 1, 2)

# Set
Set = {'a', 'b', 1, 2}

# Dictionary
Dict = {'key1': 'value1', 'key2': 'value2'}
| 14.628571
| 43
| 0.59375
|
08d5febbe68f3f78281ba4430f7b17df3067f244
| 3,526
|
py
|
Python
|
tcex/api/tc/v3/_gen/_gen_args_abc.py
|
GShepherdTC/tcex
|
70b1199b8bb9e63f53e2ba792489267108c909cd
|
[
"Apache-2.0"
] | null | null | null |
tcex/api/tc/v3/_gen/_gen_args_abc.py
|
GShepherdTC/tcex
|
70b1199b8bb9e63f53e2ba792489267108c909cd
|
[
"Apache-2.0"
] | null | null | null |
tcex/api/tc/v3/_gen/_gen_args_abc.py
|
GShepherdTC/tcex
|
70b1199b8bb9e63f53e2ba792489267108c909cd
|
[
"Apache-2.0"
] | null | null | null |
"""Generate Docs for ThreatConnect API"""
# standard library
import importlib
import sys
from abc import ABC
from typing import Any, Optional
# first-party
from tcex.api.tc.v3._gen._gen_abc import GenerateABC
| 33.264151
| 89
| 0.574022
|
08d6edb44ef1415e69d5e8564970749ce00f431c
| 382
|
py
|
Python
|
rename_smpls.py
|
Chartiza/bulls
|
e4e7895a37a0335572dea50f2cbaae2737b3cd5f
|
[
"MIT"
] | null | null | null |
rename_smpls.py
|
Chartiza/bulls
|
e4e7895a37a0335572dea50f2cbaae2737b3cd5f
|
[
"MIT"
] | null | null | null |
rename_smpls.py
|
Chartiza/bulls
|
e4e7895a37a0335572dea50f2cbaae2737b3cd5f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# Remap sample names in a GenomeStudio FinalReport using a tab-separated
# correspondence file, printing the rewritten rows to stdout.
sootv = {}

# Read file sootvetstviya: column 0 -> column 1 (first mapping wins).
# FIX: use context managers so the input files are closed deterministically
# (the original iterated `open(...)` directly and never closed the handles).
with open("filesootv") as mapping_file:
    for l in mapping_file:
        data = l.strip().split("\t")
        if data[0] not in sootv:
            sootv[data[0]] = data[1]

# Read FinalReport file and emit rows whose sample id (column 1) has a mapping.
with open('Ire30_GP') as report_file:
    for l in report_file:
        data = l.strip().split("\t")
        if data[1] in sootv:
            print(data[0]+"\t"+sootv[data[1]]+"\t"+data[2]+"\t"+data[3]+"\t"+"\t"+data[4]+"\t"+data[5])
| 23.875
| 94
| 0.570681
|
08d8b9d0bef0b39a979fb0521f02328c098ccbd7
| 448
|
py
|
Python
|
3day/Quiz01_1.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
3day/Quiz01_1.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
3day/Quiz01_1.py
|
jsjang93/joony
|
62f7a325094c887212b894932263bf84500e0f03
|
[
"MIT"
] | null | null | null |
# Quiz01_1.py
items = {"":1000,"":900,"":500,"":700,"":800}
print("=== ====")
print("[][][][][] ")
print(" --> ) , ")
# item, price
item = input() # ,
items2 = item.strip().split(',')
price = pItems(*items2,**items)
print(" : {0} ".format(price) )
| 22.4
| 58
| 0.564732
|
08d8e05ba83fd1eb90111af5408ae91ffdf11318
| 2,619
|
py
|
Python
|
src/custom_arch/custom_alexnet.py
|
joeyseash/PruneTrain
|
5adb367eb90b7e1e38251f8e3a8e7eb65b167aa0
|
[
"Apache-2.0"
] | 1
|
2021-10-03T00:57:32.000Z
|
2021-10-03T00:57:32.000Z
|
src/custom_arch/custom_alexnet.py
|
VictorSuciu/prunetrain
|
ef84a88ef8a34f8e79de783ffdb9d3b82545dc3b
|
[
"Apache-2.0"
] | null | null | null |
src/custom_arch/custom_alexnet.py
|
VictorSuciu/prunetrain
|
ef84a88ef8a34f8e79de783ffdb9d3b82545dc3b
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019 Sangkug Lym
Copyright 2019 The University of Texas at Austin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from .arch_utils import layerUtil
# Layer-by-layer AlexNet description consumed by layerUtil: convolution
# hyper-parameters for conv1-conv5, then pooling, activation, and the
# final fully-connected classifier.
arch = {
    0: {'name': 'conv1', 'kernel_size': 11, 'stride': 4, 'padding': 5, 'bias': True},
    1: {'name': 'conv2', 'kernel_size': 5, 'stride': 1, 'padding': 2, 'bias': True},
    2: {'name': 'conv3', 'kernel_size': 3, 'stride': 1, 'padding': 1, 'bias': True},
    3: {'name': 'conv4', 'kernel_size': 3, 'stride': 1, 'padding': 1, 'bias': True},
    4: {'name': 'conv5', 'kernel_size': 3, 'stride': 1, 'padding': 1, 'bias': True},
    5: {'name': 'pool', 'kernel_size': 2, 'stride': 2},
    6: {'name': 'relu'},
    7: {'name': 'fc', 'out_chs': 'num_classes'},
}
| 34.012987
| 104
| 0.658267
|
08da6b4771c11626f2b1e4199314a129e0c7bb3d
| 3,089
|
py
|
Python
|
app/ml/train_data.py
|
curioswati/scrapy-tsa
|
50556880125412e0b8d925fb46c41c44dd31fb37
|
[
"MIT"
] | 2
|
2020-01-15T05:17:23.000Z
|
2020-08-13T01:50:00.000Z
|
app/ml/train_data.py
|
curioswati/scrapy-tsa
|
50556880125412e0b8d925fb46c41c44dd31fb37
|
[
"MIT"
] | null | null | null |
app/ml/train_data.py
|
curioswati/scrapy-tsa
|
50556880125412e0b8d925fb46c41c44dd31fb37
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import numpy
import csv
import re, nltk
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem.porter import PorterStemmer
from sklearn.linear_model import LogisticRegression
# from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
if __name__ == "__main__":
train_data = {
"text": [],
"sentiment": []
}
raw_count = 0
with open('Sentiment Analysis Dataset.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile)
headers = next(csvreader, None)
for line in csvreader:
train_data["text"].append(line[3].strip())
train_data["sentiment"].append(int(line[1]))
# raw_count += 1
# if raw_count >= 1000:
# break
raw_count = 0
with open('training.1600000.processed.noemoticon.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile)
for line in csvreader:
try:
train_data["text"].append(line[5].strip())
except Exception as e:
print e
print "line", line
print line[5]
exit(0)
if int(line[0]) == 4:
train_data["sentiment"].append(1)
else:
train_data["sentiment"].append(0)
# raw_count += 1
# if raw_count >= 1000:
# break
print train_data["text"][:3]
print train_data["sentiment"][:3]
print numpy.unique(numpy.array(train_data["sentiment"]))
print "data extracted"
# exit(0)
stemmer = PorterStemmer()
vectorizer = CountVectorizer(
analyzer = 'word',
tokenizer = tokenize,
lowercase = True,
stop_words = 'english',
max_features = 100,
encoding='utf-8'
)
print "creating corpus_data_features"
X_train_counts = vectorizer.fit_transform(train_data["text"])
# tf_transformer = TfidfTransformer(use_idf=False).fit(X_train_counts)
# X_train_tf = tf_transformer.transform(X_train_counts)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
print "X_train_tfidf.shape", X_train_tfidf.shape
print "training"
model = MultinomialNB().fit(X_train_tfidf, train_data["sentiment"])
joblib.dump(model, 'twitter_MultinomialNB_model.pkl', compress=1)
joblib.dump(vectorizer, 'vectorizer.pkl', compress=1)
joblib.dump(tfidf_transformer, 'tfidf_transformer.pkl', compress=1)
docs_new = ['God is love', 'OpenGL on the GPU is fast', "it was a very fantastic experience"]
X_new_counts = vectorizer.transform(docs_new)
X_new_tfidf = tfidf_transformer.transform(X_new_counts)
predicted = model.predict(X_new_tfidf)
print "predicted", predicted
print model.score(X_train_tfidf, train_data["sentiment"])
| 26.86087
| 94
| 0.717708
|
08dc052ecc3d96e2ef3efe41624a974268f5c7b0
| 2,596
|
py
|
Python
|
DIP/exercises/ex10/pca.py
|
apeyrard/sjtu-work
|
ca98fec3c83b81ed9091bdc968cb5ad8a74d1d6a
|
[
"MIT"
] | 1
|
2022-03-26T10:04:05.000Z
|
2022-03-26T10:04:05.000Z
|
DIP/exercises/ex10/pca.py
|
apeyrard/sjtu-work
|
ca98fec3c83b81ed9091bdc968cb5ad8a74d1d6a
|
[
"MIT"
] | null | null | null |
DIP/exercises/ex10/pca.py
|
apeyrard/sjtu-work
|
ca98fec3c83b81ed9091bdc968cb5ad8a74d1d6a
|
[
"MIT"
] | 1
|
2022-03-26T10:04:06.000Z
|
2022-03-26T10:04:06.000Z
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
import sys
import os
from PIL import Image
import numpy as np
# PCA demo: stack flattened images into a matrix, compute eigenvectors of
# the covariance matrix, project onto the top components, reconstruct, and
# display reconstruction + thresholded difference images.
size = None
matrix_x = None
# Build the data matrix: one flattened image per row.
for image in os.listdir('./washington'):
    try:
        print(image)
        with Image.open(os.path.join('./washington',image)) as im:
            imgVector = np.array(list(im.getdata()))
            imgVector = imgVector.reshape(1, imgVector.shape[0])
            try:
                # First iteration: vstack fails on None, so fall back to init.
                matrix_x = np.vstack((matrix_x, imgVector))
            except:
                matrix_x = imgVector
    except FileNotFoundError as e:
        sys.exit("Error : file not found")

# mean vector
K = matrix_x.shape[1]   # number of columns (pixels treated as observations)
print('K', K)
nb = matrix_x.shape[0]  # number of images (vector dimensionality here)
print('nb', nb)
mx = np.zeros((nb, 1))
for x in range(K):
    for y in range(nb):
        mx[y] += matrix_x[y, x]
mx = mx/K

# covar matrix: average of outer products minus the mean outer product.
cx = np.zeros((nb,nb))
for x in range(K):
    tmp = (matrix_x[:,x])
    tmp = tmp.reshape(tmp.shape[0],1)
    cx += np.dot(tmp,tmp.T) - np.dot(mx,mx.T)
cx = cx/K

eigenvalues, eigenvectors = np.linalg.eig(cx)

# Sort eigenpairs by decreasing eigenvalue (selection-sort style; zeroes out
# each selected eigenvalue so the next max can be found).
eival = np.zeros(eigenvalues.shape)
eivec = np.zeros(eigenvectors.shape)
j = 0
for _ in range(nb):
    maxval = eigenvalues.max()
    for i in range(eigenvalues.shape[0]):
        val = eigenvalues[i]
        if val == maxval:
            eival[j] = val
            eigenvalues[i] = 0
            eivec[j] = eigenvectors[i]
            j += 1
            break

# pruning eivec: keep only the top `pruning` principal components.
pruning = 2
eivec = eivec[:pruning,:]
print(eivec)

# Project each mean-centered column onto the retained components.
matrix_y = np.zeros((pruning, matrix_x.shape[1]))
for i in range(K):
    tmp = (matrix_x[:,i]).reshape(nb, 1)
    truc = np.dot(eivec,(tmp-mx))
    matrix_y[:, i] = truc.reshape(truc.shape[0])

# reconstruction: back-project and re-add the mean.
matrix_x2 = np.zeros(matrix_x.shape)
for i in range(K):
    tmp = (matrix_y[:,i])
    tmp = tmp.reshape(tmp.shape[0], 1)
    matrix_x2[:, i] = np.array((np.dot(eivec.T,tmp)+mx).reshape(nb))

# Display each reconstructed image and its thresholded difference image.
# NOTE(review): `rescale` is not defined in this file, and `im` is reused
# after its with-block closed the file -- TODO confirm helpers/intent.
data = np.vsplit(matrix_x2, 6)
for i,item in enumerate(data):
    item = list(rescale(item.reshape(item.shape[1])))
    newIm = Image.new(im.mode, im.size)
    newIm.putdata(item)
    newIm.show()
    diff = item - matrix_x[i]
    epsilon = 0.1
    print(diff)
    # Zero out negligible per-pixel differences before display.
    for j,val in enumerate(diff):
        if abs(val) < epsilon:
            diff[j] = 0
    print(diff)
    diff = rescale(diff)
    newIm = Image.new(im.mode, im.size)
    newIm.putdata(list(diff))
    newIm.show()
| 23.6
| 68
| 0.573575
|
08dc36bae83be55acec0ed61f76a33d11f4bb8a1
| 1,677
|
py
|
Python
|
organisations/migrate-entities/script.py
|
jbarnes/aws-python-script-collection
|
bf2accf60b8c14af89fab3a210c4df6a3b2e0ba9
|
[
"MIT"
] | null | null | null |
organisations/migrate-entities/script.py
|
jbarnes/aws-python-script-collection
|
bf2accf60b8c14af89fab3a210c4df6a3b2e0ba9
|
[
"MIT"
] | null | null | null |
organisations/migrate-entities/script.py
|
jbarnes/aws-python-script-collection
|
bf2accf60b8c14af89fab3a210c4df6a3b2e0ba9
|
[
"MIT"
] | null | null | null |
import boto3
import sys
if __name__ == "__main__":
    # Exactly one CLI argument (the destination OU/root id) is required.
    # BUG FIX: the original check was `len(sys.argv) > 2`, which only rejected
    # *extra* arguments; running with no argument slipped through and crashed
    # later on `sys.argv[1]` with an IndexError.
    if len(sys.argv) != 2:
        print("[ERROR] You have passed in an invalid target-id, example target-id is ou-zhz0-prn5fmbc")
        sys.exit()
    else:
        print("[INFO] Valid argument detected, proceeding with account migration")
        destination_id = str(sys.argv[1])

    # Gather source ids: one AWS account id per line in source_ids.txt.
    with open("source_ids.txt") as f:
        source_ids = f.read().splitlines()
    num_sources = len(source_ids)
    print("[INFO] Detected {} source id(s) to be migrated".format(num_sources))
    print("[INFO] Beginning processing of source id(s)...")

    # Move each account from its current parent OU to the destination.
    client = boto3.client("organizations")
    for source_id in source_ids:
        print("[INFO] Now attempting to move source id: {}".format(source_id))
        # Look up the account's current parent (an account has exactly one).
        get_parent = client.list_parents(ChildId=source_id)
        parent_id = get_parent["Parents"][0]["Id"]
        try:
            response = client.move_account(
                AccountId=source_id, SourceParentId=parent_id, DestinationParentId=destination_id
            )
            print(
                "[INFO] Successfully moved source id: {} to target id: {}".format(
                    source_id, destination_id
                )
            )
        except client.exceptions.DuplicateAccountException:
            # The account already lives under the destination parent; treat
            # as success rather than aborting the remaining migrations.
            print(
                "[NOTICE] Source id: {} is already migrated to target id: {}".format(
                    source_id, destination_id
                )
            )
    print("[INFO] Successfully migrated required accounts.")
| 35.680851
| 103
| 0.556947
|
08dcaa11c309d6ad11738f4ba7bc30c87f71fe32
| 274
|
py
|
Python
|
templates/python.py
|
limacat76/Polyglot-Study
|
ec71186d4dfbecebf372eb11affd9b5a2b76e47a
|
[
"MIT"
] | null | null | null |
templates/python.py
|
limacat76/Polyglot-Study
|
ec71186d4dfbecebf372eb11affd9b5a2b76e47a
|
[
"MIT"
] | null | null | null |
templates/python.py
|
limacat76/Polyglot-Study
|
ec71186d4dfbecebf372eb11affd9b5a2b76e47a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Minimal CLI template: handles --version and --help flags, else runs."""
from sys import argv
from sys import stdin
from sys import stdout

alp = len(argv)  # argument count, including the program name
if alp > 1 and argv[1] == "--version":
    print('version 0.1')
    # raise SystemExit instead of quit(): quit() is an interactive helper
    # injected by the site module and is not guaranteed in scripts
    # (e.g. absent under `python -S`).
    raise SystemExit
if alp > 1 and argv[1] == "--help":
    print('ctrl+d to quit')
    raise SystemExit
print('todo')
| 17.125
| 38
| 0.605839
|
08dfdc660c21f835d66e36e8817b005006f680b0
| 1,142
|
py
|
Python
|
Transcribing_DNA_into_RNA.py
|
hedianeposselt/ROSALIND
|
6d53167e1ec98fc22992da7cd74d385a97baa870
|
[
"Apache-2.0"
] | 1
|
2022-03-29T10:17:56.000Z
|
2022-03-29T10:17:56.000Z
|
Transcribing_DNA_into_RNA.py
|
hedianeposselt/ROSALIND
|
6d53167e1ec98fc22992da7cd74d385a97baa870
|
[
"Apache-2.0"
] | null | null | null |
Transcribing_DNA_into_RNA.py
|
hedianeposselt/ROSALIND
|
6d53167e1ec98fc22992da7cd74d385a97baa870
|
[
"Apache-2.0"
] | null | null | null |
# https://rosalind.info/problems/rna/
# Transcribing DNA into RNA exercise from ROSALIND
DNA = "ACAACAAAGGATCGGCGAGGAGCTGGTTAATCTCGATTCTAACAAAGGCCTCTTGAGTGACATAAAGTTGCTGTTCGGCCCCCGTTGCAGCCAAGCCTAGACTCGAGCGGGGTCTACCTCTGTAAACCCAAGTCGCAGGCCAAGGGCATTTTAACCCCCAAAGTTAGATACGTCGATTGAGTGCGCACTCCCTAACTTCAGACAGGATGGCGCTTAGCACTGGTTAGGTCCCTCATTAGAGGCTTACACGGGACCCCAGCGATCTGCAGGGCTACATGAACCGGCGATACCTGCAACCCTTCACGTGTGGTGCGAGTGCTGGACCCATGCACGGGCCCAAGAAGCGGGAGCACCCACGGCCTGAGCCTGTAGCTTCATACTTAGAGTAACACCTATAAGTTCTCCGTTTCACGTTATTTTACTTAACAAAGCACATCGATGGGCGGACGTACGAGCCGAGCCTCGTCCCCATTTACTCAAGTAACCAAGTCATTGTTTAGTCTATGGTAGGCTCTTTGATTGGGTACGCCGCAGCCATCCGCACACTTGCAGGGCTTTAGTCCGAACTCGTTCAAAGGGTTCGACGTACAACAGCGCCTACTAAATCCCCGCCTTGTAACGGAAGACGTGTGGGACCTCTTGAAACATCTTCGACCATACATCTCCATTTTAACAATGAAGCTGTATCAGTGGTCAGTCTTACTATGCCTGCACTCAGCAACAAGGGGCGCGATGATGTAGTCAGCGTGCCCAGATTCAGTACGGACAGTCAAGTGCGATCTTTCTGGGTCGCGCGGCTGGTGGTAATGAGAATGTTCTTACCTGACAAGTAATGCTTCTTCCAATCGTGCTGGGGGCAAGGTTTATTCTCTCTTAACCTGTTGCTCATCTCTAGCGATAACTGGTGCATGATCAATTTGCGG"
# Transcription: RNA is the DNA sequence with every thymine (T) replaced
# by uracil (U). str.replace does this in a single C-level pass instead of
# building the string one character at a time (quadratic concatenation).
RNA = DNA.replace("T", "U")
print(RNA)
| 81.571429
| 931
| 0.932574
|
08e02a9b4adc8aa43eb49a2fd41a870ebd71dbaa
| 516
|
py
|
Python
|
micro_admin/migrations/0010_auto_20160804_1044.py
|
lance0145/micro-finance
|
1ba6339a9d05ff2f20b020b97a233c766b2ee6e0
|
[
"MIT"
] | 72
|
2015-09-18T07:23:20.000Z
|
2022-03-23T14:35:46.000Z
|
micro_admin/migrations/0010_auto_20160804_1044.py
|
mohbadar/micro-finance
|
00fc9ad1e09cd6658aa5fa0dd991cf18fe2927a6
|
[
"MIT"
] | 68
|
2015-01-03T13:44:40.000Z
|
2021-06-10T20:00:23.000Z
|
micro_admin/migrations/0010_auto_20160804_1044.py
|
mohbadar/micro-finance
|
00fc9ad1e09cd6658aa5fa0dd991cf18fe2927a6
|
[
"MIT"
] | 73
|
2015-02-10T07:03:42.000Z
|
2022-02-24T21:11:01.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-04 10:44
from __future__ import unicode_literals
from django.db import migrations
| 25.8
| 162
| 0.629845
|
08e07a97a9f3cede768ff174381cda6e3a2e9847
| 3,823
|
py
|
Python
|
ProgrammersGuideExamples/provisioning.py
|
mrhorrible78/PyU4V
|
5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c
|
[
"MIT"
] | null | null | null |
ProgrammersGuideExamples/provisioning.py
|
mrhorrible78/PyU4V
|
5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c
|
[
"MIT"
] | null | null | null |
ProgrammersGuideExamples/provisioning.py
|
mrhorrible78/PyU4V
|
5b9274fd6f5f80a4a6e7aa487e348fa91f6f315c
|
[
"MIT"
] | null | null | null |
# The MIT License (MIT)
# Copyright (c) 2016 Dell Inc. or its subsidiaries.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
from PyU4V import U4VConn

# Unisphere for PowerMax/VMAX REST connection, pinned to the 8.4 API.
ru = U4VConn(u4v_version='84')

# Command-line interface: storage group name, initiators file, ports file
# and requested capacity.
PARSER = argparse.ArgumentParser(description='This python scrtipt is a basic '
                                             'VMAX REST recipe provisioning '
                                             'multiple sized volume for an '
                                             'application.\n'
                                             'python provisioning.py -sg TEST '
                                             '-ig initiators.txt -pg ports.txt'
                                             ' -cap 1')
RFLAGS = PARSER.add_argument_group('Required arguments')
RFLAGS.add_argument('-sg', required=True, help='Storage group name, typically '
                                               'the application name '
                                               'e.g. oraclefinace')
RFLAGS.add_argument('-ig', required=True, help='Filename containing initiators'
                                               ',one per line '
                                               'e.g. 10000000c9873cae')
RFLAGS.add_argument('-pg', required=True, help='Filename containing list of '
                                               'ports one per line, '
                                               'e.g. FA-1D:25')
RFLAGS.add_argument('-cap', required=True, help='Capacity in GB')
# Assign parameters to command line arguments
ARGS = PARSER.parse_args()
sgname = ARGS.sg
hba_file = ARGS.ig
port_file = ARGS.pg
# Derived object names: a common "REST_<sg>" prefix so objects created by
# this recipe are easy to identify on the array.
appname = "REST_" + sgname
sg_id = appname + "_SG"
ig_id = appname + "_IG"
pg_id = appname + "_PG"
mv_id = appname + "_MV"
requested_capacity = ARGS.cap
initiator_list = ru.common.create_list_from_file(hba_file)
# NOTE(review): provision_storage is not defined in the visible part of this
# file, and port_file is parsed but never used here -- confirm both against
# the full source.
provision_storage()
| 43.443182
| 86
| 0.625948
|
08e17200183b1b4c4b38978e4c91346462570f54
| 8,227
|
py
|
Python
|
quickdraw-doodle-recognition/gcloud/common.py
|
yasserglez/kaggle_titanic
|
7a4857ec9a99c31eb53a91dda3ad9ecd5b647278
|
[
"MIT"
] | 2
|
2019-09-29T02:26:58.000Z
|
2020-03-06T07:38:58.000Z
|
quickdraw-doodle-recognition/gcloud/common.py
|
yasserglez/kaggle_titanic
|
7a4857ec9a99c31eb53a91dda3ad9ecd5b647278
|
[
"MIT"
] | 2
|
2018-12-17T04:32:09.000Z
|
2019-10-22T00:31:06.000Z
|
quickdraw-doodle-recognition/gcloud/common.py
|
yasserglez/kaggle
|
7a4857ec9a99c31eb53a91dda3ad9ecd5b647278
|
[
"MIT"
] | null | null | null |
import struct
import itertools
import numpy as np
from bitarray import bitarray
RANDOM_SEED = 2387613
IMAGE_SIZE = 128
BATCH_SIZE = 2048
# Assign an integer to each word to be predicted.
WORD2LABEL = {
'The Eiffel Tower': 0,
'The Great Wall of China': 1,
'The Mona Lisa': 2,
'airplane': 3,
'alarm clock': 4,
'ambulance': 5,
'angel': 6,
'animal migration': 7,
'ant': 8,
'anvil': 9,
'apple': 10,
'arm': 11,
'asparagus': 12,
'axe': 13,
'backpack': 14,
'banana': 15,
'bandage': 16,
'barn': 17,
'baseball': 19,
'baseball bat': 18,
'basket': 20,
'basketball': 21,
'bat': 22,
'bathtub': 23,
'beach': 24,
'bear': 25,
'beard': 26,
'bed': 27,
'bee': 28,
'belt': 29,
'bench': 30,
'bicycle': 31,
'binoculars': 32,
'bird': 33,
'birthday cake': 34,
'blackberry': 35,
'blueberry': 36,
'book': 37,
'boomerang': 38,
'bottlecap': 39,
'bowtie': 40,
'bracelet': 41,
'brain': 42,
'bread': 43,
'bridge': 44,
'broccoli': 45,
'broom': 46,
'bucket': 47,
'bulldozer': 48,
'bus': 49,
'bush': 50,
'butterfly': 51,
'cactus': 52,
'cake': 53,
'calculator': 54,
'calendar': 55,
'camel': 56,
'camera': 57,
'camouflage': 58,
'campfire': 59,
'candle': 60,
'cannon': 61,
'canoe': 62,
'car': 63,
'carrot': 64,
'castle': 65,
'cat': 66,
'ceiling fan': 67,
'cell phone': 68,
'cello': 69,
'chair': 70,
'chandelier': 71,
'church': 72,
'circle': 73,
'clarinet': 74,
'clock': 75,
'cloud': 76,
'coffee cup': 77,
'compass': 78,
'computer': 79,
'cookie': 80,
'cooler': 81,
'couch': 82,
'cow': 83,
'crab': 84,
'crayon': 85,
'crocodile': 86,
'crown': 87,
'cruise ship': 88,
'cup': 89,
'diamond': 90,
'dishwasher': 91,
'diving board': 92,
'dog': 93,
'dolphin': 94,
'donut': 95,
'door': 96,
'dragon': 97,
'dresser': 98,
'drill': 99,
'drums': 100,
'duck': 101,
'dumbbell': 102,
'ear': 103,
'elbow': 104,
'elephant': 105,
'envelope': 106,
'eraser': 107,
'eye': 108,
'eyeglasses': 109,
'face': 110,
'fan': 111,
'feather': 112,
'fence': 113,
'finger': 114,
'fire hydrant': 115,
'fireplace': 116,
'firetruck': 117,
'fish': 118,
'flamingo': 119,
'flashlight': 120,
'flip flops': 121,
'floor lamp': 122,
'flower': 123,
'flying saucer': 124,
'foot': 125,
'fork': 126,
'frog': 127,
'frying pan': 128,
'garden': 130,
'garden hose': 129,
'giraffe': 131,
'goatee': 132,
'golf club': 133,
'grapes': 134,
'grass': 135,
'guitar': 136,
'hamburger': 137,
'hammer': 138,
'hand': 139,
'harp': 140,
'hat': 141,
'headphones': 142,
'hedgehog': 143,
'helicopter': 144,
'helmet': 145,
'hexagon': 146,
'hockey puck': 147,
'hockey stick': 148,
'horse': 149,
'hospital': 150,
'hot air balloon': 151,
'hot dog': 152,
'hot tub': 153,
'hourglass': 154,
'house': 156,
'house plant': 155,
'hurricane': 157,
'ice cream': 158,
'jacket': 159,
'jail': 160,
'kangaroo': 161,
'key': 162,
'keyboard': 163,
'knee': 164,
'ladder': 165,
'lantern': 166,
'laptop': 167,
'leaf': 168,
'leg': 169,
'light bulb': 170,
'lighthouse': 171,
'lightning': 172,
'line': 173,
'lion': 174,
'lipstick': 175,
'lobster': 176,
'lollipop': 177,
'mailbox': 178,
'map': 179,
'marker': 180,
'matches': 181,
'megaphone': 182,
'mermaid': 183,
'microphone': 184,
'microwave': 185,
'monkey': 186,
'moon': 187,
'mosquito': 188,
'motorbike': 189,
'mountain': 190,
'mouse': 191,
'moustache': 192,
'mouth': 193,
'mug': 194,
'mushroom': 195,
'nail': 196,
'necklace': 197,
'nose': 198,
'ocean': 199,
'octagon': 200,
'octopus': 201,
'onion': 202,
'oven': 203,
'owl': 204,
'paint can': 205,
'paintbrush': 206,
'palm tree': 207,
'panda': 208,
'pants': 209,
'paper clip': 210,
'parachute': 211,
'parrot': 212,
'passport': 213,
'peanut': 214,
'pear': 215,
'peas': 216,
'pencil': 217,
'penguin': 218,
'piano': 219,
'pickup truck': 220,
'picture frame': 221,
'pig': 222,
'pillow': 223,
'pineapple': 224,
'pizza': 225,
'pliers': 226,
'police car': 227,
'pond': 228,
'pool': 229,
'popsicle': 230,
'postcard': 231,
'potato': 232,
'power outlet': 233,
'purse': 234,
'rabbit': 235,
'raccoon': 236,
'radio': 237,
'rain': 238,
'rainbow': 239,
'rake': 240,
'remote control': 241,
'rhinoceros': 242,
'river': 243,
'roller coaster': 244,
'rollerskates': 245,
'sailboat': 246,
'sandwich': 247,
'saw': 248,
'saxophone': 249,
'school bus': 250,
'scissors': 251,
'scorpion': 252,
'screwdriver': 253,
'sea turtle': 254,
'see saw': 255,
'shark': 256,
'sheep': 257,
'shoe': 258,
'shorts': 259,
'shovel': 260,
'sink': 261,
'skateboard': 262,
'skull': 263,
'skyscraper': 264,
'sleeping bag': 265,
'smiley face': 266,
'snail': 267,
'snake': 268,
'snorkel': 269,
'snowflake': 270,
'snowman': 271,
'soccer ball': 272,
'sock': 273,
'speedboat': 274,
'spider': 275,
'spoon': 276,
'spreadsheet': 277,
'square': 278,
'squiggle': 279,
'squirrel': 280,
'stairs': 281,
'star': 282,
'steak': 283,
'stereo': 284,
'stethoscope': 285,
'stitches': 286,
'stop sign': 287,
'stove': 288,
'strawberry': 289,
'streetlight': 290,
'string bean': 291,
'submarine': 292,
'suitcase': 293,
'sun': 294,
'swan': 295,
'sweater': 296,
'swing set': 297,
'sword': 298,
't-shirt': 299,
'table': 300,
'teapot': 301,
'teddy-bear': 302,
'telephone': 303,
'television': 304,
'tennis racquet': 305,
'tent': 306,
'tiger': 307,
'toaster': 308,
'toe': 309,
'toilet': 310,
'tooth': 311,
'toothbrush': 312,
'toothpaste': 313,
'tornado': 314,
'tractor': 315,
'traffic light': 316,
'train': 317,
'tree': 318,
'triangle': 319,
'trombone': 320,
'truck': 321,
'trumpet': 322,
'umbrella': 323,
'underwear': 324,
'van': 325,
'vase': 326,
'violin': 327,
'washing machine': 328,
'watermelon': 329,
'waterslide': 330,
'whale': 331,
'wheel': 332,
'windmill': 333,
'wine bottle': 334,
'wine glass': 335,
'wristwatch': 336,
'yoga': 337,
'zebra': 338,
'zigzag': 339,
}
# Inverse mapping: integer label -> word (labels in WORD2LABEL are unique).
LABEL2WORD = {v: k for k, v in WORD2LABEL.items()}
# https://docs.python.org/3/library/itertools.html#recipes
| 20.880711
| 95
| 0.519874
|
08e17bfd02380a8da82eb6cb901cf80fe395ede2
| 34,734
|
py
|
Python
|
feature.py
|
TimothyChen225/AFC-X
|
901a0019b7c153804570c480c3da4825776dbf02
|
[
"MIT"
] | null | null | null |
feature.py
|
TimothyChen225/AFC-X
|
901a0019b7c153804570c480c3da4825776dbf02
|
[
"MIT"
] | null | null | null |
feature.py
|
TimothyChen225/AFC-X
|
901a0019b7c153804570c480c3da4825776dbf02
|
[
"MIT"
] | null | null | null |
from collections import Counter
from Bio import SeqIO
import numpy as np
import warnings
import math
warnings.filterwarnings(action='ignore', category=UserWarning, module='gensim')
from gensim.models import Word2Vec
Max_length = 100 # maximum length of used peptides
# AFC-T, AFC-CP
# AFC-C based on main dataset
# AFC-C based on alternate dataset
| 31.040214
| 241
| 0.501324
|
08e364b287cb5954101aa31b3cb5304b7c80b252
| 35
|
py
|
Python
|
excel4lib/macro/analysis/__init__.py
|
aaaddress1/boobsnail
|
c0c2067d7271ca76ee721998d28e8c3c81a48397
|
[
"MIT"
] | 169
|
2021-05-26T13:35:16.000Z
|
2021-09-06T08:04:19.000Z
|
excel4lib/macro/analysis/__init__.py
|
H4xl0r/boobsnail
|
c0c2067d7271ca76ee721998d28e8c3c81a48397
|
[
"MIT"
] | 2
|
2021-06-01T13:46:37.000Z
|
2021-07-12T19:06:37.000Z
|
excel4lib/macro/analysis/__init__.py
|
H4xl0r/boobsnail
|
c0c2067d7271ca76ee721998d28e8c3c81a48397
|
[
"MIT"
] | 29
|
2021-05-27T17:28:29.000Z
|
2021-09-04T19:24:50.000Z
|
from .excel4_anti_analysis import *
| 35
| 35
| 0.857143
|
08e511f1a5de576d29d0f24338c61be5e0fb82ee
| 2,250
|
py
|
Python
|
multiband_melgan/dataset.py
|
AppleHolic/multiband_melgan
|
e0864d0fc205c3bdf5e19c77753e105e29a2641b
|
[
"MIT"
] | 41
|
2020-06-24T08:07:23.000Z
|
2022-01-24T16:39:54.000Z
|
multiband_melgan/dataset.py
|
AppleHolic/multiband_melgan
|
e0864d0fc205c3bdf5e19c77753e105e29a2641b
|
[
"MIT"
] | 2
|
2020-06-24T08:02:15.000Z
|
2020-11-23T02:56:42.000Z
|
multiband_melgan/dataset.py
|
AppleHolic/multiband_melgan
|
e0864d0fc205c3bdf5e19c77753e105e29a2641b
|
[
"MIT"
] | 5
|
2020-07-03T04:00:50.000Z
|
2020-11-04T03:24:48.000Z
|
import numpy as np
import librosa
import os
from pytorch_sound.data.meta.ljspeech import LJSpeechMeta
from torch.utils.data import Dataset, DataLoader
from typing import Tuple
| 34.090909
| 106
| 0.679111
|
08e85c7f00798390cfd21fa3cd1b2758063f698c
| 3,830
|
py
|
Python
|
yasmss/sparkmapper/sparkmapper.py
|
AshirwadPradhan/yasmss
|
8b8b7108a3a437f0c757f19225a0c2082dbbd488
|
[
"MIT"
] | null | null | null |
yasmss/sparkmapper/sparkmapper.py
|
AshirwadPradhan/yasmss
|
8b8b7108a3a437f0c757f19225a0c2082dbbd488
|
[
"MIT"
] | 2
|
2019-09-22T03:27:20.000Z
|
2019-09-22T13:56:35.000Z
|
yasmss/sparkmapper/sparkmapper.py
|
AshirwadPradhan/yasmss
|
8b8b7108a3a437f0c757f19225a0c2082dbbd488
|
[
"MIT"
] | 2
|
2019-09-15T13:10:41.000Z
|
2019-10-29T11:20:10.000Z
|
"""Get the parsed query from the driver and apply transformation and action based on the
query template
"""
import time
import pyspark.sql.functions as f
from pyspark.sql import SparkSession
from pyspark.sql.types import IntegerType, StringType, StructField, StructType
import yaml
from schema import schema
# Load runtime configuration (input data location) from the working directory.
with open("config.yaml", 'r') as file:
    data = yaml.load(file, Loader=yaml.FullLoader)

# Base URI of the input directory: "<host_ip_port>/<input_dir>".
baseURI = data['pathconfig']['host_ip_port'] + \
    '/' + data['pathconfig']['input_dir']
table_format = '.csv'  # input tables are stored as CSV files
| 33.304348
| 91
| 0.621671
|
08ee02203bdf0fc6105effa49f09100d9294242e
| 8,412
|
py
|
Python
|
main_gui.py
|
vedymin/All_IPG_Move
|
b8b079fd471709731a7550cec3a5add3db409b81
|
[
"MIT"
] | null | null | null |
main_gui.py
|
vedymin/All_IPG_Move
|
b8b079fd471709731a7550cec3a5add3db409b81
|
[
"MIT"
] | null | null | null |
main_gui.py
|
vedymin/All_IPG_Move
|
b8b079fd471709731a7550cec3a5add3db409b81
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main_gui.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
| 54.980392
| 116
| 0.714812
|
08f00026b0a8d4a6cccad1a88563ce7a5b83f749
| 1,522
|
py
|
Python
|
src/config.py
|
DQiaole/ZITS
|
5f7a060167790789d5e29a3d14d3c2ef8a34e765
|
[
"Apache-2.0"
] | 40
|
2022-03-02T06:12:43.000Z
|
2022-03-30T02:17:02.000Z
|
src/config.py
|
DQiaole/ZITS
|
5f7a060167790789d5e29a3d14d3c2ef8a34e765
|
[
"Apache-2.0"
] | 6
|
2022-03-06T03:53:14.000Z
|
2022-03-31T06:36:34.000Z
|
src/config.py
|
DQiaole/ZITS
|
5f7a060167790789d5e29a3d14d3c2ef8a34e765
|
[
"Apache-2.0"
] | 5
|
2022-03-04T06:39:44.000Z
|
2022-03-28T04:58:32.000Z
|
import os
import yaml
# Fallback training hyper-parameters.
# NOTE(review): presumably merged with / overridden by a user-supplied YAML
# config elsewhere in this module -- confirm against the loading code.
DEFAULT_CONFIG = {
    'SEED': 10,                 # random seed
    'BATCH_SIZE': 8,            # input batch size for training
    'INPUT_SIZE': 256,          # input image size for training 0 for original size
    'MAX_ITERS': 1e6,           # maximum number of iterations to train the model (float literal)
    'SAVE_INTERVAL': 1000,      # how many iterations to wait before saving model (0: never)
    'SAMPLE_INTERVAL': 1000,    # how many iterations to wait before sampling (0: never)
    'SAMPLE_SIZE': 12,          # number of images to sample
    'EVAL_INTERVAL': 0,         # how many iterations to wait before model evaluation (0: never)
    'LOG_INTERVAL': 10,         # how many iterations to wait before logging training status (0: never)
}
| 35.395349
| 107
| 0.557819
|
08f0432c93f8f390bd7d7a71479785cb462167ba
| 8,786
|
py
|
Python
|
examples/acados_python/test/generate_c_code.py
|
besticka/acados
|
32767a19aed01a15b5e7b83ebc6ddbd669a47954
|
[
"BSD-2-Clause"
] | null | null | null |
examples/acados_python/test/generate_c_code.py
|
besticka/acados
|
32767a19aed01a15b5e7b83ebc6ddbd669a47954
|
[
"BSD-2-Clause"
] | null | null | null |
examples/acados_python/test/generate_c_code.py
|
besticka/acados
|
32767a19aed01a15b5e7b83ebc6ddbd669a47954
|
[
"BSD-2-Clause"
] | null | null | null |
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
from acados_template import *
import acados_template as at
from export_ode_model import *
import numpy as np
import scipy.linalg
from ctypes import *
import json
import argparse
import sys  # used by sys.exit() in the closed-loop simulation below
# set to 'True' to generate test data
GENERATE_DATA = False

LOCAL_TEST = False
TEST_TOL = 1e-8

if LOCAL_TEST is True:
    # fixed configuration for quick local runs (bypasses argparse)
    FORMULATION = 'LS'
    SOLVER_TYPE = 'SQP_RTI'
    QP_SOLVER = 'FULL_CONDENSING_QPOASES'
    INTEGRATOR_TYPE = 'IRK'
else:
    # parse the test configuration from the command line
    parser = argparse.ArgumentParser(description='test Python interface on pendulum example.')
    parser.add_argument('--FORMULATION', dest='FORMULATION',
                        default='LS',
                        help='FORMULATION: linear least-squares (LS) or nonlinear \
                        least-squares (NLS) (default: LS)')
    # BUG FIX: help text listed FULL_CONDENSING_HPIPM twice; the third valid
    # value (see QP_SOLVER_values below) is FULL_CONDENSING_QPOASES.
    parser.add_argument('--QP_SOLVER', dest='QP_SOLVER',
                        default='PARTIAL_CONDENSING_HPIPM',
                        help='QP_SOLVER: PARTIAL_CONDENSING_HPIPM, FULL_CONDENSING_HPIPM, ' \
                        'FULL_CONDENSING_QPOASES (default: PARTIAL_CONDENSING_HPIPM)')
    parser.add_argument('--INTEGRATOR_TYPE', dest='INTEGRATOR_TYPE',
                        default='ERK',
                        help='INTEGRATOR_TYPE: explicit (ERK) or implicit (IRK) ' \
                        ' Runge-Kutta (default: ERK)')
    parser.add_argument('--SOLVER_TYPE', dest='SOLVER_TYPE',
                        default='SQP_RTI',
                        help='SOLVER_TYPE: (full step) sequential quadratic programming (SQP) or ' \
                        ' real-time iteration (SQP-RTI) (default: SQP-RTI)')
    args = parser.parse_args()

    FORMULATION = args.FORMULATION
    FORMULATION_values = ['LS', 'NLS']
    if FORMULATION not in FORMULATION_values:
        raise Exception('Invalid unit test value {} for parameter FORMULATION. Possible values are' \
            ' {}. Exiting.'.format(FORMULATION, FORMULATION_values))

    QP_SOLVER = args.QP_SOLVER
    QP_SOLVER_values = ['PARTIAL_CONDENSING_HPIPM', 'FULL_CONDENSING_HPIPM', 'FULL_CONDENSING_QPOASES']
    if QP_SOLVER not in QP_SOLVER_values:
        raise Exception('Invalid unit test value {} for parameter QP_SOLVER. Possible values are' \
            ' {}. Exiting.'.format(QP_SOLVER, QP_SOLVER_values))

    INTEGRATOR_TYPE = args.INTEGRATOR_TYPE
    INTEGRATOR_TYPE_values = ['ERK', 'IRK']
    # BUG FIX: was `INTEGRATOR_TYPE not in INTEGRATOR_TYPE` -- a string always
    # contains itself, so the validation could never trigger.
    if INTEGRATOR_TYPE not in INTEGRATOR_TYPE_values:
        raise Exception('Invalid unit test value {} for parameter INTEGRATOR_TYPE. Possible values are' \
            ' {}. Exiting.'.format(INTEGRATOR_TYPE, INTEGRATOR_TYPE_values))

    SOLVER_TYPE = args.SOLVER_TYPE
    # BUG FIX: list held 'SQP-RTI', which never matches the actual option
    # string 'SQP_RTI' used by the default above and by LOCAL_TEST.
    SOLVER_TYPE_values = ['SQP', 'SQP_RTI']
    # BUG FIX: was `SOLVER_TYPE not in SOLVER_TYPE` (always False).
    if SOLVER_TYPE not in SOLVER_TYPE_values:
        raise Exception('Invalid unit test value {} for parameter SOLVER_TYPE. Possible values are' \
            ' {}. Exiting.'.format(SOLVER_TYPE, SOLVER_TYPE_values))

# print test setting
print("Running test with:\n\tformulation:", FORMULATION, "\n\tqp solver: ", QP_SOLVER,\
    "\n\tintergrator: ", INTEGRATOR_TYPE, "\n\tsolver: ", SOLVER_TYPE)
# create render arguments
ocp = acados_ocp_nlp()
# export model
model = export_ode_model()
# set model_name
ocp.model = model
Tf = 2.0  # prediction horizon length [s]
nx = model.x.size()[0]  # state dimension
nu = model.u.size()[0]  # input dimension
ny = nx + nu   # stage residual dimension (states + inputs)
ny_e = nx      # terminal residual dimension (states only)
N = 50         # number of shooting intervals
# set ocp_nlp_dimensions
nlp_dims = ocp.dims
nlp_dims.nx = nx
nlp_dims.ny = ny
nlp_dims.ny_e = ny_e
nlp_dims.nbx = 0
nlp_dims.nbu = nu
nlp_dims.nu = model.u.size()[0]
nlp_dims.N = N
# set weighting matrices; cost type depends on the chosen FORMULATION
nlp_cost = ocp.cost
if FORMULATION == 'LS':
    nlp_cost.cost_type = 'LINEAR_LS'
    nlp_cost.cost_type_e = 'LINEAR_LS'
elif FORMULATION == 'NLS':
    nlp_cost.cost_type = 'NONLINEAR_LS'
    nlp_cost.cost_type_e = 'NONLINEAR_LS'
else:
    raise Exception('Unknown FORMULATION. Possible values are \'LS\' and \'NLS\'.')
Q = np.eye(4)
Q[0,0] = 1e0
Q[1,1] = 1e2
Q[2,2] = 1e-3
Q[3,3] = 1e-2
R = np.eye(1)
R[0,0] = 1e0
# scale stage weights by N/Tf; the terminal weight is unscaled (see W_e)
unscale = N/Tf
Q = Q * unscale
R = R * unscale
# NOTE: the NLS residual is ordered (u, x) -- see cost_r.expr below --
# so the weight block order differs between the two formulations.
if FORMULATION == 'NLS':
    nlp_cost.W = scipy.linalg.block_diag(R, Q)
else:
    nlp_cost.W = scipy.linalg.block_diag(Q, R)
nlp_cost.W_e = Q/unscale
# linear output selection matrices for the LINEAR_LS case: y = Vx*x + Vu*u
Vx = np.zeros((ny, nx))
Vx[0,0] = 1.0
Vx[1,1] = 1.0
Vx[2,2] = 1.0
Vx[3,3] = 1.0
nlp_cost.Vx = Vx
Vu = np.zeros((ny, nu))
Vu[4,0] = 1.0
nlp_cost.Vu = Vu
Vx_e = np.zeros((ny_e, nx))
Vx_e[0,0] = 1.0
Vx_e[1,1] = 1.0
Vx_e[2,2] = 1.0
Vx_e[3,3] = 1.0
nlp_cost.Vx_e = Vx_e
# nonlinear residual expressions (identity residual over (u, x)) for NLS
if FORMULATION == 'NLS':
    x = SX.sym('x', 4, 1)
    u = SX.sym('u', 1, 1)
    ocp.cost_r.expr = vertcat(u, x)
    ocp.cost_r.x = x
    ocp.cost_r.u = u
    ocp.cost_r.name = 'lin_res'
    ocp.cost_r.ny = nx + nu
    ocp.cost_r_e.expr = x
    ocp.cost_r_e.x = x
    ocp.cost_r_e.name = 'lin_res'
    ocp.cost_r_e.ny = nx
nlp_cost.yref = np.zeros((ny, ))
nlp_cost.yref_e = np.zeros((ny_e, ))
# setting bounds: input force limited to [-Fmax, Fmax]
Fmax = 2.0
nlp_con = ocp.constraints
nlp_con.lbu = np.array([-Fmax])
nlp_con.ubu = np.array([+Fmax])
# initial state: pendulum hanging down (angle ~ pi)
nlp_con.x0 = np.array([0.0, 3.14, 0.0, 0.0])
nlp_con.idxbu = np.array([0])
# set QP solver
ocp.solver_options.qp_solver = QP_SOLVER
ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
ocp.solver_options.integrator_type = INTEGRATOR_TYPE
ocp.solver_options.sim_method_num_stages = 2
ocp.solver_options.sim_method_num_steps = 5
# set prediction horizon
ocp.solver_options.tf = Tf
ocp.solver_options.nlp_solver_type = SOLVER_TYPE
# set header path
ocp.acados_include_path = '../../../../include'
ocp.acados_lib_path = '../../../../lib'
acados_solver = generate_solver(ocp, json_file = 'acados_ocp.json')
# Closed-loop simulation: solve the OCP Nsim times, applying the first
# optimal state/input and shifting the initial condition forward.
Nsim = 100
simX = np.ndarray((Nsim, nx))  # state trajectory log
simU = np.ndarray((Nsim, nu))  # input trajectory log
for i in range(Nsim):
    status = acados_solver.solve()
    if status !=0:
        print("acados failure! Exiting. \n")
        # NOTE(review): `sys` is not imported in this file's visible import
        # block -- this line would raise NameError instead of exiting cleanly.
        sys.exit(status)
    # get solution
    x0 = acados_solver.get(0, "x")
    u0 = acados_solver.get(0, "u")
    for j in range(nx):
        simX[i,j] = x0[j]
    for j in range(nu):
        simU[i,j] = u0[j]
    # update initial condition: pin stage 0 to the predicted next state
    x0 = acados_solver.get(1, "x")
    acados_solver.set(0, "lbx", x0)
    acados_solver.set(0, "ubx", x0)
    # update reference (regulate everything to zero)
    for j in range(N):
        acados_solver.set(j, "yref", np.array([0, 0, 0, 0, 0]))
    acados_solver.set(N, "yref", np.array([0, 0, 0, 0]))
# dump result to JSON file for unit testing
test_file_name = 'test_data/generate_c_code_out_' + FORMULATION + '_' + QP_SOLVER + '_' + \
    INTEGRATOR_TYPE + '_' + SOLVER_TYPE + '.json'
if GENERATE_DATA:
    # record mode: write reference trajectories for this configuration
    with open(test_file_name, 'w') as f:
        json.dump({"simX": simX.tolist(), "simU": simU.tolist()}, f, indent=4, sort_keys=True)
else:
    # check mode: compare against previously recorded reference trajectories
    with open(test_file_name, 'r') as f:
        test_data = json.load(f)
    simX_error = np.linalg.norm(test_data['simX'] - simX)
    simU_error = np.linalg.norm(test_data['simU'] - simU)
    if simX_error > TEST_TOL or simU_error > TEST_TOL:
        raise Exception("Python acados test failure with accuracies {:.2E} and {:.2E} ({:.2E} required) on pendulum example! Exiting.\n".format(simX_error, simU_error, TEST_TOL))
    else:
        print('Python test passed with accuracy {:.2E}'.format(max(simU_error, simX_error)))
| 31.604317
| 178
| 0.669019
|
08f05b58d9116c10d8df8fa1c928dc1cf428e826
| 2,820
|
py
|
Python
|
collectors/cpustats.py
|
vijayanant/kunai
|
0dfe169731eaceb1bba66e12715b3968d2a3de20
|
[
"MIT"
] | 1
|
2020-04-12T21:05:46.000Z
|
2020-04-12T21:05:46.000Z
|
collectors/cpustats.py
|
vijayanant/kunai
|
0dfe169731eaceb1bba66e12715b3968d2a3de20
|
[
"MIT"
] | null | null | null |
collectors/cpustats.py
|
vijayanant/kunai
|
0dfe169731eaceb1bba66e12715b3968d2a3de20
|
[
"MIT"
] | null | null | null |
import httplib # Used only for handling httplib.HTTPException (case #26701)
import os
import sys
import platform
import re
import urllib
import urllib2
import traceback
import time
from StringIO import StringIO
from multiprocessing import subprocess
from kunai.log import logger
from kunai.collector import Collector
| 32.790698
| 114
| 0.510638
|
08f3eae9e91dde600e2781b52aa83909fff87587
| 1,560
|
py
|
Python
|
prob_h.py
|
ShinjiKatoA16/icpc2017ucsy
|
de1954620036e8025b7b4c1b469e6b8c57af212e
|
[
"MIT"
] | null | null | null |
prob_h.py
|
ShinjiKatoA16/icpc2017ucsy
|
de1954620036e8025b7b4c1b469e6b8c57af212e
|
[
"MIT"
] | null | null | null |
prob_h.py
|
ShinjiKatoA16/icpc2017ucsy
|
de1954620036e8025b7b4c1b469e6b8c57af212e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
2017 ICPC at UCSY
Problem-H: Sum Square
'''
import sys
def parse_tc(tc):
    """Read one test-case header line from tc.infile and store its fields.

    The line carries four integers: dataset id, iteration limit,
    number base, and the initial term a0. Mutates `tc` in place.
    """
    fields = [int(token) for token in tc.infile.readline().split()]
    tc.dataset = fields[0]
    tc.max_num = fields[1]
    tc.base = fields[2]
    tc.a0 = fields[3]
def solve(tc):
    '''
    Solve one dataset of the "Sum Square" problem.

    Reads the dataset header via parse_tc, then iterates
    a_{k+1} = ssd(tc.base, a_k) from a_0 = tc.a0 until a value repeats
    (a cycle) or tc.max_num iterations are exhausted. Prints the dataset
    id, sequence length and cycle length, then the cycle itself (or the
    last value when no cycle was found).

    NOTE(review): `ssd` and `prt_list` are not defined in the visible part
    of this file -- presumably sum-of-squared-digits in the given base and
    a list printer; confirm against the full source.

    Input: Test Case
    Return: None
    '''
    parse_tc(tc)
    ak = tc.a0
    ssd_list = [ak]  # every value of the sequence seen so far, in order
    for i in range(tc.max_num):
        ssd_val = ssd(tc.base, ak)
        if ssd_val in ssd_list:
            # cycle found: it starts at the first occurrence of ssd_val
            index_k = ssd_list.index(ssd_val)
            print(tc.dataset, len(ssd_list)+1, len(ssd_list)-index_k)
            ssd_list.append(ssd_val)
            prt_list(ssd_list[index_k:])
            break
        ssd_list.append(ssd_val)
        ak = ssd_val
    else:
        # loop ran to completion: no repeat within max_num steps
        print(tc.dataset, tc.max_num, 0)
        print(ak)
    return
##
## Main routine: read the dataset count, then solve each dataset.
##
if __name__ == '__main__':
    # NOTE(review): TestCase is not defined in the visible part of this
    # file -- presumably a bare attribute container; confirm against full
    # source.
    tc = TestCase()
    tc.infile = sys.stdin
    tc.t = int(tc.infile.readline())  # number of datasets to process
    for i in range(tc.t):
        solve(tc)
    if tc.infile != sys.stdin:
        # close only files we opened ourselves, never stdin
        tc.infile.close()
| 16.956522
| 69
| 0.501923
|
08f52305d640784da8d8fb26cf618726da107b3a
| 2,357
|
py
|
Python
|
spark_mapinput.py
|
pw2393/spark-bitcoin-parser
|
320eb30ffcc462b0607655d2f4002a82590fd120
|
[
"MIT"
] | 11
|
2017-09-29T05:37:57.000Z
|
2022-02-04T06:34:17.000Z
|
spark_mapinput.py
|
pw2393/spark-bitcoin-parser
|
320eb30ffcc462b0607655d2f4002a82590fd120
|
[
"MIT"
] | 2
|
2018-07-03T12:19:50.000Z
|
2019-10-19T21:24:53.000Z
|
spark_mapinput.py
|
pw2393/spark-bitcoin-parser
|
320eb30ffcc462b0607655d2f4002a82590fd120
|
[
"MIT"
] | 3
|
2019-11-09T13:01:07.000Z
|
2021-12-03T04:20:29.000Z
|
"""
Author: Peng Wu
License: MIT
"""
# Initialize Spark Context: local multi-threads
from pyspark import SparkConf, SparkContext
output_folder = './csv/'
# with open(output_folder+'viz_txedge.csv', 'w') as f:
# pass
# def formatted_print_2(keyValue):
# with open(output_folder+'viz_txedge.csv', 'a') as f:
# f.write('{},{},{},{}\n'.format(keyValue[0][0], keyValue[1][1][0], keyValue[1][0][0], keyValue[1][0][2]))
# metafinal.foreach(formatted_print_2)
# #print metafinal.first()
# NOTE: Python 2 source (print statements below).
if __name__ == "__main__":
    import sys
    # the script takes no command-line arguments at all
    if len(sys.argv) != 1:
        print "\n\tUSAGE:\n\t\tspark-submit spark_mapinput.py"
        sys.exit()
    import time
    start_time = time.time()
    # NOTE(review): main() is not defined in the visible part of this
    # file -- confirm against the full source.
    main()
    print("--- %s seconds ---" % (time.time() - start_time))
| 28.39759
| 117
| 0.602036
|
08f5c575bcbcd0ee74f875b6fd32a403f396576c
| 6,819
|
py
|
Python
|
neuralpredictors/layers/readouts/factorized.py
|
kellirestivo/neuralpredictors
|
57205a90d2e3daa5f8746c6ef6170be9e35cb5f5
|
[
"MIT"
] | 9
|
2020-11-26T18:22:32.000Z
|
2022-01-22T15:51:52.000Z
|
neuralpredictors/layers/readouts/factorized.py
|
kellirestivo/neuralpredictors
|
57205a90d2e3daa5f8746c6ef6170be9e35cb5f5
|
[
"MIT"
] | 60
|
2020-10-21T15:32:28.000Z
|
2022-02-25T10:38:16.000Z
|
neuralpredictors/layers/readouts/factorized.py
|
mohammadbashiri/neuralpredictors
|
8e60c9ce91f83e3dcaa1b3dbe4422e1509ccbd5f
|
[
"MIT"
] | 21
|
2020-10-21T09:29:17.000Z
|
2022-02-07T10:04:46.000Z
|
import torch
from torch import nn as nn
import numpy as np
from .base import Readout
def initialize(self, mean_activity=None):
    """
    Initializes the mean, and sigma of the Gaussian readout along with the features weights

    Args:
        mean_activity: per-neuron mean response used to initialize the bias;
            falls back to self.mean_activity when None.

    NOTE(review): this def (and initialize_features below) takes `self` but
    appears at module level in this chunk -- the enclosing class statement
    seems to have been lost in extraction; confirm against the full source.
    """
    if mean_activity is None:
        mean_activity = self.mean_activity
    # small zero-mean Gaussian init for the spatial mask and feature weights
    self.spatial.data.normal_(0, self.init_noise)
    self._features.data.normal_(0, self.init_noise)
    if self._shared_features:
        # shared-feature mode: per-neuron scale factors start at 1
        self.scales.data.fill_(1.0)
    if self.bias is not None:
        self.initialize_bias(mean_activity=mean_activity)
def initialize_features(self, match_ids=None, shared_features=None):
"""
The internal attribute `_original_features` in this function denotes whether this instance of the FullGuassian2d
learns the original features (True) or if it uses a copy of the features from another instance of FullGaussian2d
via the `shared_features` (False). If it uses a copy, the feature_l1 regularizer for this copy will return 0
"""
c, w, h = self.in_shape
if match_ids is not None:
assert self.outdims == len(match_ids)
n_match_ids = len(np.unique(match_ids))
if shared_features is not None:
assert shared_features.shape == (
n_match_ids,
c,
), f"shared features need to have shape ({n_match_ids}, {c})"
self._features = shared_features
self._original_features = False
else:
self._features = nn.Parameter(
torch.Tensor(n_match_ids, c)
) # feature weights for each channel of the core
self.scales = nn.Parameter(torch.Tensor(self.outdims, 1)) # feature weights for each channel of the core
_, sharing_idx = np.unique(match_ids, return_inverse=True)
self.register_buffer("feature_sharing_index", torch.from_numpy(sharing_idx))
self._shared_features = True
else:
self._features = nn.Parameter(torch.Tensor(self.outdims, c)) # feature weights for each channel of the core
self._shared_features = False
# Classes for backwards compatibility
| 35.889474
| 120
| 0.60698
|
08f7015d2835dcc1e926fd4acbcfff51249816e9
| 1,186
|
py
|
Python
|
app/main/views.py
|
josphat-otieno/news-app
|
e6ff307230bd2cab787489fca4850004cd9bdbd0
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
josphat-otieno/news-app
|
e6ff307230bd2cab787489fca4850004cd9bdbd0
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
josphat-otieno/news-app
|
e6ff307230bd2cab787489fca4850004cd9bdbd0
|
[
"MIT"
] | 1
|
2022-02-28T22:33:33.000Z
|
2022-02-28T22:33:33.000Z
|
from flask import render_template,request, redirect, url_for
from . import main
from ..requests import get_articles, get_news_sources,get_top_headlines, get_news_category
| 34.882353
| 120
| 0.729342
|
08f715599ebdccb7db7f9153cc150737106850d8
| 6,904
|
py
|
Python
|
fabric_cf/actor/core/util/resource_count.py
|
fabric-testbed/ActorBase
|
3c7dd040ee79fef0759e66996c93eeec57c790b2
|
[
"MIT"
] | null | null | null |
fabric_cf/actor/core/util/resource_count.py
|
fabric-testbed/ActorBase
|
3c7dd040ee79fef0759e66996c93eeec57c790b2
|
[
"MIT"
] | 67
|
2020-12-21T15:39:49.000Z
|
2022-02-27T17:55:00.000Z
|
fabric_cf/actor/core/util/resource_count.py
|
fabric-testbed/ControlFramework
|
95ab745e32f15c993bc7a017aa97a5a0f67f210f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: Komal Thareja (kthare10@renci.org)
from fabric_cf.actor.core.util.resource_type import ResourceType
| 33.033493
| 119
| 0.637167
|
08f958d96728940d01ac948489adc3f2710db6d4
| 114
|
py
|
Python
|
unweaver/graphs/digraphgpkg/nodes/__init__.py
|
jsbeckwith/unweaver
|
a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0
|
[
"Apache-2.0"
] | 4
|
2019-04-24T16:38:57.000Z
|
2021-12-28T20:38:08.000Z
|
unweaver/graphs/digraphgpkg/nodes/__init__.py
|
jsbeckwith/unweaver
|
a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0
|
[
"Apache-2.0"
] | 3
|
2021-06-02T04:06:33.000Z
|
2021-11-02T01:47:20.000Z
|
unweaver/graphs/digraphgpkg/nodes/__init__.py
|
jsbeckwith/unweaver
|
a4ba9e4e288c75e93bf7f9d67bc11680f09c3da0
|
[
"Apache-2.0"
] | 1
|
2020-08-13T04:42:05.000Z
|
2020-08-13T04:42:05.000Z
|
from .node_view import NodeView
from .node import Node
from .nodes_view import NodesView
from .nodes import Nodes
| 22.8
| 33
| 0.824561
|
08f98d32f073c8a759a51d5a1b5fc9a27ec1c07c
| 1,927
|
py
|
Python
|
python/http_request.py
|
MrVallentin/http_request
|
b21cb23ead1e3bc7176f09804f9cc9287b9f0168
|
[
"MIT"
] | null | null | null |
python/http_request.py
|
MrVallentin/http_request
|
b21cb23ead1e3bc7176f09804f9cc9287b9f0168
|
[
"MIT"
] | null | null | null |
python/http_request.py
|
MrVallentin/http_request
|
b21cb23ead1e3bc7176f09804f9cc9287b9f0168
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# Author: Christian Vallentin <mail@vallentinsource.com>
# Website: http://vallentinsource.com
# Repository: https://github.com/MrVallentin/http_request
#
# Date Created: February 28, 2016
# Last Modified: February 29, 2016
#
# Developed and tested using Python 3.5.1
import http.client, urllib.parse
| 23.790123
| 65
| 0.694862
|
3e92c270410556137345bdc66663f957e85d9d78
| 937
|
py
|
Python
|
notebook/pypdf2_merge_page.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174
|
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/pypdf2_merge_page.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5
|
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/pypdf2_merge_page.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53
|
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
import PyPDF2
merger = PyPDF2.PdfFileMerger()
merger.append('data/src/pdf/sample1.pdf', pages=(0, 1))
merger.append('data/src/pdf/sample2.pdf', pages=(2, 4))
merger.merge(2, 'data/src/pdf/sample3.pdf', pages=(0, 3, 2))
merger.write('data/temp/sample_merge_page.pdf')
merger.close()
merger = PyPDF2.PdfFileMerger()
merger.append('data/src/pdf/sample1.pdf', pages=PyPDF2.pagerange.PageRange('-1'))
merger.append('data/src/pdf/sample2.pdf', pages=PyPDF2.pagerange.PageRange('2:'))
merger.merge(2, 'data/src/pdf/sample3.pdf', pages=PyPDF2.pagerange.PageRange('::-1'))
merger.write('data/temp/sample_merge_pagerange.pdf')
merger.close()
reader1 = PyPDF2.PdfFileReader('data/src/pdf/sample1.pdf')
reader2 = PyPDF2.PdfFileReader('data/src/pdf/sample2.pdf')
writer = PyPDF2.PdfFileWriter()
writer.addPage(reader1.getPage(0))
writer.addPage(reader2.getPage(2))
with open('data/temp/sample_merge_wr.pdf', 'wb') as f:
writer.write(f)
| 30.225806
| 85
| 0.741729
|
3e93e3456bdf96692c3deeb42d3cc140eb248959
| 1,983
|
py
|
Python
|
examples/nlp/bert_squad_pytorch/data.py
|
gh-determined-ai/determined
|
9a1ab33a3a356b69681b3351629fef4ab98ddb56
|
[
"Apache-2.0"
] | 1,729
|
2020-04-27T17:36:40.000Z
|
2022-03-31T05:48:39.000Z
|
examples/nlp/bert_squad_pytorch/data.py
|
ChrisW09/determined
|
5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0
|
[
"Apache-2.0"
] | 1,940
|
2020-04-27T17:34:14.000Z
|
2022-03-31T23:02:28.000Z
|
examples/nlp/bert_squad_pytorch/data.py
|
ChrisW09/determined
|
5c37bfe9cfcc69174ba29a3f1a115c3e9e3632e0
|
[
"Apache-2.0"
] | 214
|
2020-04-27T19:57:28.000Z
|
2022-03-29T08:17:16.000Z
|
from transformers.data.processors.squad import SquadV1Processor, SquadV2Processor
from transformers import squad_convert_examples_to_features
import urllib.request
import os
| 44.066667
| 122
| 0.670701
|
3e946fdc0458e3cc6d05239b37f7f11a04a0d076
| 898
|
py
|
Python
|
test_simplebuy.py
|
caoxuwen/bitgym
|
0a6796a039290122430ebc13c8d7ad9ff741921a
|
[
"MIT"
] | 1
|
2018-09-07T10:10:29.000Z
|
2018-09-07T10:10:29.000Z
|
test_simplebuy.py
|
caoxuwen/bitgym
|
0a6796a039290122430ebc13c8d7ad9ff741921a
|
[
"MIT"
] | null | null | null |
test_simplebuy.py
|
caoxuwen/bitgym
|
0a6796a039290122430ebc13c8d7ad9ff741921a
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
import pandas as pd
import trading_env
# np.set_printoptions(threshold=np.nan)
#df = pd.read_hdf('dataset/SGXTW.h5', 'STW')
#df = pd.read_hdf('dataset/SGXTWsample.h5', 'STW')
df = pd.read_csv('dataset/btc_indexed2.csv')
print(df.describe())
env = trading_env.make(env_id='training_v1', obs_data_len=1, step_len=1,
df=df, fee=0.0, max_position=5, deal_col_name='close',
sample_days=1,
feature_names=['low', 'high',
'open', 'close',
'volume', 'datetime'])
env.reset()
env.render()
state, reward, done, info = env.step(1)
print state
# randow choice action and show the transaction detail
while True:
state, reward, done, info = env.step(0)
env.render()
if done:
print state, reward
break
| 27.212121
| 77
| 0.587973
|