hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
052c8a3287a40e2446164e87ba133bbda46f1779 | 294 | py | Python | Workshops/enBuyukSayi.py | brkyydnmz/Python | 8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0 | [
"MIT"
] | null | null | null | Workshops/enBuyukSayi.py | brkyydnmz/Python | 8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0 | [
"MIT"
] | null | null | null | Workshops/enBuyukSayi.py | brkyydnmz/Python | 8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
sayi1 = int(input("1. Say:"))
sayi2 = int(input("2. Say:"))
sayi3 = int(input("3. Say:"))
if (sayi1>=sayi2) and (sayi1>=sayi3):
enBuyuk = sayi1
elif(sayi2>=sayi1) and (sayi2>=sayi3):
enBuyuk = sayi2
else:
enBuyuk = sayi3
print("En Byk Say:",enBuyuk) | 21 | 38 | 0.608844 |
052d317538142bae7b508c18b4e71450d9b3e276 | 399 | py | Python | 08/seven-segment_part1.py | ReinprechtStefan/AdventOfCode2021 | a2750c5fbcc7fc927d710f4db6926d015a2fb673 | [
"Apache-2.0"
] | null | null | null | 08/seven-segment_part1.py | ReinprechtStefan/AdventOfCode2021 | a2750c5fbcc7fc927d710f4db6926d015a2fb673 | [
"Apache-2.0"
] | null | null | null | 08/seven-segment_part1.py | ReinprechtStefan/AdventOfCode2021 | a2750c5fbcc7fc927d710f4db6926d015a2fb673 | [
"Apache-2.0"
] | null | null | null | with open('input.txt') as f:
lines = f.readlines()
counter = 0
for line in lines:
right_part = line.split(" | ")[1]
for segment in right_part.strip().split(" "):
#print(segment, len(segment))
if len(segment) in [2,3,4,7]:
counter += 1
#else:
#print("NO ", segment, len(segment))
print(counter)
| 22.166667 | 53 | 0.491228 |
052ffb78d4e1a7b366b635d756b5d2bbba48de18 | 7,605 | py | Python | main/gui.py | MBmasher/weighted-object | eaaf25338240873b7c4197097b2bb73be256b702 | [
"MIT"
] | null | null | null | main/gui.py | MBmasher/weighted-object | eaaf25338240873b7c4197097b2bb73be256b702 | [
"MIT"
] | null | null | null | main/gui.py | MBmasher/weighted-object | eaaf25338240873b7c4197097b2bb73be256b702 | [
"MIT"
] | null | null | null | import Tkinter
import weighted_objects
import tkFileDialog
import time
import ttk
import numpy
import sys
while True:
# Ask user for file dialog.
Tkinter.Tk().withdraw()
osu_file_path = tkFileDialog.askopenfilename(title="Select an osu file", filetypes=(("osu files", "*.osu"),))
# Calculate final nerf.
final_nerf = weighted_objects.calculate_nerf(osu_file_path)
distance_snap_list = weighted_objects.weighted_distance_snap_list
time_list = weighted_objects.time_list
# Separate list into multiple lists when breaks exist.
time_break_separated_list = [[]]
list_number = 0
for i in range(len(time_list) - 1):
if time_list[i + 1] - time_list[i] > 3000:
# Create new list.
list_number += 1
time_break_separated_list.append([])
time_break_separated_list[list_number].append(time_list[i])
# Coordinates to be later used in the canvas.
canvas_distance_snap_list = []
canvas_time_list = []
# Calculating coordinates.
for i in time_list:
canvas_time_list.append(350 * (i - time_list[0]) / (time_list[-1] - time_list[0]))
for i in distance_snap_list:
canvas_distance_snap_list.append(150 - i * 75)
# Creating the GUI.
root = Tkinter.Tk()
root.resizable(width=False, height=False)
root.geometry("400x500")
root.title("Weighted Objects")
# Stuff for the timer.
ms = time_list[0]
note_number = 0
# Function to be used to initialize the timer.
# Function to be used to run the timer.
# Function to be used to update the labels that need constant updates.
# Function to be used to draw the green line that indicates where the timer is at.
# Function used to kill the GUI.
# Function used to kill the program entirely.
Tkinter.Label(root, fg="black",
text="Old Amount of Objects: {}".format(len(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="New Calculated Weighted Objects: {:.2f}".format(sum(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="Raw Percentage Change: {:.2f}%".format(100 * sum(distance_snap_list)
/ len(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="Calculated Nerf/Buff: {:.2f}%".format(100 * final_nerf)).pack()
Tkinter.Label(root, fg="blue", text="Graph of Distance Snap/Cumulative Sum of Distance Snap against Time").pack()
difficulty_graph = Tkinter.Canvas(root, width=350, height=150)
difficulty_graph.pack()
Tkinter.Label(root, fg="black", text="Red/Blue: Distance Snap").pack()
Tkinter.Label(root, fg="black", text="Yellow: Cumulative Sum of Distance Snap").pack()
# Draw grid lines and fill background
difficulty_graph.create_rectangle(0, 0, 350, 150, fill="#dddddd")
difficulty_graph.create_line(0, 30, 350, 30, fill="#cccccc")
difficulty_graph.create_line(0, 60, 350, 60, fill="#cccccc")
difficulty_graph.create_line(0, 90, 350, 90, fill="#cccccc")
difficulty_graph.create_line(0, 120, 350, 120, fill="#cccccc")
difficulty_graph.create_line(70, 0, 70, 150, fill="#cccccc")
difficulty_graph.create_line(140, 0, 140, 150, fill="#cccccc")
difficulty_graph.create_line(210, 0, 210, 150, fill="#cccccc")
difficulty_graph.create_line(280, 0, 280, 150, fill="#cccccc")
# Draw blue line graph, distance snap.
for i in range(len(distance_snap_list) - 1):
# Don't continue the graph if there is a break.
if time_list[i + 1] - time_list[i] < 3000:
difficulty_graph.create_line(canvas_time_list[i], canvas_distance_snap_list[i],
canvas_time_list[i + 1], canvas_distance_snap_list[i + 1],
fill="#9999ff")
# Draw red line graph, the average thing (what do you call this?).
for n in range(len(time_break_separated_list)):
for x in range(len(time_break_separated_list[n]) - 20):
if n == 0:
i = x
else:
i = x + numpy.cumsum(map(len, time_break_separated_list))[n - 1]
# Don't continue graph if there's a break.
if time_list[i + 11] - time_list[i + 10] < 3000:
difficulty_graph.create_line(canvas_time_list[i + 10],
sum(canvas_distance_snap_list[i:i + 20]) / 20.0,
canvas_time_list[i + 11],
sum(canvas_distance_snap_list[i + 1:i + 21]) / 20.0,
fill="#990000")
# Draw yellow line graph, cumulative distance snap sum.
for i in range(len(distance_snap_list) - 1):
difficulty_graph.create_line(canvas_time_list[i],
150 - (149 * numpy.cumsum(distance_snap_list)[i] / sum(distance_snap_list)),
canvas_time_list[i + 1],
150 - (149 * numpy.cumsum(distance_snap_list)[i + 1] / sum(distance_snap_list)),
fill="#ffff00")
timer_line = difficulty_graph.create_line(0, 0, 0, 150, fill="#77ff77")
time_label = Tkinter.Label(root, fg="black")
time_label.pack()
distance_snap_label = Tkinter.Label(root, fg="black")
distance_snap_label.pack()
cumulative_label = Tkinter.Label(root, fg="black")
cumulative_label.pack()
progress_bar = ttk.Progressbar(root, orient="horizontal", length=200, mode="determinate")
progress_bar.pack()
progress_bar["maximum"] = 2
Tkinter.Button(root, fg="blue", text="Start Realtime!", command=first_load).pack()
Tkinter.Button(root, fg="red", text="Choose another map", command=stop).pack()
# If window is closed, stop the program.
root.protocol("WM_DELETE_WINDOW", kill)
root.mainloop()
| 39.201031 | 118 | 0.598028 |
05311a2863ffbf10e5b4872464958a44b018f474 | 2,929 | py | Python | src/benchmark/probe_training_wrapper.py | dumpmemory/PEARL_v1 | df46be5ed86ba7850486b578a8926aa151e7bf87 | [
"MIT"
] | 24 | 2021-06-10T04:09:00.000Z | 2021-11-02T11:23:35.000Z | src/benchmark/probe_training_wrapper.py | dumpmemory/PEARL_v1 | df46be5ed86ba7850486b578a8926aa151e7bf87 | [
"MIT"
] | 1 | 2021-06-08T15:27:38.000Z | 2021-06-08T15:41:05.000Z | src/benchmark/probe_training_wrapper.py | dumpmemory/PEARL_v1 | df46be5ed86ba7850486b578a8926aa151e7bf87 | [
"MIT"
] | 4 | 2021-06-10T02:28:12.000Z | 2021-08-24T13:00:14.000Z | from .probe import ProbeTrainer
# train using embeddings
# train using images
# main training method | 46.492063 | 148 | 0.635712 |
0531675b4efb814c0c0505cc13c93cd557315404 | 1,310 | py | Python | grr/server/grr_response_server/blob_stores/db_blob_store.py | oueldz4/grr | 8c60d9198cc0875a8ea80b90237eb0a8272082ff | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/blob_stores/db_blob_store.py | oueldz4/grr | 8c60d9198cc0875a8ea80b90237eb0a8272082ff | [
"Apache-2.0"
] | null | null | null | grr/server/grr_response_server/blob_stores/db_blob_store.py | oueldz4/grr | 8c60d9198cc0875a8ea80b90237eb0a8272082ff | [
"Apache-2.0"
] | 1 | 2020-07-09T01:08:48.000Z | 2020-07-09T01:08:48.000Z | #!/usr/bin/env python
"""REL_DB blobstore implementation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from grr_response_server import blob_store
from grr_response_server import data_store
| 31.190476 | 79 | 0.79084 |
053268be449fba403f273951c902bae23a8253b1 | 333 | py | Python | tests/ut_repytests_loose-testnetmessportreuse.py | SeattleTestbed/repy_v1 | f40a02e2e398b1ec67fede84b41a264ae7356d2c | [
"MIT"
] | 1 | 2021-08-18T05:58:17.000Z | 2021-08-18T05:58:17.000Z | tests/ut_repytests_loose-testnetmessportreuse.py | SeattleTestbed/repy_v1 | f40a02e2e398b1ec67fede84b41a264ae7356d2c | [
"MIT"
] | 3 | 2015-11-17T21:01:03.000Z | 2016-07-14T09:08:04.000Z | tests/ut_repytests_loose-testnetmessportreuse.py | SeattleTestbed/repy_v1 | f40a02e2e398b1ec67fede84b41a264ae7356d2c | [
"MIT"
] | 5 | 2015-07-02T13:29:23.000Z | 2021-09-25T07:48:30.000Z | #pragma out
#pragma repy restrictions.loose
if callfunc == 'initialize':
ip = getmyip()
noopch = recvmess(ip,<messport>,noop)
recvmess(ip,<messport1>,foo)
sleep(.1)
sendmess(ip,<messport1>,'hi',ip,<messport>)
stopcomm(noopch)
| 18.5 | 45 | 0.678679 |
05343aca0c5c82c59e3358b3b9d65dce1ef6b0de | 806 | py | Python | pyzfscmds/check.py | johnramsden/pyzfscmds | b5d430ffd0454bc6b09e256aeea67164714d9809 | [
"BSD-3-Clause"
] | 9 | 2018-07-08T20:01:33.000Z | 2022-03-29T11:31:51.000Z | pyzfscmds/check.py | johnramsden/pyzfscmds | b5d430ffd0454bc6b09e256aeea67164714d9809 | [
"BSD-3-Clause"
] | 1 | 2019-07-10T12:16:53.000Z | 2019-07-10T12:16:53.000Z | pyzfscmds/check.py | johnramsden/pyzfscmds | b5d430ffd0454bc6b09e256aeea67164714d9809 | [
"BSD-3-Clause"
] | 5 | 2018-06-04T02:33:43.000Z | 2020-05-25T22:48:58.000Z | """
Startup checks
"""
import subprocess
import pyzfscmds.system.agnostic as zfssys
def is_root_on_zfs():
"""Check if running root on ZFS"""
system = zfssys.check_valid_system()
if system is None:
raise RuntimeError(f"System is not yet supported by pyzfscmds\n")
root_dataset = None
if zfssys.zfs_module_loaded() and zpool_exists():
root_dataset = zfssys.mountpoint_dataset("/")
if root_dataset is None:
raise RuntimeError("System is not booting off a ZFS root dataset\n")
return True
| 23.705882 | 77 | 0.66005 |
0536d3d2cb26fae2a4bb43f1a3c0258c006ca24c | 2,015 | py | Python | dist.py | dladustn95/Dialogue_generator | 004fa49e3140e6c7ceb14448604c8aa45966f70d | [
"MIT"
] | 4 | 2020-09-03T03:39:53.000Z | 2021-08-25T03:53:41.000Z | dist.py | dladustn95/Dialogue_generator | 004fa49e3140e6c7ceb14448604c8aa45966f70d | [
"MIT"
] | null | null | null | dist.py | dladustn95/Dialogue_generator | 004fa49e3140e6c7ceb14448604c8aa45966f70d | [
"MIT"
] | 1 | 2020-09-04T07:04:50.000Z | 2020-09-04T07:04:50.000Z | import sys
sp="#####"
distinct_1(sys.argv[1])
distinct_2(sys.argv[1])
distinct_3(sys.argv[1]) | 34.152542 | 79 | 0.655583 |
0537e1ab85799850e99a5e3c6bb0f22f481e1ab8 | 5,036 | py | Python | Scripts/plot_PolarVortexStrength_PDFs.py | zmlabe/StratoVari | c5549f54482a2b05e89bded3e3b0b3c9faa686f3 | [
"MIT"
] | 4 | 2019-11-23T19:44:21.000Z | 2020-02-20T16:54:45.000Z | Scripts/plot_PolarVortexStrength_PDFs.py | zmlabe/StratoVari | c5549f54482a2b05e89bded3e3b0b3c9faa686f3 | [
"MIT"
] | null | null | null | Scripts/plot_PolarVortexStrength_PDFs.py | zmlabe/StratoVari | c5549f54482a2b05e89bded3e3b0b3c9faa686f3 | [
"MIT"
] | 2 | 2019-06-21T19:27:55.000Z | 2021-02-12T19:13:22.000Z | """
Calculate PDFs for polar vortex response
Notes
-----
Author : Zachary Labe
Date : 25 June 2019
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import datetime
import read_MonthlyData as MO
import calc_Utilities as UT
import cmocean
import scipy.stats as sts
### Define directories
directorydata = '/seley/zlabe/simu/'
directoryfigure = '/home/zlabe/Desktop/STRATOVARI/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting PDF Polar Vortex Subsamples- %s----' % titletime)
### Alott time series (300 ensemble members)
year1 = 1701
year2 = 2000
years = np.arange(year1,year2+1,1)
###############################################################################
###############################################################################
###############################################################################
### Call arguments
varnames = ['U10']
period = 'JFM' # Enter temporal period (DJF,JFM,JFMA,ND)
simuh = 'Past' # Enter simulation time (Current,Past)
letters = [r'Mean',r'A',r'B',r'C']
###############################################################################
if simuh == 'Current':
simuq = 'Cu'
elif simuh == 'Past':
simuq = 'Pi'
else:
print(ValueError('Wrong simulation selected!'))
###############################################################################
###############################################################################
###############################################################################
### Call function for 4d variable data
lat,lon,lev,varfuture = MO.readExperiAll(varnames[0],'Future','surface')
lat,lon,lev,varpast = MO.readExperiAll(varnames[0],simuh,'surface')
### Create 2d array of latitude and longitude
lon2,lat2 = np.meshgrid(lon,lat)
### List of experiments
runs = [varfuture,varpast]
### Separate per monthly periods
if period == 'DJF':
varmo = np.empty((len(runs),varpast.shape[0]-1,varpast.shape[2],
varpast.shape[3]))
for i in range(len(runs)):
varmo[i,:,:,:] = UT.calcDecJanFeb(runs[i],runs[i],lat,
lon,'surface',17)
elif period == 'JFM':
varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
varpast.shape[3]))
for i in range(len(runs)):
varmo[i,:,:,:] = np.nanmean(runs[i][:,:3,:,:],axis=1)
elif period == 'JFMA':
varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
varpast.shape[3]))
for i in range(len(runs)):
varmo[i,:,:,:] = np.nanmean(runs[i][:,:4,:,:],axis=1)
elif period == 'ND':
varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
varpast.shape[3]))
for i in range(len(runs)):
varmo[i,:,:,:] = np.nanmean(runs[i][:,-2:,:,:],axis=1)
else:
ValueError('Wrong period selected! (DJF,JFM,JFMA,ND)')
### Remove missing data
varmo[np.where(varmo < -1e10)] = np.nan
###############################################################################
###############################################################################
###############################################################################
### Slice data for 60N
latq = np.where((lat >= 59.5) & (lat <= 60.5))[0]
latu = lat[latq].squeeze()
varmou = varmo[:,:,latq,:].squeeze()
### Calculate zonal mean
varmoz = np.nanmean(varmou[:,:,:],axis=2)
### Calculate anomalies
anom = varmoz[0,:] - varmoz[1,:]
### Remove nans
mask = ~np.isnan(anom)
anom = anom[mask]
### Fit a distribution
num_bins = np.arange(-50,50,1)
mA,sA = sts.norm.fit(anom[:100])
mB,sB = sts.norm.fit(anom[100:200])
mC,sC = sts.norm.fit(anom[200:])
mm,sm = sts.norm.fit(anom[:])
A = sts.norm.pdf(num_bins,mA,sA)
B = sts.norm.pdf(num_bins,mB,sB)
C = sts.norm.pdf(num_bins,mC,sC)
meann = sts.norm.pdf(num_bins,mm,sm)
plt.figure()
plt.plot(num_bins,A,color='darkblue',linewidth=2.0,label=r'A')
plt.plot(num_bins,B,color='darkgreen',linewidth=2.0,label=r'B')
plt.plot(num_bins,C,color='darkorange',linewidth=2.0,label=r'C')
plt.plot(num_bins,meann,color='k',linewidth=2.0,label=r'Mean',
linestyle='--',dashes=(1,0.3))
l = plt.legend(shadow=False,fontsize=7,loc='upper left',
fancybox=True,frameon=False,ncol=1,bbox_to_anchor=(0.72,1),
labelspacing=0.2,columnspacing=1,handletextpad=0.4)
for text in l.get_texts():
text.set_color('k')
### Statistical tests on distribution
tA,pA = sts.ks_2samp(A,meann)
tB,pB = sts.ks_2samp(B,meann)
tC,pC = sts.ks_2samp(C,meann)
print('\n\nP-value between A and mean --> %s!' % np.round(pA,4))
print('P-value between B and mean --> %s!' % np.round(pB,4))
print('P-value between C and mean --> %s!' % np.round(pC,4))
plt.savefig(directoryfigure + 'PDFs_PolarVortex_%s_%s.png' % \
(period,simuh),dpi=300)
| 34.027027 | 79 | 0.53475 |
053869e3d79166cc0d895c117eef19a63bd977af | 906 | py | Python | test/test_airtunnel/operators/test_sql_helpers.py | joerg-schneider/airflow-bootstrap | bbed0a2d5addd0dd6221b75c06982f47e0d837d4 | [
"MIT"
] | 23 | 2019-09-30T15:22:58.000Z | 2021-04-09T10:53:23.000Z | test/test_airtunnel/operators/test_sql_helpers.py | joerg-schneider/airflow-bootstrap | bbed0a2d5addd0dd6221b75c06982f47e0d837d4 | [
"MIT"
] | 1 | 2019-11-24T18:37:56.000Z | 2019-11-24T18:37:56.000Z | test/test_airtunnel/operators/test_sql_helpers.py | joerg-schneider/airflow-bootstrap | bbed0a2d5addd0dd6221b75c06982f47e0d837d4 | [
"MIT"
] | 4 | 2020-01-14T03:31:34.000Z | 2021-05-07T21:34:22.000Z | import pytest
from airtunnel.operators.sql import sql_helpers
TEST_SCRIPT = "ddl/test_schema/test_table.sql"
| 26.647059 | 74 | 0.728477 |
053914ae8ca6bed144522d26cba1f2a52c6014f5 | 2,582 | py | Python | EE475/Ch6P13.py | PhoeniXuzoo/NU-Projects | a217ad46e6876ceffb3dec1d6e52f775674b2e8b | [
"MIT"
] | null | null | null | EE475/Ch6P13.py | PhoeniXuzoo/NU-Projects | a217ad46e6876ceffb3dec1d6e52f775674b2e8b | [
"MIT"
] | null | null | null | EE475/Ch6P13.py | PhoeniXuzoo/NU-Projects | a217ad46e6876ceffb3dec1d6e52f775674b2e8b | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
## softmax: 0.1 600
## perceptron: 0.05 550
if __name__ == "__main__":
csvname = 'breast_cancer_data.csv'
x, y = readData(csvname)
w = np.ones([x.shape[0] + 1, 1])
x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)
xSoftList = [0]
ySoftList = [softmaxCostFunc(x, y, w)]
for i in range(600):
w = gradientDescentOneStepForSoftmax(x, y, w)
xSoftList.append(i+1)
ySoftList.append(softmaxCostFunc(x, y, w))
yPredic = np.transpose(np.dot(np.transpose(x), w))
wrong = 0
for i in range(np.size(yPredic)):
if ((yPredic[0][i] > 0) != (y[0][i] > 0)):
wrong += 1
print("Softmax Wrong Prediction: ", wrong)
w = np.ones([x.shape[0], 1])
xPerceptronList = [0]
yPerceptronList = [perceptronCostFunc(x, y, w)]
for i in range(550):
w = gradientDescentOneStepForPerceptron(x, y, w)
xPerceptronList.append(i+1)
yPerceptronList.append(perceptronCostFunc(x, y, w))
plt.plot(xSoftList, ySoftList, label="Softmax Cost Function",color="#F08080")
plt.plot(xPerceptronList, yPerceptronList, label="Perceptro Cost Function")
plt.legend(loc="upper right")
plt.show()
plt.close()
yPredic = np.transpose(np.dot(np.transpose(x), w))
wrong = 0
for i in range(np.size(yPredic)):
if ((yPredic[0][i] > 0) != (y[0][i] > 0)):
wrong += 1
print("Perceptron Wrong Prediction: ", wrong)
| 29.011236 | 82 | 0.573199 |
05399638e32621d9f8eab1ecc185a769af934b80 | 416 | py | Python | square.py | Formalhalt/Phyton-Calculators | 25f686e45a8333e9a141568c8f695350bde36bc6 | [
"CC0-1.0"
] | null | null | null | square.py | Formalhalt/Phyton-Calculators | 25f686e45a8333e9a141568c8f695350bde36bc6 | [
"CC0-1.0"
] | null | null | null | square.py | Formalhalt/Phyton-Calculators | 25f686e45a8333e9a141568c8f695350bde36bc6 | [
"CC0-1.0"
] | null | null | null |
height = float(input("Enter height of the square: "))
width = float(input("Enter width of the Square: "))
perimeter = (2 * height) + (2 * width)
area = height * height
print("The perimeter of the square is", perimeter)
print("The area of the square is", area)
close = input("Press X to exit")
# The above line of code keeps the program open for the user to see the outcome of the problem.
| 23.111111 | 96 | 0.663462 |
053b161da791d51b0f7c77d904ccb2a6a0472da3 | 6,492 | py | Python | dls7-1[cnn].py | cutz-j/computer_vision | 23408231221bb16539ea1964f000bdbb7f9e7e20 | [
"MIT"
] | null | null | null | dls7-1[cnn].py | cutz-j/computer_vision | 23408231221bb16539ea1964f000bdbb7f9e7e20 | [
"MIT"
] | null | null | null | dls7-1[cnn].py | cutz-j/computer_vision | 23408231221bb16539ea1964f000bdbb7f9e7e20 | [
"MIT"
] | null | null | null | import numpy as np
from common.util import im2col
from collections import OrderedDict
from common.layers import *
from common.gradient import numerical_gradient
## 4 ##
x = np.random.rand(10, 1, 28, 28) # (m, c, h, w)
print(x.shape)
x[0].shape # data 1
x[0, 0] # data channel
x1 = np.random.rand(1, 3, 7, 7)
col1 = im2col(x1, 5, 5, stride=1, pad=0)
print(col1.shape)
x2 = np.random.rand(10, 3, 7 ,7)
col2 = im2col(x2, 5, 5)
print(col2.shape)
def predict(self, x):
for layer in self.layers.values():
x = layer.forward(x)
return x
def loss(self, x, t):
""" .
Parameters
----------
x :
t :
"""
y = self.predict(x)
return self.last_layer.forward(y, t)
def accuracy(self, x, t, batch_size=100):
if t.ndim != 1 : t = np.argmax(t, axis=1)
acc = 0.0
for i in range(int(x.shape[0] / batch_size)):
tx = x[i*batch_size:(i+1)*batch_size]
tt = t[i*batch_size:(i+1)*batch_size]
y = self.predict(tx)
y = np.argmax(y, axis=1)
acc += np.sum(y == tt)
return acc / x.shape[0]
def numerical_gradient(self, x, t):
""" .
Parameters
----------
x :
t :
Returns
-------
(dictionary)
grads['W1']grads['W2']...
grads['b1']grads['b2']...
"""
loss_w = lambda w: self.loss(x, t)
grads = {}
for idx in (1, 2, 3):
grads['W' + str(idx)] = numerical_gradient(loss_w, self.params['W' + str(idx)])
grads['b' + str(idx)] = numerical_gradient(loss_w, self.params['b' + str(idx)])
return grads
def gradient(self, x, t):
""" ().
Parameters
----------
x :
t :
Returns
-------
(dictionary)
grads['W1']grads['W2']...
grads['b1']grads['b2']...
"""
# forward
self.loss(x, t)
# backward
dout = 1
dout = self.last_layer.backward(dout)
layers = list(self.layers.values())
layers.reverse()
for layer in layers:
dout = layer.backward(dout)
#
grads = {}
grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db
grads['W2'], grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
grads['W3'], grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db
return grads
| 31.211538 | 95 | 0.518792 |
053b82076a707b4cf23d2d9a676fce87856d471c | 17,136 | py | Python | tests/test_crawler.py | jacopoabbate/datavault-api-python-client | 70c3113b56db77de3835b4210dd7bffb22b34c9f | [
"MIT"
] | null | null | null | tests/test_crawler.py | jacopoabbate/datavault-api-python-client | 70c3113b56db77de3835b4210dd7bffb22b34c9f | [
"MIT"
] | null | null | null | tests/test_crawler.py | jacopoabbate/datavault-api-python-client | 70c3113b56db77de3835b4210dd7bffb22b34c9f | [
"MIT"
] | null | null | null | import datetime
import pytest
import requests
from datavault_api_client import crawler
from datavault_api_client.data_structures import DiscoveredFileInfo
| 36.69379 | 100 | 0.623424 |
053c288fde8eaacd236f5d1f96f0de4ba7806a4f | 2,976 | py | Python | prototype/couch/couch_concurrent.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | 2 | 2015-06-09T16:07:09.000Z | 2015-07-28T10:06:31.000Z | prototype/couch/couch_concurrent.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | 3 | 2020-07-22T15:14:55.000Z | 2021-12-13T19:35:06.000Z | prototype/couch/couch_concurrent.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
__author__ = 'Michael Meisinger'
import gevent
import random
from pyon.datastore.datastore import DatastoreManager
from pyon.public import IonObject
import ooi.timer
"""
from prototype.couch.couch_concurrent import runcc
runcc(dict(num_obj=100000, num_read=2000, num_thread=3))
"""
| 30.680412 | 109 | 0.652218 |
053c773f4e711f1e0bdb6a424915109fb1e18820 | 2,226 | py | Python | serve.py | racterub/itac-flask | 5b30e51c9a625483eaf312fb64472622b60b00eb | [
"MIT"
] | null | null | null | serve.py | racterub/itac-flask | 5b30e51c9a625483eaf312fb64472622b60b00eb | [
"MIT"
] | null | null | null | serve.py | racterub/itac-flask | 5b30e51c9a625483eaf312fb64472622b60b00eb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2020-04-06 23:45:54
# @Author : Racter Liu (racterub) (racterub@gmail.com)
# @Link : https://racterub.me
# @License : MIT
from flask import Flask, render_template, request, url_for, redirect, session, send_from_directory, send_file, make_response
app = Flask(__name__)
app.secret_key = "test"
DEBUG = True
PORT = 8989
# in-url param
# http get param
# implement login
# session
# serve static file
# make_response
# Jinja
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=DEBUG, port=PORT)
| 24.195652 | 124 | 0.612309 |
053ff9d6d078fd3f1bdb31f203cc1982c89849c7 | 4,067 | py | Python | api/list_bp.py | kziovas/practise-todolist | 53e6b789b46e1104a076835ac606544ba1ef7b25 | [
"MIT"
] | 2 | 2021-07-27T08:38:35.000Z | 2021-08-03T10:00:58.000Z | api/list_bp.py | kziovas/practise-sanic-rest-api-to-do-list | 53e6b789b46e1104a076835ac606544ba1ef7b25 | [
"MIT"
] | null | null | null | api/list_bp.py | kziovas/practise-sanic-rest-api-to-do-list | 53e6b789b46e1104a076835ac606544ba1ef7b25 | [
"MIT"
] | null | null | null | from sanic.response import json
from sanic import Blueprint
from service import ListService
from injector import inject, singleton
from logging import Logger
from sanic import Sanic
| 37.311927 | 157 | 0.715761 |
0541032df78b9eac36f755de81be4a580d936532 | 5,223 | py | Python | src/AoC_2015/d24_sleigh_balance_subset_sum/sleigh_balance.py | derailed-dash/Advent-of-Code | 12378baf33ef4a59958e84eb60e795b6530c22ba | [
"MIT"
] | 9 | 2021-12-31T20:13:03.000Z | 2022-03-05T07:05:06.000Z | src/AoC_2015/d24_sleigh_balance_subset_sum/sleigh_balance.py | derailed-dash/Advent-of-Code | 12378baf33ef4a59958e84eb60e795b6530c22ba | [
"MIT"
] | 1 | 2022-01-25T08:35:04.000Z | 2022-01-29T00:07:00.000Z | src/AoC_2015/d24_sleigh_balance_subset_sum/sleigh_balance.py | derailed-dash/Advent-of-Code | 12378baf33ef4a59958e84eb60e795b6530c22ba | [
"MIT"
] | null | null | null | """
Author: Darren
Date: 02/05/2021
Solving https://adventofcode.com/2015/day/24
We require three bags of equal weight.
Bag 1 in the passenger compartment, needs to have fewest packages.
Bags 2 and 3 to either side.
Solution:
Use subset sum function to work out which combinations of packages adds up to
total weight / number of bags (compartments).
The faster subsum is about 3x quicker than the version that uses itertools.combinations.
Once we have all combinations for the first bag, sort by the number of packages,
since we want the first bag to have fewest possible packages.
We don't care about what's in bags 2, 3...
I.e. because we know we will have valid combinations of packages that will add up to the same weight
"""
from __future__ import absolute_import
import logging
import os
import time
from math import prod
from itertools import combinations
# pylint: disable=logging-fstring-interpolation
SCRIPT_DIR = os.path.dirname(__file__)
INPUT_FILE = "input/input.txt"
SAMPLE_INPUT_FILE = "input/sample_input.txt"
def faster_subset_sum(items: list, target: int, partial=[], results=[]) -> list:
"""
Determine all combinations of list items that add up to the target
Args:
numbers (list): A list of values
target (int): The total that the values need to add up to
partial (list, optional): Used by the function. Defaults to [].
results (list, optional): Used by the function. Defaults to [].
Returns:
list: The list of valid combinations
"""
total = sum(partial)
# check if the partial sum is equals to target, and if so
# add the current terms to the results list
if total == target:
results.append(partial)
# if the partial sum equals or exceed the target, no point in recursing through remaining terms.
if total >= target:
return []
for i, item in enumerate(items):
remaining_numbers = items[i + 1:]
faster_subset_sum(remaining_numbers, target, partial + [item], results)
return results
if __name__ == "__main__":
t1 = time.perf_counter()
main()
t2 = time.perf_counter()
print(f"Execution time: {t2 - t1:0.4f} seconds")
| 34.589404 | 103 | 0.681409 |
0541425822ca873cc1104abcaddefbf0b86d3c05 | 8,946 | py | Python | artap/algorithm_bayesopt.py | tamasorosz/artap | e8df160bfc9c378c3fc96b0b86e92d75d89cf26b | [
"MIT"
] | 5 | 2021-06-13T17:04:37.000Z | 2022-03-04T17:16:06.000Z | artap/algorithm_bayesopt.py | tamasorosz/artap | e8df160bfc9c378c3fc96b0b86e92d75d89cf26b | [
"MIT"
] | null | null | null | artap/algorithm_bayesopt.py | tamasorosz/artap | e8df160bfc9c378c3fc96b0b86e92d75d89cf26b | [
"MIT"
] | 8 | 2021-03-11T18:23:47.000Z | 2022-02-22T11:13:23.000Z | from .problem import Problem
from .algorithm import Algorithm
from .config import artap_root
import time
import numpy as np
import os
import sys
sys.path.append(artap_root + os.sep + "lib" + os.sep)
import bayesopt
from multiprocessing import Process, Pipe, Queue, Manager
# from multiprocessing.managers import BaseManager
_l_type = ['L_FIXED', 'L_EMPIRICAL', 'L_DISCRETE', 'L_MCMC', 'L_ERROR']
_sc_type = ['SC_MTL', 'SC_ML', 'SC_MAP', 'SC_LOOCV', 'SC_ERROR']
_surr_name = ["sGaussianProcess", "sGaussianProcessML", "sGaussianProcessNormal", "sStudentTProcessJef", "sStudentTProcessNIG"]
# Python module to get run BayesOpt library in a OO pattern.
# The objective module should inherit this one and override evaluateSample.
## Main function. Starts the optimization process.
def optimize(self):
min_val, x_out, error = bayesopt.optimize(self.evaluateSample, self.n_dim,
self.lb, self.ub,
self.params)
return min_val, x_out, error
class BayesOpt(Algorithm):
""" BayesOpt algorithms """
class BayesOptClassSerial(BayesOptContinuous):
class BayesOptSerial(BayesOpt):
""" BayesOpt algorithms """
class BayesOptClassParallel(Process, BayesOptContinuous):
class BayesOptParallel(BayesOpt):
""" BayesOpt algorithms """ | 31.611307 | 127 | 0.59099 |
0543197cdee0aacdb12b0d10810f263f61b2c8d7 | 538 | py | Python | Sorting/ShortBubbleSort.py | sonivaibhv/Algo | ea53d61a17687ef08bb2a7dbfd9331fd10f49ea8 | [
"MIT"
] | 1 | 2017-05-06T13:05:35.000Z | 2017-05-06T13:05:35.000Z | Sorting/ShortBubbleSort.py | CuriousLearner/Algorithms | e44a04b3a0797da36a9de18c116a48241ce59d9d | [
"MIT"
] | null | null | null | Sorting/ShortBubbleSort.py | CuriousLearner/Algorithms | e44a04b3a0797da36a9de18c116a48241ce59d9d | [
"MIT"
] | null | null | null | def Short_Bubble_Sort(alist):
'''
Sorting alist using Short Bubble Sort
'''
passnum = len(alist) - 1
exchangesDone = True
while passnum > 0 and exchangesDone:
exchangesDone = False
for i in range(passnum):
if alist[i] > alist[i+1]:
exchangesDone = True
alist[i], alist[i+1] = alist[i+1], alist[i]
passnum = passnum - 1
return alist
main()
| 25.619048 | 59 | 0.565056 |
0543991c023c828b9777016230758b911a5898f1 | 5,997 | py | Python | src/arclink/libs/python/seiscomp/db/generic/genwrap.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | src/arclink/libs/python/seiscomp/db/generic/genwrap.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | src/arclink/libs/python/seiscomp/db/generic/genwrap.py | yannikbehr/seiscomp3 | ebb44c77092555eef7786493d00ac4efc679055f | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1 | 2021-09-15T08:13:27.000Z | 2021-09-15T08:13:27.000Z | # This file was created by a source code generator:
# genxml2wrap.py
# Do not modify. Change the definition and
# run the generator again!
#
# (c) 2010 Mathias Hoffmann, GFZ Potsdam
#
#
import datetime
#
#
# QualityControl::QCLog
# QualityControl::WaveformQuality
# QualityControl::Outage
# Inventory::StationReference
# Inventory::StationGroup
# Inventory::AuxSource
# Inventory::AuxDevice
# Inventory::SensorCalibration
# Inventory::Sensor
# Inventory::ResponsePAZ
# Inventory::ResponsePolynomial
# Inventory::DataloggerCalibration
# Inventory::Decimation
# Inventory::Datalogger
# Inventory::ResponseFIR
# Inventory::AuxStream
# Inventory::Stream
# Inventory::SensorLocation
# Inventory::Station
# Inventory::Network
# Routing::RouteArclink
# Routing::RouteSeedlink
# Routing::Route
# Routing::Access
| 18.395706 | 73 | 0.691346 |
05461bddcd3bd1546efdbcd5e16d6aa27b51efe8 | 7,754 | py | Python | mailchimp_marketing_asyncio/models/problem_detail_document.py | john-parton/mailchimp-asyncio | 3865ca0867bec8f537dc1e3256aa3a160c00f8a2 | [
"Apache-2.0"
] | null | null | null | mailchimp_marketing_asyncio/models/problem_detail_document.py | john-parton/mailchimp-asyncio | 3865ca0867bec8f537dc1e3256aa3a160c00f8a2 | [
"Apache-2.0"
] | null | null | null | mailchimp_marketing_asyncio/models/problem_detail_document.py | john-parton/mailchimp-asyncio | 3865ca0867bec8f537dc1e3256aa3a160c00f8a2 | [
"Apache-2.0"
] | 1 | 2022-03-09T14:52:22.000Z | 2022-03-09T14:52:22.000Z | # coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProblemDetailDocument):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 32.995745 | 181 | 0.607557 |
054f8d75e59fcecffb5d9b04254a41c1dbff6742 | 1,118 | py | Python | loadingstatus.py | NesbiDevelopment/PythonHelper | 6dc7545cd0ebd5bd3daf32860a7dc45d1caf50e3 | [
"MIT"
] | null | null | null | loadingstatus.py | NesbiDevelopment/PythonHelper | 6dc7545cd0ebd5bd3daf32860a7dc45d1caf50e3 | [
"MIT"
] | null | null | null | loadingstatus.py | NesbiDevelopment/PythonHelper | 6dc7545cd0ebd5bd3daf32860a7dc45d1caf50e3 | [
"MIT"
] | null | null | null | import time
import sys
| 31.055556 | 75 | 0.584079 |
0552a237d536bb49e4a74fe8039eabfd37370524 | 1,596 | py | Python | main.py | WillyHHsu/rest | 1adba475579cb2c0f9b8690b7f822c02b483146a | [
"MIT"
] | null | null | null | main.py | WillyHHsu/rest | 1adba475579cb2c0f9b8690b7f822c02b483146a | [
"MIT"
] | null | null | null | main.py | WillyHHsu/rest | 1adba475579cb2c0f9b8690b7f822c02b483146a | [
"MIT"
] | null | null | null | import os
from fastapi import FastAPI
from fastapi_sqlalchemy import DBSessionMiddleware
from fastapi_sqlalchemy import db
from dotenv import load_dotenv
from sqlalchemy import schema
from db import models as db_model
from schemas import models as schema
load_dotenv()
POSTGRES_USER = os.getenv('POSTGRES_USER')
POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD')
POSTGRES_DB = os.getenv('POSTGRES_DB')
POSTGRES_URL = os.getenv('POSTGRES_URL')
POSTGRES_PORT = os.getenv('POSTGRES_PORT', 5432)
app = FastAPI(
title="API REST",
description="Uma API REST by WillyHHsu",
)
app.add_middleware(
DBSessionMiddleware,
db_url=f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_URL}:{POSTGRES_PORT}/{POSTGRES_DB}"
)
| 29.555556 | 107 | 0.759398 |
05546175c9355e358802def95353b9059d638d79 | 866 | py | Python | src/compas_blender/utilities/data.py | KEERTHANAUDAY/compas | 4d1101cf302f95a4472a01a1265cc64eaec6aa4a | [
"MIT"
] | null | null | null | src/compas_blender/utilities/data.py | KEERTHANAUDAY/compas | 4d1101cf302f95a4472a01a1265cc64eaec6aa4a | [
"MIT"
] | null | null | null | src/compas_blender/utilities/data.py | KEERTHANAUDAY/compas | 4d1101cf302f95a4472a01a1265cc64eaec6aa4a | [
"MIT"
] | null | null | null | import bpy
__all__ = [
"delete_all_data",
]
def delete_all_data():
"""Delete all collections, mesh and curve objects, meshes, curves, materials."""
for collection in bpy.data.collections:
bpy.data.collections.remove(collection)
for obj in bpy.data.objects:
if obj.type == 'MESH':
bpy.data.objects.remove(obj)
elif obj.type == 'CURVE':
bpy.data.objects.remove(obj)
for mesh in bpy.data.meshes:
bpy.data.meshes.remove(mesh)
for curve in bpy.data.curves:
bpy.data.curves.remove(curve)
for material in bpy.data.materials:
bpy.data.materials.remove(material)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == '__main__':
pass
| 27.0625 | 84 | 0.51963 |
055668b6a61ba32a80522c93f3aa4dbcf035bb7b | 2,335 | py | Python | teams_to_tsv.py | FSU-ACM-OSSG/Contest-Server | f9aabd9742a6aa78cbefc685fd2760a1f83d7721 | [
"MIT"
] | 8 | 2019-01-13T21:57:53.000Z | 2021-11-29T12:32:48.000Z | teams_to_tsv.py | FSU-ACM-OSSG/Contest-Server | f9aabd9742a6aa78cbefc685fd2760a1f83d7721 | [
"MIT"
] | 73 | 2018-02-13T00:58:39.000Z | 2022-02-10T11:59:53.000Z | teams_to_tsv.py | FSU-ACM-OSSG/Contest-Server | f9aabd9742a6aa78cbefc685fd2760a1f83d7721 | [
"MIT"
] | 4 | 2018-02-08T18:56:54.000Z | 2019-02-13T19:01:53.000Z | ##############
# team_to_tsv script
# Creates two tsv files for importing into domjudge
# Team info gets stored inside teams.tsv in the following format
# <team_id(int)> <external_id> <category_id> <team_name>
# Account info gets stored inside acccounts.tsv in the following format
# team <team-name> <user-name> <password> <teamid>
#
# Import teams.tsv first, then accounts.tsv
#
# NOTE 1 : Domjudge doesn't insert teams with ID < 1
from app.models.Team import *
with open("teams.tsv", "w+") as teams_tsv, \
open("accounts.tsv", "w+") as accounts_tsv:
# Headers requiered by domjudge
teams_tsv.write("teams\t1\n")
accounts_tsv.write("accounts\t1\n")
walkin_counter = 1
for team in Team.objects.all():
# Only make 100 walk-in accounts
if walkin_counter > 101:
break;
# Accounts that are not in use are assigned to walk-ins
if team.team_name is None:
team.team_name = "".join(("Walk-in-", str(walkin_counter)))
walkin_counter += 1
# Empty team names are assign a dummy value
if team.team_name.isspace():
team.team_name = "UnnamedTeam"
# Avoiding team number 0, refer to NOTE 1 in the header
if team.teamID == "acm-0":
continue
teams_tsv.write(u"\t".join(
[team.teamID.strip("acm-"), # To only get ID number
team.teamID, # Set to external ID for exporting
"2", # Category ID of Participants Category - See footnote
team.team_name.strip('\t'), # So tabs in team_name don't interfere
'\n']))
accounts_tsv.write(u"\t".join(
["team",
team.team_name.strip('\t'), # So tabs in team_name don't interfere
'{0}-{1}'.format('team', team.teamID.split('-')[1].zfill(3)),
team.domPass,
# team.teamID.strip("acm-"), # To only get ID number
'\n']))
#
# FOOTNOTE: Team Category
#
# This value determines the team_category. Domjudge's defaults are:
# 1 -> System
# 2 -> Self-Registered
# 3 -> Jury
#
# Since System and Jury are meant for admin, we assign teams to being
# "self-registered" because you can't self-register for our contests
# anyway, and this is easier than making you create a new category first.
#
| 36.484375 | 77 | 0.614561 |
05573cc8d3a341c5de3d72784bf092562a5a2e63 | 1,848 | py | Python | mishris/utils/util.py | virazura/mishris | 60762364347bfa50ffc9948e9d227c569fe68da5 | [
"MIT"
] | null | null | null | mishris/utils/util.py | virazura/mishris | 60762364347bfa50ffc9948e9d227c569fe68da5 | [
"MIT"
] | null | null | null | mishris/utils/util.py | virazura/mishris | 60762364347bfa50ffc9948e9d227c569fe68da5 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import frappe
import filetype
"""
NEED IMPORT LOCAL LANG FROM FRAPPE
"""
| 34.222222 | 165 | 0.67803 |
055a29385d9e76d3a424d3a90ed95bbdc4015019 | 4,906 | py | Python | cleverapi/clever_api.py | oncecreated/cleverapi | 39b41860604a909d3e5262c1c795c0741570a653 | [
"MIT"
] | 13 | 2018-06-30T14:16:42.000Z | 2020-03-04T20:23:47.000Z | cleverapi/clever_api.py | oncecreated/cleverapi | 39b41860604a909d3e5262c1c795c0741570a653 | [
"MIT"
] | 11 | 2018-09-09T09:54:27.000Z | 2019-04-15T13:40:19.000Z | cleverapi/clever_api.py | oncecreated/cleverapi | 39b41860604a909d3e5262c1c795c0741570a653 | [
"MIT"
] | 14 | 2018-07-24T17:38:56.000Z | 2020-03-04T20:24:12.000Z | import hashlib
import json
import uuid
import requests
import aiohttp
from .exceptions import ApiResponseError
from .action import Action
| 28.858824 | 91 | 0.584183 |
055ac96948dda92e22c15b66cc5f914681a2cae3 | 5,350 | py | Python | blagging/views.py | androiddrew/blag-fork | 249144c9a017581a6c5e387f5d86f33421d82ae3 | [
"MIT"
] | null | null | null | blagging/views.py | androiddrew/blag-fork | 249144c9a017581a6c5e387f5d86f33421d82ae3 | [
"MIT"
] | 7 | 2017-01-03T15:34:30.000Z | 2017-07-13T15:27:08.000Z | blagging/views.py | androiddrew/blag-fork | 249144c9a017581a6c5e387f5d86f33421d82ae3 | [
"MIT"
] | null | null | null | from datetime import datetime as dt
from flask import render_template, redirect, request, url_for, abort
from flask_login import login_user, logout_user, login_required, current_user, login_url
from . import app, db, login_manager
from .models import Post, Tag, Author, tags as Post_Tag
from .forms import LoginForm, PostForm
# Auth#################
# MAIN##############
# MAIN OTHER###########
| 33.647799 | 119 | 0.65271 |
055b1e351a5242b821e047dfcb5c1f7591a3c693 | 509 | py | Python | id.py | txkodo/pyDatapack | f647e0043d09e3d456a8019fb00cb945c0d6b6a7 | [
"MIT"
] | null | null | null | id.py | txkodo/pyDatapack | f647e0043d09e3d456a8019fb00cb945c0d6b6a7 | [
"MIT"
] | null | null | null | id.py | txkodo/pyDatapack | f647e0043d09e3d456a8019fb00cb945c0d6b6a7 | [
"MIT"
] | null | null | null | import string
import secrets
| 36.357143 | 92 | 0.740668 |
055c22d5891f38a9238c8713208320ff8c57d8d5 | 185 | py | Python | bot/states/states.py | amtp1/ubi-4 | bbfa07f0936960058d7f282b1c83be7150494dc1 | [
"BSD-3-Clause"
] | null | null | null | bot/states/states.py | amtp1/ubi-4 | bbfa07f0936960058d7f282b1c83be7150494dc1 | [
"BSD-3-Clause"
] | null | null | null | bot/states/states.py | amtp1/ubi-4 | bbfa07f0936960058d7f282b1c83be7150494dc1 | [
"BSD-3-Clause"
] | null | null | null | from aiogram.dispatcher.filters.state import StatesGroup, State | 26.428571 | 63 | 0.783784 |
055c45d3bc0e2eb761a389c587de2205205755a0 | 255 | py | Python | apps/user/urls.py | dimastbk/x-one_test | aedf4dd4c5299c1f6e6afde2f557bd284e50f6dc | [
"MIT"
] | 1 | 2020-08-10T11:46:17.000Z | 2020-08-10T11:46:17.000Z | apps/user/urls.py | dimastbk/x-one_test | aedf4dd4c5299c1f6e6afde2f557bd284e50f6dc | [
"MIT"
] | null | null | null | apps/user/urls.py | dimastbk/x-one_test | aedf4dd4c5299c1f6e6afde2f557bd284e50f6dc | [
"MIT"
] | null | null | null | from rest_framework.routers import DefaultRouter
from apps.user.views import AuthViewSet, UserViewSet
router = DefaultRouter()
router.register("", AuthViewSet, basename="auth")
router.register("", UserViewSet, basename="user")
urlpatterns = router.urls
| 28.333333 | 52 | 0.796078 |
055c91bef8da3c2b5ab9913ec9ae41927e8fef83 | 1,514 | py | Python | evkit/utils/misc.py | joel99/midlevel-reps | f0b4a4d8ccf09a0488cd18af24723172aff99446 | [
"MIT"
] | 120 | 2019-04-22T04:45:28.000Z | 2022-03-23T01:53:17.000Z | evkit/utils/misc.py | joel99/midlevel-reps | f0b4a4d8ccf09a0488cd18af24723172aff99446 | [
"MIT"
] | 14 | 2019-06-12T08:21:21.000Z | 2021-08-25T15:36:58.000Z | evkit/utils/misc.py | joel99/midlevel-reps | f0b4a4d8ccf09a0488cd18af24723172aff99446 | [
"MIT"
] | 19 | 2019-06-19T07:00:36.000Z | 2022-03-24T07:18:30.000Z | import collections
import torch
import pprint
import string
remove_whitespace = str.maketrans('', '', string.whitespace)
def cfg_to_md(cfg, uuid):
''' Because tensorboard uses markdown'''
return uuid + "\n\n " + pprint.pformat((cfg)).replace("\n", " \n").replace("\n \'", "\n \'") + ""
def compute_weight_norm(parameters):
''' no grads! '''
total = 0.0
count = 0
for p in parameters:
total += torch.sum(p.data**2)
# total += p.numel()
count += p.numel()
return (total / count)
def get_number(name):
"""
use regex to get the first integer in the name
if none exists, return -1
"""
try:
num = int(re.findall("[0-9]+", name)[0])
except:
num = -1
return num
| 22.939394 | 110 | 0.579921 |
055cc455230997c5276c879e8d734a4e3c932b7e | 1,652 | py | Python | g13gui/g13/manager_tests.py | jtgans/g13gui | aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc | [
"MIT"
] | 3 | 2021-10-16T01:28:24.000Z | 2021-12-07T21:49:54.000Z | g13gui/g13/manager_tests.py | jtgans/g13gui | aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc | [
"MIT"
] | 12 | 2021-05-09T16:57:18.000Z | 2021-06-16T19:20:57.000Z | g13gui/g13/manager_tests.py | jtgans/g13gui | aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc | [
"MIT"
] | null | null | null | #!/usr/bin/python
import unittest
import time
import usb.util
from g13gui.observer.observer import ObserverTestCase
from g13gui.model.prefs import Preferences
from g13gui.g13.manager import DeviceManager
from g13gui.g13.manager import LCD_BUFFER_SIZE
if __name__ == '__main__':
unittest.main()
| 24.656716 | 65 | 0.598668 |
055df8a4d5bc728dd507e18c15a01996fcd7eeb9 | 754 | py | Python | mpikat/utils/unix_socket.py | ewanbarr/mpikat | 1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9 | [
"MIT"
] | 2 | 2018-11-12T12:17:27.000Z | 2019-02-08T15:44:14.000Z | mpikat/utils/unix_socket.py | ewanbarr/mpikat | 1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9 | [
"MIT"
] | 3 | 2018-08-03T12:05:20.000Z | 2018-08-03T12:13:53.000Z | mpikat/utils/unix_socket.py | ewanbarr/mpikat | 1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9 | [
"MIT"
] | 4 | 2019-01-21T16:31:34.000Z | 2019-12-03T09:27:15.000Z | import socket
import logging
log = logging.getLogger('mpikat.utils.unix_socket')
| 26.928571 | 78 | 0.635279 |
0560a6e08907adcfebf943f18a20892cd59deb17 | 311 | py | Python | Exercises/python/Cod1/cod1.py | Rick222555000/Princess | 29c4c22351eeb8f2124ffe63632351fa373668e8 | [
"MIT"
] | null | null | null | Exercises/python/Cod1/cod1.py | Rick222555000/Princess | 29c4c22351eeb8f2124ffe63632351fa373668e8 | [
"MIT"
] | null | null | null | Exercises/python/Cod1/cod1.py | Rick222555000/Princess | 29c4c22351eeb8f2124ffe63632351fa373668e8 | [
"MIT"
] | null | null | null | #1- Crie um programa que ler 3 nmeros inteiros A, B, C e exibe a mensagem se o resultado R=(A+B)/C maior que B ou no.
A, B, C = int(input()), int(input()), int(input())
R = (A + B)/C
print(Maior(R, B)) | 25.916667 | 121 | 0.59164 |
0560aa251cb9f57348aa3861ec51b4ed5e27e782 | 1,021 | py | Python | mlearn/static/py/funcs.py | achandir/django-machine-learning-beta | 9604953addee0c1bea90d308b4248a69d332f5a8 | [
"BSD-3-Clause"
] | null | null | null | mlearn/static/py/funcs.py | achandir/django-machine-learning-beta | 9604953addee0c1bea90d308b4248a69d332f5a8 | [
"BSD-3-Clause"
] | null | null | null | mlearn/static/py/funcs.py | achandir/django-machine-learning-beta | 9604953addee0c1bea90d308b4248a69d332f5a8 | [
"BSD-3-Clause"
] | null | null | null | from django.core.files.storage import FileSystemStorage
from django.conf import settings
import os
| 30.029412 | 84 | 0.539667 |
05622f786bb071a97ceb1da54cab05760a5a36c8 | 624 | py | Python | classes.py | thepfanner/CrisisComABM | 919ab45ad522ec82806a6dff8ef8807a88e398d0 | [
"MIT"
] | 1 | 2017-03-31T01:48:07.000Z | 2017-03-31T01:48:07.000Z | classes.py | thepfanner/CrisisComABM | 919ab45ad522ec82806a6dff8ef8807a88e398d0 | [
"MIT"
] | null | null | null | classes.py | thepfanner/CrisisComABM | 919ab45ad522ec82806a6dff8ef8807a88e398d0 | [
"MIT"
] | null | null | null | __author__ = 'sp'
| 19.5 | 48 | 0.517628 |
0564823c9e294186f86aee5daa972c4a2f49f3f0 | 2,400 | py | Python | app.py | saty2146/flask_api_log | 760ac901b310649fe5dc98c6a8bdd0fdb5883a82 | [
"Apache-2.0"
] | null | null | null | app.py | saty2146/flask_api_log | 760ac901b310649fe5dc98c6a8bdd0fdb5883a82 | [
"Apache-2.0"
] | null | null | null | app.py | saty2146/flask_api_log | 760ac901b310649fe5dc98c6a8bdd0fdb5883a82 | [
"Apache-2.0"
] | null | null | null | #!venv/bin/python
import os, re, json
from flask import Flask, request
from flask_restful import Resource, Api
from json import dumps
from flask_jsonpify import jsonify
from flask import render_template
from boxes import *
app = Flask(__name__)
api = Api(app)
api.add_resource(Syslog, '/syslog') # Route_1
if __name__ == '__main__':
app.run(host="217.73.28.16", port=5002)
| 30.379747 | 96 | 0.617917 |
056594b9b59d36dfeef52d15b7455e3dcb8e0bf9 | 1,362 | py | Python | federateme.py | elitest/federateme.py | 887d27ddae814d7ed03fd7c993493d927d2492d5 | [
"Unlicense"
] | null | null | null | federateme.py | elitest/federateme.py | 887d27ddae814d7ed03fd7c993493d927d2492d5 | [
"Unlicense"
] | null | null | null | federateme.py | elitest/federateme.py | 887d27ddae814d7ed03fd7c993493d927d2492d5 | [
"Unlicense"
] | 1 | 2021-04-13T20:02:14.000Z | 2021-04-13T20:02:14.000Z | #!/usr/bin/env python3
import boto.utils, json, requests
if detect_ec2():
print(gen_link())
else:
print("This is not an AWS instance. Please run on an AWS EC2 instance.")
| 41.272727 | 175 | 0.642438 |
0565ccb5f3f8b36de113f3a2bcbbc32675fef341 | 58,839 | py | Python | pysnmp-with-texts/FORCE10-MONITORING-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/FORCE10-MONITORING-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/FORCE10-MONITORING-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module FORCE10-MONITORING-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FORCE10-MONITORING-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:14:24 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
f10Mgmt, = mibBuilder.importSymbols("FORCE10-SMI", "f10Mgmt")
F10VlanID, F10CycloneVersion, F10ProcessorModuleType, F10PortPipeID, F10QueueID, F10SlotID = mibBuilder.importSymbols("FORCE10-TC", "F10VlanID", "F10CycloneVersion", "F10ProcessorModuleType", "F10PortPipeID", "F10QueueID", "F10SlotID")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Gauge32, Counter32, Integer32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, Bits, IpAddress, TimeTicks, Unsigned32, MibIdentifier, ObjectIdentity, NotificationType, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Gauge32", "Counter32", "Integer32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "Bits", "IpAddress", "TimeTicks", "Unsigned32", "MibIdentifier", "ObjectIdentity", "NotificationType", "Counter64")
DisplayString, TextualConvention, MacAddress, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention", "MacAddress", "TruthValue")
f10MonitoringMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 6027, 3, 3))
f10MonitoringMib.setRevisions(('2008-12-18 12:00', '1906-01-20 00:00', '2000-11-02 10:30',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: f10MonitoringMib.setRevisionsDescriptions(('Force10 Monitoring MIB version 1.3 Added CPU Ingress Queue Unicast Statistics table. ', 'Force10 Monitoring MIB version 1.2 Added IP and ARP statistic objects that are not available in RFC1213. ', 'Force10 Monitoring MIB version 1.1',))
if mibBuilder.loadTexts: f10MonitoringMib.setLastUpdated('200812181200Z')
if mibBuilder.loadTexts: f10MonitoringMib.setOrganization('Force10 Networks, Inc.')
if mibBuilder.loadTexts: f10MonitoringMib.setContactInfo('Force10 Networks, Inc 1440 McCarthy Blvd Milpitas, CA 95035 (408) 571-3500 support@force10networks.com http://www.force10networks.com')
if mibBuilder.loadTexts: f10MonitoringMib.setDescription('Force10 Monitoring MIB provides statistics and accounting for various Force10 products. ')
f10MonGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 1))
f10MonQueue = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2))
f10MonMac = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3))
f10MonIfQueue = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4))
f10NetworkStat = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5))
f10IpStatistic = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1))
f10ArpStatistic = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2))
f10MonMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("version1", 1), ("version1dot1", 2), ("version1dot2", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MonMibVersion.setStatus('current')
if mibBuilder.loadTexts: f10MonMibVersion.setDescription(' version1(1) - initial version, define QOS Queue Statistics table. version1dot1(2) - support MAC Accounting (f10MonMac). version1dot2(3) - support Interface Queue Statistics Tables (f10MonIfQueue). ')
f10MonQueueGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 1))
f10MonMaxQueue = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MonMaxQueue.setStatus('current')
if mibBuilder.loadTexts: f10MonMaxQueue.setDescription('The maximum number of Force10 QOS queue supported by Force10 Interfaces. ')
f10InQueueStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2), )
if mibBuilder.loadTexts: f10InQueueStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: f10InQueueStatisticsTable.setDescription('The Force10 QOS Input Queue Statistics Table. This table provides Input Queue statistics for Force10 Interfaces. ')
f10InQueueStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10InQueueId"))
if mibBuilder.loadTexts: f10InQueueStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: f10InQueueStatisticsEntry.setDescription('An entry in the Force10 QOS Input Queue table. The Input Queue Statistics Table is indexed by the Interface and the Queue ID. The Interface index should be an valid ifIndex as defined in the RFC1213 MIB II Interface Table and the Queue ID should be a valid Force10 Queue ID. ')
f10InQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueId.setStatus('current')
if mibBuilder.loadTexts: f10InQueueId.setDescription('This is the second index of this table, it must be a valid Force10 QOS Queue ID. ')
f10InQueueDropPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueDropPackets.setStatus('current')
if mibBuilder.loadTexts: f10InQueueDropPackets.setDescription(' ')
f10InQueueBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueBytes.setStatus('current')
if mibBuilder.loadTexts: f10InQueueBytes.setDescription(' ')
f10InQueueMatchPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueMatchPackets.setStatus('current')
if mibBuilder.loadTexts: f10InQueueMatchPackets.setDescription(' ')
f10InQueueMatchBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueMatchBytes.setStatus('current')
if mibBuilder.loadTexts: f10InQueueMatchBytes.setDescription(' ')
f10InQueueMatchBps = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueMatchBps.setStatus('current')
if mibBuilder.loadTexts: f10InQueueMatchBps.setDescription(' ')
f10InQueueCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 7), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10InQueueCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10InQueueBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueueBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10InQueueBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) hardware only. ')
f10InQueuePktsCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 2, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10InQueuePktsCount.setStatus('current')
if mibBuilder.loadTexts: f10InQueuePktsCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) hardware only. ')
f10OutQueueStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3), )
if mibBuilder.loadTexts: f10OutQueueStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueStatisticsTable.setDescription('The Force10 QOS Output Queue Statistics Table. This table provides Output Queue statistics for Force10 Interfaces. ')
f10OutQueueStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10OutQueueId"))
if mibBuilder.loadTexts: f10OutQueueStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueStatisticsEntry.setDescription('An entry in the Output Queue table. The Output Queue Statistics Table is indexed by the Interface and the Queue ID. The Interface index should be an valid ifIndex as defined in the RFC1213 MIB II Interface Table and the the Queue ID should be a valid Force10 Queue ID. ')
f10OutQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueId.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueId.setDescription('This is the second index of this table, it must be a valid Force10 QOS Queue ID. ')
f10OutQueuePackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueuePackets.setStatus('current')
if mibBuilder.loadTexts: f10OutQueuePackets.setDescription(' ')
f10OutQueueBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueBytes.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10OutQueueBps = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueBps.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueBps.setDescription(' ')
f10OutQueueCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 5), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10OutQueueBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10OutQueueBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10OutQueueBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) hardware only. ')
# --- f10WredStatisticsTable (OID ...6027.3.3.2.4) ---
# Per-interface, per-queue WRED statistics, indexed by (ifIndex, f10WredQueueId).
# For each drop precedence (Green / Yellow / Red) the entry carries: profile
# name, low/high thresholds, drop-packet counter, and two reserved counters.
f10WredStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4), )
if mibBuilder.loadTexts: f10WredStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: f10WredStatisticsTable.setDescription('QOS WRED Statistics Table This table provides QOS WRED statistics for the Force10 Interfaces. ')
f10WredStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10WredQueueId"))
if mibBuilder.loadTexts: f10WredStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: f10WredStatisticsEntry.setDescription('An entry in the WRED Statistics table. The WRED Statistics Table is indexed by the Interface and the Queue ID. The Interface index should be an valid ifIndex as defined in the RFC1213 MIB II Interface Table and the Queue ID should be a valid Force10 Queue ID. ')
# Second index of the table (first is IF-MIB::ifIndex, declared on the row above).
f10WredQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredQueueId.setStatus('current')
if mibBuilder.loadTexts: f10WredQueueId.setDescription('This is the second index of this table, it must be a valid Force10 QOS Queue ID. ')
# Green drop-precedence columns (2-7). NOTE: the source MIB ships blank (' ')
# DESCRIPTION clauses for these columns; preserved verbatim.
f10WredGreenName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenName.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenName.setDescription(' ')
f10WredGreenThresholdLow = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenThresholdLow.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenThresholdLow.setDescription(' ')
f10WredGreenThresholdHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenThresholdHigh.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenThresholdHigh.setDescription(' ')
f10WredGreenDropPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenDropPackets.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenDropPackets.setDescription(' ')
f10WredGreenReserve1 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenReserve1.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenReserve1.setDescription(' ')
f10WredGreenReserve2 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredGreenReserve2.setStatus('current')
if mibBuilder.loadTexts: f10WredGreenReserve2.setDescription(' ')
# Yellow drop-precedence columns (8-13), same layout as Green.
f10WredYellowName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowName.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowName.setDescription(' ')
f10WredYellowThresholdLow = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowThresholdLow.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowThresholdLow.setDescription(' ')
f10WredYellowThresholdHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowThresholdHigh.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowThresholdHigh.setDescription(' ')
f10WredYellowDropPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowDropPackets.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowDropPackets.setDescription(' ')
f10WredYellowReserve1 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowReserve1.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowReserve1.setDescription(' ')
f10WredYellowReserve2 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredYellowReserve2.setStatus('current')
if mibBuilder.loadTexts: f10WredYellowReserve2.setDescription(' ')
# Red drop-precedence columns (14-19), same layout as Green.
f10WredRedName = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedName.setStatus('current')
if mibBuilder.loadTexts: f10WredRedName.setDescription(' ')
f10WredRedThresholdLow = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 15), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedThresholdLow.setStatus('current')
if mibBuilder.loadTexts: f10WredRedThresholdLow.setDescription(' ')
f10WredRedThresholdHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 16), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedThresholdHigh.setStatus('current')
if mibBuilder.loadTexts: f10WredRedThresholdHigh.setDescription(' ')
f10WredRedDropPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedDropPackets.setStatus('current')
if mibBuilder.loadTexts: f10WredRedDropPackets.setDescription(' ')
f10WredRedReserve1 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedReserve1.setStatus('current')
if mibBuilder.loadTexts: f10WredRedReserve1.setDescription(' ')
f10WredRedReserve2 = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 2, 4, 1, 19), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10WredRedReserve2.setStatus('current')
if mibBuilder.loadTexts: f10WredRedReserve2.setDescription(' ')
# --- MAC accounting subtree (OID ...6027.3.3.3) ---
f10MacGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 1))
f10MacAccounting = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2))
# f10MacAccountingDestTable: destination-MAC accounting, indexed by
# (input ifIndex, VLAN ID, destination MAC address).
f10MacAccountingDestTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1), )
if mibBuilder.loadTexts: f10MacAccountingDestTable.setStatus('current')
if mibBuilder.loadTexts: f10MacAccountingDestTable.setDescription('The MAC Accounting Destination Table. Each entry in the table provides the MAC accounting statistics from a specific Interface, VLAN ID, and the desired destination MAC Address. ')
f10MacAccountingDestEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1), ).setIndexNames((0, "FORCE10-MONITORING-MIB", "f10MacAccInIfIndex"), (0, "FORCE10-MONITORING-MIB", "f10MacAccVlan"), (0, "FORCE10-MONITORING-MIB", "f10MacAccMacAddr"))
if mibBuilder.loadTexts: f10MacAccountingDestEntry.setStatus('current')
if mibBuilder.loadTexts: f10MacAccountingDestEntry.setDescription('An entry in the MAC Accounting Destination Table. The MAC Accounting Destination table is indexed by the input Interface, VLAN ID, and the destination MAC Address. ')
# Index columns 1-3.
f10MacAccInIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccInIfIndex.setStatus('current')
if mibBuilder.loadTexts: f10MacAccInIfIndex.setDescription('The input Interface of this entry of the table. The value should be a valid ifIndex in the MIB II Interface Table. ')
f10MacAccVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 2), F10VlanID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccVlan.setStatus('current')
if mibBuilder.loadTexts: f10MacAccVlan.setDescription('The VLAN ID. ')
f10MacAccMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccMacAddr.setStatus('current')
if mibBuilder.loadTexts: f10MacAccMacAddr.setDescription("The MAC Address that identifies this entry of the table. This is the destination MAC Address of the packets that's going through the Interface identified by f10MacAccInIfIndex. ")
# Data columns 4-6: output interface and packet/byte counters.
f10MacAccOutIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccOutIfIndex.setStatus('current')
if mibBuilder.loadTexts: f10MacAccOutIfIndex.setDescription('The output Interface of this entry of the table. The value should be a valid ifIndex in the MIB II Interface Table. ')
f10MacAccPackets = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccPackets.setStatus('current')
if mibBuilder.loadTexts: f10MacAccPackets.setDescription('The number of packets going through this entry of the the table, identified by the Interface/MAC/VLAN. ')
f10MacAccBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 3, 2, 1, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10MacAccBytes.setStatus('current')
if mibBuilder.loadTexts: f10MacAccBytes.setDescription('The number of bytes traffic going through this entry of the table, identified by the Interface/MAC/VLAN. ')
# --- Per-queue monitoring subtree (OID ...6027.3.3.4) ---
f10MonIfQueueGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 1))
# f10IngQueueUnicastStatTable: ingress unicast queue statistics between
# linecards, indexed by (srcCard, destCard, srcPortPipe, destPortPipe, queueId).
f10IngQueueUnicastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2), )
if mibBuilder.loadTexts: f10IngQueueUnicastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10IngQueueUnicastStatTable.setDescription('The Ingress Queue Unicast Statistics Table. This table provides Queue statistics for Ingress Unicast packets between Force10 linecards. ')
f10IngQueueUnicastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1), ).setIndexNames((0, "FORCE10-MONITORING-MIB", "f10IngUnicastSrcCard"), (0, "FORCE10-MONITORING-MIB", "f10IngUnicastDestCard"), (0, "FORCE10-MONITORING-MIB", "f10IngUnicastSrcPortPipe"), (0, "FORCE10-MONITORING-MIB", "f10IngUnicastDestPortPipe"), (0, "FORCE10-MONITORING-MIB", "f10IngUnicastQueueId"))
if mibBuilder.loadTexts: f10IngQueueUnicastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10IngQueueUnicastStatEntry.setDescription('An entry in the Ingress Queue Unicast Statistics table. The Ingress Queue Unicast Statistics Table is indexed by the source and destination linecard/portpipe and Queue ID. ')
# Index columns 1-5. (Per the DESCRIPTIONs, column order is src card,
# dest card, src portpipe, dest portpipe, queue -- but the INDEX order
# declared on the row above is src card, dest card, src pipe, dest pipe, queue
# while the descriptions number them 1st/3rd/2nd/4th; text kept verbatim.)
f10IngUnicastSrcCard = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 1), F10SlotID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastSrcCard.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastSrcCard.setDescription('This is the source linecard number. This is the first index of this table entry. ')
f10IngUnicastDestCard = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 2), F10SlotID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastDestCard.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastDestCard.setDescription('This is the destination linecard number. This is the 3rd index of this table entry. ')
f10IngUnicastSrcPortPipe = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 3), F10PortPipeID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastSrcPortPipe.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastSrcPortPipe.setDescription('This is the Force10 Cyclone PortPipe number of the source linecard. This is the 2nd index of this table entry. ')
f10IngUnicastDestPortPipe = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 4), F10PortPipeID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastDestPortPipe.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastDestPortPipe.setDescription('This is the Force10 Cyclone PortPipe number of the destination linecard. This is the 4th index of this table entry. ')
f10IngUnicastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 5), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastQueueId.setDescription('This is the Queue ID of this entry. This is the 5th index of this table entry. ')
# Data columns 6-16: hardware version, byte/packet counters (availability
# varies by Cyclone hardware version per the DESCRIPTIONs), and WRED
# thresholds/drop counters per drop precedence.
f10IngUnicastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 6), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10IngUnicastBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastBytes.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10IngUnicastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10IngUnicastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10IngUnicastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10IngUnicastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10IngUnicastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastGreenDrop.setDescription('The number of Green packets being dropped in this queue. ')
f10IngUnicastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10IngUnicastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 14), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10IngUnicastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue. ')
f10IngUnicastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 2, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngUnicastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngUnicastRedDrop.setDescription('The number of Red packets being dropped in this queue. ')
# --- f10IngQueueMulticastStatTable (OID ...6027.3.3.4.3) ---
# Ingress multicast queue statistics, indexed by (srcCard, srcPortPipe, queueId).
# Same column layout as the unicast table minus the destination indices.
f10IngQueueMulticastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3), )
if mibBuilder.loadTexts: f10IngQueueMulticastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10IngQueueMulticastStatTable.setDescription('The Ingress Queue Multicast Statistics Table. This table provides Queue statistics for Ingress Multicast packets at Force10 linecards. ')
f10IngQueueMulticastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1), ).setIndexNames((0, "FORCE10-MONITORING-MIB", "f10IngMulticastSrcCard"), (0, "FORCE10-MONITORING-MIB", "f10IngMulticastSrcPortPipe"), (0, "FORCE10-MONITORING-MIB", "f10IngMulticastQueueId"))
if mibBuilder.loadTexts: f10IngQueueMulticastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10IngQueueMulticastStatEntry.setDescription('An entry in the Ingress Queue Multicast Statistics table. The Ingress Queue Multicast Statistics Table is indexed by the source linecard/portpipe and Queue ID. ')
# Index columns 1-3.
f10IngMulticastSrcCard = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 1), F10SlotID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastSrcCard.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastSrcCard.setDescription('This is the source linecard number. This is the first index of this table entry. ')
f10IngMulticastSrcPortPipe = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 2), F10PortPipeID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastSrcPortPipe.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastSrcPortPipe.setDescription('This is the Force10 Cyclone PortPipe number of the source linecard. This is the 2nd index of this table entry. ')
f10IngMulticastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 3), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastQueueId.setDescription('This is the Queue ID of this entry. This is the 3rd index of this table entry. ')
# Data columns 4-14: hardware version, counters, WRED thresholds/drops.
f10IngMulticastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 4), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10IngMulticastBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastBytes.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10IngMulticastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10IngMulticastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10IngMulticastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10IngMulticastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10IngMulticastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastGreenDrop.setDescription('The number of Green packets being dropped in this queue. ')
f10IngMulticastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10IngMulticastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10IngMulticastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue. ')
f10IngMulticastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 3, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10IngMulticastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10IngMulticastRedDrop.setDescription('The number of Red packets being dropped in this queue. ')
# --- f10EgQueueUnicastStatTable (OID ...6027.3.3.4.4) ---
# Egress unicast queue statistics per interface, indexed by (ifIndex, queueId).
f10EgQueueUnicastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4), )
if mibBuilder.loadTexts: f10EgQueueUnicastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10EgQueueUnicastStatTable.setDescription('The Egress Queue Unicast Statistics Table. This table provides Queue statistics for Egress Unicast packets at Force10 Interface. ')
f10EgQueueUnicastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10EgUnicastQueueId"))
if mibBuilder.loadTexts: f10EgQueueUnicastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10EgQueueUnicastStatEntry.setDescription('An entry in the Egress Queue Unicast Statistics table. The Egress Queue Unicast Statistics Table is indexed by the ifIndex and Queue ID. The IfIndex should be an valid Interface Index as defined in the RFC1213 MIB II Interface Table. ')
# Second index (first is IF-MIB::ifIndex, declared on the row above).
f10EgUnicastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastQueueId.setDescription('This is the Queue ID of this entry. This is the 2nd index of this table entry. ')
# Data columns 2-12: hardware version, counters, WRED thresholds/drops.
f10EgUnicastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 2), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10EgUnicastBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastBytes.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10EgUnicastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10EgUnicastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10EgUnicastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10EgUnicastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10EgUnicastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastGreenDrop.setDescription('The number of Green packets being dropped in this queue. ')
f10EgUnicastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10EgUnicastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10EgUnicastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue. ')
f10EgUnicastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 4, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgUnicastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgUnicastRedDrop.setDescription('The number of Red packets being dropped in this queue. ')
# --- f10EgQueueMulticastStatTable (OID ...6027.3.3.4.5) ---
# Egress multicast queue statistics per interface, indexed by (ifIndex,
# queueId); mirrors the egress unicast table's column layout.
f10EgQueueMulticastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5), )
if mibBuilder.loadTexts: f10EgQueueMulticastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10EgQueueMulticastStatTable.setDescription('The Egress Queue Multicast Statistics Table. This table provides Queue statistics for Egress Multicast packets at Force10 Interface. ')
f10EgQueueMulticastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "FORCE10-MONITORING-MIB", "f10EgMulticastQueueId"))
if mibBuilder.loadTexts: f10EgQueueMulticastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10EgQueueMulticastStatEntry.setDescription('An entry in the Egress Queue Multicast Statistics table. The Egress Queue Multicast Statistics Table is indexed by the ifIndex and Queue ID. The IfIndex should be an valid Interface Index as defined in the RFC1213 MIB II Interface Table. ')
# Second index (first is IF-MIB::ifIndex, declared on the row above).
f10EgMulticastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 1), F10QueueID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastQueueId.setDescription('This is the Queue ID of this entry. This is the 2nd index of this table entry. ')
# Data columns 2-7: hardware version, counters, Green WRED thresholds.
f10EgMulticastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 2), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastCycloneVersion.setDescription('The linecard Cyclone hardware version. ')
f10EgMulticastBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastBytes.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastBytes.setDescription('The number of bytes in the queue. This object is available on Cyclone version 1.5 (CjTj) hardware only. ')
f10EgMulticastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10EgMulticastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 2.0 (C2T2) and Cyclone version 3.0 (X3) hardwares only. ')
f10EgMulticastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10EgMulticastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occurs. ')
# f10EgMulticastGreenDrop (column 8): counter of Green packets dropped in this
# queue. Fix: the original description was a copy-paste of the GreenMax
# "max threshold ... tail drops" text; every sibling *GreenDrop object in this
# module (f10IngUnicastGreenDrop, f10IngMulticastGreenDrop,
# f10EgUnicastGreenDrop) describes the drop counter, so this one now matches.
f10EgMulticastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastGreenDrop.setDescription('The number of Green packets being dropped in this queue. ')
# Remaining f10EgQueueMulticastStatEntry columns (9-12): Yellow WRED
# thresholds and Yellow/Red drop counters.
f10EgMulticastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability. ')
f10EgMulticastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occurs. ')
f10EgMulticastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue. ')
f10EgMulticastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 5, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10EgMulticastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10EgMulticastRedDrop.setDescription('The number of Red packets being dropped in this queue. ')
# --- f10CpuIngQueueUnicastStatTable (OID ...6027.3.3.4.6) ---
# Ingress unicast queue statistics for CPU-bound traffic, indexed by
# (srcCard, srcPortPipe, destCpu, queueId). Further columns of this entry
# continue beyond this section of the file.
f10CpuIngQueueUnicastStatTable = MibTable((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6), )
if mibBuilder.loadTexts: f10CpuIngQueueUnicastStatTable.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngQueueUnicastStatTable.setDescription('The CPU Ingress Queue Unicast Statistics Table. This table provides Queue statistics for Ingress Unicast packets destined for CPU.')
f10CpuIngQueueUnicastStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1), ).setIndexNames((0, "FORCE10-MONITORING-MIB", "f10CpuIngUnicastSrcCard"), (0, "FORCE10-MONITORING-MIB", "f10CpuIngUnicastSrcPortPipe"), (0, "FORCE10-MONITORING-MIB", "f10CpuIngUnicastDestCpu"), (0, "FORCE10-MONITORING-MIB", "f10CpuIngUnicastQueueId"))
if mibBuilder.loadTexts: f10CpuIngQueueUnicastStatEntry.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngQueueUnicastStatEntry.setDescription('An entry in the CPU Ingress Queue Unicast Statistics Table. The CPU Ingress Queue Unicast Statistics Table is indexed by the source linecard/portpipe, cpu port and Queue ID.')
# Index columns 1-4.
# NOTE(review): unlike every other index column in this file, these four omit
# .setMaxAccess("readonly") -- presumably not-accessible in the source MIB,
# but confirm against FORCE10-MONITORING-MIB before changing.
f10CpuIngUnicastSrcCard = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 1), F10SlotID())
if mibBuilder.loadTexts: f10CpuIngUnicastSrcCard.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastSrcCard.setDescription('This is the source linecard number. This is the first index of this table entry.')
f10CpuIngUnicastSrcPortPipe = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 2), F10PortPipeID())
if mibBuilder.loadTexts: f10CpuIngUnicastSrcPortPipe.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastSrcPortPipe.setDescription('This is the Force10 Cyclone PortPipe number of the source linecard.This is the 2nd index of this table entry.')
f10CpuIngUnicastDestCpu = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 3), F10ProcessorModuleType())
if mibBuilder.loadTexts: f10CpuIngUnicastDestCpu.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastDestCpu.setDescription('This is the destination CPU port of this entry. This is the 3rd index of this table entry.')
f10CpuIngUnicastQueueId = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 4), F10QueueID())
if mibBuilder.loadTexts: f10CpuIngUnicastQueueId.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastQueueId.setDescription('This is the Queue ID of this entry. This is the 4th index of this table entry.')
# Data columns 5-6 (status/description lines for column 6 follow after this section).
f10CpuIngUnicastCycloneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 5), F10CycloneVersion()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastCycloneVersion.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastCycloneVersion.setDescription('The linecard Cyclone hardware version.')
f10CpuIngUnicastBytesCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastBytesCount.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastBytesCount.setDescription('The cumulative number of bytes data passing through this queue. This object is available on Cyclone version 3.0 (X3) hardware only.')
f10CpuIngUnicastPacketCount = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastPacketCount.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastPacketCount.setDescription('The cumulative number of packets passing through this queue. This object is available on Cyclone version 3.0 (X3) hardware only.')
f10CpuIngUnicastGreenMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastGreenMin.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastGreenMin.setDescription('The min threshold for Green packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability.')
f10CpuIngUnicastGreenMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastGreenMax.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastGreenMax.setDescription('The max threshold for Green packets. The max threshold identifies the queue size level at which tail drops occurs.')
f10CpuIngUnicastGreenDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastGreenDrop.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastGreenDrop.setDescription('The number of Green packets being dropped in this queue.')
f10CpuIngUnicastYellowMin = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastYellowMin.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastYellowMin.setDescription('The min threshold for Yellow packets. The min threshold identifies the queue size percentage at which the WRED dropping starts to be applied with a given configured probability.')
f10CpuIngUnicastYellowMax = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastYellowMax.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastYellowMax.setDescription('The max threshold for Yellow packets. The max threshold identifies the queue size level at which tail drops occurs.')
f10CpuIngUnicastYellowDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastYellowDrop.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastYellowDrop.setDescription('The number of Yellow packets being dropped in this queue.')
f10CpuIngUnicastRedDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 6027, 3, 3, 4, 6, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10CpuIngUnicastRedDrop.setStatus('current')
if mibBuilder.loadTexts: f10CpuIngUnicastRedDrop.setDescription('The number of Red packets being dropped in this queue.')
f10BcastPktRecv = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10BcastPktRecv.setStatus('current')
if mibBuilder.loadTexts: f10BcastPktRecv.setDescription('The total broadcast packet received. ')
f10BcastPktSent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10BcastPktSent.setStatus('current')
if mibBuilder.loadTexts: f10BcastPktSent.setDescription('The total broadcast packet sent. ')
f10McastPktRecv = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10McastPktRecv.setStatus('current')
if mibBuilder.loadTexts: f10McastPktRecv.setDescription('The total multicast packet received. ')
f10McastPktSent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 1, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10McastPktSent.setStatus('current')
if mibBuilder.loadTexts: f10McastPktSent.setDescription('The total multicast packet sent. ')
f10ArpReqRecv = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpReqRecv.setStatus('current')
if mibBuilder.loadTexts: f10ArpReqRecv.setDescription('The total ARP request received. ')
f10ArpReqSent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpReqSent.setStatus('current')
if mibBuilder.loadTexts: f10ArpReqSent.setDescription('The total ARP request sent. ')
f10ArpReplyRecv = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpReplyRecv.setStatus('current')
if mibBuilder.loadTexts: f10ArpReplyRecv.setDescription('The total ARP reply received. ')
f10ArpReplySent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpReplySent.setStatus('current')
if mibBuilder.loadTexts: f10ArpReplySent.setDescription('The total ARP reply sent. ')
f10ArpProxySent = MibScalar((1, 3, 6, 1, 4, 1, 6027, 3, 3, 5, 2, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: f10ArpProxySent.setStatus('current')
if mibBuilder.loadTexts: f10ArpProxySent.setDescription('The total ARP proxy sent. ')
mibBuilder.exportSymbols("FORCE10-MONITORING-MIB", f10CpuIngUnicastYellowMin=f10CpuIngUnicastYellowMin, f10EgMulticastBytesCount=f10EgMulticastBytesCount, f10OutQueueStatisticsTable=f10OutQueueStatisticsTable, f10MacAccBytes=f10MacAccBytes, f10InQueueCycloneVersion=f10InQueueCycloneVersion, f10IngUnicastQueueId=f10IngUnicastQueueId, f10MacAccPackets=f10MacAccPackets, f10CpuIngUnicastSrcCard=f10CpuIngUnicastSrcCard, f10WredYellowThresholdLow=f10WredYellowThresholdLow, f10WredStatisticsEntry=f10WredStatisticsEntry, f10CpuIngUnicastPacketCount=f10CpuIngUnicastPacketCount, f10WredRedName=f10WredRedName, f10IngMulticastCycloneVersion=f10IngMulticastCycloneVersion, f10EgMulticastRedDrop=f10EgMulticastRedDrop, f10EgQueueMulticastStatEntry=f10EgQueueMulticastStatEntry, f10WredGreenThresholdLow=f10WredGreenThresholdLow, f10EgMulticastCycloneVersion=f10EgMulticastCycloneVersion, f10InQueueMatchBytes=f10InQueueMatchBytes, f10EgMulticastYellowMax=f10EgMulticastYellowMax, f10MonQueueGroup=f10MonQueueGroup, f10OutQueueBytes=f10OutQueueBytes, f10EgUnicastBytes=f10EgUnicastBytes, f10IngUnicastSrcCard=f10IngUnicastSrcCard, f10ArpStatistic=f10ArpStatistic, f10InQueueBytesCount=f10InQueueBytesCount, f10EgUnicastCycloneVersion=f10EgUnicastCycloneVersion, f10EgMulticastPacketCount=f10EgMulticastPacketCount, f10OutQueueStatisticsEntry=f10OutQueueStatisticsEntry, f10CpuIngUnicastGreenDrop=f10CpuIngUnicastGreenDrop, f10IngMulticastYellowMax=f10IngMulticastYellowMax, f10IngQueueMulticastStatTable=f10IngQueueMulticastStatTable, f10BcastPktRecv=f10BcastPktRecv, f10CpuIngQueueUnicastStatEntry=f10CpuIngQueueUnicastStatEntry, f10EgUnicastGreenMax=f10EgUnicastGreenMax, f10IngMulticastYellowMin=f10IngMulticastYellowMin, f10InQueueMatchBps=f10InQueueMatchBps, f10InQueueStatisticsEntry=f10InQueueStatisticsEntry, f10CpuIngUnicastGreenMax=f10CpuIngUnicastGreenMax, f10WredRedReserve1=f10WredRedReserve1, f10WredRedReserve2=f10WredRedReserve2, f10IngMulticastQueueId=f10IngMulticastQueueId, 
f10EgMulticastYellowDrop=f10EgMulticastYellowDrop, f10InQueueDropPackets=f10InQueueDropPackets, f10OutQueuePackets=f10OutQueuePackets, f10IngUnicastYellowMax=f10IngUnicastYellowMax, f10EgQueueMulticastStatTable=f10EgQueueMulticastStatTable, f10CpuIngUnicastSrcPortPipe=f10CpuIngUnicastSrcPortPipe, f10CpuIngUnicastBytesCount=f10CpuIngUnicastBytesCount, f10InQueuePktsCount=f10InQueuePktsCount, f10IngMulticastSrcPortPipe=f10IngMulticastSrcPortPipe, f10EgUnicastPacketCount=f10EgUnicastPacketCount, f10IngMulticastGreenMin=f10IngMulticastGreenMin, f10EgQueueUnicastStatEntry=f10EgQueueUnicastStatEntry, f10CpuIngUnicastQueueId=f10CpuIngUnicastQueueId, f10MonQueue=f10MonQueue, f10EgMulticastGreenDrop=f10EgMulticastGreenDrop, f10IngUnicastGreenMin=f10IngUnicastGreenMin, f10IngQueueUnicastStatEntry=f10IngQueueUnicastStatEntry, f10IngMulticastBytesCount=f10IngMulticastBytesCount, f10OutQueueBps=f10OutQueueBps, f10IngMulticastSrcCard=f10IngMulticastSrcCard, f10WredYellowName=f10WredYellowName, f10MonMac=f10MonMac, f10WredYellowReserve1=f10WredYellowReserve1, f10InQueueBytes=f10InQueueBytes, f10MonMibVersion=f10MonMibVersion, f10ArpProxySent=f10ArpProxySent, f10ArpReplySent=f10ArpReplySent, f10MacAccOutIfIndex=f10MacAccOutIfIndex, f10BcastPktSent=f10BcastPktSent, f10IngUnicastCycloneVersion=f10IngUnicastCycloneVersion, f10EgUnicastRedDrop=f10EgUnicastRedDrop, f10InQueueStatisticsTable=f10InQueueStatisticsTable, f10WredStatisticsTable=f10WredStatisticsTable, f10OutQueueBytesCount=f10OutQueueBytesCount, f10IngUnicastBytes=f10IngUnicastBytes, f10CpuIngQueueUnicastStatTable=f10CpuIngQueueUnicastStatTable, f10CpuIngUnicastRedDrop=f10CpuIngUnicastRedDrop, f10IngUnicastYellowMin=f10IngUnicastYellowMin, f10InQueueId=f10InQueueId, f10MacAccounting=f10MacAccounting, f10MonIfQueueGroup=f10MonIfQueueGroup, f10ArpReqRecv=f10ArpReqRecv, f10IngMulticastPacketCount=f10IngMulticastPacketCount, f10IngUnicastGreenMax=f10IngUnicastGreenMax, f10IngMulticastYellowDrop=f10IngMulticastYellowDrop, 
PYSNMP_MODULE_ID=f10MonitoringMib, f10IngMulticastBytes=f10IngMulticastBytes, f10MonMaxQueue=f10MonMaxQueue, f10CpuIngUnicastDestCpu=f10CpuIngUnicastDestCpu, f10WredGreenName=f10WredGreenName, f10CpuIngUnicastYellowDrop=f10CpuIngUnicastYellowDrop, f10CpuIngUnicastGreenMin=f10CpuIngUnicastGreenMin, f10EgMulticastYellowMin=f10EgMulticastYellowMin, f10MonIfQueue=f10MonIfQueue, f10WredRedThresholdHigh=f10WredRedThresholdHigh, f10IngUnicastGreenDrop=f10IngUnicastGreenDrop, f10EgUnicastYellowMax=f10EgUnicastYellowMax, f10EgQueueUnicastStatTable=f10EgQueueUnicastStatTable, f10MacAccountingDestEntry=f10MacAccountingDestEntry, f10WredGreenDropPackets=f10WredGreenDropPackets, f10CpuIngUnicastYellowMax=f10CpuIngUnicastYellowMax, f10WredYellowReserve2=f10WredYellowReserve2, f10EgUnicastYellowDrop=f10EgUnicastYellowDrop, f10MacAccMacAddr=f10MacAccMacAddr, f10MacAccInIfIndex=f10MacAccInIfIndex, f10IpStatistic=f10IpStatistic, f10WredGreenThresholdHigh=f10WredGreenThresholdHigh, f10IngUnicastSrcPortPipe=f10IngUnicastSrcPortPipe, f10McastPktSent=f10McastPktSent, f10EgMulticastGreenMin=f10EgMulticastGreenMin, f10MonitoringMib=f10MonitoringMib, f10MonGroup=f10MonGroup, f10IngUnicastDestCard=f10IngUnicastDestCard, f10IngUnicastDestPortPipe=f10IngUnicastDestPortPipe, f10IngMulticastRedDrop=f10IngMulticastRedDrop, f10EgUnicastYellowMin=f10EgUnicastYellowMin, f10MacGroup=f10MacGroup, f10IngMulticastGreenDrop=f10IngMulticastGreenDrop, f10WredYellowDropPackets=f10WredYellowDropPackets, f10IngUnicastRedDrop=f10IngUnicastRedDrop, f10NetworkStat=f10NetworkStat, f10EgMulticastGreenMax=f10EgMulticastGreenMax, f10EgMulticastBytes=f10EgMulticastBytes, f10WredGreenReserve1=f10WredGreenReserve1, f10IngUnicastYellowDrop=f10IngUnicastYellowDrop, f10ArpReqSent=f10ArpReqSent, f10IngQueueUnicastStatTable=f10IngQueueUnicastStatTable, f10ArpReplyRecv=f10ArpReplyRecv, f10EgMulticastQueueId=f10EgMulticastQueueId, f10WredQueueId=f10WredQueueId, f10IngUnicastBytesCount=f10IngUnicastBytesCount, 
f10CpuIngUnicastCycloneVersion=f10CpuIngUnicastCycloneVersion, f10WredYellowThresholdHigh=f10WredYellowThresholdHigh, f10McastPktRecv=f10McastPktRecv, f10EgUnicastGreenMin=f10EgUnicastGreenMin, f10OutQueueId=f10OutQueueId, f10IngQueueMulticastStatEntry=f10IngQueueMulticastStatEntry, f10WredGreenReserve2=f10WredGreenReserve2, f10EgUnicastGreenDrop=f10EgUnicastGreenDrop, f10IngMulticastGreenMax=f10IngMulticastGreenMax, f10InQueueMatchPackets=f10InQueueMatchPackets, f10EgUnicastQueueId=f10EgUnicastQueueId, f10OutQueueCycloneVersion=f10OutQueueCycloneVersion, f10WredRedDropPackets=f10WredRedDropPackets, f10MacAccVlan=f10MacAccVlan, f10MacAccountingDestTable=f10MacAccountingDestTable, f10WredRedThresholdLow=f10WredRedThresholdLow, f10EgUnicastBytesCount=f10EgUnicastBytesCount, f10IngUnicastPacketCount=f10IngUnicastPacketCount)
| 131.044543 | 6,796 | 0.791261 |
056746e5dbf852638494e8c736e9cb3208ccd43b | 1,964 | py | Python | recycler.py | LAION-AI/crawlingathome | 43a477777fb403046d67224747cde1dac9f2094a | [
"MIT"
] | 11 | 2021-06-02T03:46:52.000Z | 2021-09-11T22:19:12.000Z | recycler.py | LAION-AI/crawlingathome | 43a477777fb403046d67224747cde1dac9f2094a | [
"MIT"
] | 9 | 2021-06-14T07:46:20.000Z | 2021-08-28T22:50:46.000Z | recycler.py | LAION-AI/crawlingathome | 43a477777fb403046d67224747cde1dac9f2094a | [
"MIT"
] | 7 | 2021-06-01T11:59:36.000Z | 2022-03-20T13:44:18.000Z | import numpy as np
from requests import session
from .core import CPUClient, GPUClient, HybridClient
from .temp import TempCPUWorker
from .errors import *
# Dump a client's attributes into a dictionary so that it can be used remotely.
# Load an existing client using its attributes. It's best to load using an existing dumpClient(): `loadClient(**dump)`
| 35.709091 | 118 | 0.614562 |
0567803d049b2b08966e5134ef97c6b64fdfc130 | 1,921 | py | Python | config.py | uncharted-distil/distil-auto-ml | 244661942cff11617c81830d7f58a9f9b5c9499d | [
"Apache-2.0"
] | 2 | 2019-06-20T23:32:10.000Z | 2021-01-24T22:32:07.000Z | config.py | uncharted-distil/distil-auto-ml | 244661942cff11617c81830d7f58a9f9b5c9499d | [
"Apache-2.0"
] | 157 | 2019-04-09T18:40:42.000Z | 2021-05-06T13:44:33.000Z | config.py | uncharted-distil/distil-auto-ml | 244661942cff11617c81830d7f58a9f9b5c9499d | [
"Apache-2.0"
] | 1 | 2019-07-12T22:17:46.000Z | 2019-07-12T22:17:46.000Z | import os
DB_LOCATION = os.getenv("DB_URI", "test.db")
# Debug flag to output more verbose logging
# - defaults to False
DEBUG = os.getenv("DEBUG", False)
# Configurable output directory for saving machine learning model pickles
# - defaults to ../output
OUTPUT_DIR = os.getenv("OUTPUT_DIR", "output")
# Port to make worker service available on
PORT = os.getenv("PORT", "45042")
# Configurable filename for output logs
LOG_FILENAME = os.getenv("LOG_FILENAME", "distil-auto-ml.log")
# User agent to supply to TA3 Systems
SERVER_USER_AGENT = "qntfy_ta2"
# Primitives static file directory
D3MSTATICDIR = os.getenv("D3MSTATICDIR", "/static")
# Enable GPU pipelines - "auto" will try to detect, "true" and "false" will force
GPU = os.getenv("GPU", "auto")
# Batch size to apply to primitives where feasible
REMOTE_SENSING_BATCH_SIZE = int(os.getenv("REMOTE_SENSING_BATCH_SIZE", 128))
# Solution serach progress update message interval in seconds
PROGRESS_INTERVAL = float(os.getenv("PROGRESS_INTERVAL", 10.0))
# maximum number of augment columns to support
AUG_MAX_COLS = int(os.getenv("AUG_MAX_COLS", 50))
# maximum number of augment rows to support
AUG_MAX_ROWS = int(os.getenv("AUG_MAX_ROWS", 50000))
# maximum amount of time for hyperparam tuning in seconds
TIME_LIMIT = int(os.getenv("TIME_LIMIT", 600))
# use untuned/internally tuned pipelines (faster) or external tuning (better results)
HYPERPARAMETER_TUNING = os.getenv("HYPERPARAMETER_TUNING", "True") == "True"
# controls parallelism within primitives - defaults to the number of CPUs
N_JOBS = int(os.getenv("N_JOBS", -1))
# enable use of mlp classifier + gradcam visualization
MLP_CLASSIFIER = os.getenv("MLP_CLASSIFIER", "False") == "True"
# whether or not received features for remote sensing are pooled or not
IS_POOLED = os.getenv("POOL_FEATURES", "True") == "True"
COMPUTE_CONFIDENCES = os.getenv("COMPUTE_CONFIDENCES", "False") == "False"
| 34.303571 | 85 | 0.753774 |
0567c00611e59a9c33c0140344f11e8c956bd4aa | 278 | py | Python | python/testData/completion/slots.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/completion/slots.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/completion/slots.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z |
a = A()
a.ba<caret>
class B(object):
__slots__ = ['bar']
class C(B):
pass
C().ba<caret>
class D(object):
pass
class E(D):
__slots__ = ['bar']
E().ba<caret>
class F:
__slots__ = ['baz']
F().ba<caret> | 9.586207 | 30 | 0.535971 |
056887fff4c016e1bd810fe62a7c889a8d65cc5e | 1,952 | py | Python | aircraft_framework_win/framework_PhD/framework/Stability/Dynamic/state_vector.py | AlejandroRios/IAANDOCAC-aircraft-framework | 9768e9736af70e20e8ef1cc0ad6501f3a28dbb47 | [
"Apache-2.0"
] | null | null | null | aircraft_framework_win/framework_PhD/framework/Stability/Dynamic/state_vector.py | AlejandroRios/IAANDOCAC-aircraft-framework | 9768e9736af70e20e8ef1cc0ad6501f3a28dbb47 | [
"Apache-2.0"
] | null | null | null | aircraft_framework_win/framework_PhD/framework/Stability/Dynamic/state_vector.py | AlejandroRios/IAANDOCAC-aircraft-framework | 9768e9736af70e20e8ef1cc0ad6501f3a28dbb47 | [
"Apache-2.0"
] | null | null | null | """
Function :
Title :
Written by:
Email : aarc.88@gmail.com
Date :
Last edit :
Language : Python 3.8 or >
Aeronautical Institute of Technology - Airbus Brazil
Description:
-
Inputs:
-
Outputs:
-
TODO's:
-
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
# =============================================================================
# CLASSES
# =============================================================================
# =============================================================================
# FUNCTIONS
# =============================================================================
# =============================================================================
# MAIN
# =============================================================================
# =============================================================================
# TEST
# =============================================================================
# x = [68.0588,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0,
# 0]
# trim_par = {}
# trim_par = {'V':68.0588,
# 'H_m':10000,
# 'chi_deg':0,
# 'gamma_deg':0,
# 'phi_dot_deg_s':0,
# 'theta_dot_deg':0,
# 'psi_dot_deg_s':0,
# 'beta_deg_eq':0,
# 'W':[0, 0, 0]}
# X = state_vector(x, trim_par)
# print(X)
| 22.436782 | 79 | 0.236168 |
056958a4ad13bb68213d3b4a27aff485fb258a2f | 317 | py | Python | 10-Days-of-Statistics/Python/day-0_weighted_mean.py | joaopalmeiro/hackerrank | 271b87645710e5ed56cbfd8c4209f3a7436e3f72 | [
"MIT"
] | null | null | null | 10-Days-of-Statistics/Python/day-0_weighted_mean.py | joaopalmeiro/hackerrank | 271b87645710e5ed56cbfd8c4209f3a7436e3f72 | [
"MIT"
] | null | null | null | 10-Days-of-Statistics/Python/day-0_weighted_mean.py | joaopalmeiro/hackerrank | 271b87645710e5ed56cbfd8c4209f3a7436e3f72 | [
"MIT"
] | null | null | null | N = int(input())
X = list(map(int, input().split()))
W = list(map(int, input().split()))
print(weighted_mean(X, W))
| 16.684211 | 50 | 0.630915 |
0569e6f550e0e8fb6bd11e2714deff2f7f71997f | 2,274 | py | Python | common/settings.py | hehanlin/jobbole | 46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9 | [
"BSD-3-Clause"
] | 2 | 2018-01-18T09:16:16.000Z | 2022-02-12T08:59:23.000Z | common/settings.py | hehanlin/jobbole | 46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9 | [
"BSD-3-Clause"
] | null | null | null | common/settings.py | hehanlin/jobbole | 46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import os
os_env = os.environ
# logging
LoggingConfig = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"simple": {
"format": "%(asctime)s- %(module)s:%(lineno)d [%(levelname)1.1s] %(name)s: %(message)s",
'datefmt': '%Y/%m/%d %H:%M:%S'
}
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"level": "DEBUG",
"formatter": "simple",
"stream": "ext://sys.stdout"
},
"info_file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "INFO",
"formatter": "simple",
"filename": Config.PROJECT_ROOT + '/jobbole_info.log',
"maxBytes": 10485760,
"backupCount": 20,
"encoding": "utf8"
},
"error_file_handler": {
"class": "logging.handlers.RotatingFileHandler",
"level": "ERROR",
"formatter": "simple",
"filename": Config.PROJECT_ROOT + '/jobbole_error.log',
"maxBytes": 10485760,
"backupCount": 20,
"encoding": "utf8"
}
},
"loggers": {
"my_module": {
"level": "ERROR",
"handlers": ["info_file_handler"],
"propagate": False
}
},
"root": {
"level": "INFO",
"handlers": ["console", "info_file_handler", "error_file_handler"]
}
}
| 30.72973 | 100 | 0.554969 |
056bdc49927b577c2ca6f33c088621f5b1d3d179 | 8,834 | py | Python | interface.py | singularitai/Morphling | e7a3af969123c0d3c0f3c6f1036a97e9be0b289c | [
"MIT",
"Condor-1.1",
"Unlicense"
] | 9 | 2021-03-22T09:18:58.000Z | 2022-03-02T01:42:11.000Z | interface.py | singularitai/Morphling | e7a3af969123c0d3c0f3c6f1036a97e9be0b289c | [
"MIT",
"Condor-1.1",
"Unlicense"
] | null | null | null | interface.py | singularitai/Morphling | e7a3af969123c0d3c0f3c6f1036a97e9be0b289c | [
"MIT",
"Condor-1.1",
"Unlicense"
] | 2 | 2022-03-29T07:59:12.000Z | 2022-03-31T09:10:47.000Z | import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Mock.ui'
#
# Created by: PyQt5 UI code generator 5.12.3
#
# WARNING! All changes made in this file will be lost!
import sys
import subprocess
import application_backend as ab
from PyQt5 import QtCore, QtGui, QtWidgets
if __name__ == "__main__":
Ui_MainWindow() | 43.732673 | 130 | 0.677043 |
056ef751fabceeae1db74a620559c093e5b86dfa | 10,935 | py | Python | load-testing/locustfile.py | MaksimAniskov/aws-global-odoo | 0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f | [
"MIT"
] | null | null | null | load-testing/locustfile.py | MaksimAniskov/aws-global-odoo | 0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f | [
"MIT"
] | 1 | 2022-01-26T08:58:34.000Z | 2022-01-26T08:58:34.000Z | load-testing/locustfile.py | MaksimAniskov/aws-global-odoo | 0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f | [
"MIT"
] | null | null | null | from locust import HttpUser, task, between
import re
import random
import json
import os
if __name__ == "__main__":
from locust.env import Environment
my_env = Environment(user_classes=[OdooUserCrmKanban])
OdooUserCrmKanban(my_env).run()
| 34.714286 | 102 | 0.438317 |
05702fee1b4a5bd092fcebf23643ddbeb574cdf2 | 939 | py | Python | code/model/testSpeedPolar.py | PBarde/IBoatPIE | dd8038f981940b732be979b49e9b14102c3d4cca | [
"MIT"
] | 1 | 2018-02-22T15:38:01.000Z | 2018-02-22T15:38:01.000Z | code/model/testSpeedPolar.py | PBarde/IBoatPIE | dd8038f981940b732be979b49e9b14102c3d4cca | [
"MIT"
] | null | null | null | code/model/testSpeedPolar.py | PBarde/IBoatPIE | dd8038f981940b732be979b49e9b14102c3d4cca | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 13 18:03:27 2017
@author: paul
"""
from SimulatorTLKT import Boat
from SimulatorTLKT import FIT_VELOCITY
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from math import pi
matplotlib.rcParams.update({'font.size': 22})
pOfS=np.arange(0,360,0.5)
wMags=np.arange(0,25,2)
polars=[]
legends=[]
fig=plt.figure()
for mag in wMags:
pol=[]
legends.append('Wind mag = '+str(mag) + ' m/s')
for p in pOfS :
pol.append(Boat.getDeterDyn(p,mag,FIT_VELOCITY))
polars.append(list(pol))
ax=plt.polar(pOfS*pi/180,pol,label=str(mag) + ' m/s')
#plt.legend(legends)
plt.legend(bbox_to_anchor=(1.1,1), loc=2, borderaxespad=0.)
#plt.xlabel('Polar plot of Boat velocity [m/s] wrt. point of sail [deg]',fontsize=22)
#ax.xaxis.set_label_position('top')
fig.savefig('../../../Article/Figures/polar_modified2.pdf', bbox_inches='tight')
| 25.378378 | 85 | 0.690096 |
05705dae303e8a7ae7b9765283158fc78c1a5987 | 3,387 | py | Python | src/mcxlib/usage_examples.py | carlashley/meecxprofile | 1fe776b3f23dd9b224d87dd155cc1681cf13fb5e | [
"Apache-2.0"
] | 2 | 2021-09-10T12:52:43.000Z | 2021-09-10T15:38:29.000Z | src/mcxlib/usage_examples.py | carlashley/meecxprofile | 1fe776b3f23dd9b224d87dd155cc1681cf13fb5e | [
"Apache-2.0"
] | null | null | null | src/mcxlib/usage_examples.py | carlashley/meecxprofile | 1fe776b3f23dd9b224d87dd155cc1681cf13fb5e | [
"Apache-2.0"
] | null | null | null | from pprint import pformat
ds_obj_mcx_note = ('The MCX data returned back from \'dscl\' is a string nested in the attribute queried.\n'
'Settings can be filtered by using key filters.\n'
'Multiple values can be filtered for specific domains by comma seperating the values\n'
'Filter syntax examples:\n'
' - \'com.apple.MCX=\' will keep the preference domain \'com.apple.MCX\'.\n'
' - \'com.apple.MCX=com.apple.cachedaccounts.CreateAtLogin\' will keep the preference\n'
' domain value from the \'com.apple.MCX\' preference domain _specifically_.\n'
' - \'com.apple.MCX=com.apple.cachedaccounts.CreateAtLogin,com.apple.cachedaccounts.WarnOnCreate\'\n'
' will keep the two values for the \'com.apple.MCX\' preference domain.\n'
'Please note that filtering values is only done if the preference domain is also specified\n\n'
'In the example dictionary below:\n'
' - \'com.apple.MCX\' is referred to as the \'preference domain\'.\n'
' - \'com.apple.cachedaccounts.CreateAtLogin\' is referred to as the \'preference domain value\'.\n'
' This domain value should be taken from the \'mcx_preference_settings\' dictionary.\n\n')
ds_obj_mcx_dict_example = {'com.apple.MCX': {'Forced': [{'mcx_preference_settings': {'com.apple.cachedaccounts.CreateAtLogin': True,
'com.apple.cachedaccounts.CreatePHDAtLogin': False,
'com.apple.cachedaccounts.WarnOnCreate': False}}]},
'com.apple.dock': {'Forced': [{'mcx_preference_settings': {'AppItems-Raw': [],
'DocItems-Raw': [],
'contents-immutable': False,
'static-only': False},
'mcx_union_policy_keys': [{'mcx_input_key_names': ['AppItems-Raw'],
'mcx_output_key_name': 'static-apps',
'mcx_remove_duplicates': True},
{'mcx_input_key_names': ['DocItems-Raw'],
'mcx_output_key_name': 'static-others',
'mcx_remove_duplicates': True},
{'mcx_input_key_names': ['MCXDockSpecialFolders-Raw'],
'mcx_output_key_name': 'MCXDockSpecialFolders',
'mcx_remove_duplicates': True}]}]}}
ds_obj_mcx = f'{ds_obj_mcx_note}{pformat(ds_obj_mcx_dict_example)}'
| 91.540541 | 138 | 0.437851 |
057149c969c7c699e7d3de460f67852d23e83cd2 | 2,622 | py | Python | monitors/dns-monitor.py | CompeteNZ/Minotaur | 47afb2ed7bd9c21d1adf8cf4fd0d5396c80fd803 | [
"MIT"
] | null | null | null | monitors/dns-monitor.py | CompeteNZ/Minotaur | 47afb2ed7bd9c21d1adf8cf4fd0d5396c80fd803 | [
"MIT"
] | null | null | null | monitors/dns-monitor.py | CompeteNZ/Minotaur | 47afb2ed7bd9c21d1adf8cf4fd0d5396c80fd803 | [
"MIT"
] | null | null | null | # DESCRIPTION
# Run dns check and store the results in the db
# monitor_source = host address
# DEPENDENCIES
# Install python
# Install mysql.connector "python -m pip install mysql-connector-python"
# Install dotenv "python -m pip install python-dotenv"
# Install nslookup "python -m pip install nslookup"
# HOW TO RUN
# run cmd "python <script>"
# automate on windows using a bat file with command "python <script>" see batch folder for batch files
# automate on linux using cron with command "python <script>"
# TODO
#!/usr/bin/env python3
import os
import sys
import datetime
import mysql.connector
from nslookup import Nslookup
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
try:
conn = mysql.connector.connect(
user=os.getenv("DB_USERNAME"),
password=os.getenv("DB_PASSWORD"),
host=os.getenv("DB_HOST"),
port=int(os.getenv("DB_PORT")),
database=os.getenv("DB_DATABASE")
)
except mysql.connector.Error as err:
print(err)
sys.exit(1)
# get db connection cursor
cursor = conn.cursor()
# get list of ping monitors from the db
try:
sql = "SELECT monitor_id,monitor_type,monitor_source FROM monitors WHERE monitor_type=%s AND monitor_state=%s"
val = ('dns', 1)
cursor.execute(sql, val)
except mysql.connector.Error as err:
print(err)
sys.exit(1)
results = cursor.fetchall()
dns_query = Nslookup()
for (monitor_id, monitor_type, monitor_source) in results:
ips_record = dns_query.dns_lookup(monitor_source)
#print(ips_record.response_full, ips_record.answer)
if not ips_record.answer:
# host unknown (e.g. domain name lookup error)
# store result in the db as -1
try:
sql = "INSERT INTO monitor_results (monitor_id, monitor_type, monitor_source, monitor_result) VALUES (%s, %s, %s, %s)"
val = (monitor_id, monitor_type, monitor_source, -1)
cursor.execute(sql, val)
except mysql.connector.Error as err:
print(err)
continue
else:
# UPDATE - NOW NOT SAVING OK RESULTS ONLY ERRORS (saves on database etc)
# host found (e.g. resolved IP address)
# store result in the db
#try:
# sql = "INSERT INTO monitor_results (monitor_id, monitor_type, monitor_source, monitor_result) VALUES (%s, %s, %s, %s)"
# val = (monitor_id, monitor_type, monitor_source, 1)
# cursor.execute(sql, val)
#except mysql.connector.Error as err:
# print(err)
continue
# commit db transaction and close conection
conn.commit()
conn.close() | 30.137931 | 131 | 0.676583 |
0571570e4ea6cc0ac98e3e348473a3292c2d2151 | 797 | py | Python | program_param.py | duszek123/Example_Project | 72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb | [
"MIT"
] | null | null | null | program_param.py | duszek123/Example_Project | 72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb | [
"MIT"
] | null | null | null | program_param.py | duszek123/Example_Project | 72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb | [
"MIT"
] | null | null | null | import torch
import cv2
# directory with the training and validation pictures
data_dir = '/home/pawel/Pulpit/picture_data'
# source video stream device
camera_source = '/dev/video2'
# flag, False, not used
save = False
# input picture size (px); pictures are square, so one side is enough
input_size = (224,224)
size_pict = input_size[0]
# batch size (presumably samples per training batch - confirm in training code)
batch_size = 8
# number of worker processes (presumably for data loading - confirm in training code)
num_workers = 4
# number of training epochs
epoch_num = 2
# old variable, not used
frame_iterator = 0
# flag, not used
flag_start = False
# device used in the project - GPU (cuda) when available, CPU otherwise
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# video stream used in the project (vid is kept as an alias of video_stream)
video_stream = vid = cv2.VideoCapture(camera_source)
if not video_stream.isOpened():
    raise ValueError("Unable to open video source", camera_source)
| 24.151515 | 71 | 0.756587 |
0572b494de8de54123140e45c9c69a2ed0fbad3b | 501 | py | Python | models/fields/__init__.py | hengwei-chan/3D_SBDD | eda6d51aaf01ef25581a46920a25161678fab76d | [
"MIT"
] | 67 | 2021-12-02T05:53:44.000Z | 2022-03-31T07:21:26.000Z | models/fields/__init__.py | hengwei-chan/3D_SBDD | eda6d51aaf01ef25581a46920a25161678fab76d | [
"MIT"
] | 13 | 2021-12-05T14:23:46.000Z | 2022-03-25T21:07:20.000Z | models/fields/__init__.py | hengwei-chan/3D_SBDD | eda6d51aaf01ef25581a46920a25161678fab76d | [
"MIT"
] | 16 | 2022-01-11T11:48:24.000Z | 2022-03-27T19:20:58.000Z | from .classifier import SpatialClassifier
| 31.3125 | 68 | 0.628743 |
0572d30a3c1b204b7741919022f74dedf09c6c6c | 1,693 | py | Python | get_data/__init__.py | BrunoASNascimento/inmet_api | ec663543b1f6a77900166df2e6bf64d1f26f910d | [
"MIT"
] | null | null | null | get_data/__init__.py | BrunoASNascimento/inmet_api | ec663543b1f6a77900166df2e6bf64d1f26f910d | [
"MIT"
] | null | null | null | get_data/__init__.py | BrunoASNascimento/inmet_api | ec663543b1f6a77900166df2e6bf64d1f26f910d | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
import requests
import pandas as pd
cleaner_data(get_data())
| 32.557692 | 151 | 0.559362 |
05757df9e7e0717b064bec504f59f7b4d4c67024 | 7,795 | py | Python | get_both_current_and_active_power.py | wed35/Two-dimensional-Images-of-Current-and-Active-Power-Signals-for-Elevator-Condition-Recognition | d8a01915f46457257bda7c699fe36e7bdf4f907d | [
"MIT"
] | null | null | null | get_both_current_and_active_power.py | wed35/Two-dimensional-Images-of-Current-and-Active-Power-Signals-for-Elevator-Condition-Recognition | d8a01915f46457257bda7c699fe36e7bdf4f907d | [
"MIT"
] | null | null | null | get_both_current_and_active_power.py | wed35/Two-dimensional-Images-of-Current-and-Active-Power-Signals-for-Elevator-Condition-Recognition | d8a01915f46457257bda7c699fe36e7bdf4f907d | [
"MIT"
] | null | null | null | #%%
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import os
import time
%matplotlib inline
#%%
import pymysql
#%%
def colon_time(time_elements): # make time format as hh:mm:ss.ms
    '''
    Join ['hh', 'mm', 'ss', 'ms'] into "hh:mm:ss" or "hh:mm:ss.ms".

    The millisecond part is appended only when it is not the string '0'.
    '''
    base = ':'.join(time_elements[:3])
    millis = time_elements[3]
    if millis == '0':
        return base
    return base + '.' + millis
#%%
# get current
check_time = time.time()
start = '2019-08-09 12:03:00.000'
end = '2019-08-09 12:03:50.000'
df_cur = Select('HisItemCurr', start, end) # 600 data per minute
plt.plot(df_cur['DataSavedTime'], df_cur['Item005'])
plt.ylim(-10, 100)
plt.show()
#cur_result.to_csv('C:/Users/haeng/Desktop/test'+'.csv')
print('time duration = ', time.time()-check_time)
#%%
# get valid current values
check_time = time.time()
cur_date, cur_values = [], []
i = 1
temp_cnt = []
while i in range(1, len(df_cur['Item005'])):
if df_cur['Item005'][i-1]==0 and df_cur['Item005'][i]!=0:
cnt_zero = 0
temp_date = []
temp_values = []
temp_date.append(df_cur['DataSavedTime'][i-1])
temp_values.append(df_cur['Item005'][i-1])
j = i
while j in range(i, len(df_cur['Item005'])):
if df_cur['Item005'][j]!=0 and j+1<=len(df_cur['Item005']-1):
if df_cur['Item005'][j+1]==0:
cnt_zero += 1
else:
cnt_zero = 0
elif df_cur['Item005'][j]==0 and j+1<=len(df_cur['Item005']-1):
if df_cur['Item005'][j+1]!=0:
cnt_zero = 0
else:
cnt_zero += 1
if cnt_zero>41:
temp_cnt.append(cnt_zero)
cnt_zero = 0
break
temp_date.append(df_cur['DataSavedTime'][j])
temp_values.append(df_cur['Item005'][j])
j += 1
temp_date.append(df_cur['DataSavedTime'][j])
temp_values.append(df_cur['Item005'][j])
i = j
cur_date.append(temp_date)
cur_values.append(temp_values)
i += 1
for i in range(len(cur_date)):
del cur_date[i][len(cur_date[i])-40:]
del cur_values[i][len(cur_values[i])-40:]
print('time duration: ', time.time()-check_time)
#%%
# split current date
start_date, start_time, end_time = [], [], [] # hh:mm:ss.ms
start_time_bar, end_time_bar = [], [] # hh_mm_ss_ms
for i in range(len(cur_date)):
start_date.append(str(cur_date[i][0]).split()[0])
start_t = str(cur_date[i][0]).split()[1]
start_time.append(start_t)
start_time_bar.append(bar_time(start_t))
end_t = str(cur_date[i][len(cur_date[i])-1]).split()[1]
end_time.append(end_t)
end_time_bar.append(bar_time(end_t))
print(start_date)
print(start_time)
print(start_time_bar)
#%%
# set file name to save csv and png
file_names = []
for i in range(len(cur_date)):
file_name = start_date[i]+'_'+start_time_bar[i]
file_names.append(file_name)
print(file_names)
#%%
# save current csv and png
for i in range(len(cur_date)):
cur_start = start_date[i]+' '+start_time[i][:12]
cur_end = start_date[i]+' '+end_time[i][:12]
df_cur_save = Select('HisItemCurr', cur_start, cur_end)
df_cur_save.to_csv('./elevator_label/'+file_names[i]+'.csv')
plt.figure()
plt.plot(df_cur_save['DataSavedTime'], df_cur_save['Item005'])
plt.ylim(-10, 100)
plt.savefig('./elevator_label/'+file_names[i]+'.png')
plt.close()
#%%
# get active power by using time of current
# start_, end_ --> xx:xx:xx.xxx
df_act_dict = {}
for i in range(len(cur_date)):
# change start second by substracting 1
start_new = check_second([start_time_bar[i].split('_')[0], start_time_bar[i].split('_')[1],
start_time_bar[i].split('_')[2], start_time_bar[i].split('_')[3]], '-1')
s_temp = start_date[i]+' '+colon_time(start_new)
# change end second by adding 1
end_new = check_second([end_time_bar[i].split('_')[0], end_time_bar[i].split('_')[1],
end_time_bar[i].split('_')[2], end_time_bar[i].split('_')[3]], '+1')
e_temp = start_date[i]+' '+colon_time(end_new)
check_time = time.time()
df_act = Select('HisItemAct', s_temp, e_temp) # I don't know why this loop takes a long time in this part
df_act_dict[i] = df_act
plt.figure()
plt.plot(df_act['DataSavedTime'], df_act['Item005'])
plt.ylim(-10, 100)
plt.show()
print('time duration(plot) = ', time.time()-check_time)
#%%
# get real active power time
act_start_time, act_end_time = [], []
act_start_idx, act_end_idx = [], []
for z in range(len(cur_date)):
#print(df_act_dict[z].shape) # 261, 111
#df_act_dict[z].to_csv('./elevator_label/active_raw_test'+str(z)+'.csv')
for i in range(1, df_act_dict[z].shape[0]):
if df_act_dict[z]['Item005'][i-1]==0 and df_act_dict[z]['Item005'][i]!=0:
act_start_time.append(str(df_act_dict[z]['DataSavedTime'][i-1]).split()[1])
act_start_idx.append(i-1)
break
for i in range(df_act_dict[z].shape[0]-2, int(df_act_dict[z].shape[0]/2), -1):
if df_act_dict[z]['Item005'][i]!=0 and df_act_dict[z]['Item005'][i+1]==0:
act_end_time.append(str(df_act_dict[z]['DataSavedTime'][i+1]).split()[1])
act_end_idx.append(i+1)
break
print(act_start_idx)
print(act_start_time)
print(act_end_idx)
print(act_end_time)
#%%
# save active power csv and png
for i in range(len(cur_date)):
df_act_save = df_act_dict[i][act_start_idx[i]:act_end_idx[i]+1]
df_act_save.to_csv('./elevator_label/'+file_names[i]+'_active.csv')
plt.figure()
plt.plot(df_act_save['DataSavedTime'], df_act_save['Item005'])
plt.ylim(-10, 100)
plt.savefig('./elevator_label/'+file_names[i]+'_active.png')
plt.close()
#%%
| 29.194757 | 134 | 0.630276 |
057648a66341634f2bd91398e33248914e65d08f | 435 | py | Python | src/pynorare/cli_util.py | concepticon/pynorare | 3cf5ea2d1597c5acc84963f781ff49d96b4d7e02 | [
"MIT"
] | null | null | null | src/pynorare/cli_util.py | concepticon/pynorare | 3cf5ea2d1597c5acc84963f781ff49d96b4d7e02 | [
"MIT"
] | 5 | 2020-07-20T11:05:07.000Z | 2022-03-11T15:51:52.000Z | src/pynorare/cli_util.py | concepticon/pynorare | 3cf5ea2d1597c5acc84963f781ff49d96b4d7e02 | [
"MIT"
] | null | null | null | from pyconcepticon import Concepticon
from pynorare.dataset import get_dataset_cls
| 24.166667 | 81 | 0.691954 |
0576551dec71ed65de6452c0a4914168209bd3e8 | 2,987 | py | Python | braille/lang.py | galou/braille-converter | bf3b898c212a5067d61ce7dc6828df227ddd9db5 | [
"MIT"
] | 24 | 2015-04-03T10:24:18.000Z | 2022-01-29T10:50:34.000Z | braille/lang.py | galou/braille-converter | bf3b898c212a5067d61ce7dc6828df227ddd9db5 | [
"MIT"
] | 2 | 2016-03-28T04:10:14.000Z | 2017-02-22T23:25:12.000Z | braille/lang.py | galou/braille-converter | bf3b898c212a5067d61ce7dc6828df227ddd9db5 | [
"MIT"
] | 10 | 2015-05-06T06:26:21.000Z | 2019-11-13T23:11:11.000Z | # Copyright 2012 Jonathan Paugh
# See COPYING for license details
'''
Functions that deal with lang files or rulesets
'''
import ds
import comp as cpl
from .options import opt
from .util import fwarn, do_re, gettype
import os
langdir = os.path.join(os.path.dirname(__file__), 'lang')
if not os.path.isdir(langdir):
raise IOError('Cannot load lang files; unknown dir "%s"' % langdir)
#Cache of imported rulesets, indexed by lang name
ruleset = { }
def import_ruleset(lang='amer-2', comp=None, fresh=False):
  '''
  loads the rules for the given language
  params:
  -------
  lang='amer-2' Language to load. Defaults to American Grade 2.
  This consists of solely of alphanumeric characters and hyphens.
  comp=True - Compile the ruleset to the most succint form (brl).
  The default is set by commandline-argument.
  fresh=False - Get a fresh version of the ruleset, from file, rather
  than relying on the cache. Defaults False.
  If you change the comp option (or change the lang file), you must
  set this to True to see your changes.
  '''
  #Don't be grumpy about underscores.
  lang = lang.replace('_', '-')
  rules = []
  #prefer cached version first
  if not fresh and lang in ruleset:
    return ruleset[lang]
  #Set default comp
  if comp == None:
    comp = opt('comp')
  #Import standard (international) rules first
  #(the recursive call populates the module-level cache; those rules are
  #appended to this language's list at the bottom of the function)
  if (not lang == 'standard' and
      not 'standard' in ruleset):
    import_ruleset('standard')
  #Parse the lang file line by line; cxt carries fname/lineno so that
  #__parse_rule can emit warnings pointing at the offending line.
  cxt = ds.Context()
  cxt.fname = os.path.join(langdir, lang)
  cxt.lineno = 0
  try:
    with open(cxt.fname) as lfile:
      for line in lfile:
        cxt.lineno += 1
        rule = __parse_rule(cxt, line, comp)
        if rule:
          rules.append(rule)
  except IOError as e:
    raise
  #Python 2 list.sort with a cmp function (see __cmp_rules)
  rules.sort(cmp=__cmp_rules)
  # cache ruleset for this language
  # NOTE: cached *before* the standard rules are appended below; since the
  # list is extended in place the cached entry ends up including them too.
  ruleset[lang] = rules
  if not lang == 'standard':
    rules.extend(ruleset['standard'])
  return rules
def __parse_rule(cxt, line, comp=False):
  '''
  Parse one line of a lang file into a rule dict.

  Returns None for blank lines, comment lines, and lines that either fail
  to parse or name an unknown rule type (a warning is emitted via fwarn
  in the latter two cases).
  '''
  stripped = line.strip()
  #Blank lines and comment lines carry no rule.
  if not stripped or stripped.startswith('#'):
    return None
  parsed = do_re(ds.patt.rule, stripped)
  if not parsed:
    fwarn(cxt, 'Invalid Rule "%s"' % stripped)
    return None
  #Normalize the rule type and make sure it is one we know about.
  kind = parsed['type'].lower()
  parsed['type'] = kind
  if kind not in ds.types:
    fwarn(cxt, 'Unknown rule type: ' + kind)
    return None
  #A missing priority defaults to 1.
  if not parsed['priority']:
    parsed['priority'] = 1
  #Compile the rule. (Convert its brl to minimum form)
  compiler = gettype(parsed, 'comp')
  if comp or compiler in (cpl.dotify, cpl.prefix):
    compiler(cxt, parsed)
  else:
    #The minimum we must do is dotify any dots
    cpl.try_dotify(cxt, parsed)
  return parsed
def __cmp_rules(x, y):
  '''
  cmp function for the rules.

  Sorts by type order, then by priority; ties go to the longer prn string.
  '''
  order_x, order_y = gettype(x, 'order'), gettype(y, 'order')
  if order_x != order_y:
    return -1 if order_x < order_y else 1
  prio_x, prio_y = x['priority'], y['priority']
  if prio_x != prio_y:
    return -1 if prio_x < prio_y else 1
  # Longer strings first
  return -1 * cmp(len(x['prn']), len(y['prn']))
| 23.706349 | 69 | 0.657181 |
057756ea7512bea24b4425c570ad661d5b1d078c | 118 | py | Python | Codeforces/B_Simple_Game.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | Codeforces/B_Simple_Game.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | Codeforces/B_Simple_Game.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | n,m=map(int,input().split())
mid=n//2
if n==1 and m==1:
print("1")
elif mid<m:
print(m-1)
else:
print(m+1) | 14.75 | 28 | 0.542373 |
057a549b59e9c893c4abd50247ba001cdab7fac2 | 966 | py | Python | toughradius/tests/test_base.py | geosson/GSRadius | 5870e3d055e8366f98b8e65220a1520b5da22f6d | [
"Apache-2.0"
] | 1 | 2019-05-12T15:06:58.000Z | 2019-05-12T15:06:58.000Z | toughradius/tests/test_base.py | geosson/GSRadius | 5870e3d055e8366f98b8e65220a1520b5da22f6d | [
"Apache-2.0"
] | null | null | null | toughradius/tests/test_base.py | geosson/GSRadius | 5870e3d055e8366f98b8e65220a1520b5da22f6d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#coding:utf-8
from toughlib import config as iconfig
import os
import requests | 27.6 | 95 | 0.575569 |
057b75bb649e28b716661271413ac2187e4d17f1 | 48 | py | Python | game/data/components/__init__.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | 8 | 2019-12-15T22:32:30.000Z | 2021-06-14T07:38:51.000Z | game/data/components/__init__.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | null | null | null | game/data/components/__init__.py | UnidayStudio/Easy-2D-Game-Engine | 1a8501cba538d7542b0e24bf64eead388085480f | [
"MIT"
] | 2 | 2020-09-10T17:34:23.000Z | 2021-03-11T09:26:26.000Z | from game.data.components.TestComponent import * | 48 | 48 | 0.854167 |
057bdb050500b53da7e385ff2282c3ebb232fe64 | 121 | py | Python | hcaptcha/__init__.py | yunusbyrak/py-hcaptcha | f429bfaba7619c2ac255ae101423d72c2866aa09 | [
"MIT"
] | 1 | 2022-01-09T23:49:03.000Z | 2022-01-09T23:49:03.000Z | hcaptcha/__init__.py | bryonpokemon/py-hcaptcha | 92f723c8f5180d921731d7d04deb637099514a2e | [
"MIT"
] | null | null | null | hcaptcha/__init__.py | bryonpokemon/py-hcaptcha | 92f723c8f5180d921731d7d04deb637099514a2e | [
"MIT"
] | 1 | 2022-01-09T23:49:03.000Z | 2022-01-09T23:49:03.000Z | from .challenges import Challenge
from .solvers import Solver
from .agents import random_agent
from .exceptions import *
| 24.2 | 33 | 0.826446 |
057c321a1c38497a94f1e9f85d9de7c4b624cddb | 10,869 | py | Python | mscode/xp/general_comparison.py | cohenjer/mscode | e761c4af0227c386bdc7d22a55a2218486faf708 | [
"MIT"
] | null | null | null | mscode/xp/general_comparison.py | cohenjer/mscode | e761c4af0227c386bdc7d22a55a2218486faf708 | [
"MIT"
] | null | null | null | mscode/xp/general_comparison.py | cohenjer/mscode | e761c4af0227c386bdc7d22a55a2218486faf708 | [
"MIT"
] | null | null | null | # recovery (and error) vs noise for all algorithms
# recovery (and error) vs condB for all algorithms
# recovery vs (k,d) for all algorithms (heatmap)
# todo: also condD?
# Questions:
# - test two distributions for X: Gaussian, and decreasing
# - to choose lambda(s), we fix according to average best one from a set of experiments using the same settings on the fly. The grid is very coarse. In practice, use cross-validation.
# - We initialize with 1 zero init, cf init tests for more details
# Reasonable dimensions for reasonable runtime
import numpy as np
from matplotlib import pyplot as plt
from itertools import combinations, product
import shelve
import pandas as pd
from mscode.utils.utils import count_support, redundance_count, find_lambda
from mscode.methods.algorithms import iht_mix, homp, omp, ls_kn_supp, pseudo_trick, brute_trick, ista_mix, ista, admm_mix
from mscode.utils.generator import gen_mix, initialize
import plotly.express as px
# Random seeding
np.random.seed(seed=0)
# Problem parameters
# (passed to gen_mix below: presumably k = sparsity, r = number of columns,
# (m, n) = data dimensions, d = dictionary size -- confirm against mscode docs)
k = 5 #2
r = 6 #2
n = 50 #10
m = 50 #20
d = 100 #50
#noise = 0.03 # 0.03
SNR = 20 # dB
# target conditioning for B (gen_mix cond= argument)
cond = 2*1e2
distr = 'Uniform'
# stopping tolerance passed to all iterative solvers
tol = 1e-6
# We run the tests several times since performances are very problem-dependent
Nbdata = 50
# Recovery and error versus noise
grid_SNR = [1000, 100, 50, 40, 30, 20, 15, 10, 5, 2, 0] #[40, 20]
# coarse grid used to pick a reasonable regularization (cross-validate in practice)
grid_lambda = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
# Store results in Pandas DataFrame
store_pd = pd.DataFrame(columns=["xp", "value", "algorithm", "error type", "SNR", "lambda", "k", "r", "d", "m", "n", "cond"])
for SNR in grid_SNR:
print('SNR', SNR, 'dB')
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr = distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
# Storing results
dic={
'xp':10*['XP1'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[condB],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
## Recovery and error versus conditioning
SNR = 20
grid_cond = [1, 10, 50, 100, 5*1e2, 1e3, 5*1e3, 1e4, 5*1e4, 1e5]
for cond in grid_cond:
print('cond', cond)
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
dic={
'xp':10*['XP2'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[np.round(condB,3)],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
## Recovery and error versus (k,d)
cond = 5*1e2
grid_k = [1, 2, 5, 10, 20]
grid_d = [20, 50, 100, 200, 400]
for d in grid_d:
for k in grid_k:
print('(d,k) is', d, k)
# run 3 checks for lambda, to find a reasonable value
store_lamb = []
store_lamb_m = []
for iter in range(3):
store_lamb_m.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista_m'))
store_lamb.append(find_lambda((m,n,d,k,r,SNR,cond), grid_lambda, 'Fista'))
lamb = np.median(store_lamb)
lamb_m = np.median(store_lamb_m)
print('lambda ratio is', lamb, 'and for mixed', lamb_m)
for j in range(Nbdata):
# Generate data
Y, Ytrue, D, B, X, S, sig, condB = gen_mix([m, n, d, r], k, snr=SNR, cond=cond, distr=distr)
# The default zero init
X0 = initialize([d,r], distr = 'Zeros')
# Running algorithms
X_istam, _, err_ista_m, S_ista_m = ista_mix(Y, D, B, lamb_m, k=k, X0=X0, verbose=False, tol=tol)
X_ista, _, err_ista, S_ista = ista(Y, D, B, lamb, k=k, X0=X0, verbose=False, tol=tol)
X_homp, err_homp, S_homp = homp(Y, D, B, k, X0, tol=tol)
X_iht, err_iht, S_iht = iht_mix(Y, D, B, k, X0, tol=tol)
X_trick, err_trick, S_trick = pseudo_trick(Y, D, B, k)
# Storing results
dic={
'xp':10*['XP3'],
'value':[count_support(S, S_ista_m), count_support(S, S_ista), count_support(S, S_homp), count_support(S, S_iht), count_support(S, S_trick)]+
[np.linalg.norm(X - X_istam), np.linalg.norm(X - X_ista), np.linalg.norm(X - X_homp), np.linalg.norm(X - X_iht), np.linalg.norm(X - X_trick)],
'algorithm': 2*['Mixed-FISTA', 'Block-FISTA', 'HOMP', 'IHT', 'TrickOMP'],
"error type": 5*['support recovery']+5*['reconstruction error'],
"SNR":10*[SNR], "lambda":2*[lamb, lamb_m,0,0,0],
"k":10*[k], "r":10*[r], "d":10*[d], "m":10*[m], "n":10*[n], "cond":10*[condB],
}
store_pd = store_pd.append(pd.DataFrame(dic), ignore_index=True)
df1 = store_pd[store_pd.xp=='XP1']
df2 = store_pd[store_pd.xp=='XP2']
df3 = store_pd[store_pd.xp=='XP3']
fig = px.box(df1[df1['error type']=='support recovery'], x='SNR', y='value', facet_col='algorithm', color='algorithm', title="Support recovery versus SNR", labels={'value':'Support recovery'})
fig.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig.update_xaxes(type='category')
fig.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
yaxis=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis2=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis3=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis4=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis5=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
showlegend=False,
)
fig.show()
fig2 = px.box(df2[df2['error type']=='support recovery'], x='cond', y='value', color='algorithm', facet_col='algorithm', title="Support recovery versus conditionning of B", labels={'value':'Support recovery'})
fig2.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig2.update_xaxes(type='category')
fig2.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=400,
yaxis=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis2=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis3=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis4=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
yaxis5=dict(zeroline=False, gridcolor='rgb(233,233,233)'),
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
showlegend=False,
)
fig2.show()
# Normalizing the support recovery scores
fig3=px.density_heatmap(df3[df3['error type']=='support recovery'], x='d', y='k', z='value', facet_col='algorithm', color_continuous_scale='Viridis', histfunc="avg", labels={'value':'Support recovery'}, title='Recovery for varying sparsity and dictionary size')
fig3.for_each_annotation(lambda a: a.update(text=a.text.split("=")[-1]))
fig3.update_xaxes(type='category')
fig3.update_yaxes(type='category')
fig3.update_layout(
font_family="HelveticaBold",
font_size=15,
autosize=False,
width=1000,
height=310,
paper_bgcolor="white",#'rgb(233,233,233)',
plot_bgcolor="white",#'rgb(233,233,233)',
)
fig3.show()
year = 2021
month = 10
day = 20
path = '../..'
stor_name = '{}-{}-{}'.format(year,month,day)
#store_pd.to_pickle('{}/data/XP1/{}_results'.format(path,stor_name))
#fig.write_image('{}/data/XP1/{}_plot1.pdf'.format(path,stor_name))
#fig2.write_image('{}/data/XP1/{}_plot2.pdf'.format(path,stor_name))
#fig3.write_image('{}/data/XP1/{}_plot3.pdf'.format(path,stor_name))
# for Frontiers export
#fig.write_image('{}/data/XP1/{}_plot1.jpg'.format(path,stor_name))
#fig2.write_image('{}/data/XP1/{}_plot2.jpg'.format(path,stor_name))
#fig3.write_image('{}/data/XP1/{}_plot3.jpg'.format(path,stor_name))
# to load data
#store_pd = pd.read_pickle('{}/data/XP1/{}_results'.format(path,stor_name))
| 44.004049 | 261 | 0.631153 |
057c9190ccad439b376e3bce3f11d837eb5a4576 | 42 | py | Python | tests/test_modules/simple_test_package/aa.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 20 | 2017-12-24T00:19:15.000Z | 2021-11-15T07:42:25.000Z | tests/test_modules/simple_test_package/aa.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 1 | 2017-10-22T21:03:41.000Z | 2017-12-24T04:26:22.000Z | tests/test_modules/simple_test_package/aa.py | ajylee/call_map | 21e7684b0814eae6f16cd4bc75597dc4e9239ec0 | [
"BSD-2-Clause"
] | 2 | 2017-11-04T10:06:59.000Z | 2019-08-01T22:24:49.000Z | from . import bb
| 8.4 | 16 | 0.547619 |
057cd72af1308e0a81b1f8fd12ba9d1678f47b2d | 1,262 | py | Python | tests/fixtures.py | GustavoKatel/pushbullet-cli | e5102772752a97db539594b0d50b5effb36a22e2 | [
"MIT"
] | 176 | 2017-01-30T16:21:48.000Z | 2022-02-10T05:32:57.000Z | tests/fixtures.py | GustavoKatel/pushbullet-cli | e5102772752a97db539594b0d50b5effb36a22e2 | [
"MIT"
] | 49 | 2017-01-21T20:27:03.000Z | 2022-01-16T02:57:51.000Z | tests/fixtures.py | GustavoKatel/pushbullet-cli | e5102772752a97db539594b0d50b5effb36a22e2 | [
"MIT"
] | 21 | 2017-01-26T06:08:54.000Z | 2022-01-04T19:53:25.000Z | import click
import pytest
from click.testing import CliRunner
def wrap_runner_func(runner, func):
return invoke
| 21.033333 | 61 | 0.723455 |
057dcb0e3d38cc7460f6b046f1c4949c4d391cb9 | 2,478 | py | Python | sktime/transformations/hierarchical/tests/test_aggregate.py | biologioholic/sktime | 9d0391a04b11d22bd783b452f01aa5b4529b41a2 | [
"BSD-3-Clause"
] | 1 | 2021-12-22T02:45:39.000Z | 2021-12-22T02:45:39.000Z | sktime/transformations/hierarchical/tests/test_aggregate.py | biologioholic/sktime | 9d0391a04b11d22bd783b452f01aa5b4529b41a2 | [
"BSD-3-Clause"
] | null | null | null | sktime/transformations/hierarchical/tests/test_aggregate.py | biologioholic/sktime | 9d0391a04b11d22bd783b452f01aa5b4529b41a2 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
"""Tests for hierarchical aggregator."""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["ciaran-g"]
import pytest
from sktime.transformations.hierarchical.aggregate import Aggregator
from sktime.utils._testing.hierarchical import _bottom_hier_datagen
# test for equal output with named/unnamed indexes
# test that flatten_single_levels works as expected
def test_aggregator_flatten():
    """Tests Aggregator flattening single levels.
    This tests that the flatten_single_levels argument works as expected for a
    fixed example of a complicated hierarchy.
    """
    agg = Aggregator(flatten_single_levels=False)
    agg_flat = Aggregator(flatten_single_levels=True)
    # fixed hierarchical dataset; the expected counts below depend on this seed
    X = _bottom_hier_datagen(
        no_bottom_nodes=10,
        no_levels=4,
        random_seed=111,
    )
    # aggregate without flattening
    X_agg = agg.fit_transform(X)
    # aggregate with flattening
    X_agg_flat = agg_flat.fit_transform(X)
    # droplevel(-1) removes the time index, leaving one entry per series node
    msg = (
        "Aggregator without flattening should have 21 unique levels, "
        "with the time index removed, for random_seed=111."
    )
    assert len(X_agg.droplevel(-1).index.unique()) == 21, msg
    msg = (
        "Aggregator with flattening should have 17 unique levels, "
        "with the time index removed, for random_seed=111."
    )
    assert len(X_agg_flat.droplevel(-1).index.unique()) == 17, msg
| 33.486486 | 80 | 0.717514 |
057e82bc7eee8bfd854f64e90c47dfe5089a763d | 563 | py | Python | doni/tests/unit/api/test_availability_window.py | ChameleonCloud/doni | e280a0fddf4ee7d2abb69ceed49a9728e88cf99b | [
"Apache-2.0"
] | null | null | null | doni/tests/unit/api/test_availability_window.py | ChameleonCloud/doni | e280a0fddf4ee7d2abb69ceed49a9728e88cf99b | [
"Apache-2.0"
] | 49 | 2021-03-16T14:58:18.000Z | 2022-03-14T22:06:36.000Z | doni/tests/unit/api/test_availability_window.py | ChameleonCloud/doni | e280a0fddf4ee7d2abb69ceed49a9728e88cf99b | [
"Apache-2.0"
] | null | null | null | from flask.testing import FlaskClient
from doni.tests.unit import utils
| 29.631579 | 82 | 0.708703 |
057ec8e5e224d55258d512334e2a91039899ab2c | 747 | py | Python | src/genui/generators/serializers.py | Tontolda/genui | c5b7da7c5a99fc16d34878e2170145ac7c8e31c4 | [
"0BSD"
] | 15 | 2021-05-31T13:39:17.000Z | 2022-03-30T12:04:14.000Z | src/genui/generators/serializers.py | martin-sicho/genui | ea7f1272030a13e8e253a7a9b6479ac6a78552d3 | [
"MIT"
] | 3 | 2021-04-08T22:02:22.000Z | 2022-03-16T09:10:20.000Z | src/genui/generators/serializers.py | Tontolda/genui | c5b7da7c5a99fc16d34878e2170145ac7c8e31c4 | [
"0BSD"
] | 5 | 2021-03-04T11:00:54.000Z | 2021-12-18T22:59:22.000Z | """
serializers
Created by: Martin Sicho
On: 27-01-20, 17:00
"""
from rest_framework import serializers
from genui.utils.serializers import GenericModelSerializerMixIn
from genui.compounds.serializers import MolSetSerializer
from genui.projects.serializers import ProjectSerializer
from . import models
| 29.88 | 96 | 0.781794 |
057f8e845bc31c86789aa18cb713245d93a393bc | 5,898 | py | Python | cccbr_methods/models.py | lelandpaul/cccbr_methods | 8fce303d7d7fd178f1b371389a4cc318852e392a | [
"MIT"
] | null | null | null | cccbr_methods/models.py | lelandpaul/cccbr_methods | 8fce303d7d7fd178f1b371389a4cc318852e392a | [
"MIT"
] | 1 | 2021-12-13T20:44:46.000Z | 2021-12-13T20:44:46.000Z | cccbr_methods/models.py | lelandpaul/cccbr_methods | 8fce303d7d7fd178f1b371389a4cc318852e392a | [
"MIT"
] | null | null | null | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from datetime import datetime, timedelta
from sqlalchemy import Table, Column, Integer, String, Date, ForeignKey, Boolean
from sqlalchemy.orm import relationship, backref
from sqlalchemy.orm import sessionmaker
from re import sub
import os
module_path = '/'.join(__file__.split('/')[:-1])
# SQLAlchemy Setup
Base = declarative_base()
engine = create_engine('sqlite:///{}/data/methods.db?check_same_thread=False'.format(module_path))
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
class Performance(Base):
    """ORM model for one recorded performance of a method.

    The trailing comment on each column notes the field in the upstream
    data it apparently mirrors (e.g. ``PERF.location.town``) -- confirm
    against the importer that populates this table.
    """
    __tablename__ = 'performances'
    id = Column(Integer, primary_key=True, autoincrement=True) # id
    kind = Column(String(32)) # method.performances.KIND
    date = Column(Date) # PERF.date
    society = Column(String(32)) # PERF.society
    town = Column(String(32)) # PERF.location.town
    county = Column(String(32)) # PERF.location.county
    building = Column(String(32)) # PERF.location.building
    address = Column(String(32)) # PERF.location.address
    country = Column(String(32)) # PERF.location.country
    room = Column(String(32)) # PERF.location.room
    region = Column(String(32)) # PERF.location.region
    # many-to-one link back to the owning Method row
    method_id_fk = Column(Integer, ForeignKey('methods.id'))
    method = relationship("Method", back_populates="performances")
| 45.369231 | 106 | 0.590709 |
057fec44c986714a8f02d47b39f9f891463a6252 | 848 | py | Python | peuler_012_better.py | bayramcicek/mini-programs | 3f876e3274b7beeb5e7413ac9c5275813d9f0d2d | [
"Unlicense"
] | null | null | null | peuler_012_better.py | bayramcicek/mini-programs | 3f876e3274b7beeb5e7413ac9c5275813d9f0d2d | [
"Unlicense"
] | null | null | null | peuler_012_better.py | bayramcicek/mini-programs | 3f876e3274b7beeb5e7413ac9c5275813d9f0d2d | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
import math
# NOTE(review): `Solution` is not defined in the code shown here; this binds
# the class object itself (not an instance), so `number_of_factor` is
# presumably a @staticmethod — confirm against the missing definition.
test = Solution
# triangle_arr[i] = first triangular number found with more than i factors.
triangle_arr = [0]
temp, box, curr_num = 0, 0, 0
for i in range(1, 1001):
    # Advance through triangular numbers box*(box+1)/2 until one has more
    # than i factors; `temp` and `box` deliberately carry over between
    # iterations of i, so the search resumes where it stopped.
    while temp <= i:
        box += 1
        curr_num = (box * (box + 1)) / 2
        temp = test.number_of_factor(curr_num)
    triangle_arr.append(curr_num)
    print(curr_num)
# number_test = int(input())
#
# limit_list = []
# for a in range(number_test):
# limit_list.append(int(input()))
#
# for limit in limit_list:
# print(int(triangle_arr[limit]))
| 20.190476 | 69 | 0.5625 |
05803580ad5cf536a86b26fbe2b79573b774b99b | 9,253 | py | Python | swyft/plot/plot.py | undark-lab/swyft | 50aa524e2f3a2b3d1354543178ff72bc7f055a35 | [
"MIT"
] | 104 | 2020-11-26T09:46:03.000Z | 2022-03-18T06:22:03.000Z | swyft/plot/plot.py | cweniger/swyft | 2c0ed514622a37e8ec4e406b99a8327ecafb7ab4 | [
"MIT"
] | 83 | 2021-03-02T15:54:26.000Z | 2022-03-10T08:09:05.000Z | swyft/plot/plot.py | undark-lab/swyft | 50aa524e2f3a2b3d1354543178ff72bc7f055a35 | [
"MIT"
] | 10 | 2021-02-04T14:27:36.000Z | 2022-03-31T17:39:34.000Z | import numpy as np
import pylab as plt
from scipy.integrate import simps
def plot_1d(
    samples,
    pois,
    truth=None,
    bins=100,
    figsize=(15, 10),
    color="k",
    labels=None,
    label_args=None,
    ncol=None,
    subplots_kwargs=None,
    fig=None,
    contours=True,
):
    """Make beautiful 1-dim posteriors.

    Args:
        samples: Samples from `swyft.Posteriors.sample`
        pois: List of parameters of interest
        truth: Ground truth vector
        bins: Number of bins used for histograms.
        figsize: Size of figure
        color: Color
        labels: Custom labels (default is parameter names)
        label_args: Custom label arguments (dict; default empty)
        ncol: Number of panel columns
        subplots_kwargs: Keyword arguments forwarded to ``plt.subplots``
        fig: Existing figure to draw into (a new one is created if None)
        contours: Plot 1-dim contours

    Returns:
        Tuple ``(fig, diags)`` where ``diags`` maps ``(poi,)`` tuples to the
        per-panel return value of ``plot_posterior``.
    """
    # Mutable default arguments ({}) would be shared between calls; use None
    # sentinels instead.
    if label_args is None:
        label_args = {}
    if subplots_kwargs is None:
        subplots_kwargs = {}
    grid_interpolate = False
    diags = {}
    if ncol is None:
        ncol = len(pois)
    K = len(pois)
    nrow = (K - 1) // ncol + 1
    if fig is None:
        fig, axes = plt.subplots(nrow, ncol, figsize=figsize, **subplots_kwargs)
    else:
        axes = fig.get_axes()
    # Uniform figure margins and inter-panel spacing.
    lb = 0.125
    tr = 0.9
    whspace = 0.15
    fig.subplots_adjust(
        left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace
    )
    if labels is None:
        labels = [samples["parameter_names"][pois[i]] for i in range(K)]
    for k in range(K):
        # plt.subplots returns a scalar Axes, a 1-d array, or a 2-d array
        # depending on the grid shape, so index it accordingly.
        if nrow == 1 and ncol > 1:
            ax = axes[k]
        elif nrow == 1 and ncol == 1:
            ax = axes
        else:
            i, j = k % ncol, k // ncol
            ax = axes[j, i]
        ret = plot_posterior(
            samples,
            pois[k],
            ax=ax,
            grid_interpolate=grid_interpolate,
            color=color,
            bins=bins,
            contours=contours,
        )
        ax.set_xlabel(labels[k], **label_args)
        if truth is not None:
            ax.axvline(truth[pois[k]], ls=":", color="r")
        diags[(pois[k],)] = ret
    return fig, diags
def plot_corner(
    samples,
    pois,
    bins=100,
    truth=None,
    figsize=(10, 10),
    color="k",
    labels=None,
    label_args=None,
    contours_1d: bool = True,
    fig=None,
):
    """Make a beautiful corner plot.

    Args:
        samples: Samples from `swyft.Posteriors.sample`
        pois: List of parameters of interest
        bins: Number of bins used for histograms.
        truth: Ground truth vector
        figsize: Size of figure
        color: Color
        labels: Custom labels (default is parameter names)
        label_args: Custom label arguments (dict; default empty)
        contours_1d: Plot 1-dim contours on the diagonal panels
        fig: Existing figure to draw into (a new one is created if None)

    Returns:
        Tuple ``(fig, diagnostics)`` where ``diagnostics`` maps poi tuples
        (pairs for off-diagonal panels, 1-tuples for diagonal panels) to the
        per-panel return value of ``plot_posterior``.
    """
    # A mutable default argument ({}) would be shared between calls; use a
    # None sentinel instead.
    if label_args is None:
        label_args = {}
    K = len(pois)
    if fig is None:
        fig, axes = plt.subplots(K, K, figsize=figsize)
    else:
        axes = np.array(fig.get_axes()).reshape((K, K))
    # Uniform figure margins and inter-panel spacing.
    lb = 0.125
    tr = 0.9
    whspace = 0.1
    fig.subplots_adjust(
        left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace
    )
    diagnostics = {}
    if labels is None:
        labels = [samples["parameter_names"][pois[i]] for i in range(K)]
    for i in range(K):
        for j in range(K):
            ax = axes[i, j]
            # Switch off the panels above the diagonal (i < j).
            if i < j:
                ax.set_yticklabels([])
                ax.set_xticklabels([])
                ax.set_xticks([])
                ax.set_yticks([])
                ax.set_frame_on(False)
                continue
            # Formatting labels: only the left column keeps y labels and only
            # the bottom row keeps x labels.
            if j > 0 or i == 0:
                ax.set_yticklabels([])
                # ax.set_yticks([])
            if i < K - 1:
                ax.set_xticklabels([])
                # ax.set_xticks([])
            if i == K - 1:
                ax.set_xlabel(labels[j], **label_args)
            if j == 0 and i > 0:
                ax.set_ylabel(labels[i], **label_args)
            # 2-dim posterior panels below the diagonal.
            if j < i:
                ret = plot_posterior(
                    samples, [pois[j], pois[i]], ax=ax, color=color, bins=bins
                )
                if truth is not None:
                    ax.axvline(truth[pois[j]], color="r")
                    ax.axhline(truth[pois[i]], color="r")
                diagnostics[(pois[j], pois[i])] = ret
            # 1-dim posterior panels on the diagonal.
            if j == i:
                ret = plot_posterior(
                    samples,
                    pois[i],
                    ax=ax,
                    color=color,
                    bins=bins,
                    contours=contours_1d,
                )
                if truth is not None:
                    ax.axvline(truth[pois[i]], ls=":", color="r")
                diagnostics[(pois[i],)] = ret
    return fig, diagnostics
if __name__ == "__main__":
pass
| 29.848387 | 88 | 0.514428 |
05819bbe1c0902e6600dadc33453e92046d7a1ff | 3,038 | py | Python | control-gastos/python/main.py | manuelduarte077/Ejercicios-con-Python-NodeJS | d7b26fdeeb1640272847274b99b2f607145d58a4 | [
"MIT"
] | 1 | 2021-07-13T18:43:59.000Z | 2021-07-13T18:43:59.000Z | control-gastos/python/main.py | manuelduarte077/Ejercicios-con-Python-NodeJS | d7b26fdeeb1640272847274b99b2f607145d58a4 | [
"MIT"
] | null | null | null | control-gastos/python/main.py | manuelduarte077/Ejercicios-con-Python-NodeJS | d7b26fdeeb1640272847274b99b2f607145d58a4 | [
"MIT"
] | null | null | null | import os
from tabulate import tabulate
import requests
iniciar()
| 31 | 79 | 0.600066 |
05826df3789ad47bc005b4bcd34765514c7e2fd2 | 409 | py | Python | examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 31 | 2020-05-02T13:34:26.000Z | 2021-06-06T17:25:52.000Z | examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 108 | 2019-11-18T19:41:52.000Z | 2022-03-18T13:58:17.000Z | examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 4 | 2020-05-19T08:57:44.000Z | 2020-09-21T08:53:46.000Z | """Depth-first traversing of a binary tree.
Call a function _f on every node of binary tree _bt, in depth-first infix order
Source: programming-idioms.org
"""
# Implementation author: TinyFawks
# Created on 2016-02-18T08:50:27.130406Z
# Last modified on 2016-02-18T09:16:52.625429Z
# Version 2
# Recursive DFS.
| 18.590909 | 79 | 0.694377 |
0582a1028ca60869856e20d167bdffc0aa95e128 | 412 | py | Python | pal95_doc/docs/__init__.py | MacHu-GWU/pal95_doc-project | 753b865435f316e985320247489e68f465741827 | [
"MIT"
] | 13 | 2019-10-01T02:51:27.000Z | 2022-02-28T17:38:58.000Z | pal95_doc/docs/__init__.py | MacHu-GWU/pal95_doc-project | 753b865435f316e985320247489e68f465741827 | [
"MIT"
] | 2 | 2020-11-09T09:17:21.000Z | 2021-04-27T21:20:59.000Z | pal95_doc/docs/__init__.py | MacHu-GWU/pal95_doc-project | 753b865435f316e985320247489e68f465741827 | [
"MIT"
] | 1 | 2020-02-28T12:05:22.000Z | 2020-02-28T12:05:22.000Z | # -*- coding: utf-8 -*-
from .equipment import lt_equipment
from .spell import lt_spell_lxy, lt_spell_zle, lt_spell_lyr, lt_spell_an
from .monster import lt_monster
from .zone import lt_zone
# Mapping of lookup-table names to the documentation datasets imported above;
# this is the package's combined export.
doc_data = dict(
    lt_equipment=lt_equipment,
    lt_spell_lxy=lt_spell_lxy,
    lt_spell_zle=lt_spell_zle,
    lt_spell_lyr=lt_spell_lyr,
    lt_spell_an=lt_spell_an,
    lt_monster=lt_monster,
    lt_zone=lt_zone,
) | 25.75 | 72 | 0.764563 |
0582c3422fbd8d71835125e19cb23d6667d70ef1 | 3,157 | py | Python | nexrad/nexrad_tutorial.py | uva-hydroinformatics-lab/precipitation_processing | 54ef1673900b6bb2ee38daec3aac33748a8402cd | [
"MIT"
] | 1 | 2019-01-08T03:57:49.000Z | 2019-01-08T03:57:49.000Z | nexrad/nexrad_tutorial.py | uva-hydroinformatics/precipitation_processing | 54ef1673900b6bb2ee38daec3aac33748a8402cd | [
"MIT"
] | null | null | null | nexrad/nexrad_tutorial.py | uva-hydroinformatics/precipitation_processing | 54ef1673900b6bb2ee38daec3aac33748a8402cd | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy.ma as ma
import numpy as np
import pyart.graph
import tempfile
import pyart.io
import boto
# SECURITY: a live-looking AWS access key and secret are hard-coded here and
# committed to source control — they are exposed and should be revoked and
# rotated.  The noaa-nexrad-level2 bucket is public, so anonymous access may
# suffice; verify before changing behaviour.  (Note: this file is Python 2 —
# see the `print` statements below.)
s3conn = boto.connect_s3("AKIAISFFH4JXWC2HYFSA","9Az+XWYP9cbL3Sh641z/tbMuC1CSpjPjQTFkHj8D")
bucket = s3conn.get_bucket('noaa-nexrad-level2')
# One NEXRAD volume-scan archive for station KVWX on 2015-05-15 (from the key path).
s3key = bucket.get_key('2015/05/15/KVWX/KVWX20150515_080737_V06.gz')
print s3key
#localfile = tempfile.NamedTemporaryFile(mode='r')
# Download the archive to a local scratch file, then parse it with Py-ART.
localfile = open("sample_nexrad_data", "w")
s3key.get_contents_to_filename(localfile.name)
radar = pyart.io.read_nexrad_archive(localfile.name)
# display the lowest elevation scan data
display = pyart.graph.RadarDisplay(radar)
fig = plt.figure(figsize=(9, 12))
plots = [
# variable-name in pyart, display-name that we want, sweep-number of radar (0=lowest ref, 1=lowest velocity)
['reflectivity', 'Reflectivity (dBZ)', 0],
['differential_reflectivity', 'Zdr (dB)', 0],
['differential_phase', 'Phi_DP (deg)', 0],
['cross_correlation_ratio', 'Rho_HV', 0],
['velocity', 'Velocity (m/s)', 1],
['spectrum_width', 'Spectrum Width', 1]
]
plot_radar_images(plots)
refl_grid = radar.get_field(0, 'reflectivity')
print refl_grid[0]
rhohv_grid = radar.get_field(0, 'cross_correlation_ratio')
zdr_grid = radar.get_field(0, 'differential_reflectivity')
# apply rudimentary quality control
reflow = np.less(refl_grid, 20)
zdrhigh = np.greater(np.abs(zdr_grid), 2.3)
rhohvlow = np.less(rhohv_grid, 0.95)
notweather = np.logical_or(reflow, np.logical_or(zdrhigh, rhohvlow))
print notweather[0]
qcrefl_grid = ma.masked_where(notweather, refl_grid)
print qcrefl_grid[0]
qced = radar.extract_sweeps([0])
qced.add_field_like('reflectivity', 'reflectivityqc', qcrefl_grid)
display = pyart.graph.RadarDisplay(qced)
fig = plt.figure(figsize=(10, 5))
plots = [
# variable-name in pyart, display-name that we want, sweep-number of radar (0=lowest ref, 1=lowest velocity)
['reflectivity', 'Reflectivity (dBZ)', 0],
['reflectivityqc', 'QCed Reflectivity (dBZ)', 0],
]
for plotno, plot in enumerate(plots, start=1):
ax = fig.add_subplot(1, 2, plotno)
display.plot(plot[0], plot[2], ax=ax, title=plot[1],
colorbar_label='',
axislabels=('East-West distance from radar (km)' if plotno == 2 else '',
'North-South distance from radar (km)' if plotno == 1 else ''))
display.set_limits((-300, 300), (-300, 300), ax=ax)
plt.show()
| 36.287356 | 113 | 0.667089 |
05830297f5e87cadfedcaa83499c7c9b2affb118 | 3,746 | py | Python | ServeRest-APITesting-Python/Tests/test_cart.py | barbosamp/automacao-api-rest-jornada-learning | 9ceb57bc6f4d845c35a149d760775c10c3a38614 | [
"MIT"
] | 2 | 2020-11-20T18:40:32.000Z | 2021-04-20T23:13:13.000Z | ServeRest-APITesting-Python/Tests/test_cart.py | barbosamp/automacao-api-rest-jornada-learning | 9ceb57bc6f4d845c35a149d760775c10c3a38614 | [
"MIT"
] | 1 | 2020-10-22T16:16:40.000Z | 2020-10-22T16:16:40.000Z | ServeRest-APITesting-Python/Tests/test_cart.py | kpedron/automacao-api-rest-jornada-learning | 50ceaf9f43b03383cc65e92460b6b9a398a88e02 | [
"MIT"
] | 2 | 2020-10-16T02:37:20.000Z | 2020-10-31T13:54:46.000Z | import unittest
import requests
import json
import pytest
BASE_URL = "https://api.serverest.dev"
| 32.017094 | 108 | 0.591564 |
05836efbaef8a6e021845f469c0a620d95e4b977 | 372 | py | Python | MotorTorpedoQuotePT109/QuotePT109/migrations/0002_page_likes.py | alex-lake29/MotorTorpedoQuotePT-109 | 012d45e8a329022492acad86e6693abf0ba5b7d2 | [
"MIT"
] | null | null | null | MotorTorpedoQuotePT109/QuotePT109/migrations/0002_page_likes.py | alex-lake29/MotorTorpedoQuotePT-109 | 012d45e8a329022492acad86e6693abf0ba5b7d2 | [
"MIT"
] | null | null | null | MotorTorpedoQuotePT109/QuotePT109/migrations/0002_page_likes.py | alex-lake29/MotorTorpedoQuotePT-109 | 012d45e8a329022492acad86e6693abf0ba5b7d2 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2022-03-21 19:27
from django.db import migrations, models
| 19.578947 | 49 | 0.583333 |
0585d3beb2756c9c282cc3b3a1b2f3b72dff380a | 474 | py | Python | message_sender/migrations/0003_auto_20161124_1357.py | praekeltfoundation/seed-message-sender | d90ef4dc9fa248df97ca97f07569c6c70afcd1bd | [
"BSD-3-Clause"
] | 1 | 2017-01-03T08:53:18.000Z | 2017-01-03T08:53:18.000Z | message_sender/migrations/0003_auto_20161124_1357.py | praekelt/seed-message-sender | d90ef4dc9fa248df97ca97f07569c6c70afcd1bd | [
"BSD-3-Clause"
] | 45 | 2016-03-16T09:32:27.000Z | 2018-06-28T10:05:19.000Z | message_sender/migrations/0003_auto_20161124_1357.py | praekeltfoundation/seed-message-sender | d90ef4dc9fa248df97ca97f07569c6c70afcd1bd | [
"BSD-3-Clause"
] | 1 | 2016-09-28T09:32:05.000Z | 2016-09-28T09:32:05.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-11-24 13:57
from __future__ import unicode_literals
from django.db import migrations, models
| 24.947368 | 74 | 0.656118 |
058753427b8d12d1061f42dc505d9be81b5a17ea | 15,639 | py | Python | src/02_ppo.py | grzegorzwojdyga/trl | 1921e71a7465a43dcc135d97821aa8b03bfebf8c | [
"Apache-2.0"
] | null | null | null | src/02_ppo.py | grzegorzwojdyga/trl | 1921e71a7465a43dcc135d97821aa8b03bfebf8c | [
"Apache-2.0"
] | null | null | null | src/02_ppo.py | grzegorzwojdyga/trl | 1921e71a7465a43dcc135d97821aa8b03bfebf8c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""02-ppo.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1GXTVkhpJyQQsUWn6tGQAWPmstw9adAzj
# PPO for transformer models
> A Pytorch implementation of Proximal Policy Optimization for transfomer models.
This follows the language model approach proposed in paper ["Fine-Tuning Language Models from Human Preferences"](
https://arxiv.org/pdf/1909.08593.pdf) and is similar to the [original implementation](https://github.com/openai/lm-human-preferences). The two main differences are 1) the method is implemented in Pytorch and 2) works with the `transformer` library by Hugging Face.
"""
# default_exp ppo
# export
import numpy as np
import torch.nn.functional as F
from torch.optim import Adam
import torch
import collections
import time
import random
from trl.core import (logprobs_from_logits,
whiten,
clip_by_value,
entropy_from_logits,
flatten_dict,
average_torch_dicts,
stats_to_np,
stack_dicts,
add_suffix)
"""## KL-controllers
To ensure that the learned policy does not deviate to much from the original language model the KL divergence between the policy and a reference policy (the language model before PPO training) is used as an additional reward signal. Large KL-divergences are punished and staying close to the reference is rewarded.
Two controllers are presented in the paper: an adaptive log-space proportional controller and a fixed controller.
"""
# exports
# exports
# exports
"""## Tensor shapes and contents
Debugging tensor shapes and contents usually involves inserting a lot of print statements in the code. To avoid this in the future I add a list of the tensor shapes and contents for reference. If the tensors are sliced or reshaped I list the last shape.
| Name | Shape | Content |
|-------|---------|---------|
| `query` | `[batch_size, query_length]`| contains token ids of query|
| `response`| `[batch_size, response_length]`| contains token ids of responses|
| `scores`| `[batch_size]`| rewards of each query/response pair|
| `model_input`| `[batch_size, query_length + response_length]`| combined query and response tokens|
| `m_input`|`[forward_batch_size, query_length + response_length]`| small forward batch of model_input|
| `logits` | `[forward_batch_size, query_length + response_length, vocab_size]`| logits from model outputs|
| `ref_logits`|`[forward_batch_size, query_length + response_length, vocab_size]`| logits from ref_model outputs|
| `logprobs`| `[batch_size, response_length]`| log-probabilities of response tokens |
| `ref_logprobs`| `[batch_size, response_length]`| reference log-probabilities of response tokens |
| `rewards`| `[batch_size, response_length]`| the model rewards incl. kl-score for each token|
| `non_score_reward`| `[batch_size, response_length]`| the model kl-score for each token|
## Model output alignments
Some notes on output alignments, since I spent a considerable time debugging this. All model outputs are shifted by 1 to the model inputs. That means that the logits are shifted by one as well as values. For this reason the logits and values are always shifted one step to the left. This also means we don't have logits for the first input element and so we delete the first input token when calculating the softmax, since we don't have logits predictions. The same applies for the values and we shift them by index one to the left.
## KL-divergence
One question that came up during the implementation was "Why is the KL-divergence just the difference of the log-probs? Where is the probability in front of the log term?". The answer can be found in Sergey Levine's [lecture slides](http://rll.berkeley.edu/deeprlcourse/docs/week_3_lecture_1_dynamics_learning.pdf): To calculate the KL divergence we calculate the expected value of the log term. The probability usually in front of the log-term comes from that expected value and for a set of trajectories we can simply take the mean over the sampled trajectories.
""" | 47.390909 | 564 | 0.632585 |
0587d07321592ddb102cc4ed98640454fd0d67f7 | 4,589 | py | Python | RockPaperScissors.py | andreimaftei28/projects-on-JetBrainAcademy | 8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1 | [
"MIT"
] | null | null | null | RockPaperScissors.py | andreimaftei28/projects-on-JetBrainAcademy | 8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1 | [
"MIT"
] | null | null | null | RockPaperScissors.py | andreimaftei28/projects-on-JetBrainAcademy | 8c2b8ab7bab5757db94e9f0b6d55c33852f64ee1 | [
"MIT"
] | 3 | 2020-12-19T13:48:06.000Z | 2021-08-12T18:36:33.000Z | """Rock Paper Scisssors game using OOP"""
import random
from tempfile import mkstemp
from shutil import move, copymode
from os import fdopen, remove
#If rating.txt does not exist, it get's created here
fill = open("rating.txt", "a", encoding="utf-8")
fill.close()
#creating instance of the RockPaperScissors class
rps = RockPaperScissors()
rps.file()
rps.rewrite_file()
| 33.014388 | 123 | 0.504249 |
0588017972ca3ca8aebe2412eda69531f658e740 | 807 | py | Python | jarvis/accounts/tests/factories.py | Anubhav722/blahblah | 160698e06a02e671ac40de3113cd37d642e72e96 | [
"MIT"
] | 1 | 2019-01-03T06:10:04.000Z | 2019-01-03T06:10:04.000Z | jarvis/accounts/tests/factories.py | Anubhav722/blahblah | 160698e06a02e671ac40de3113cd37d642e72e96 | [
"MIT"
] | 1 | 2021-03-31T19:11:52.000Z | 2021-03-31T19:11:52.000Z | jarvis/accounts/tests/factories.py | Anubhav722/blahblah | 160698e06a02e671ac40de3113cd37d642e72e96 | [
"MIT"
] | null | null | null | from faker import Faker
from ..models import Client, UserProfile
from django.contrib.auth import get_user_model
from factory import django, SubFactory, fuzzy, Sequence, LazyAttribute
fake = Faker()
User = get_user_model()
| 21.810811 | 70 | 0.700124 |
0588430e94f2e77e31265668a8e628ff493b0db0 | 24 | py | Python | tests/components/devcon/__init__.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | tests/components/devcon/__init__.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | null | null | null | tests/components/devcon/__init__.py | pcaston/Open-Peer-Power | 81805d455c548e0f86b0f7fedc793b588b2afdfd | [
"Apache-2.0"
] | 1 | 2019-04-24T14:10:08.000Z | 2019-04-24T14:10:08.000Z | """Tests for Devcon."""
| 12 | 23 | 0.583333 |
05884cb8cc1e8c53f7f9b4339d31feb82c92a4b6 | 98 | py | Python | Code coach problems/Easy/Python/Isogram_Detector.py | Djivs/sololearn-code-solutions | 7727dd97f79863a88841548770481f6f2abdc7bf | [
"MIT"
] | 1 | 2020-07-27T07:32:57.000Z | 2020-07-27T07:32:57.000Z | Code coach problems/Easy/Python/Isogram_Detector.py | Djivs/sololearn-code-solutions | 7727dd97f79863a88841548770481f6f2abdc7bf | [
"MIT"
] | null | null | null | Code coach problems/Easy/Python/Isogram_Detector.py | Djivs/sololearn-code-solutions | 7727dd97f79863a88841548770481f6f2abdc7bf | [
"MIT"
] | 1 | 2020-11-07T12:45:21.000Z | 2020-11-07T12:45:21.000Z | a = input()
def is_isogram(word):
    """Return True when no character occurs more than once in *word*.

    The empty string is considered an isogram.
    """
    # A word is an isogram exactly when deduplicating it loses nothing.
    return len(set(word)) == len(word)


if __name__ == "__main__":
    # `a` holds the word read from stdin above.  The original scanning loop
    # never incremented its index, so it hung whenever the first character
    # was unique; the set-based check above replaces it.  The challenge
    # expects the lowercase literals "true"/"false" on stdout.
    print(str(is_isogram(a)).lower())
| 14 | 31 | 0.510204 |
0589b9d3ea2a64dcded6b8ab04bba1a44e732a41 | 2,813 | py | Python | src/cbc_binary_toolkit/schemas.py | carbonblack/cbc-binary-toolkit | 92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4 | [
"MIT"
] | 8 | 2020-05-12T18:08:52.000Z | 2021-12-27T06:11:00.000Z | src/cbc_binary_toolkit/schemas.py | carbonblack/cbc-binary-toolkit | 92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4 | [
"MIT"
] | 4 | 2020-05-13T16:07:49.000Z | 2020-06-30T18:47:14.000Z | src/cbc_binary_toolkit/schemas.py | carbonblack/cbc-binary-toolkit | 92c90b80e3c3e0b5c2473ef2086d2ce2fb651db4 | [
"MIT"
] | 3 | 2020-05-16T19:57:57.000Z | 2020-11-01T08:43:31.000Z | # -*- coding: utf-8 -*-
# *******************************************************
# Copyright (c) VMware, Inc. 2020-2021. All Rights Reserved.
# SPDX-License-Identifier: MIT
# *******************************************************
# *
# * DISCLAIMER. THIS PROGRAM IS PROVIDED TO YOU "AS IS" WITHOUT
# * WARRANTIES OR CONDITIONS OF ANY KIND, WHETHER ORAL OR WRITTEN,
# * EXPRESS OR IMPLIED. THE AUTHOR SPECIFICALLY DISCLAIMS ANY IMPLIED
# * WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY,
# * NON-INFRINGEMENT AND FITNESS FOR A PARTICULAR PURPOSE.
"""Schemas for Engine Results component"""
from schema import And, Or, Optional, Schema
# Schema of an IOC (indicator of compromise) carrying a severity score in
# 1..10.  NOTE(review): the lambda parameter below shadows the builtin
# ``type``; harmless here but worth renaming if the code is ever touched.
IOCv2SEVSchema = Schema(
    {
        "id": And(str, len),
        "match_type": And(str, lambda type: type in ["query", "equality", "regex"]),
        "values": And([str], len),
        Optional("field"): And(str, len),
        Optional("link"): And(str, len),
        "severity": And(int, lambda n: n > 0 and n < 11) # Needs stripped before sent to CBC
    }
)
# Schema of an IOC as sent to CBC: identical to IOCv2SEVSchema minus severity.
IOCv2Schema = Schema(
    {
        "id": And(str, len),
        "match_type": And(str, lambda type: type in ["query", "equality", "regex"]),
        "values": And([str], len),
        Optional("field"): And(str, len),
        Optional("link"): And(str, len)
    }
)
# Schema of a report grouping severity-less IOCs under a timestamp, title,
# description and overall severity (1..10).
ReportSchema = Schema(
    {
        "id": And(str, len),
        "timestamp": And(int, lambda n: n > 0),
        "title": And(str, len),
        "description": And(str, len),
        "severity": And(int, lambda n: n > 0 and n < 11),
        Optional("link"): str,
        Optional("tags"): [str],
        "iocs_v2": [IOCv2Schema],
        Optional("visibility"): str
    }
)
# Schema of an analysis engine's response for one binary, keyed by its
# 64-character SHA-256 hash.
EngineResponseSchema = Schema(
    {
        "iocs": [IOCv2SEVSchema],
        "engine_name": And(str, len),
        "binary_hash": And(str, lambda n: len(n) == 64),
        "success": bool
    }
)
# Schema of a binary's file metadata; most descriptive fields are nullable.
# Hash lengths pin sha256 to 64 hex chars and md5 to 32.
BinaryMetadataSchema = Schema(
    {
        "sha256": And(str, lambda n: len(n) == 64),
        "url": And(str, len),
        "architecture": [str],
        "available_file_size": Or(int, None),
        "charset_id": Or(int, None),
        "comments": Or(str, None),
        "company_name": Or(str, None),
        "copyright": Or(str, None),
        "file_available": bool,
        "file_description": Or(str, None),
        "file_size": Or(int, None),
        "file_version": Or(str, None),
        "internal_name": Or(str, None),
        "lang_id": Or(int, None),
        "md5": And(str, lambda n: len(n) == 32),
        "original_filename": Or(str, None),
        "os_type": Or(str, None),
        "private_build": Or(str, None),
        "product_description": Or(str, None),
        "product_name": Or(str, None),
        "product_version": Or(str, None),
        "special_build": Or(str, None),
        "trademark": Or(str, None)
    }
)
| 31.606742 | 93 | 0.542126 |
058a7c137ede0bf5c3a55a3ce41c3dfb2936df30 | 2,079 | py | Python | src/views/list.py | AllForJan/prizma-backend | fe866e74fa01e900cc7eab624bb5716a4bae056d | [
"MIT"
] | 2 | 2018-04-08T22:18:11.000Z | 2018-04-26T08:12:46.000Z | src/views/list.py | AllForJan/prizma-backend | fe866e74fa01e900cc7eab624bb5716a4bae056d | [
"MIT"
] | null | null | null | src/views/list.py | AllForJan/prizma-backend | fe866e74fa01e900cc7eab624bb5716a4bae056d | [
"MIT"
] | 2 | 2018-04-08T22:18:13.000Z | 2018-04-08T22:18:18.000Z | from elasticsearch import Elasticsearch
from flask import request, jsonify
from flask_restful import Resource
from db.manager import get_conn
import settings
conn = get_conn()
| 25.353659 | 89 | 0.457431 |
058ba31e5a3c9cecbd73c880b21c4ea42a75e1cf | 3,056 | py | Python | tests/test_cli.py | SlawekNowy/vpk | 3c9e175f48d8d56b5995387bcaa2c16ec62f9688 | [
"MIT"
] | 116 | 2015-08-29T23:24:28.000Z | 2022-03-04T19:35:52.000Z | tests/test_cli.py | SlawekNowy/vpk | 3c9e175f48d8d56b5995387bcaa2c16ec62f9688 | [
"MIT"
] | 27 | 2015-07-30T16:44:17.000Z | 2021-12-25T19:00:44.000Z | tests/test_cli.py | SlawekNowy/vpk | 3c9e175f48d8d56b5995387bcaa2c16ec62f9688 | [
"MIT"
] | 21 | 2015-08-03T23:57:25.000Z | 2021-12-21T10:29:59.000Z | import sys
import unittest
from contextlib import contextmanager
try:
from io import StringIO
except ImportError:
from StringIO import StringIO
from vpk import cli
| 35.534884 | 105 | 0.634162 |
058c253ae43e29116887b045dfd233f62ef4ccf0 | 218 | py | Python | cpab/cpaNd/model/__init__.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | [
"MIT"
] | 17 | 2016-03-16T21:35:36.000Z | 2021-11-11T04:16:21.000Z | cpab/cpaNd/model/__init__.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | [
"MIT"
] | null | null | null | cpab/cpaNd/model/__init__.py | freifeld/cpabDiffeo | 22df6cdbd7111b9ae3e7f1c0e31ff85e92d281a6 | [
"MIT"
] | 4 | 2016-08-12T23:02:09.000Z | 2019-03-14T18:20:36.000Z | from _LogLikelihood import LogLikelihood
from _LogPrior import LogPrior
#from _ScaleDependentLogLikelihoodGaussian import ScaleDependentLogLikelihoodGaussian
from _ScaleDependentLogPrior import ScaleDependentLogPrior
| 36.333333 | 85 | 0.917431 |
058c66b876771cc78eea5a8cc9478dd196dd972b | 1,768 | py | Python | authentic/authentic.settings.py | GuillaumeGautierLA/publik | 5ef6c19e9b91bf066717b4bc9df2540f4c2e1bc2 | [
"MIT"
] | null | null | null | authentic/authentic.settings.py | GuillaumeGautierLA/publik | 5ef6c19e9b91bf066717b4bc9df2540f4c2e1bc2 | [
"MIT"
] | 13 | 2019-12-21T09:48:42.000Z | 2020-07-01T19:12:28.000Z | authentic/authentic.settings.py | GuillaumeGautierLA/publik | 5ef6c19e9b91bf066717b4bc9df2540f4c2e1bc2 | [
"MIT"
] | 4 | 2020-01-06T16:00:00.000Z | 2021-01-08T14:46:29.000Z | # To pass env vars to Python scripts run by Publik in services which remove custom env vars:
# https://unix.stackexchange.com/questions/44370/how-to-make-unix-service-see-environment-variables
# So we hardcode the values in the file below when the container starts
import sys
sys.path.insert(0, "/home")
# Star-import pulls in the hardcoded deployment values (DB_*, RABBITMQ_*,
# SMTP_*, ...) — presumably written to /home/pyenv.py when the container
# starts, per the note above.
from pyenv import *
# Databases
DATABASES['default']['ENGINE'] = 'tenant_schemas.postgresql_backend'
DATABASES['default']['NAME'] = DB_AUTHENTIC_NAME
DATABASES['default']['USER'] = DB_AUTHENTIC_USER
DATABASES['default']['PASSWORD'] = DB_AUTHENTIC_PASS
# Host names "db"/"rabbitmq" look like container-network service names —
# confirm against the compose/orchestration config.
DATABASES['default']['HOST'] = 'db'
DATABASES['default']['PORT'] = DB_PORT
# Message-broker URL assembled from the RabbitMQ credentials provided by
# pyenv (BROKER_URL is Celery's conventional setting name).
BROKER_URL = 'amqp://{user}:{password}@rabbitmq:{port}//'.format(
    user=RABBITMQ_DEFAULT_USER,
    password=RABBITMQ_DEFAULT_PASS,
    port=RABBITMQ_PORT,
)
# Zone
LANGUAGE_CODE = 'fr-fr'
TIME_ZONE = 'Europe/Paris'
# Email configuration
ADMINS = (
(ERROR_MAIL_AUTHOR, ERROR_MAIL_ADDR),
)
EMAIL_SUBJECT_PREFIX = '[authentic] '
SERVER_EMAIL = ERROR_MAIL_ADDR
DEFAULT_FROM_EMAIL = ERROR_MAIL_ADDR
# SMTP configuration
EMAIL_HOST = SMTP_HOST
EMAIL_HOST_USER = SMTP_USER
EMAIL_HOST_PASSWORD = SMTP_PASS
EMAIL_PORT = SMTP_PORT
# HTTPS Security
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
# Idp
# SAML 2.0 IDP
#A2_IDP_SAML2_ENABLE = False
# CAS 1.0 / 2.0 IDP
#A2_IDP_CAS_ENABLE = False
# OpenID 1.0 / 2.0 IDP
#A2_IDP_OPENID_ENABLE = False
# Authentifications
#A2_AUTH_PASSWORD_ENABLE = True
#A2_SSLAUTH_ENABLE = False
CACHES = {
'default': {
'BACKEND': 'hobo.multitenant.cache.TenantCache',
'REAL_BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
# Role provisionning via local RabbitMQ
HOBO_ROLE_EXPORT = True
LOGGING = LOGGING_FROM_PYENV | 26.38806 | 99 | 0.750566 |
058f090a9e7707433a3105b87e3e591439fed2ac | 8,377 | py | Python | code/train/train_model.py | 96jhwei/Genetic-U-Net | 25116f01afcf8ed4386cd0fc258da15e1c982cb5 | [
"MIT"
] | 14 | 2021-09-09T11:22:17.000Z | 2022-03-14T10:06:36.000Z | code/train/train_model.py | 96jhwei/Genetic-U-Net | 25116f01afcf8ed4386cd0fc258da15e1c982cb5 | [
"MIT"
] | 1 | 2021-11-24T10:30:36.000Z | 2021-11-24T10:30:36.000Z | code/train/train_model.py | 96jhwei/Genetic-U-Net | 25116f01afcf8ed4386cd0fc258da15e1c982cb5 | [
"MIT"
] | 5 | 2021-11-02T09:29:49.000Z | 2022-03-25T09:44:25.000Z | import numpy
from torch.utils.data import DataLoader
from tqdm import tqdm
from loss.FocalLoss import FocalLossForSigmoid
import torch
from metrics.calculate_metrics import calculate_metrics
import shutil
from metrics.average_meter import AverageMeter
import torch.multiprocessing
from torch.nn.utils.clip_grad import clip_grad_norm_
import os
import sys
import numpy as np
import random
from thop import profile
from .util.get_optimizer import get_optimizer
from dataset.util.get_datasets import get_datasets
import multiprocessing as mp
sys.path.append('../')
| 45.037634 | 133 | 0.493613 |
059016200f557d7398f34c3a96008e7fee9686c3 | 961 | py | Python | dataset/check_for_duplicates.py | mathildor/TF-SegNet | dff209c8174b5e8fa77b4c2644298f6903a09445 | [
"MIT"
] | 98 | 2017-11-06T15:55:22.000Z | 2022-03-22T11:29:47.000Z | dataset/check_for_duplicates.py | yingz9/TF-SegNet | dff209c8174b5e8fa77b4c2644298f6903a09445 | [
"MIT"
] | 8 | 2017-11-15T06:05:41.000Z | 2019-06-19T06:53:03.000Z | dataset/check_for_duplicates.py | yingz9/TF-SegNet | dff209c8174b5e8fa77b4c2644298f6903a09445 | [
"MIT"
] | 34 | 2017-11-06T03:05:54.000Z | 2022-01-25T16:00:09.000Z |
import os
from PIL import Image
import numpy
from PIL import ImageChops
""" TESTED:
No duplicates in:
- within validation images first part (stopped because of training - took to much time)
"""
# Directory holding the validation images that are checked for duplicates.
image_path = "../../IR_images/combined_dataset/val_images/images"
images = sorted(os.listdir(image_path))

# Compare every unordered pair exactly once.  The original nested full loops
# compared each image with itself (so every file was reported as a
# "duplicate") and compared each pair twice; slicing from index + 1 fixes
# both, and opening image1 in the outer loop avoids reopening it O(n^2) times.
for index, image_file_1 in enumerate(images):
    image1 = Image.open(os.path.join(image_path, image_file_1))
    for image_file_2 in images[index + 1:]:
        image2 = Image.open(os.path.join(image_path, image_file_2))
        if image1.size != image2.size or image1.mode != image2.mode:
            # ImageChops.difference() raises ValueError on mismatched
            # size/mode; differently-shaped images cannot be duplicates.
            continue
        # getbbox() is None means the pixel-wise difference is all zero,
        # i.e. the two images are identical.
        if ImageChops.difference(image1, image2).getbbox() is None:
            print("Same image!!!")
            print(image_file_1)
            print(image_file_2)
| 26.694444 | 95 | 0.648283 |
059038232d1c85e48c2eed487377d93d1ad944f4 | 1,983 | py | Python | _posts/import.py | suepeng/suepeng.github.io | 844e0063e0604a77886aad5eaea588c4df2792a9 | [
"MIT"
] | null | null | null | _posts/import.py | suepeng/suepeng.github.io | 844e0063e0604a77886aad5eaea588c4df2792a9 | [
"MIT"
] | null | null | null | _posts/import.py | suepeng/suepeng.github.io | 844e0063e0604a77886aad5eaea588c4df2792a9 | [
"MIT"
] | null | null | null | import os, glob
from dateutil import parser
from bs4 import BeautifulSoup
ext = lambda line, cap: line.replace("\s", "").replace(cap, "").strip()
#------------------------------
# Main
#------------------------------
if __name__ == "__main__":
    # Number of posts successfully written out.
    posts = 0
    # Accumulates the raw lines of the post currently being parsed.
    doc = []
    # NOTE(review): the handle from open() is never closed, and `write_post`
    # is not defined in the code shown here — confirm it exists elsewhere in
    # this file.  `meta` is reset below but never read in this block.
    for idx, line in enumerate(open("raw.txt").readlines()):
        # A "TITLE:" line marks the start of the next post: flush the one
        # collected so far before starting fresh.
        if len(doc) and ('TITLE:' in line):
            posts += write_post(doc)
            doc, meta = [], {}
        doc.append(line)
    # latest post
    posts += write_post(doc)
    print(f"converted {posts} posts with {idx} lines")
| 31.983871 | 86 | 0.474534 |
0594a8c465e7b18e9888443f247ba1cf8ff7c9cf | 702 | py | Python | layers/reshape.py | WJGiles/Dorknet | 1582937e843b1a911334291c25ea415fb56e5ccc | [
"MIT"
] | null | null | null | layers/reshape.py | WJGiles/Dorknet | 1582937e843b1a911334291c25ea415fb56e5ccc | [
"MIT"
] | null | null | null | layers/reshape.py | WJGiles/Dorknet | 1582937e843b1a911334291c25ea415fb56e5ccc | [
"MIT"
] | 1 | 2020-07-27T17:03:22.000Z | 2020-07-27T17:03:22.000Z | import numpy as np
from .layer import Layer | 33.428571 | 85 | 0.564103 |
05975def902880bc29f1fd9e4b623039913f810f | 4,003 | py | Python | src/upload/upload.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | [
"MIT"
] | 2 | 2020-07-22T14:25:00.000Z | 2021-09-20T18:29:08.000Z | src/upload/upload.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | [
"MIT"
] | 6 | 2019-09-24T14:09:42.000Z | 2021-06-07T15:27:55.000Z | src/upload/upload.py | alliance-genome/agr_ferret | e2ccef16308b1a8a6f1b2a3dde6e29e0530da721 | [
"MIT"
] | 3 | 2020-12-19T08:57:51.000Z | 2020-12-19T08:58:09.000Z | # Functions for use in downloading files.
import logging, os, requests, json, hashlib, urllib
from requests_toolbelt.utils import dump
from retry import retry
logger = logging.getLogger(__name__)
| 51.320513 | 131 | 0.673995 |
0597da213baf4860aef1103fe9f6eaf312ad6be5 | 9,246 | py | Python | Klipps/convert.py | rafalkaron/KindleClippingsBeautifier | 10d79da2a073f8867041a2520d7a234937237243 | [
"MIT"
] | 1 | 2020-05-25T11:30:54.000Z | 2020-05-25T11:30:54.000Z | Klipps/convert.py | rafalkaron/KindleClippingsBeautifier | 10d79da2a073f8867041a2520d7a234937237243 | [
"MIT"
] | null | null | null | Klipps/convert.py | rafalkaron/KindleClippingsBeautifier | 10d79da2a073f8867041a2520d7a234937237243 | [
"MIT"
] | null | null | null | # coding: utf-8
import re
import datetime
from .feed import read_file
__author__ = "Rafa Karo <rafalkaron@gmail.com>"
def clipps_str_to_html_str(clipps_str):
"""Return a string that contains the converted \"Kindle Clippings.txt file\" to HTML."""
# ADD ELEMENTS (SVG favicon encoded with: https://yoksel.github.io/url-encoder/)
pre_elements = r"""<!DOCTYPE html>
<html>
<head>
<title>Kindle Clippings</title>
<meta charset="utf-8">
<link href="data:image/svg+xml,%3C%3Fxml version='1.0' encoding='UTF-8' standalone='no'%3F%3E%3C!-- Created with Inkscape (http://www.inkscape.org/) --%3E%3Csvg xmlns:dc='http://purl.org/dc/elements/1.1/' xmlns:cc='http://creativecommons.org/ns%23' xmlns:rdf='http://www.w3.org/1999/02/22-rdf-syntax-ns%23' xmlns:svg='http://www.w3.org/2000/svg' xmlns='http://www.w3.org/2000/svg' xmlns:sodipodi='http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd' xmlns:inkscape='http://www.inkscape.org/namespaces/inkscape' width='1000' height='1000' viewBox='0 0 264.58335 264.58335' version='1.1' id='svg8' inkscape:version='0.92.4 (5da689c313, 2019-01-14)' sodipodi:docname='klipps3.svg' inkscape:export-filename='C:%5CUsers%5Crafal%5CDesktop%5Cklipps3.png' inkscape:export-xdpi='72.000008' inkscape:export-ydpi='72.000008'%3E%3Ctitle id='title3713'%3EKlipps%3C/title%3E%3Cdefs id='defs2' /%3E%3Csodipodi:namedview id='base' pagecolor='%23515151' bordercolor='%23000000' borderopacity='1' inkscape:pageopacity='0.20784314' inkscape:pageshadow='2' inkscape:zoom='0.25' inkscape:cx='30.072603' inkscape:cy='582.33116' inkscape:document-units='px' inkscape:current-layer='layer1' showgrid='false' inkscape:window-width='1842' inkscape:window-height='1057' inkscape:window-x='70' inkscape:window-y='-8' inkscape:window-maximized='1' units='px' inkscape:showpageshadow='false' showborder='true' inkscape:pagecheckerboard='false' showguides='true' inkscape:guide-bbox='true'%3E%3Csodipodi:guide position='132.29167,132.29167' orientation='0,1' id='guide3724' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='132.29167,132.29167' orientation='1,0' id='guide3726' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='79.375005,79.375005' orientation='-0.70710678,0.70710678' id='guide3748' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide 
position='132.29167,132.29167' orientation='0.70710678,0.70710678' id='guide3750' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='26.458327,150.45027' orientation='-0.70710678,0.70710678' id='guide3776' inkscape:locked='false' /%3E%3Csodipodi:guide position='150.45027,26.458323' orientation='-0.70710678,0.70710678' id='guide3778' inkscape:locked='false' /%3E%3Csodipodi:guide position='114.13307,238.12501' orientation='0.70710678,0.70710678' id='guide3780' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='26.458335,150.45028' orientation='0.70710678,0.70710678' id='guide3782' inkscape:locked='false' inkscape:label='' inkscape:color='rgb(0,0,255)' /%3E%3Csodipodi:guide position='150.45028,26.458334' orientation='1,0' id='guide3801' inkscape:locked='false' /%3E%3Csodipodi:guide position='238.12501,114.13307' orientation='0,1' id='guide3803' inkscape:locked='false' /%3E%3Csodipodi:guide position='132.29167,114.13307' orientation='-0.70710678,0.70710678' id='guide3806' inkscape:locked='false' /%3E%3Csodipodi:guide position='26.458336,150.45028' orientation='0,1' id='guide3826' inkscape:locked='false' /%3E%3C/sodipodi:namedview%3E%3Cmetadata id='metadata5'%3E%3Crdf:RDF%3E%3Ccc:Work rdf:about=''%3E%3Cdc:format%3Eimage/svg+xml%3C/dc:format%3E%3Cdc:type rdf:resource='http://purl.org/dc/dcmitype/StillImage' /%3E%3Cdc:title%3EKlipps%3C/dc:title%3E%3Cdc:creator%3E%3Ccc:Agent%3E%3Cdc:title%3ERafa Karo%3C/dc:title%3E%3C/cc:Agent%3E%3C/dc:creator%3E%3C/cc:Work%3E%3C/rdf:RDF%3E%3C/metadata%3E%3Cg inkscape:groupmode='layer' id='layer3' inkscape:label='Background' /%3E%3Cg inkscape:groupmode='layer' id='layer2' inkscape:label='Filling'%3E%3Ccircle style='fill:%23ffffff;stroke-width:0.22826612' id='path3736-9' cx='132.29167' cy='132.29169' r='114.13306' /%3E%3C/g%3E%3Cg inkscape:label='Icon' inkscape:groupmode='layer' id='layer1' transform='translate(0,-32.416632)'%3E%3Cpath 
style='fill:%23e63946;stroke-width:1.32083833;fill-opacity:1' d='M 431.36914 100 L 100 431.36914 L 568.63086 900 L 568.63086 568.63086 L 900 568.63086 L 431.36914 100 z ' transform='matrix(0.26458335,0,0,0.26458335,0,32.416632)' id='rect3770' /%3E%3Cpath style='fill:%231d3557;fill-opacity:1;stroke-width:1.32083833' d='M 500 500 L 500 831.36914 L 568.63086 900 L 568.63086 568.63086 L 900 568.63086 L 831.36914 500 L 500 500 z ' transform='matrix(0.26458335,0,0,0.26458335,0,32.416632)' id='rect3770-4' /%3E%3C/g%3E%3C/svg%3E%0A" rel='icon' type='image/svg'/>
</head>
<body>"""
heading = "<h1>Kindle Clippings</h1>\n<h2>"
footer = f"<footer>Generated on {datetime.datetime.now().strftime('%B %d, %Y')} at {datetime.datetime.now().strftime('%I:%M %p')} with <a target=\"_blank\" href=\"https://github.com/rafalkaron/Klipps\">Klipps</a></footer>"
post_elements = "</body>\n</html>"
html_str = "\n".join((pre_elements, heading, clipps_str, footer, post_elements))
# SEARCH AND REPLACE
html_str = re.sub(r"\n\n", "\n", html_str) # Removes empty lines
html_str = re.sub(r"==========", "<div class=\"entry\">\n<h2>", html_str) # Replaces Kindle entryies markup with the "entry" class and opens headers 2
html_str = re.sub(r"- .* \| ", "###timestamp### ", html_str) # Removes redundant information from timestamps and adds a tag that is used to optimize RE in the next lines
for added_on in re.findall(r"^###timestamp### .*", html_str, re.MULTILINE): # Shortens and wraps timestamps || MAKE THIS GENERIC FOR OTHER LANGUAGES
added_on_new = re.sub(r"###timestamp###", "", added_on) # Removes the ###timestamp### tag
added_on_new = re.sub(r":\d\d$", "", added_on_new, re.MULTILINE) # [Optional] Removes seconds in 24h timestamps
added_on_new = re.sub(r":\d\d PM$", " PM", added_on_new, re.MULTILINE) # [Optional] Removes seconds in 12h PM timestamps
added_on_new = re.sub(r":\d\d AM$", " AM", added_on_new, re.MULTILINE) # [Optional] Removes seconds in 12h AM timestamps
added_on_new = re.sub(r"^ Added on ", "", added_on_new) # [Optional] Removes the "Added on" timestamp text
added_on_new = f"<div class=\"timestamp\">{added_on_new}</div>\n<blockquote>" # Wraps timestamps in timestamp divs and opens a blockquote
html_str = re.sub(added_on, added_on_new, html_str)
html_str = re.sub(r"<div class=\"timestamp\">", "</h2>\n<div class=\"timestamp\">", html_str) # Closes headers 2 before timestamps
html_str = re.sub(r"<div class=\"entry\">\n<h2>\n<footer>", "</blockquote>\n</div>\n<footer>", html_str) # Removes redundant entry divs and headers 2 before the footer
html_str = re.sub("<div class=\"entry\">\n<h2>", "</blockquote>\n</div>\n<div class=\"entry\">\n<h2>", html_str) # Closes blockquote and entry div before opening anothe entry div
html_str = re.sub(r"</h1>\n<h2>", "</h1>\n<div class=\"entry\">\n<h2>", html_str) # Opens the first element div after
return html_str
def default_style_html_str(html_str):
"""Return a string that contains the \"Kindle Clippings.txt file\" converted to HTML with a default embedded CSS style."""
html_str = re.sub("<h1>", "<div class=\"frontpage\"><h1>", html_str)
html_str = re.sub("</h1>", "</h1>\n<div class=\"generator\"><p>Generated with Klipps</p></div>\n</div>", html_str)
html_str = re.sub("/>\n</head>", """/>
<style>
*{
font-family: Helvetica, Arial, sans-serif;
font-size: 100%;
margin: 0px;
}
.frontpage{
background-color: #1D3557;
height: 100vh;
}
h1{
font-size: 10vw;
text-align: center;
padding-top: 15vh;
padding-bottom: 20vh;
padding-left: 1vh;
padding-right: 1vh;
color: #F1FAEE;
}
.generator{
font-size: 3vw;
text-align: center;
color: #F1FAEE;
}
.entry{
padding: 4rem 8vw 4rem 8vw;
}
.entry:nth-child(odd){
background: #F1FAEE;
}
.entry:nth-child(even){
background: rgb(228, 235, 226);
}
h2{
font-size: 2.6rem;
color: #1D3557;
}
.timestamp{
font-size: 1.2rem;
font-weight: bold;
padding-bottom: 1rem;
color: #1D3557;
}
blockquote{
font-size: 1.5rem;
text-align: justify;
color: #1D3557;
}
footer{
font-size: 1.5rem;
padding: 2rem 1rem 2rem 1rem;
background-color: #1D3557;
color: #F1FAEE;
text-align: center;
}
a{
color: #E63946;
font-weight: bolder;
text-decoration: none;
}
</style>
</head>""", html_str)
return html_str
def custom_style_html_str(css_filepath, html_str):
"""Return a string that contains the \"Kindle Clippings.txt file\" converted to HTML with a custom embedded CSS style."""
style = read_file(css_filepath)
html_str = re.sub("/>\n</head>", f"/>\n<style>\n{style}\n</style>\n</head>", html_str)
return html_str | 72.234375 | 4,532 | 0.701709 |
05983c5f355ffca2350a3a76badc57638f8db8e8 | 1,308 | py | Python | rlpy/Domains/__init__.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | [
"BSD-3-Clause"
] | 265 | 2015-01-21T08:11:12.000Z | 2021-12-21T08:06:21.000Z | rlpy/Domains/__init__.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | [
"BSD-3-Clause"
] | 22 | 2015-03-26T17:41:43.000Z | 2019-12-19T08:47:36.000Z | rlpy/Domains/__init__.py | okkhoy/rlpy | af25d2011fff1d61cb7c5cc8992549808f0c6103 | [
"BSD-3-Clause"
] | 85 | 2015-02-18T00:25:15.000Z | 2021-11-15T11:10:00.000Z | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
#from Domain import Domain
from future import standard_library
standard_library.install_aliases()
from .HelicopterHover import HelicopterHover, HelicopterHoverExtended
from .HIVTreatment import HIVTreatment
from .PuddleWorld import PuddleWorld
from .GridWorld import GridWorld
from .BlocksWorld import BlocksWorld
from .MountainCar import MountainCar
from .ChainMDP import ChainMDP
from .SystemAdministrator import SystemAdministrator
from .PST import PST
from .Pacman import Pacman
from .IntruderMonitoring import IntruderMonitoring
from .FiftyChain import FiftyChain
from .FlipBoard import FlipBoard
from .RCCar import RCCar
from .Acrobot import Acrobot, AcrobotLegacy
from .Bicycle import BicycleBalancing, BicycleRiding
from .Swimmer import Swimmer
from .Pinball import Pinball
from .FiniteTrackCartPole import (FiniteCartPoleBalance,
FiniteCartPoleBalanceOriginal,
FiniteCartPoleBalanceModern,
FiniteCartPoleSwingUp,
FiniteCartPoleSwingUpFriction)
from .InfiniteTrackCartPole import InfCartPoleBalance, InfCartPoleSwingUp
| 40.875 | 73 | 0.786697 |
0598570bbddd550266f613922fec2e9624969d88 | 365 | py | Python | mercury_engine_data_structures/adapters/enum_adapter.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | [
"MIT"
] | 2 | 2021-06-18T16:47:00.000Z | 2021-07-06T22:36:32.000Z | mercury_engine_data_structures/adapters/enum_adapter.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | [
"MIT"
] | 1 | 2021-10-01T20:26:01.000Z | 2021-10-01T20:26:01.000Z | mercury_engine_data_structures/adapters/enum_adapter.py | Antidote/mercury-engine-data-structures | d8e8ba1eacaa37f4fc76b78bb208ffc2cde61f64 | [
"MIT"
] | 5 | 2021-08-23T17:01:01.000Z | 2021-11-20T03:57:14.000Z | from construct import Adapter, Int32ub, Enum
| 26.071429 | 51 | 0.682192 |
05995048419b1dbd1bd29b14c238cf37023f8b47 | 2,740 | py | Python | lib/strider/virt/vagrantbox.py | jcftang/strider | 432a68eb1303541b6d955bd6ecf7439d1f9b0d48 | [
"Apache-2.0"
] | 16 | 2016-02-10T13:06:50.000Z | 2021-02-28T06:21:16.000Z | lib/strider/virt/vagrantbox.py | jcftang/strider | 432a68eb1303541b6d955bd6ecf7439d1f9b0d48 | [
"Apache-2.0"
] | 4 | 2016-02-20T16:33:40.000Z | 2016-05-28T10:46:06.000Z | lib/strider/virt/vagrantbox.py | jcftang/strider | 432a68eb1303541b6d955bd6ecf7439d1f9b0d48 | [
"Apache-2.0"
] | 1 | 2016-09-01T11:06:56.000Z | 2016-09-01T11:06:56.000Z | import vagrant
import os
from subprocess import CalledProcessError
from strider.common.instance_data import InstanceData, SshData
import strider.common.logger
| 31.860465 | 75 | 0.550365 |
552672dd092eb5fb84094dd67c6ad2cf6eb3df04 | 4,739 | py | Python | python/aces/lutFormats/tests/UnitTestsLutFormats.py | aforsythe/clf | 47ba8bee31bd13e4f23632c7b0a38293be31c019 | [
"AMPAS"
] | 43 | 2015-07-09T23:13:41.000Z | 2022-02-04T15:45:42.000Z | python/aces/lutFormats/tests/UnitTestsLutFormats.py | aforsythe/clf | 47ba8bee31bd13e4f23632c7b0a38293be31c019 | [
"AMPAS"
] | 1 | 2019-09-18T14:30:39.000Z | 2019-09-18T14:30:39.000Z | python/aces/lutFormats/tests/UnitTestsLutFormats.py | aforsythe/clf | 47ba8bee31bd13e4f23632c7b0a38293be31c019 | [
"AMPAS"
] | 9 | 2015-07-10T15:26:55.000Z | 2020-08-20T11:52:47.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The Academy / ASC Common LUT Format Sample Implementations are provided by the
Academy under the following terms and conditions:
Copyright 2015 Academy of Motion Picture Arts and Sciences ("A.M.P.A.S.").
Portions contributed by others as indicated. All rights reserved.
A worldwide, royalty-free, non-exclusive right to copy, modify, create
derivatives, and use, in source and binary forms, is hereby granted, subject to
acceptance of this license. Performance of any of the aforementioned acts
indicates acceptance to be bound by the following terms and conditions:
* Copies of source code, in whole or in part, must retain the above copyright
notice, this list of conditions and the Disclaimer of Warranty.
* Use in binary form must retain the above copyright notice, this list of
conditions and the Disclaimer of Warranty in the documentation and/or other
materials provided with the distribution.
* Nothing in this license shall be deemed to grant any rights to trademarks,
copyrights, patents, trade secrets or any other intellectual property of
A.M.P.A.S. or any contributors, except as expressly stated herein.
* Neither the name "A.M.P.A.S." nor the name of any other contributors to this
software may be used to endorse or promote products derivative of or based on
this software without express prior written permission of A.M.P.A.S. or the
contributors, as appropriate.
This license shall be construed pursuant to the laws of the State of California,
and any disputes related thereto shall be subject to the jurisdiction of the
courts therein.
Disclaimer of Warranty: THIS SOFTWARE IS PROVIDED BY A.M.P.A.S. AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL A.M.P.A.S., OR ANY
CONTRIBUTORS OR DISTRIBUTORS, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, RESITUTIONARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
WITHOUT LIMITING THE GENERALITY OF THE FOREGOING, THE ACADEMY SPECIFICALLY
DISCLAIMS ANY REPRESENTATIONS OR WARRANTIES WHATSOEVER RELATED TO PATENT OR
OTHER INTELLECTUAL PROPERTY RIGHTS IN THE ACES CONTAINER REFERENCE
IMPLEMENTATION, OR APPLICATIONS THEREOF, HELD BY PARTIES OTHER THAN A.M.P.A.S.,
WHETHER DISCLOSED OR UNDISCLOSED.
"""
__author__ = 'Haarm-Pieter Duiker'
__copyright__ = 'Copyright (C) 2015 Academy of Motion Picture Arts and Sciences'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = 'acessupport@oscars.org'
__status__ = 'Production'
__major_version__ = '1'
__minor_version__ = '0'
__change_version__ = '0'
__version__ = '.'.join((__major_version__,
__minor_version__,
__change_version__))
'''
Simple tests of the lutFormats module
Should be turned into a proper set of unit tests.
'''
import os
import sys
# Make sure we can import lutFormats
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import lutFormats
tmpDir = "/tmp"
#aces1OCIOConfirDir = "/work/client/academy/ocio/hpd/OpenColorIO-Configs/aces_1.0.0"
aces1OCIOConfirDir = "/path/to/OpenColorIO-Configs/aces_1.0.0"
spiPath = "%s/luts/ACEScc_to_linear.spi1d" % aces1OCIOConfirDir
cspPath = "%s/baked/maya/sRGB (D60 sim.) for ACEScg Maya.csp" % aces1OCIOConfirDir
spipl = lutFormats.Registry.read( spiPath )
csppl = lutFormats.Registry.read( cspPath )
newSpiPath = "%s/ACEScc_to_linear_new.spi1d" % tmpDir
lutFormats.Registry.write(spipl, newSpiPath)
newSpi3dPath = "%s/srgb_new.spi3d" % tmpDir
lutFormats.Registry.write(csppl, newSpi3dPath, lutDataFormat="3D")
newCspPath = "%s/srgb_new_3d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCspPath, lutDataFormat="3D")
newCsp1DPath = "%s/srgb_new_1d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCsp1DPath)
newCsp1D3DPath = "%s/srgb_new_1d3d.csp" % tmpDir
lutFormats.Registry.write(csppl, newCsp1D3DPath, lutDataFormat="1D_3D_1D")
newClf1D3DPath = "%s/srgb_new_1d3d.clf" % tmpDir
lutFormats.Registry.write(csppl, newClf1D3DPath, lutDataFormat="1D_3D_1D")
newCtl1DPath = "%s/srgb_new_1d.ctl" % tmpDir
lutFormats.Registry.write(csppl, newCtl1DPath)
newCtl1D3DPath = "%s/srgb_new_3d.ctl" % tmpDir
lutFormats.Registry.write(csppl, newCtl1D3DPath, lutDataFormat="3D")
| 40.853448 | 84 | 0.779067 |
552683d69b93369ce9f2b67f499349c272254782 | 10,177 | py | Python | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | null | null | null | edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/Hazard_CFW_MultiPil.py | srcarter3/awips2 | 37f31f5e88516b9fd576eaa49d43bfb762e1d174 | [
"Apache-2.0"
] | 1 | 2021-10-30T00:03:05.000Z | 2021-10-30T00:03:05.000Z | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
##
# This is a base file that is not intended to be overridden.
##
########################################################################
# Hazard_CFW.py
#
#
##########################################################################
import GenericHazards
import string, time, re, os, types, copy
| 40.384921 | 105 | 0.584455 |
552924a7e504599cbe9d1cfc08f6a123e6773a8c | 880 | py | Python | setup.py | hubmapconsortium/python-sdk | 17eaec434f1f65190a6e53d0055fe382841222de | [
"MIT"
] | null | null | null | setup.py | hubmapconsortium/python-sdk | 17eaec434f1f65190a6e53d0055fe382841222de | [
"MIT"
] | 8 | 2021-11-09T13:35:48.000Z | 2022-03-04T15:56:52.000Z | setup.py | hubmapconsortium/python-sdk | 17eaec434f1f65190a6e53d0055fe382841222de | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="hubmap-sdk",
version="1.0.1",
author="Hubmap",
author_email="api-developers@hubmapconsortium.org",
description="Python Client Libary to use HuBMAP web services",
long_description=long_description,
long_description_content_type="text/markdown",
packages=['hubmap_sdk'],
keywords=[
"HuBMAP Sdk",
"python"
],
install_requires=[
"certifi==2021.10.8",
"chardet==4.0.0",
"idna==2.10",
"requests==2.25.1",
"urllib3==1.26.7"
],
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
python_requires='>=3.6'
)
| 25.142857 | 66 | 0.606818 |