hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9d68975702955b40381a81581441db52c47f67ab | 43 | py | Python | src/network/__init__.py | ThomasRanvier/faces_recognition_nn | b9177134169b6e05d9d9b6ea3206628bdb127a5e | [
"MIT"
] | null | null | null | src/network/__init__.py | ThomasRanvier/faces_recognition_nn | b9177134169b6e05d9d9b6ea3206628bdb127a5e | [
"MIT"
] | null | null | null | src/network/__init__.py | ThomasRanvier/faces_recognition_nn | b9177134169b6e05d9d9b6ea3206628bdb127a5e | [
"MIT"
] | null | null | null | from .neural_network import Neural_network
| 21.5 | 42 | 0.883721 |
9d6b7d2817a9a11d4f368ca09bd16da81be04b5f | 1,496 | py | Python | rides/forms.py | andrenbrandao/pirauber | d7c5647ec6df698fa3d7397907ff629c74cc76b9 | [
"MIT"
] | null | null | null | rides/forms.py | andrenbrandao/pirauber | d7c5647ec6df698fa3d7397907ff629c74cc76b9 | [
"MIT"
] | 6 | 2020-06-05T23:27:38.000Z | 2022-02-10T08:14:16.000Z | rides/forms.py | andrenbrandao/pirauber | d7c5647ec6df698fa3d7397907ff629c74cc76b9 | [
"MIT"
] | null | null | null | from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.utils.translation import ugettext_lazy as _
from .models import Ride
| 31.166667 | 91 | 0.592914 |
9d6dfe9a0fb4cf150a1dbedc9b781a51974ddeed | 843 | py | Python | tests/testdata/models.py | dtpryce/MLServer | 02744b3c770141b0b1d9dad2a0256d243051de61 | [
"Apache-2.0"
] | null | null | null | tests/testdata/models.py | dtpryce/MLServer | 02744b3c770141b0b1d9dad2a0256d243051de61 | [
"Apache-2.0"
] | null | null | null | tests/testdata/models.py | dtpryce/MLServer | 02744b3c770141b0b1d9dad2a0256d243051de61 | [
"Apache-2.0"
] | null | null | null | import asyncio
from mlserver import MLModel
from mlserver.codecs import NumpyCodec
from mlserver.types import InferenceRequest, InferenceResponse
| 31.222222 | 87 | 0.71293 |
9d6f477bb8496ccbe8298b0d502cfaf9b42c5d1c | 10,459 | py | Python | PERFORMER.py | ShivamRajSharma/Transformer-Architecure_From_Scratch | f7f24cb5146c09e6cf38a41e5e5ef721389803c1 | [
"MIT"
] | 17 | 2020-09-13T07:53:41.000Z | 2022-03-17T09:58:23.000Z | PERFORMER.py | ShivamRajSharma/Transformer-Architecure_From_Scratch | f7f24cb5146c09e6cf38a41e5e5ef721389803c1 | [
"MIT"
] | null | null | null | PERFORMER.py | ShivamRajSharma/Transformer-Architecure_From_Scratch | f7f24cb5146c09e6cf38a41e5e5ef721389803c1 | [
"MIT"
] | 3 | 2020-12-15T14:20:47.000Z | 2022-01-24T02:26:04.000Z | from time import time
import torch
import torch.nn as nn
if __name__ == "__main__":
#Depends on the Tokenizer
input_vocab_size = 100
output_vocab_size = 200
#DEFAULT PerFORMERS PARAMETERS:-
pad_idx = 0
embedding_out = 512
num_layers = 6
forward_expansion = 4
head = 8
n_features = 256
dropout = 0.1
max_len = 512
inputs = torch.randint(0, 100, (32, 200))
targets = torch.randint(0, 100, (32,100))
model = Performers(
input_vocab_size,
output_vocab_size,
pad_idx,
embedding_out,
num_layers,
forward_expansion,
head,
n_features,
dropout,
max_len
)
start = time()
y = model(inputs, targets)
print(f'INFERENCE TIME = {time() - start}sec')
x = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'NUMBER OF PARAMETERS ARE = {x}') | 30.852507 | 95 | 0.581222 |
9d6fa2ce7adb3f0d8fb6ff64a2befb7535e72eca | 28,970 | py | Python | nogo/gtp_connection.py | douglasrebstock/alpha-zero-general | 2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab | [
"MIT"
] | null | null | null | nogo/gtp_connection.py | douglasrebstock/alpha-zero-general | 2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab | [
"MIT"
] | null | null | null | nogo/gtp_connection.py | douglasrebstock/alpha-zero-general | 2237522be5a1bbfebbc2fc1b2a8e8a6bcb6d5aab | [
"MIT"
] | null | null | null | """
gtp_connection.py
Module for playing games of Go using GoTextProtocol
Parts of this code were originally based on the gtp module
in the Deep-Go project by Isaac Henrion and Amos Storkey
at the University of Edinburgh.
"""
import signal, os
import traceback
from sys import stdin, stdout, stderr
from board_util import GoBoardUtil, BLACK, WHITE, EMPTY, BORDER, PASS, \
MAXSIZE, coord_to_point
import numpy as np
import re
import time
import random
def point_to_coord(point, boardsize):
"""
Transform point given as board array index
to (row, col) coordinate representation.
Special case: PASS is not transformed
"""
if point == PASS:
return PASS
else:
NS = boardsize + 1
return divmod(point, NS)
def format_point(move):
"""
Return move coordinates as a string such as 'a1', or 'pass'.
"""
column_letters = "ABCDEFGHJKLMNOPQRSTUVWXYZ"
#column_letters = "abcdefghjklmnopqrstuvwxyz"
if move == PASS:
return "pass"
row, col = move
if not 0 <= row < MAXSIZE or not 0 <= col < MAXSIZE:
raise ValueError
return column_letters[col - 1]+ str(row)
def move_to_coord(point_str, board_size):
"""
Convert a string point_str representing a point, as specified by GTP,
to a pair of coordinates (row, col) in range 1 .. board_size.
Raises ValueError if point_str is invalid
"""
if not 2 <= board_size <= MAXSIZE:
raise ValueError("board_size out of range")
s = point_str.lower()
if s == "pass":
return PASS
try:
col_c = s[0]
if (not "a" <= col_c <= "z") or col_c == "i":
raise ValueError
col = ord(col_c) - ord("a")
if col_c < "i":
col += 1
row = int(s[1:])
if row < 1:
raise ValueError
except (IndexError, ValueError):
# e.g. "a0"
raise ValueError("wrong coordinate")
if not (col <= board_size and row <= board_size):
# e.g. "a20"
raise ValueError("wrong coordinate")
return row, col
def coord_to_move(move, board_size):
"""
Convert a string point_str representing a point, as specified by GTP,
to a pair of coordinates (row, col) in range 1 .. board_size.
Raises ValueError if point_str is invalid
"""
if not 2 <= board_size <= MAXSIZE:
raise ValueError("board_size out of range")
#s = point_str.lower()
x = move%(board_size+1)
y = move//(board_size+1)
col = chr(x-1 + ord("a"))
#col = col.upper()
return col+str(y)
def color_to_int(c):
"""convert character to the appropriate integer code"""
color_to_int = {"b": BLACK , "w": WHITE, "e": EMPTY,
"BORDER": BORDER}
return color_to_int[c]
| 34.736211 | 150 | 0.542975 |
9d70c2235e5fc849eb97316fd49d7acf1fb36a6a | 2,634 | py | Python | seamless/highlevel/SubCell.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 15 | 2017-06-07T12:49:12.000Z | 2020-07-25T18:06:04.000Z | seamless/highlevel/SubCell.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 110 | 2016-06-21T23:20:44.000Z | 2022-02-24T16:15:22.000Z | seamless/highlevel/SubCell.py | sjdv1982/seamless | 1b814341e74a56333c163f10e6f6ceab508b7df9 | [
"MIT"
] | 6 | 2016-06-21T11:19:22.000Z | 2019-01-21T13:45:39.000Z | import weakref
from .Cell import Cell
def _set_observers(self):
pass
def __str__(self):
return "Seamless SubCell: %s" % ".".join(self._path)
| 30.988235 | 96 | 0.602885 |
9d70ca280f4f08aef01023da8fb208958fa5803b | 460 | py | Python | colos/sfbx/__init__.py | asmodehn/colos | 8894c3a758489b639638ba9aa9c83f7d621648eb | [
"MIT"
] | null | null | null | colos/sfbx/__init__.py | asmodehn/colos | 8894c3a758489b639638ba9aa9c83f7d621648eb | [
"MIT"
] | 4 | 2018-04-11T09:13:05.000Z | 2018-04-11T09:28:18.000Z | colos/sfbx/__init__.py | asmodehn/colos | 8894c3a758489b639638ba9aa9c83f7d621648eb | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# The aim of this package is to :
#- guarantee protected code execution is safe and *will* happen (eventually)
#- report usage via colosstat
# - recover when code fails ( possibly recording previous state, for example )
# one possibility is to implement another levelof abstraction ( like a language - cstk aim )
# another is to just isolate portions of python code with postconditions to guarantee success...
| 41.818182 | 96 | 0.741304 |
9d71192a0442b7eef7acad0763b92e91ecac841f | 965 | py | Python | plugins/help.py | A0vanc01/Frisky | d4d7f9892858b5412755c9dee594e5b60b6d2b94 | [
"MIT"
] | 5 | 2020-01-22T18:16:59.000Z | 2021-06-14T13:23:57.000Z | plugins/help.py | A0vanc01/Frisky | d4d7f9892858b5412755c9dee594e5b60b6d2b94 | [
"MIT"
] | 104 | 2020-02-12T00:36:14.000Z | 2022-02-10T08:18:28.000Z | plugins/help.py | A0vanc01/Frisky | d4d7f9892858b5412755c9dee594e5b60b6d2b94 | [
"MIT"
] | 4 | 2020-01-30T15:44:04.000Z | 2020-08-27T19:22:57.000Z | from frisky.events import MessageEvent
from frisky.plugin import FriskyPlugin, PluginRepositoryMixin
from frisky.responses import FriskyResponse
| 40.208333 | 96 | 0.643523 |
9d712c380762c48dece9d6503dff8952414ca037 | 1,663 | py | Python | cadnano/tests/testgroup.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | 69 | 2015-01-13T02:54:40.000Z | 2022-03-27T14:25:51.000Z | cadnano/tests/testgroup.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | 127 | 2015-01-01T06:26:34.000Z | 2022-03-02T12:48:05.000Z | cadnano/tests/testgroup.py | mctrinh/cadnano2.5 | d8254f24eef5fd77b4fb2b1a9642a8eea2e3c736 | [
"BSD-3-Clause"
] | 48 | 2015-01-22T19:57:49.000Z | 2022-03-27T14:27:53.000Z | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QGraphicsItem, QGraphicsRectItem, QGraphicsItemGroup
from PyQt5.QtCore import pyqtSlot
# end class
def testItemChangeRegression():
"""Make sure PyQt5 handles QGraphicsItem.itemChange correctly
as there was a regression in PyQt5 v 5.6 that was fixed in v 5.7
"""
a = MyRectItemNOIC()
b = MyRectItem(a)
item_group = MyItemGroup()
assert b.parentItem() is a
assert a.childItems()[0] is b
item_group.addToGroup(b)
assert item_group.childItems()[0] is b
assert b.parentItem() is item_group
e = MyRectItem()
c = MyRectItemNOIC(e)
assert c.parentItem() is e
item_group.addToGroup(c)
assert c.parentItem() is item_group
# end def
| 26.822581 | 80 | 0.683103 |
9d71751143901cbe72d8513a42c3b74da3d29bf0 | 998 | py | Python | composer/models/ssd/ssd_hparams.py | anisehsani/composer | 42599682d50409b4a4eb7c91fad85d67418cee13 | [
"Apache-2.0"
] | null | null | null | composer/models/ssd/ssd_hparams.py | anisehsani/composer | 42599682d50409b4a4eb7c91fad85d67418cee13 | [
"Apache-2.0"
] | null | null | null | composer/models/ssd/ssd_hparams.py | anisehsani/composer | 42599682d50409b4a4eb7c91fad85d67418cee13 | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 MosaicML. All Rights Reserved.
from dataclasses import dataclass
import yahp as hp
from composer.models.model_hparams import ModelHparams
| 22.681818 | 55 | 0.617234 |
9d73808fab2e4c633d3b7d43187bc4821f1bfb77 | 1,303 | py | Python | src/lib/base_dataset.py | CvHadesSun/Camera-Calibration | 5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85 | [
"MIT"
] | null | null | null | src/lib/base_dataset.py | CvHadesSun/Camera-Calibration | 5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85 | [
"MIT"
] | null | null | null | src/lib/base_dataset.py | CvHadesSun/Camera-Calibration | 5c054672749aa0b3be1bdff8b8f4f3d2fcf3ee85 | [
"MIT"
] | null | null | null | from os.path import join
from utils import getFileList | 40.71875 | 80 | 0.61934 |
9d73d6f049758b5497d67b41cd027577eaf0250d | 1,704 | py | Python | main.py | sunkr1995/genetic-drawing | 6e5cc755a55c1994770c3f18fb14f1cc651bb700 | [
"MIT"
] | null | null | null | main.py | sunkr1995/genetic-drawing | 6e5cc755a55c1994770c3f18fb14f1cc651bb700 | [
"MIT"
] | null | null | null | main.py | sunkr1995/genetic-drawing | 6e5cc755a55c1994770c3f18fb14f1cc651bb700 | [
"MIT"
] | null | null | null | '''
Author: your name
Date: 2021-06-18 10:13:00
LastEditTime: 2021-07-08 14:13:07
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: /genetic-drawing/main.py
'''
import cv2
import os
import time
from IPython.display import clear_output
from genetic_drawing import *
gen = GeneticDrawing('03.jpg', seed=time.time())
out = gen.generate(400, 50)
brushesRange = np.array([[0.1, 0.3], [0.3, 0.7]])
for i in range(len(gen.imgBuffer)):
cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i])
try:
for i in range(5):
brushesRange_tmp = brushesRange/(2**(i+1))
gen.brushesRange = brushesRange_tmp.tolist()
maskname = "masks-03/mask-{}.jpg".format(i)
gen.sampling_mask = cv2.cvtColor(cv2.imread(maskname), cv2.COLOR_BGR2GRAY)
#keep drawing on top of our previous result
out = gen.generate(100, 30)
for i in range(len(gen.imgBuffer)):
cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i])
except:
if not os.path.exists('out'):
os.mkdir("out")
for i in range(len(gen.imgBuffer)):
cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i])
#brushesRange_tmp = brushesRange/100
#gen.brushesRange = brushesRange_tmp.tolist()
##gen.brushesRange = [[0.005, 0.015],[0.015, 0.035]]
#gen.sampling_mask = cv2.cvtColor(cv2.imread("masks/mask-end.jpg"), cv2.COLOR_BGR2GRAY)
#
##keep drawing on top of our previous result
#out = gen.generate(50, 30)
#save all the images from the image buffer
if not os.path.exists('out'):
os.mkdir("out")
for i in range(len(gen.imgBuffer)):
cv2.imwrite(os.path.join("out", f"{i:06d}.png"), gen.imgBuffer[i]) | 34.08 | 87 | 0.669601 |
9d740fa3ec721433e495424e2743d9af67d910eb | 10,991 | py | Python | flair/models/sandbox/simple_sequence_tagger_model.py | bratao/flair | 67b53cc2a615a2e2a4e552d6f787c2efa708a939 | [
"MIT"
] | null | null | null | flair/models/sandbox/simple_sequence_tagger_model.py | bratao/flair | 67b53cc2a615a2e2a4e552d6f787c2efa708a939 | [
"MIT"
] | null | null | null | flair/models/sandbox/simple_sequence_tagger_model.py | bratao/flair | 67b53cc2a615a2e2a4e552d6f787c2efa708a939 | [
"MIT"
] | null | null | null | import logging
from typing import List, Union, Optional
import torch
import torch.nn
import torch.nn.functional as F
from tqdm import tqdm
import flair.nn
from flair.data import Dictionary, Sentence, Label
from flair.datasets import SentenceDataset, DataLoader
from flair.embeddings import TokenEmbeddings
from flair.training_utils import store_embeddings
log = logging.getLogger("flair")
| 35.569579 | 111 | 0.592849 |
9d7508b796c963b53ae0eb9f9680e4518db45e86 | 1,708 | py | Python | exercise/xiaohuar/spider-xiaohuar.com.py | PorYoung/bigData-camp-8d | 8fa31b48065da27fd1c4f8432232342cede6f56c | [
"MIT"
] | 1 | 2019-12-27T06:34:06.000Z | 2019-12-27T06:34:06.000Z | exercise/xiaohuar/spider-xiaohuar.com.py | PorYoung/bigData-camp-8d | 8fa31b48065da27fd1c4f8432232342cede6f56c | [
"MIT"
] | 1 | 2021-12-14T20:40:06.000Z | 2021-12-14T20:40:06.000Z | exercise/xiaohuar/spider-xiaohuar.com.py | PorYoung/bigData-camp-8d | 8fa31b48065da27fd1c4f8432232342cede6f56c | [
"MIT"
] | null | null | null | import requests
from bs4 import BeautifulSoup
url = 'http://xiaohuar.com/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
spider_xiaohuar_content(url, headers) | 38.818182 | 136 | 0.538056 |
9d75c627939ebcaa3bf24644789f819936e04c59 | 749 | py | Python | v1.1/auc_csv_merge.py | lz-pku-1997/so-many-tricks-for-Image-classification | 3df7a0672f88219f893b0fa23c31ae6b30d01264 | [
"MIT"
] | 2 | 2020-04-21T06:06:28.000Z | 2020-12-27T12:35:57.000Z | v1.1/auc_csv_merge.py | lz-pku-1997/so-many-tricks-for-Image-classification | 3df7a0672f88219f893b0fa23c31ae6b30d01264 | [
"MIT"
] | null | null | null | v1.1/auc_csv_merge.py | lz-pku-1997/so-many-tricks-for-Image-classification | 3df7a0672f88219f893b0fa23c31ae6b30d01264 | [
"MIT"
] | null | null | null | #csv
import glob
import pandas as pd
import numpy as np
io = glob.glob(r"*.csv")
len_io=len(io)
print('',len_io)
prob_list=[]
for i in range(len_io):
sub_1 = pd.read_csv(io[i])
denominator=len(sub_1)
for my_classes in ['healthy','multiple_diseases','rust','scab']:
sub_label_1 = sub_1.loc[:, my_classes].values
sort_1=np.argsort(sub_label_1)
for i,temp_sort in enumerate(sort_1):
sub_label_1[temp_sort]=i/denominator
sub_1.loc[:,my_classes]=sub_label_1
prob_list.append(sub_1.loc[:,'healthy':].values)
sub_1.loc[:,'healthy':] = np.mean(prob_list,axis =0)
sub_1.to_csv('out/submission.csv', index=False)
print(sub_1.head()) | 31.208333 | 69 | 0.663551 |
9d76b727796967801234a59f7efe009b01c9e636 | 10,468 | py | Python | masakari-7.0.0/masakari/objects/base.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | masakari-7.0.0/masakari/objects/base.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | masakari-7.0.0/masakari/objects/base.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2016 NTT Data.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Masakari common internal object model"""
import datetime
from oslo_utils import versionutils
from oslo_versionedobjects import base as ovoo_base
from oslo_versionedobjects import fields as obj_fields
from masakari import objects
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_obj_' + name
remotable_classmethod = ovoo_base.remotable_classmethod
remotable = ovoo_base.remotable
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The MasakariObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A MasakariObject becomes a dict, and anything that implements
ObjectListBase becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, MasakariObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
else:
return obj
def obj_equal_prims(obj_1, obj_2, ignore=None):
"""Compare two primitives for equivalence ignoring some keys.
This operation tests the primitives of two objects for equivalence.
Object primitives may contain a list identifying fields that have been
changed - this is ignored in the comparison. The ignore parameter lists
any other keys to be ignored.
:param:obj1: The first object in the comparison
:param:obj2: The second object in the comparison
:param:ignore: A list of fields to ignore
:returns: True if the primitives are equal ignoring changes
and specified fields, otherwise False.
"""
if ignore is not None:
keys = ['masakari_object.changes'] + ignore
else:
keys = ['masakari_object.changes']
prim_1 = _strip(obj_1.obj_to_primitive(), keys)
prim_2 = _strip(obj_2.obj_to_primitive(), keys)
return prim_1 == prim_2
| 35.364865 | 79 | 0.664215 |
9d7a01fbe97c35ca79d4cd01911da8cd9570eceb | 53 | py | Python | malaya/text/bahasa/news.py | ebiggerr/malaya | be757c793895522f80b929fe82353d90762f7fff | [
"MIT"
] | 88 | 2021-01-06T10:01:31.000Z | 2022-03-30T17:34:09.000Z | malaya/text/bahasa/news.py | zulkiflizaki/malaya | 2358081bfa43aad57d9415a99f64c68f615d0cc4 | [
"MIT"
] | 43 | 2021-01-14T02:44:41.000Z | 2022-03-31T19:47:42.000Z | malaya/text/bahasa/news.py | zulkiflizaki/malaya | 2358081bfa43aad57d9415a99f64c68f615d0cc4 | [
"MIT"
] | 38 | 2021-01-06T07:15:03.000Z | 2022-03-19T05:07:50.000Z | news = ['klik untuk membaca', 'klik untuk maklumat']
| 26.5 | 52 | 0.698113 |
9d7a0f0018ec32fb50d147552cd1d3e28431140d | 306 | py | Python | sonosscripts/modules.py | RobinDeBaets/SonosScripts | e3a4f27259d9881ebdc3176069e7fe428f88c244 | [
"WTFPL"
] | null | null | null | sonosscripts/modules.py | RobinDeBaets/SonosScripts | e3a4f27259d9881ebdc3176069e7fe428f88c244 | [
"WTFPL"
] | 1 | 2019-11-21T20:22:01.000Z | 2019-11-21T20:22:01.000Z | sonosscripts/modules.py | RobinDeBaets/SonosScripts | e3a4f27259d9881ebdc3176069e7fe428f88c244 | [
"WTFPL"
] | 1 | 2020-08-01T18:02:21.000Z | 2020-08-01T18:02:21.000Z |
from sonosscripts import stop, play_pause, previous, next, change_bass, change_volume, mute_volume
modules = {
"stop": stop,
"play_pause": play_pause,
"previous": previous,
"next": next,
"change_bass": change_bass,
"change_volume": change_volume,
"mute_volume": mute_volume
}
| 23.538462 | 98 | 0.69281 |
9d7ad5477f4bf8f12192323e1ee2103954aa57db | 3,925 | py | Python | twitter_bot/MyBot.py | diem-ai/datascience-projects | deef93217bd3b0cfc2ca7802933142d1dad7fcba | [
"MIT"
] | null | null | null | twitter_bot/MyBot.py | diem-ai/datascience-projects | deef93217bd3b0cfc2ca7802933142d1dad7fcba | [
"MIT"
] | null | null | null | twitter_bot/MyBot.py | diem-ai/datascience-projects | deef93217bd3b0cfc2ca7802933142d1dad7fcba | [
"MIT"
] | null | null | null | """
Class SaleBot
It is initialised by nlp model (bag-of-word, tf-idf, word2vec)
It returns response with a question as the input
"""
from gensim.corpora import Dictionary
#from gensim.models import FastText
from gensim.models import Word2Vec , WordEmbeddingSimilarityIndex
from gensim.similarities import SoftCosineSimilarity, SparseTermSimilarityMatrix
from gensim.models import TfidfModel
from multiprocessing import cpu_count
from nlp_helper import preprocessing
if __name__ == "__main__":
print("I'm a bot") | 37.380952 | 105 | 0.592866 |
9d7c94008fdd0c290d0ad7ba8082f2beff2eb070 | 2,452 | py | Python | Tensorflow_2X_PythonFiles/demo123_convolution_visualization.py | mahnooranjum/Tensorflow_DeepLearning | 65ab178d4c17efad01de827062d5c85bdfb9b1ca | [
"MIT"
] | null | null | null | Tensorflow_2X_PythonFiles/demo123_convolution_visualization.py | mahnooranjum/Tensorflow_DeepLearning | 65ab178d4c17efad01de827062d5c85bdfb9b1ca | [
"MIT"
] | null | null | null | Tensorflow_2X_PythonFiles/demo123_convolution_visualization.py | mahnooranjum/Tensorflow_DeepLearning | 65ab178d4c17efad01de827062d5c85bdfb9b1ca | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Demo123_Convolution_Visualization.ipynb
# **Spit some [tensor] flow**
We need to learn the intricacies of tensorflow to master deep learning
`Let's get this over with`
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
print(tf.__version__)
"""## Reference MachineLearningMastery.com"""
from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import SGD, Adam
from glob import glob
import sys, os
import cv2
!wget https://www.theluxecafe.com/wp-content/uploads/2014/07/ferrari-spider-indian-theluxecafe.jpg
!ls
X = cv2.imread('ferrari-spider-indian-theluxecafe.jpg')
X = cv2.cvtColor(X, cv2.COLOR_BGR2RGB)
plt.imshow(X)
print(X.shape)
IMAGE_SIZE = X.shape
X = np.expand_dims(X, axis=0)
print(X.shape)
y = np.ndarray([1])
print(y.shape)
i_layer = Input(shape = IMAGE_SIZE)
h_layer = Conv2D(8, (3,3), strides = 1, activation='relu', padding='same')(i_layer)
h_layer = Flatten()(h_layer)
o_layer = Dense(1, activation='sigmoid')(h_layer)
model = Model(i_layer, o_layer)
model.summary()
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
report = model.fit(X, y, epochs = 10)
model.layers
conv_layer = model.layers[1]
print(conv_layer)
filters, biases = conv_layer.get_weights()
print(conv_layer.name, filters.shape)
f_min, f_max = filters.min(), filters.max()
filters = (filters - f_min) / (f_max - f_min)
plt.figure(figsize=(20,10))
n_filters, idx = 8, 1
for i in range(n_filters):
# get filter
f = filters[:, :, :, i]
for j in range(3):
ax = plt.subplot(n_filters, 3, idx)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(f[:, :, j], cmap='gray')
idx += 1
plt.show()
model_visual = Model(inputs=model.inputs, outputs=conv_layer.output)
model_visual.summary()
maps = model_visual(X)
print(maps.shape)
plt.figure(figsize=(20,10))
square = 4
idx = 1
for _ in range(square):
for _ in range(square):
if (idx > square * 2):
break
# specify subplot and turn of axis
ax = plt.subplot(square, square, idx)
ax.set_xticks([])
ax.set_yticks([])
plt.imshow(maps[0, :, :, idx-1], cmap='gray')
idx += 1
plt.show()
maps.shape[3]
for i in range(maps.shape[3]):
ax = plt.subplot()
plt.imshow(maps[0, :, :, i], cmap='gray')
ax.set_xticks([])
ax.set_yticks([])
plt.show()
| 21.137931 | 98 | 0.69168 |
9d8165f8ce202fddd44b2d3bc70e29ad7d9245a2 | 1,482 | py | Python | hail_scripts/v01/convert_tsv_to_vds.py | NLSVTN/hail-elasticsearch-pipelines | 8b895a2e46a33d347dd2a1024101a6d515027a03 | [
"MIT"
] | null | null | null | hail_scripts/v01/convert_tsv_to_vds.py | NLSVTN/hail-elasticsearch-pipelines | 8b895a2e46a33d347dd2a1024101a6d515027a03 | [
"MIT"
] | null | null | null | hail_scripts/v01/convert_tsv_to_vds.py | NLSVTN/hail-elasticsearch-pipelines | 8b895a2e46a33d347dd2a1024101a6d515027a03 | [
"MIT"
] | null | null | null | import argparse as ap
import hail
from pprint import pprint
import time
from hail_scripts.v01.utils.vds_utils import write_vds
p = ap.ArgumentParser(description="Convert a tsv table to a .vds")
p.add_argument("-c", "--chrom-column", required=True)
p.add_argument("-p", "--pos-column", required=True)
p.add_argument("-r", "--ref-column", required=True)
p.add_argument("-a", "--alt-column", required=True)
p.add_argument("table_path", nargs="+")
args = p.parse_args()
print(", ".join(args.vcf_path))
hc = hail.HailContext(log="./hail_{}.log".format(time.strftime("%y%m%d_%H%M%S")))
for table_path in args.table_path:
print("\n")
print("==> import_table: %s" % table_path)
output_path = table_path.replace(".tsv", "").replace(".gz", "").replace(".bgz", "") + ".vds"
print("==> output: %s" % output_path)
kt = hc.import_table(table_path, impute=True, no_header=args.no_header, delimiter=args.delimiter, missing=args.missing_value, min_partitions=1000)
#kt = kt.drop(columns_to_drop)
#kt = kt.rename(rename_columns)
kt = kt.filter("%(ref_column)s == %(alt_column)s" % args.__dict__, keep=False)
kt = kt.annotate("variant=Variant(%(chrom_column)s, %(pos_column)s, %(ref_column)s, %(alt_column)s)" % args.__dict__)
kt = kt.key_by('variant')
kt = kt.drop([args.chrom_column, args.pos_column, args.ref_column, args.alt_column])
vds = hail.VariantDataset.from_table(kt)
pprint(vds.variant_schema)
write_vds(vds, output_path)
| 36.146341 | 150 | 0.690958 |
9d81808e7a83247fd981f349fc73abe0b9de1e1e | 4,649 | py | Python | scripts/Old/fixSequenceIDs.py | paepcke/json_to_relation | acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9 | [
"BSD-3-Clause"
] | 4 | 2015-10-10T19:09:49.000Z | 2021-09-02T00:58:06.000Z | scripts/Old/fixSequenceIDs.py | paepcke/json_to_relation | acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9 | [
"BSD-3-Clause"
] | null | null | null | scripts/Old/fixSequenceIDs.py | paepcke/json_to_relation | acfa58d540f8f51d1d913d0c173ee3ded1b6c2a9 | [
"BSD-3-Clause"
] | 8 | 2015-05-16T14:33:33.000Z | 2019-10-24T08:56:25.000Z | #!/usr/bin/env python
# Copyright (c) 2014, Stanford University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on Dec 22, 2013
@author: paepcke
'''
import os
import re
import sys
from edxTrackLogJSONParser import EdXTrackLogJSONParser
from modulestoreImporter import ModulestoreImporter
from unidecode import unidecode
idExtractPat = re.compile(r'^"([^"]*)')
seqIDExtractPat = re.compile(r'","([^"]*)')
hashLookup = ModulestoreImporter(os.path.join(os.path.dirname(__file__),'data/modulestore_latest.json'),
useCache=True)
def makeInsertSafe(unsafeStr):
'''
Makes the given string safe for use as a value in a MySQL INSERT
statement. Looks for embedded CR or LFs, and turns them into
semicolons. Escapes commas and single quotes. Backslash is
replaced by double backslash. This is needed for unicode, like
\0245 (invented example)
@param unsafeStr: string that possibly contains unsafe chars
@type unsafeStr: String
@return: same string, with unsafe chars properly replaced or escaped
@rtype: String
'''
#return unsafeStr.replace("'", "\\'").replace('\n', "; ").replace('\r', "; ").replace(',', "\\,").replace('\\', '\\\\')
if unsafeStr is None or not isinstance(unsafeStr, basestring) or len(unsafeStr) == 0:
return ''
# Check for chars > 128 (illegal for standard ASCII):
for oneChar in unsafeStr:
if ord(oneChar) > 128:
# unidecode() replaces unicode with approximations.
# I tried all sorts of escapes, and nothing worked
# for all cases, except this:
unsafeStr = unidecode(unicode(unsafeStr))
break
return unsafeStr.replace('\n', "; ").replace('\r', "; ").replace('\\', '').replace("'", r"\'")
if __name__ == '__main__':
fixSequencIDs()
#INSERT INTO EdxTrackEvent (_id,long_answer) VALUES ('fbcefe06_fb7c_48aa_a12e_d85e6988dbda','first answer'),('bbd3ddf3_8ed0_4eee_8ff7_f5791b9e4a7e','second answer') ON DUPLICATE KEY UPDATE long_answer=VALUES(long_answer);
| 54.05814 | 757 | 0.687245 |
9d818b86a7daa5558c49d73a26208235e0d52b89 | 8,433 | py | Python | tests/test_logger_device.py | ska-telescope/lmc-base-classes | e3ac46a731aca4d49d53747b4352ec4be089ff5d | [
"BSD-3-Clause"
] | 3 | 2019-04-18T20:46:02.000Z | 2019-07-30T17:47:40.000Z | tests/test_logger_device.py | ska-telescope/lmc-base-classes | e3ac46a731aca4d49d53747b4352ec4be089ff5d | [
"BSD-3-Clause"
] | 26 | 2018-10-30T07:50:50.000Z | 2020-07-13T12:50:36.000Z | tests/test_logger_device.py | ska-telescope/lmc-base-classes | e3ac46a731aca4d49d53747b4352ec4be089ff5d | [
"BSD-3-Clause"
] | 4 | 2019-01-16T07:47:59.000Z | 2021-06-01T11:17:32.000Z | #########################################################################################
# -*- coding: utf-8 -*-
#
# This file is part of the SKALogger project
#
#
#
#########################################################################################
"""Contain the tests for the SKALogger."""
import re
import pytest
from tango import DevState
from tango.test_context import MultiDeviceTestContext
from ska_tango_base.base import ReferenceBaseComponentManager
from ska_tango_base.logger_device import SKALogger
from ska_tango_base.subarray import SKASubarray
import tango
# PROTECTED REGION ID(SKALogger.test_additional_imports) ENABLED START #
from ska_tango_base.control_model import (
AdminMode,
ControlMode,
HealthState,
LoggingLevel,
SimulationMode,
TestMode,
)
# PROTECTED REGION END # // SKALogger.test_additional_imports
# PROTECTED REGION ID(SKALogger.test_SKALogger_decorators) ENABLED START #
| 44.856383 | 94 | 0.681727 |
9d83b4f58893d59845ef72aeb0870f92b39fa121 | 2,053 | py | Python | baseline/find_pairs.py | parallelcrawl/DataCollection | 4308473e6b53779159a15c1416bff3f2291dd1f2 | [
"Apache-2.0"
] | 8 | 2018-02-08T16:03:00.000Z | 2022-01-19T11:41:38.000Z | baseline/find_pairs.py | christianbuck/CorpusMining | f9248c3528a415a1e5af2c5a54a60c16cd79ff1d | [
"Apache-2.0"
] | 3 | 2017-08-08T10:53:29.000Z | 2017-08-08T10:58:51.000Z | baseline/find_pairs.py | parallelcrawl/DataCollection | 4308473e6b53779159a15c1416bff3f2291dd1f2 | [
"Apache-2.0"
] | 4 | 2018-06-09T21:53:09.000Z | 2022-01-19T11:41:48.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import urlparse
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
buffer = []
buffer_url = None
for line in sys.stdin:
# line = line.decode("utf-8", "ignore")
url = line.split("\t", 1)[0]
if url != buffer_url:
process_buffer(buffer)
buffer = [line]
buffer_url = url
else:
buffer.append(line)
# print url != buffer_url
process_buffer(buffer)
| 31.106061 | 77 | 0.580614 |
9d84b7b6381a6f3c016023bcfd74caa6a922fa9b | 625 | py | Python | tests/test_jupyter_integration.py | boeddeker/graphviz | acf79bca4518781cad02c102e89ec4e9ce757088 | [
"MIT"
] | null | null | null | tests/test_jupyter_integration.py | boeddeker/graphviz | acf79bca4518781cad02c102e89ec4e9ce757088 | [
"MIT"
] | null | null | null | tests/test_jupyter_integration.py | boeddeker/graphviz | acf79bca4518781cad02c102e89ec4e9ce757088 | [
"MIT"
] | null | null | null | import pytest
from graphviz import jupyter_integration
| 32.894737 | 78 | 0.808 |
9d872c11430e2faa3e970e4a406f2f735e7a91bc | 122 | py | Python | gaussianmean.py | rjw57/fear-python-example | b95440fff6471d2555dce63ed8b26a0a7c8d2ed1 | [
"MIT"
] | 1 | 2016-06-27T08:28:23.000Z | 2016-06-27T08:28:23.000Z | gaussianmean.py | rjw57/fear-python-example | b95440fff6471d2555dce63ed8b26a0a7c8d2ed1 | [
"MIT"
] | null | null | null | gaussianmean.py | rjw57/fear-python-example | b95440fff6471d2555dce63ed8b26a0a7c8d2ed1 | [
"MIT"
] | null | null | null | import numpy as np
if __name__ == '__main__':
main()
| 13.555556 | 37 | 0.598361 |
9d874b69262d199893f7832d8c3dfc78745d2cab | 544 | py | Python | sarsa.py | lukaspestalozzi/URLNN-Project2 | 425d3a14f063d91ae4b6183aa866fa074dc1d791 | [
"MIT"
] | null | null | null | sarsa.py | lukaspestalozzi/URLNN-Project2 | 425d3a14f063d91ae4b6183aa866fa074dc1d791 | [
"MIT"
] | null | null | null | sarsa.py | lukaspestalozzi/URLNN-Project2 | 425d3a14f063d91ae4b6183aa866fa074dc1d791 | [
"MIT"
] | null | null | null |
import mountaincar as mc
import numpy as np
from collections import namedtuple
from collections import defaultdict
import matplotlib.pylab as plb
import matplotlib.pyplot as plt
from time import time
State = namedtuple('State', ['x', 'v'])
| 24.727273 | 85 | 0.740809 |
9d87c99f7edc4a51975ce4aad83b2a68eca0165b | 4,931 | py | Python | utils.py | nea23/greek_alphabets_tf-idf | 94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9 | [
"MIT"
] | null | null | null | utils.py | nea23/greek_alphabets_tf-idf | 94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9 | [
"MIT"
] | null | null | null | utils.py | nea23/greek_alphabets_tf-idf | 94094dd6d7383400e0f0a9d4a1b05744dd2f3ba9 | [
"MIT"
] | null | null | null | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
"""
The following functions are used to create an annotated heatmap and they were copied from:
https://matplotlib.org/stable/gallery/images_contours_and_fields/image_annotated_heatmap.html#using-the-helper-function-code-style
"""
def heatmap(data, row_labels, col_labels, ax=None,
            **kwargs):
    """
    Create a heatmap from a numpy array and two lists of labels.

    Note: unlike the matplotlib gallery version this was adapted from,
    this variant draws no colorbar (the `cbar_kw`/`cbarlabel` machinery
    was removed).

    Parameters
    ----------
    data
        A 2D numpy array of shape (N, M).
    row_labels
        A list or array of length N with the labels for the rows.
    col_labels
        A list or array of length M with the labels for the columns.
    ax
        A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If
        not provided, use current axes or create a new one. Optional.
    **kwargs
        All other arguments are forwarded to `imshow`.

    Returns
    -------
    The `AxesImage` created by `imshow`.
    """
    if not ax:
        ax = plt.gca()
    # Plot the heatmap
    im = ax.imshow(data, **kwargs)
    # We want to show all ticks...
    ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
    # ... and label them with the respective list entries.
    ax.set_xticklabels(col_labels)
    ax.set_yticklabels(row_labels)
    # Let the horizontal axes labeling appear on top.
    ax.tick_params(top=True, bottom=False,
                   labeltop=True, labelbottom=False)
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=-30, ha="right",
             rotation_mode="anchor")
    # Turn spines off and create white grid.
    # ax.spines[:].set_visible(False)
    # Minor ticks at cell boundaries give the white separator grid below.
    ax.set_xticks(np.arange(data.shape[1]+1)-.5, minor=True)
    ax.set_yticks(np.arange(data.shape[0]+1)-.5, minor=True)
    ax.grid(which="minor", color="w", linestyle='-', linewidth=3)
    ax.tick_params(which="minor", bottom=False, left=False)
    return im
def annotate_heatmap(im, data=None, valfmt="{x:.2f}",
                     textcolors=("black", "white"),
                     threshold=None, **textkw):
    """
    A function to annotate a heatmap.

    Parameters
    ----------
    im
        The AxesImage to be labeled.
    data
        Data used to annotate. If None, the image's data is used. Optional.
    valfmt
        The format of the annotations inside the heatmap. This should either
        use the string format method, e.g. "$ {x:.2f}", or be a
        `matplotlib.ticker.Formatter`. Optional.
    textcolors
        A pair of colors. The first is used for values below a threshold,
        the second for those above. Optional.
    threshold
        Value in data units according to which the colors from textcolors are
        applied. If None (the default) uses the middle of the colormap as
        separation. Optional.
    **textkw
        All other arguments are forwarded to each call to `text` used to
        create the text labels.

    Returns
    -------
    The list of created `matplotlib.text.Text` objects, one per cell.
    """
    if not isinstance(data, (list, np.ndarray)):
        data = im.get_array()
    # Normalize the threshold to the images color range.
    if threshold is not None:
        threshold = im.norm(threshold)
    else:
        threshold = im.norm(data.max())/2.
    # Set default alignment to center, but allow it to be
    # overwritten by textkw.
    kw = dict(horizontalalignment="center",
              verticalalignment="center")
    kw.update(textkw)
    # Get the formatter in case a string is supplied
    if isinstance(valfmt, str):
        valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)
    # Loop over the data and create a `Text` for each "pixel".
    # Change the text's color depending on the data.
    texts = []
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            # Index 0/1 of textcolors picked by whether the normalized
            # value exceeds the (normalized) threshold.
            kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])
            text = im.axes.text(j, i, valfmt(data[i, j], None), **kw)
            texts.append(text)
    return texts
"""
The following functions are used to get the top pairs from a correlation matrix and they were copied from:
https://stackoverflow.com/a/41453817
"""
def get_redundant_pairs(df):
    '''Get diagonal and lower triangular pairs of correlation matrix'''
    cols = df.columns
    n_cols = df.shape[1]
    # Every (row_label, col_label) pair on or below the main diagonal.
    return {(cols[row], cols[col])
            for row in range(n_cols)
            for col in range(row + 1)}
9d87fe4b4c7aa76322c36b84c9220f5fee728c3d | 6,675 | py | Python | built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/MindSpore/Official/cv/detection/CenterFace_for_MindSpore/src/launch.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""auto generate rank table and export envs"""
import sys
import subprocess
import os
import socket
import json
from argparse import ArgumentParser, REMAINDER
if __name__ == "__main__":
main()
| 43.914474 | 315 | 0.61588 |
9d88690768c73f37df5f9308e7658f80de5bdba2 | 1,475 | py | Python | orange3/Orange/widgets/credentials.py | rgschmitz1/BioDepot-workflow-builder | f74d904eeaf91ec52ec9b703d9fb38e9064e5a66 | [
"MIT"
] | 54 | 2017-01-08T17:21:49.000Z | 2021-11-02T08:46:07.000Z | orange3/Orange/widgets/credentials.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 22 | 2017-03-28T06:03:14.000Z | 2021-07-28T05:43:55.000Z | orange3/Orange/widgets/credentials.py | Synthia-3/BioDepot-workflow-builder | 4ee93abe2d79465755e82a145af3b6a6e1e79fd4 | [
"MIT"
] | 21 | 2017-01-26T21:12:09.000Z | 2022-01-31T21:34:59.000Z | import logging
import keyring
SERVICE_NAME = "Orange3 - {}"
log = logging.getLogger(__name__)
| 27.830189 | 88 | 0.614237 |
9d886ff7c8fb1d674ed9db521c7c448a657e5fe1 | 3,799 | py | Python | Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 1 | 2021-07-24T17:22:50.000Z | 2021-07-24T17:22:50.000Z | Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-28T03:40:31.000Z | 2022-02-28T03:40:52.000Z | Incident-Response/Tools/cyphon/cyphon/responder/actions/tests/test_models.py | sn0b4ll/Incident-Playbook | cf519f58fcd4255674662b3620ea97c1091c1efb | [
"MIT"
] | 2 | 2022-02-25T08:34:51.000Z | 2022-03-16T17:29:44.000Z | # -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
"""
# standard library
try:
from unittest.mock import Mock, patch
except ImportError:
from mock import Mock, patch
# third party
from django.test import TestCase
# local
import platforms.jira.handlers as jira_module
from responder.actions.models import Action
from tests.fixture_manager import get_fixtures
| 30.886179 | 72 | 0.630692 |
9d8881a2641e3115485a61059c62987f2d27bf5d | 4,805 | py | Python | predictions/lambda/handler.py | aaronshim/alexa-github-today | 4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c | [
"MIT"
] | null | null | null | predictions/lambda/handler.py | aaronshim/alexa-github-today | 4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c | [
"MIT"
] | null | null | null | predictions/lambda/handler.py | aaronshim/alexa-github-today | 4f3e7adffa9bb9f3d63cfc1f4a79f396078c787c | [
"MIT"
] | null | null | null | import json
import requests
from collections import defaultdict
from fuzzywuzzy import process
from random import sample
# Constants
"""
Constants for default responses that do not need any further computation.
"""
DEFAULT_STOP_RESPONSE = 'All right. See you next time!'
DEFAULT_ERROR_MESSAGE = "I'm sorry. I don't know how to do that yet."
DEFAULT_HELP_MESSAGE = "Try asking me about prediction markets. Ask me to look up midterm elections."
PREDEFINED_RESPONSES = {
'AMAZON.FallbackIntent': "I couldn't understand what you were asking. Why don't you ask me about elections?",
'AMAZON.CancelIntent': DEFAULT_STOP_RESPONSE,
'AMAZON.HelpIntent': DEFAULT_HELP_MESSAGE,
'AMAZON.StopIntent': DEFAULT_STOP_RESPONSE,
'AMAZON.NavigateHomeIntent': DEFAULT_STOP_RESPONSE,
}
"""
To be considered as a match, any other title would have to be within this percentage of the score of the best match.
"""
PERCENTAGE_THRESHOLD = 0.1
# API Helpers
def get_all_markets():
    """
    Query the PredictIt API for every available market.

    Returns a dict mapping each market's display name to its numeric ID.
    """
    response = requests.request(
        'GET', 'https://www.predictit.org/api/marketdata/all/')
    payload = json.loads(response.content)
    return {market['name']: market['id'] for market in payload['markets']}
def get_market(id):
    """
    Query the PredictIt API for the details of one market.

    `id` is the market's numeric identifier; returns the decoded JSON
    payload describing that market.
    """
    url = "https://www.predictit.org/api/marketdata/markets/%d" % id
    response = requests.request('GET', url)
    return json.loads(response.content)
# "UI" Helpers
def market_message(market):
    """
    Build a one-sentence summary of a market's trading price.

    `market` is a decoded PredictIt market payload (see `get_market`):
    a dict with a 'name' string and a 'contracts' list whose entries
    carry 'lastTradePrice' (a 0..1 fraction).

    Markets with exactly one contract are summarized with the contract's
    last trade price as a whole percentage; markets with zero or multiple
    contracts cannot be summarized in one number, so a "too complicated"
    message is returned instead.
    """
    contracts = market['contracts']
    # The original only special-cased len > 1 and raised IndexError on an
    # empty contract list; treat that case as "too complicated" as well.
    if len(contracts) != 1:
        return "%s is too complicated." % market['name']
    return "%s is trading at %d percent." % \
        (market['name'], contracts[0]['lastTradePrice'] * 100)
def response_from_message(message):
    """
    Wrap a plain-text message in the minimal Alexa response envelope.
    """
    output_speech = {
        'type': 'PlainText',
        'text': message,
    }
    return {
        'version': '1.0',
        'response': {'outputSpeech': output_speech},
    }
# Main function
def main(event, context):
    """
    Entry point for the Alexa action (AWS Lambda handler).

    `event` is the Alexa request JSON; `context` is the Lambda context
    object (unused).  Returns an Alexa response dict built by
    response_from_message(), or None for SessionEndedRequest.
    """
    request_type = event['request']['type']
    # Dispatch non-intent request types first; an unrecognized type
    # falls through to the intent-handling path below.
    if request_type != 'IntentRequest':
        if request_type == 'LaunchRequest':
            return response_from_message(DEFAULT_HELP_MESSAGE)
        elif request_type == 'CanFulfillIntentRequest':
            # NOTE(review): `can_fulfill` is not defined in this chunk --
            # presumably declared elsewhere in the module; verify.
            return can_fulfill(event['request']['intent'])
        elif request_type == 'SessionEndedRequest':
            return
    intent = event['request']['intent']
    intent_type = intent['name']
    # Get the canned responses out of the way before we do any heavy lifting
    # with external API calls.
    if intent_type in PREDEFINED_RESPONSES:
        return response_from_message(PREDEFINED_RESPONSES[intent_type])
    # Sanity check.
    if intent_type != 'Query' or 'Market' not in intent['slots']:
        return response_from_message(DEFAULT_ERROR_MESSAGE)
    keyword = intent['slots']['Market']['value']
    markets = get_all_markets()
    # Fuzzy-match the spoken keyword against all market names, then
    # only take the ones that are within percentage threshold of the first
    # result. Bucket them by score.
    likely_markets = process.extract(keyword, markets.keys(), limit=100)
    (_, best_score) = likely_markets[0]
    result_markets = defaultdict(list) # Multimap score -> id's
    for (name, score) in likely_markets:
        if best_score - score <= PERCENTAGE_THRESHOLD * best_score:
            result_markets[score].append(markets[name])
    # Pick one random market id per score bucket, then fetch each one's
    # details: a list of market JSON responses.
    result_markets = [get_market(id) for id in sum(
        [sample(ids, 1) for (_, ids) in result_markets.items()], [])]
    return response_from_message(' '.join(market_message(market) for market in result_markets))
9d88973447a6fc9a97038839f4db33428c51196b | 12,649 | py | Python | Train.py | prattcmp/speakerembedding | 5ed051261e69aaf7a1306c390b36cedb8da3f095 | [
"MIT"
] | null | null | null | Train.py | prattcmp/speakerembedding | 5ed051261e69aaf7a1306c390b36cedb8da3f095 | [
"MIT"
] | null | null | null | Train.py | prattcmp/speakerembedding | 5ed051261e69aaf7a1306c390b36cedb8da3f095 | [
"MIT"
] | null | null | null | import torch
import numpy as np
import logging, yaml, os, sys, argparse, time
from tqdm import tqdm
from collections import defaultdict
from Logger import Logger
import matplotlib
matplotlib.use('agg')
matplotlib.rcParams['agg.path.chunksize'] = 10000
import matplotlib.pyplot as plt
from scipy.io import wavfile
from random import sample
from sklearn.manifold import TSNE
from Modules import GE2E, GE2E_Loss
from Datasets import Dataset, Collater, Inference_Collater
from Noam_Scheduler import Modified_Noam_Scheduler
from Radam import RAdam
from Arg_Parser import Recursive_Parse
hp = Recursive_Parse(yaml.load(
open('Hyper_Parameters.yaml', encoding='utf-8'),
Loader=yaml.Loader
))
if not hp.Device is None:
os.environ['CUDA_VISIBLE_DEVICES']= str(hp.Device)
if not torch.cuda.is_available():
device = torch.device('cpu')
else:
device = torch.device('cuda:0')
torch.backends.cudnn.benchmark = True
torch.cuda.set_device(0)
logging.basicConfig(
level=logging.INFO, stream=sys.stdout,
format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
)
if hp.Use_Mixed_Precision:
try:
from apex import amp
except:
logging.warn('There is no apex modules in the environment. Mixed precision does not work.')
hp.Use_Mixed_Precision = False
if __name__ == '__main__':
argParser = argparse.ArgumentParser()
argParser.add_argument('-s', '--steps', default= 0, type= int)
args = argParser.parse_args()
new_Trainer = Trainer(steps= args.steps)
new_Trainer.Train() | 35.233983 | 137 | 0.591035 |
9d8b9ee2c96a9f3f72e8c6e40b49a6ccfdc17590 | 2,247 | py | Python | steamstore/client.py | saucesteals/steam.py | b1017f85f23c0eccafc6f35814d2e57cb4aa23e7 | [
"MIT"
] | null | null | null | steamstore/client.py | saucesteals/steam.py | b1017f85f23c0eccafc6f35814d2e57cb4aa23e7 | [
"MIT"
] | null | null | null | steamstore/client.py | saucesteals/steam.py | b1017f85f23c0eccafc6f35814d2e57cb4aa23e7 | [
"MIT"
] | 1 | 2021-04-11T00:38:19.000Z | 2021-04-11T00:38:19.000Z | import logging
import asyncio
import aiohttp
from .defaults import *
from .app import App
from .featured import FeaturedList
log = logging.getLogger(__name__)
| 26.127907 | 122 | 0.587895 |
9d8c97671a23367d026ea52b147ffe064cc2939a | 881 | py | Python | ga/gen_graph.py | k4t0mono/exercicios-ia | 06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c | [
"BSD-3-Clause"
] | 1 | 2018-09-23T15:38:04.000Z | 2018-09-23T15:38:04.000Z | ga/gen_graph.py | k4t0mono/exercicios-ia | 06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c | [
"BSD-3-Clause"
] | null | null | null | ga/gen_graph.py | k4t0mono/exercicios-ia | 06f76db20f519b8d7e9b5ee2cf5c7a72b21e188c | [
"BSD-3-Clause"
] | null | null | null | import sys
import numpy as np
import matplotlib.pyplot as plt
# Input file layout (sys.argv[1]): first line is the population size,
# then one bracketed, comma-separated list of integer genes per
# generation, e.g. "[3,-1,0,...]".
f = open(sys.argv[1], 'r')
lines = f.readlines()
f.close()
pop_size = int(lines.pop(0))
pops = []
for l in lines:
    # Keep only the lines that look like a population listing.
    if l[0] == '[':
        pops.append(l.strip())
# Write one histogram image (popNNN.png) per generation.
for j in range(len(pops)):
    # Parse "[a,b,c]" back into a list of ints.
    p = []
    for n in pops[j][1:-1].split(','):
        p.append(int(n))
    # Count occurrences of each value in [-16, 15].
    # NOTE(review): a gene of 16 (or anything outside the range) raises
    # KeyError here -- assumes the GA clamps genes to [-16, 15]; confirm.
    d = {}
    for i in range(-16, 16):
        d[i] = 0
    for i in p:
        d[i] += 1
    x = []
    y = []
    for k in d:
        x.append(k)
        y.append(d[k])
    axes = plt.gca()
    axes.set_xlim([-17, 16])
    axes.set_ylim([0, pop_size+1])
    # plt.scatter(x, y, s=5, c=[(0,0,0)], alpha=0.5)
    plt.bar(x, y, 1, color='blue')
    plt.title('Population {:03d}'.format(j))
    plt.xlabel('x')
    plt.ylabel('qnt')
    name = 'pop{:03d}.png'.format(j)
    plt.savefig(name)
    print('saving {}'.format(name))
    plt.clf()
9d8f0a7d44e8c877c0f58c7e9fe5bd054fd5c40a | 7,486 | py | Python | src/analyses/analyses.py | zahariaa/disentangled-dynamics | 2dbdf9884f6f90ff67073f571191227e7abce81d | [
"MIT"
] | null | null | null | src/analyses/analyses.py | zahariaa/disentangled-dynamics | 2dbdf9884f6f90ff67073f571191227e7abce81d | [
"MIT"
] | null | null | null | src/analyses/analyses.py | zahariaa/disentangled-dynamics | 2dbdf9884f6f90ff67073f571191227e7abce81d | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
analyses for bVAE entanglement, etc
"""
import torch
import sys
sys.path.append("..") # Adds higher directory to python modules path.
import matplotlib.pyplot as plt
import numpy as np
from data.dspritesb import dSpriteBackgroundDataset
from torchvision import transforms
ds = dSpriteBackgroundDataset(transform=transforms.Resize((32,32)),shapetype = 'circle')
# Build sweeps through model ...
def sweepCircleLatents(model,latents=np.linspace(0,1,16),def_latents=None):
    """sweepCircleLatents(model,latents,def_latents):
    generates input images that sweep through each latent variable,
    and evaluates them on given model
    model = loaded model, e.g., vae = staticVAE32(n_latent = 4)
    latents = latents to sweep through. defaults to
              np.linspace(0,1,16)
    def_latents = 'default latents': defines the non-swept latents.
              defaults to [0.5,0.5,0.5,0.5] if None
    ---e.g.,---
    yhat, x = sweepCircleLatents(vae)
    """
    # Initialization
    nsweep = len(latents)
    # encoderBVAE_like exposes its latent size through its final fc layer
    # and is itself the encoder; other model types carry an n_latent
    # attribute and a separate encode() method.
    if type(model).__name__ == 'encoderBVAE_like':
        n_latent = model.fc.out_features
        encoder = model
    else:
        n_latent = model.n_latent
        encoder = model.encode
    if def_latents is None:
        def_latents = 0.5*np.ones(n_latent)
    # Generate stimulus sweeps
    # NOTE(review): the explicit x[0..3] rows hard-code n_latent == 4;
    # other latent sizes would leave rows unfilled or index out of range.
    x = torch.zeros((n_latent,nsweep,1,32,32))
    for i in np.arange(0,nsweep):
        x[0,i,:,:,:] = ds.arbitraryCircle(latents[i],def_latents[1],def_latents[2],def_latents[3])
        x[1,i,:,:,:] = ds.arbitraryCircle(def_latents[0],latents[i],def_latents[2],def_latents[3])
        x[2,i,:,:,:] = ds.arbitraryCircle(def_latents[0],def_latents[1],latents[i],def_latents[3])
        x[3,i,:,:,:] = ds.arbitraryCircle(def_latents[0],def_latents[1],def_latents[2],latents[i])
    # ... and evaulate them all at once
    yhat = encoder(x)
    # VAE-style encoders return a (mu, logvar) tuple; keep only the mean.
    if not (type(model).__name__ == 'encoderBVAE_like' or type(model).__name__ == 'dynamicAE32'):
        yhat = yhat[0]
    return yhat,x
# Plot sweeps through model
def plotCircleSweep(x=None,nimgs=5):
    """plotCircleSweep(x, nimgs):
        plots a subset of stimuli,
        generated from sweepCircleLatents()

    x     = stimulus tensor from sweepCircleLatents(), or the whole
            (yhat, x) tuple that sweepCircleLatents() returns
    nimgs = number of images to show per latent dimension
    ---e.g.,---
    yhat, x = sweepCircleLatents(vae)
    plotCircleSweep(x)
    alternatively,
    plotCircleSweep(sweepCircleLatents(vae))
    """
    # Initialization.
    # BUG FIX: the original tested `x is None and type(nimgs) is tuple`
    # and then read an undefined name `yhat`, raising NameError whenever
    # the documented tuple-passing call style was used.  Unpack the
    # stimulus tensor from the tuple passed as `x` instead.
    if isinstance(x, tuple):
        x = x[1]
    # Start a-plottin': one column per latent dimension, nimgs rows
    # sampled evenly from the 16-step sweep.
    fig, ax = plt.subplots(nimgs,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k')
    for latentdim in range(4):
        cnt = -1
        for img in np.linspace(0,15,nimgs).astype(int):
            cnt+=1
            plt.sca(ax[cnt,latentdim])
            plt.set_cmap('gray')
            ax[cnt,latentdim].imshow(
                x[latentdim*16+img,:,:,:].squeeze(), vmin=0, vmax=1)
            plt.axis('off')
    return fig, ax
def plotLatentsSweep(yhat,nmodels=1):
    """plotLatentsSweep(yhat):
        plots model latents and a subset of the corresponding stimuli,
        generated from sweepCircleLatents()
    ---e.g.,---
    yhat, x = sweepCircleLatents(vae)
    plotCircleSweep(yhat,x)
    alternatively,
    plotLatentsSweep(sweepCircleLatents(vae))
    """
    # Initialization
    # Accept the whole (yhat, x) tuple from sweepCircleLatents().
    if type(yhat) is tuple:
        yhat = yhat[0]
    # Start a-plottin': one column per latent dimension, one row per model.
    fig, ax = plt.subplots(nmodels,4,figsize=(9, 15), dpi= 80, facecolor='w', edgecolor='k', sharey='row',sharex='col')
    for latentdim in range(4):
        if nmodels > 1:
            # ax is 2-D when subplots() gets more than one row.
            for imodel in range(nmodels):
                plt.sca(ax[imodel,latentdim])
                # Rows latentdim*16 .. latentdim*16+15 of yhat correspond
                # to the 16-step sweep of this latent dimension.
                plt.plot(yhat[imodel][latentdim*16+np.arange(0,16),:].detach().numpy())
                # ax[imodel,latentdim].set_aspect(1./ax[imodel,latentdim].get_data_ratio())
                # Hide all spines/ticks except the leftmost y-axis.
                ax[imodel,latentdim].spines['top'].set_visible(False)
                ax[imodel,latentdim].spines['right'].set_visible(False)
                if latentdim>0:
                    ax[imodel,latentdim].spines['left'].set_visible(False)
#                    ax[imodel,latentdim].set_yticklabels([])
                    ax[imodel,latentdim].tick_params(axis='y', length=0)
#                if imodel<nmodels-1 or latentdim>0:
                ax[imodel,latentdim].spines['bottom'].set_visible(False)
                ax[imodel,latentdim].set_xticklabels([])
                ax[imodel,latentdim].tick_params(axis='x', length=0)
        else:
            # Single-model case: ax is a 1-D array of four axes.
            imodel=0
            plt.sca(ax[latentdim])
            plt.plot(yhat[latentdim*16+np.arange(0,16),:].detach().numpy())
            ax[latentdim].set_aspect(1./ax[latentdim].get_data_ratio())
            ax[latentdim].spines['top'].set_visible(False)
            ax[latentdim].spines['right'].set_visible(False)
            if latentdim>0:
                ax[latentdim].spines['left'].set_visible(False)
                ax[latentdim].tick_params(axis='y', length=0)
#            if imodel<nmodels-1 or latentdim>0:
            ax[latentdim].spines['bottom'].set_visible(False)
            ax[latentdim].set_xticklabels([])
            ax[latentdim].tick_params(axis='x', length=0)
    return fig, ax
def colorAxisNormalize(colorbar):
    """colorAxisNormalize(colorbar):
    Symmetrize a color axis so it is centered on zero.

    Useful for diverging colormaps
    (e.g., cmap='bwr': blue=negative, red=positive, white=0).
    Input is an already initialized colorbar object from a plot.
    ---e.g.,---
    corr_vae = np.corrcoef(yhat_vae.detach().numpy().T)
    plt.set_cmap('bwr')
    plt.imshow(corr_vae)
    cb = plt.colorbar()
    colorAxisNormalize(cb)
    ---or---
    colorAxisNormalize(plt.colorbar())
    """
    lower, upper = colorbar.get_clim()
    bound = max(abs(lower), abs(upper))
    colorbar.set_clim(-bound, bound)
def showReconstructionsAndErrors(model):
    """showReconstructionsAndErrors(model):
    generates random inputs, runs them through a specified model
    to generate their reconstructions. plots the inputs,
    reconstructions, and their difference
    ---e.g.---
    from staticvae.models import staticVAE32
    vae = staticVAE32(n_latent = 4)
    vae.eval()
    checkpoint = torch.load('../staticvae/trained/staticvae32_dsprites_circle_last_500K',map_location='cpu')
    vae.load_state_dict(checkpoint['model_states']['net'])
    showReconstructionsAndErrors(model)
    """
    fig=plt.figure(figsize=(18, 16), dpi= 80, facecolor='w',
    edgecolor='k')
    cnt = 0
    # 12 random samples, each shown as a (true, recon, diff) triple in a
    # 6x6 grid of subplots.
    for ii in range(12):
        # Draw a random stimulus from the module-level dataset `ds`.
        x,label = ds[np.random.randint(1000)]
        x = x[np.newaxis, :, :]
        # Reconstruct via the encoder mean only (logvar unused).
        mu,logvar = model.encode(x.float())
        recon = model.decode(mu).detach()
        diff = x - recon
        cnt += 1
        ax = plt.subplot(6,6,cnt)
        plt.set_cmap('gray')
        ax.imshow(x.squeeze(), vmin=0, vmax=1)
        plt.title('true')
        plt.axis('off')
        cnt += 1
        ax = plt.subplot(6,6,cnt)
        ax.imshow(recon.squeeze(), vmin=0, vmax=1)
        plt.title('recon')
        plt.axis('off')
        cnt += 1
        ax = plt.subplot(6,6,cnt)
        # Diverging colormap centered on zero for the error image.
        plt.set_cmap('bwr')
        img = ax.imshow(diff.numpy().squeeze())
        colorAxisNormalize(fig.colorbar(img))
        plt.title('diff')
        plt.axis('off')
9d9030a3ab27bda98f5076efe7e1d4f4d61c1b31 | 2,684 | py | Python | Chapter_BestPractices/Centering_Scaling.py | ML-PSE/Machine_Learning_for_PSE | b53578d7cc0e0eca4907527b188a60de06d6710e | [
"Apache-2.0"
] | 2 | 2022-02-20T18:57:46.000Z | 2022-03-03T07:07:12.000Z | Chapter_BestPractices/Centering_Scaling.py | ML-PSE/Machine_Learning_for_PSE | b53578d7cc0e0eca4907527b188a60de06d6710e | [
"Apache-2.0"
] | null | null | null | Chapter_BestPractices/Centering_Scaling.py | ML-PSE/Machine_Learning_for_PSE | b53578d7cc0e0eca4907527b188a60de06d6710e | [
"Apache-2.0"
] | null | null | null | ##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Centering & Scaling
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% Standard scaling
import numpy as np
from sklearn.preprocessing import StandardScaler
X = np.array([[ 1000, 0.01, 300],
[ 1200, 0.06, 350],
[ 1500, 0.1, 320]])
scaler = StandardScaler().fit(X) # computes mean & std column-wise
X_scaled = scaler.transform(X) # transform using computed mean and std
# check mean = 0 and variance = 1 for every variable/column after scaling
print(X_scaled.mean(axis=0)) # return 1D array of size(3,1)
print(X_scaled.std(axis=0)) # return 1D array of size(3,1)
# access mean and variance via object properties
print(scaler.mean_) # return 1D array of size(3,1)
print(scaler.var_) # return 1D array of size(3,1)
#%% Normalization
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler() # create object
X_scaled = scaler.fit_transform(X) # fit & transform
# check min = 0 and max = 1 for every variable/column after scaling
print(X_scaled.min(axis=0))
print(X_scaled.max(axis=0))
# access min and max via object properties
print(scaler.data_min_)
print(scaler.data_max_)
##%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
## Robust Centering & Scaling
## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#%% Generate oulier-infested data
X = np.random.normal(40, 1, (1500,1))
X[200:300] = X[200:300] +8; X[1000:1150] = X[1000:1150] + 8
# plot
import matplotlib.pyplot as plt
plt.plot(X, '.-')
plt.xlabel('sample #'), plt.ylabel('variable measurement')
plt.title('Raw measurements')
#%% Transform via standard scaling
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)
# mean and std
print('Estimated mean = ', scaler.mean_[0])
print('Estimated standard deviation = ', np.sqrt(scaler.var_[0]))
# plot
plt.figure()
plt.plot(X_scaled, '.-')
plt.xlabel('sample #'), plt.ylabel('scaled variable measurement')
plt.xlim((0,1500))
plt.title('Standard scaling')
#%% Transform via robust MAD scaling
# compute median and MAD
from scipy import stats
median = np.median(X)
MAD = stats.median_absolute_deviation(X)
# scale
X_scaled = (X - median)/MAD[0]
# median and MAD
print('Estimated robust location = ', median)
print('Estimated robust spread = ', MAD)
# plot
plt.figure()
plt.plot(X_scaled, '.-')
plt.xlabel('sample #'), plt.ylabel('scaled variable measurement')
plt.xlim((0,1500))
plt.title('Robust MAD scaling')
| 31.209302 | 80 | 0.592399 |
9d9115d7ba282f909762763e4412827f039f107a | 943 | py | Python | pbtaskrunner/models.py | arxcruz/pbtaskrunner | 26aff681593aae0d72520509fd1fbecbc3c8a9a6 | [
"Apache-2.0"
] | null | null | null | pbtaskrunner/models.py | arxcruz/pbtaskrunner | 26aff681593aae0d72520509fd1fbecbc3c8a9a6 | [
"Apache-2.0"
] | null | null | null | pbtaskrunner/models.py | arxcruz/pbtaskrunner | 26aff681593aae0d72520509fd1fbecbc3c8a9a6 | [
"Apache-2.0"
] | null | null | null | from pbtaskrunner import db
from pbtaskrunner import app
from datetime import datetime
| 31.433333 | 64 | 0.710498 |
9d91be2759fba448a3db8257c92c32db569fc6fc | 2,244 | py | Python | web/addons/mass_mailing/models/mass_mailing_report.py | diogocs1/comps | 63df07f6cf21c41e4527c06e2d0499f23f4322e7 | [
"Apache-2.0"
] | 1 | 2019-12-29T11:53:56.000Z | 2019-12-29T11:53:56.000Z | odoo/addons/mass_mailing/models/mass_mailing_report.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | null | null | null | odoo/addons/mass_mailing/models/mass_mailing_report.py | tuanquanghpvn/odoo8-tutorial | 52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e | [
"MIT"
] | 3 | 2020-10-08T14:42:10.000Z | 2022-01-28T14:12:29.000Z | # -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp import tools
| 42.339623 | 101 | 0.572638 |
9d92cc65827cd5fd979d0843a2269e9633857396 | 97 | py | Python | main.py | chengxianga2008/abn_amro | 66172747328b33a591ea4e4fcbb902cb823b91e0 | [
"BSD-2-Clause"
] | null | null | null | main.py | chengxianga2008/abn_amro | 66172747328b33a591ea4e4fcbb902cb823b91e0 | [
"BSD-2-Clause"
] | null | null | null | main.py | chengxianga2008/abn_amro | 66172747328b33a591ea4e4fcbb902cb823b91e0 | [
"BSD-2-Clause"
] | null | null | null | import app
if __name__ == "__main__":
app.daily_summary("data/Input.txt", "data/Output.csv") | 24.25 | 58 | 0.701031 |
9d934505c9a5de277afc3e1a3c4cc83a509daf62 | 2,750 | py | Python | modules/springerlink.py | Christoph-D/paperget | 9887936039ecc9fafe4dcce7988e75e964a05bcd | [
"MIT"
] | 3 | 2016-06-17T15:52:02.000Z | 2017-12-21T02:44:49.000Z | modules/springerlink.py | Christoph-D/paperget | 9887936039ecc9fafe4dcce7988e75e964a05bcd | [
"MIT"
] | null | null | null | modules/springerlink.py | Christoph-D/paperget | 9887936039ecc9fafe4dcce7988e75e964a05bcd | [
"MIT"
] | 1 | 2021-02-16T21:10:33.000Z | 2021-02-16T21:10:33.000Z | import urllib, re
urllib._urlopener = FakeUseragentURLopener()
download_pdf_regex = re.compile('.*<li class="pdf"><a class="sprite pdf-resource-sprite" href="([^"]*)" title="Download PDF.*')
viewstate_regex = re.compile('.*<input type="hidden" name="__VIEWSTATE" id="__VIEWSTATE" value="([^"]*)" />.*')
eventvalidation_regex = re.compile('.*<input type="hidden" name="__EVENTVALIDATION" id="__EVENTVALIDATION" value="([^"]*)" />.*')
import base
base.register_module('http://www\.springerlink\.com/content/.*',
{'name': 'springerlink',
'download_pdf': download_pdf,
'download_bib': download_bib,
})
base.register_module('http://link\.springer\.com/chapter/.*',
{'name': 'springerlink_chapter',
'download_pdf': download_pdf_chapter,
})
| 49.107143 | 129 | 0.651273 |
9d956d3bf237c9754179486589b614a0b07bc05b | 1,533 | py | Python | app/__init__.py | alexander-emelyanov/microblog | f549768b410f1ce70fbfcbcdf89fb945793168e2 | [
"MIT"
] | null | null | null | app/__init__.py | alexander-emelyanov/microblog | f549768b410f1ce70fbfcbcdf89fb945793168e2 | [
"MIT"
] | null | null | null | app/__init__.py | alexander-emelyanov/microblog | f549768b410f1ce70fbfcbcdf89fb945793168e2 | [
"MIT"
] | null | null | null | import os
from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.login import LoginManager
from flask.ext.openid import OpenID
from config import basedir, ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, MAIL_PASSWORD, MAIL_SECURE
# Application bootstrap: create the Flask app, load settings from config.py,
# and attach the SQLAlchemy database handle.
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
# Imported here, after `db` exists, rather than at the top of the file --
# presumably models.py needs the database handle (verify before reordering).
from app import models
# Flask-Login: 'login' is the endpoint unauthenticated users are redirected
# to when they request a protected view.
lm = LoginManager()
lm.init_app(app)
lm.login_view = 'login'
# OpenID helper; its temporary store lives under <basedir>/tmp.
oid = OpenID(app, os.path.join(basedir, 'tmp'))
# Imported last so the view functions can see app, db, lm and oid.
from app import views
# Error handling
# Production-only logging (skipped while app.debug is set): e-mail ERROR
# records to the admins and keep a rotating local log file for INFO+.
if not app.debug:
    import logging
    from logging.handlers import SMTPHandler, RotatingFileHandler
    # SMTP based handler configuration
    credentials = None
    secure = None
    if MAIL_USERNAME or MAIL_PASSWORD:
        credentials = (MAIL_USERNAME, MAIL_PASSWORD)
    if MAIL_SECURE:
        secure = MAIL_SECURE
    mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT), MAIL_USERNAME, ADMINS, 'Microblog failure', credentials, secure)
    mail_handler.setLevel(logging.ERROR)
    # File based handler: rotate at 1 MiB, keep up to 10 backup files
    file_handler = RotatingFileHandler('tmp/microblog.log', 'a', 1 * 1024 * 1024, 10)
    file_handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
    file_handler.setLevel(logging.INFO)
    # Set handlers
    app.logger.setLevel(logging.INFO)
    app.logger.addHandler(mail_handler)
    app.logger.addHandler(file_handler)
app.logger.info('Microblog startup') | 26.894737 | 121 | 0.739726 |
9d99ee239305997e26415c20f473a94ad6005845 | 330 | py | Python | PersonalWebApp/Blog/migrations/0002_remove_post_wallpaper_representation.py | CiganOliviu/personal_website | abedf67efc2e7e212c32815f645d3b3709f9f177 | [
"MIT"
] | 1 | 2021-04-02T16:45:56.000Z | 2021-04-02T16:45:56.000Z | PersonalWebApp/Blog/migrations/0002_remove_post_wallpaper_representation.py | CiganOliviu/personal_website | abedf67efc2e7e212c32815f645d3b3709f9f177 | [
"MIT"
] | null | null | null | PersonalWebApp/Blog/migrations/0002_remove_post_wallpaper_representation.py | CiganOliviu/personal_website | abedf67efc2e7e212c32815f645d3b3709f9f177 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-09-03 17:04
from django.db import migrations
| 18.333333 | 47 | 0.593939 |
9d9caa03a4ae2fbdbadf5bfc3fd2600ade753a1b | 3,460 | py | Python | modules/colors.py | trybefore/discordbot | 1ffce8149cde586e8c5883e8200b02937c5a15f6 | [
"MIT"
] | 3 | 2020-09-15T23:19:18.000Z | 2021-02-17T10:24:54.000Z | modules/colors.py | trybefore/discordbot | 1ffce8149cde586e8c5883e8200b02937c5a15f6 | [
"MIT"
] | 3 | 2021-06-22T10:57:14.000Z | 2021-06-22T10:57:15.000Z | modules/colors.py | trybefore/discordbot | 1ffce8149cde586e8c5883e8200b02937c5a15f6 | [
"MIT"
] | 2 | 2020-05-03T20:54:57.000Z | 2020-09-12T18:49:13.000Z | from threading import Lock
import discord
from discord.ext import commands
from loguru import logger
from local_types import Snowflake
from modules import is_bot_admin
| 31.454545 | 161 | 0.57948 |
9d9d2695df7ed5d007311b6af26fc83339dd2f8b | 526 | py | Python | src/test/python/loader_native.py | dlech/xlang | ace2c924cc1fbecd05804866e183124cbb73bd48 | [
"MIT"
] | null | null | null | src/test/python/loader_native.py | dlech/xlang | ace2c924cc1fbecd05804866e183124cbb73bd48 | [
"MIT"
] | null | null | null | src/test/python/loader_native.py | dlech/xlang | ace2c924cc1fbecd05804866e183124cbb73bd48 | [
"MIT"
] | 1 | 2022-01-23T06:01:40.000Z | 2022-01-23T06:01:40.000Z | import sys
sys.path.append("./generated")
sys.path.append("../../package/pywinrt/projection/pywinrt")
import _winrt
_winrt.init_apartment(_winrt.MTA)
| 30.941176 | 82 | 0.747148 |
9d9de2c097d8a8da90ec0340d6b529e57bfc179c | 2,247 | py | Python | src/main/scripts/evalDelly.py | cwhelan/cloudbreak | bcff41d5309cfffb1faffc1d46e3f85007f84981 | [
"MIT"
] | 4 | 2015-02-10T07:10:28.000Z | 2016-09-18T19:29:53.000Z | src/main/scripts/evalDelly.py | cwhelan/cloudbreak | bcff41d5309cfffb1faffc1d46e3f85007f84981 | [
"MIT"
] | null | null | null | src/main/scripts/evalDelly.py | cwhelan/cloudbreak | bcff41d5309cfffb1faffc1d46e3f85007f84981 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import subprocess
import evalBedFile
# Delly file format (when only del summaries in file - cat *.del.txt | grep Deletion)
# The summary line contains the chromosome, the estimated start and end of the structural variant,
# the size of the variant, the number of supporting pairs, the average mapping quality and a unique structural variant id.
# 2 3666033 3666250 217 2 1.5 >Deletion_JCVICHR2SIM_00000053<
delly_filename = sys.argv[1]
truth_filename = sys.argv[2]
score_values = []
print_hits = False
print_bed = False
if len(sys.argv) == 5 and sys.argv[3] == "--printHits":
threshold = float(sys.argv[4])
score_values.append(threshold)
print_hits = True
elif len(sys.argv) == 5 and sys.argv[3] == "--printBed":
threshold = float(sys.argv[4])
score_values.append(threshold)
print_bed = True
else:
delly_file = open(delly_filename, "r")
for line in delly_file:
if line.startswith("#"):
continue
fields = line.split("\t")
# use num pairs as score for now
score = float(fields[4])
score_values.append(score)
delly_file.close()
unique_score_values = list(set(score_values))
unique_score_values.sort()
if not print_hits and not print_bed:
print "\t".join(["Thresh", "Calls", "TP", "WrongType", "Short", "TPR"])
for v in unique_score_values:
calls_gte_threshold = []
delly_file = open(delly_filename, "r")
non_del_calls = 0
for line in delly_file:
if line.startswith("#"):
continue
fields = line.split("\t")
if float(fields[4]) >= v:
chrom = fields[0]
ostart = fields[1]
oend = fields[2]
bed_line = "\t".join([chrom, ostart, oend])
#print bed_line.strip()
calls_gte_threshold.append(bed_line)
if print_bed:
print "\n".join(calls_gte_threshold)
continue
(qualified_calls, matches, short_calls) = evalBedFile.eval_bed_deletions(truth_filename, calls_gte_threshold, print_hits)
tpr = float(matches) / (qualified_calls)
if not print_hits:
print "\t".join(map(str, [v, qualified_calls, matches, non_del_calls, short_calls, tpr]))
| 31.208333 | 125 | 0.652425 |
9d9e064b6bf0f12b09cc360b5115a0ae4d5fbeff | 1,645 | py | Python | examples/basic_dsp_example.py | Camotubi/basic_dsp | 38a380439cc8936c64febbc12227df78d95fce7f | [
"Apache-2.0",
"MIT"
] | 40 | 2015-11-23T02:23:35.000Z | 2022-03-18T11:19:11.000Z | examples/basic_dsp_example.py | Camotubi/basic_dsp | 38a380439cc8936c64febbc12227df78d95fce7f | [
"Apache-2.0",
"MIT"
] | 47 | 2015-11-23T01:58:38.000Z | 2021-01-11T07:53:37.000Z | examples/basic_dsp_example.py | Camotubi/basic_dsp | 38a380439cc8936c64febbc12227df78d95fce7f | [
"Apache-2.0",
"MIT"
] | 9 | 2018-05-19T07:25:26.000Z | 2022-01-09T20:51:40.000Z | import ctypes
import struct
import time
#
# A small example how to use basic_dsp in a different language.
#
lib = ctypes.WinDLL('basic_dsp.dll')
new64Proto = ctypes.WINFUNCTYPE (
ctypes.c_void_p, # Return type.
ctypes.c_int,
ctypes.c_int,
ctypes.c_double,
ctypes.c_ulong,
ctypes.c_double)
new64 = new64Proto (("new64", lib))
getValue64Proto = ctypes.WINFUNCTYPE (
ctypes.c_double, # Return type.
ctypes.c_void_p,
ctypes.c_ulong)
getValue64 = getValue64Proto (("get_value64", lib))
offset64Proto = ctypes.WINFUNCTYPE (
VecResult, # Return type.
ctypes.c_void_p,
ctypes.c_double)
offset64 = offset64Proto (("real_offset64", lib))
vec = new64(
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_double(0.0),
ctypes.c_ulong(100000),
ctypes.c_double(1.0))
val = getValue64(vec, ctypes.c_ulong(0))
print('At the start: vec[0] = {}'.format(val))
start = time.clock()
iterations = 100000
toNs = 1e9 / iterations
increment = 5.0
for x in range(0, iterations):
vecRes = offset64(vec, ctypes.c_double(increment))
vec = vecRes.result
end = time.clock()
print('{} ns per iteration, each iteration has {} samples'.format((end - start) * toNs, iterations))
print('Result code: {} (0 means no error)'.format(vecRes.resultCode))
vecRes = offset64(vec, ctypes.c_double(5.0))
vec = vecRes.result
val = getValue64(vec, ctypes.c_ulong(0))
print('After {} iterations of increment by {}: vec[0] = {}'.format(iterations + 1, increment, val))
| 26.967213 | 100 | 0.677204 |
9da16db4956d4af0439ae0a5ca6c02568b1d609f | 53,171 | py | Python | src/pytris.py | CSID-DGU/2019-2-OSSPC-MDJ-1 | 2987e11b65bc9e31a30cadd39eea4214e2261998 | [
"MIT"
] | 1 | 2019-09-24T04:55:29.000Z | 2019-09-24T04:55:29.000Z | src/pytris.py | CSID-DGU/2019-2-OSSPC-MDJ-1 | 2987e11b65bc9e31a30cadd39eea4214e2261998 | [
"MIT"
] | null | null | null | src/pytris.py | CSID-DGU/2019-2-OSSPC-MDJ-1 | 2987e11b65bc9e31a30cadd39eea4214e2261998 | [
"MIT"
] | 7 | 2019-09-24T05:14:24.000Z | 2019-12-10T04:15:28.000Z | #!/usr/bin/env python
# coding: utf-8
import pygame
import operator
from mino import *
from random import *
from pygame.locals import *
from ui import *
from screeninfo import get_monitors
from pygame.surface import Surface
import sys
from function import *
# --- Display setup ---------------------------------------------------------
# Window is sized to 70% of the (last enumerated) physical monitor.
screen_width = 0
screen_height = 0
for m in get_monitors():
    screen_width = int(m.width*0.7)
    screen_height = int(m.height*0.7)
# Define
block_size = 25
width = 10 # Board width
height = 20 # Board height
framerate = 30 # Bigger -> Slower
framerate_n = 30
pygame.init()
size = [screen_width, screen_height]
clock = pygame.time.Clock()
screen = pygame.display.set_mode(size)
# Gravity tick: a USEREVENT fires every framerate*10 ms and drives the game.
pygame.time.set_timer(pygame.USEREVENT, framerate * 10)
# NOTE(review): this second call re-arms the same USEREVENT with an identical
# interval (framerate_n == framerate == 30 at this point), so it is redundant.
pygame.time.set_timer(pygame.USEREVENT, framerate_n * 10)
pygame.display.set_caption("ACOTRIS")
background_file = '../assets/images/backgroundimage.png'
# The board-drawing helpers referenced below (draw_single_board,
# draw_multi_board_1/2, background_image*, insert_image) come from the
# `from function import *` at the top of the file.
# --- Image assets (paths are relative to the src/ working directory) -------
image_aco1 = pygame.image.load('../assets/images/aco1.png')
image_aco2 = pygame.image.load('../assets/images/aco2.png')
image_aco3 = pygame.image.load('../assets/images/aco3.png')
image_manual = pygame.image.load('../assets/images/manual.png')
image_winner = pygame.image.load('../assets/images/winner1.png')
image_trophy = pygame.image.load('../assets/images/trophy.png')
# Separate copies converted to the display pixel format, then scaled
# relative to the window size.
rect_aco1b = pygame.image.load('../assets/images/aco1.png').convert()
rect_aco2b = pygame.image.load('../assets/images/aco2.png').convert()
rect_aco3b = pygame.image.load('../assets/images/aco3.png').convert()
rect_aco1 = pygame.transform.scale(rect_aco1b, (int(screen_width*0.12), int(screen_height*0.13)))
rect_aco2 = pygame.transform.scale(rect_aco2b, (int(screen_width*0.13), int(screen_height*0.16)))
rect_aco3 = pygame.transform.scale(rect_aco3b, (int(screen_width*0.14), int(screen_height*0.18)))
# Initial values
# --- Screen / mode flags ---
blink = False                # blink phase for flashing text on menu screens
start_single = False # single-player game running
start_multi = False # two-player game running
pause = False
done = False                 # main-loop exit flag
game_over = False            # single-player game-over screen active
multi_over = False           # two-player game-over screen active
show_score = False
show_manual = False
screen_Start = True
game_mode = False            # mode/difficulty selection screen active
# --- Score / difficulty (the `_n` twins drive the board that
# --- draw_multi_board_1 renders in two-player mode) ---
score = 0
score_n = 0
level = 1
level_n = 1
goal = 1                     # cleared lines remaining before the next level
goal_n = 1
bottom_count = 0             # gravity ticks spent resting before lock (locks at 6)
bottom_count_n = 0
hard_drop = False
hard_drop_n = False
player = 0                   # winner number shown on the multiplayer result screen
# --- Current piece state ---
dx, dy = 3, 0 # Minos location status
dp, dq = 3, 0
rotation = 0 # Minos rotation status
rotation_n = 0
mino = randint(1, 7) # Current mino
mino_n = randint(1,7)
next_mino = randint(1, 7) # Next mino
next_mino_n = randint(1,7)
hold = False # Hold status
hold_n=False
hold_mino = -1 # Holded mino (-1 == empty hold slot)
hold_mino_n = -1
# Name entry on the game-over screen: three ASCII codes, 65 == 'A'.
name_location = 0
name = [65, 65, 65]
# NOTE(review): `type` shadows the builtin -- rename when convenient.
type = 0
level1 = 0
level2 = 0
# Load the leaderboard ("NAME SCORE" per line): keep the last score recorded
# for each name, then sort into a list of (name, score) tuples, highest
# score first.  The previous code opened the file twice, discarded the first
# read, and leaked the second handle; reading inside one `with` fixes all three.
with open('leaderboard.txt') as f:
    lines = [line.rstrip('\n') for line in f]
leaders = {}
for entry in lines:
    leader_name = entry.split(' ')[0]
    leaders[leader_name] = int(entry.split(' ')[1])
leaders = sorted(leaders.items(), key=operator.itemgetter(1), reverse=True)

# Game boards: `width` columns x (`height` + 1) rows, 0 == empty cell.
matrix = [[0 for y in range(height + 1)] for x in range(width)]    # 2P/right board
matrix_n = [[0 for k in range(height + 1)] for p in range(width)]  # 1P/left board
###########################################################
# Loop Start
###########################################################
while not done:
# Pause screen
if pause:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
if start_single == True:
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
elif start_multi == True:
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
#pause
pause_surface = screen.convert_alpha()
pause_surface.fill((0, 0, 0, 0))
pygame.draw.rect(pause_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)])
screen.blit(pause_surface, (0, 0))
pause_text = ui_variables.DG_70.render("PAUSED", 1, ui_variables.white)
pause_start = ui_variables.DG_small.render("Press esc to continue", 1, ui_variables.white)
screen.blit(pause_text, (screen_width*0.415, screen_height*0.35))
if blink:
screen.blit(pause_start, (screen_width*0.36, screen_height*0.6))
blink = False
else:
blink = True
pygame.display.update()
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation, matrix)
erase_mino(dp, dq, mino_n, rotation_n, matrix_n)
if event.key == K_ESCAPE:
pause = False
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_q:
done = True
# Game screen
# Start_single screen
elif start_single:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
# Set speed
if not game_over:
keys_pressed = pygame.key.get_pressed()
if keys_pressed[K_DOWN]:
pygame.time.set_timer(pygame.USEREVENT, framerate * 1)
else:
pygame.time.set_timer(pygame.USEREVENT, framerate * 10)
# Draw a mino
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Erase a mino
if not game_over:
erase_mino(dx, dy, mino, rotation, matrix)
# Move mino down
if not is_bottom(dx, dy, mino, rotation, matrix):
dy += 1
# Create new mino
else:
if hard_drop or bottom_count == 6:
hard_drop = False
bottom_count = 0
score += 10 * level
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
if is_stackable(next_mino, matrix):
mino = next_mino
next_mino = randint(1, 7)
dx, dy = 3, 0
rotation = 0
hold = False
else:
start_single = False
game_over = True
single = True
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count += 1
# Erase line
erase_count = 0
for j in range(21):
is_full = True
for i in range(10):
if matrix[i][j] == 0:
is_full = False
if is_full:
erase_count += 1
k = j
while k > 0:
for i in range(10):
matrix[i][k] = matrix[i][k - 1]
k -= 1
if erase_count == 1:
score += 50 * level
elif erase_count == 2:
score += 150 * level
elif erase_count == 3:
score += 350 * level
elif erase_count == 4:
score += 1000 * level
# Increase level
goal -= erase_count
if goal < 1 and level < 15:
level += 1
goal += level * 5
framerate = int(framerate * 0.8)
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation, matrix)
if event.key == K_ESCAPE:
pause = True
#Q
elif event.key == K_q:
done = True
# Hard drop
elif event.key == K_SPACE:
while not is_bottom(dx, dy, mino, rotation, matrix):
dy += 1
hard_drop = True
pygame.time.set_timer(pygame.USEREVENT, 1)
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Hold
elif event.key == K_LSHIFT:
if hold == False:
if hold_mino == -1:
hold_mino = mino
mino = next_mino
next_mino = randint(1, 7)
else:
hold_mino, mino = mino, hold_mino
dx, dy = 3, 0
rotation = 0
hold = True
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Turn right
elif event.key == K_UP:
if is_turnable_r(dx, dy, mino, rotation, matrix):
rotation += 1
# Kick
elif is_turnable_r(dx, dy - 1, mino, rotation, matrix):
dy -= 1
rotation += 1
elif is_turnable_r(dx + 1, dy, mino, rotation, matrix):
dx += 1
rotation += 1
elif is_turnable_r(dx - 1, dy, mino, rotation, matrix):
dx -= 1
rotation += 1
elif is_turnable_r(dx, dy - 2, mino, rotation, matrix):
dy -= 2
rotation += 1
elif is_turnable_r(dx + 2, dy, mino, rotation, matrix):
dx += 2
rotation += 1
elif is_turnable_r(dx - 2, dy, mino, rotation, matrix):
dx -= 2
rotation += 1
if rotation == 4:
rotation = 0
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Turn left
elif event.key == K_z or event.key == K_LCTRL:
if is_turnable_l(dx, dy, mino, rotation, matrix):
rotation -= 1
# Kick
elif is_turnable_l(dx, dy - 1, mino, rotation, matrix):
dy -= 1
rotation -= 1
elif is_turnable_l(dx + 1, dy, mino, rotation, matrix):
dx += 1
rotation -= 1
elif is_turnable_l(dx - 1, dy, mino, rotation, matrix):
dx -= 1
rotation -= 1
elif is_turnable_l(dx, dy - 2, mino, rotation, matrix):
dy -= 2
rotation += 1
elif is_turnable_l(dx + 2, dy, mino, rotation, matrix):
dx += 2
rotation += 1
elif is_turnable_l(dx - 2, dy, mino, rotation, matrix):
dx -= 2
if rotation == -1:
rotation = 3
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Move left
elif event.key == K_LEFT:
if not is_leftedge(dx, dy, mino, rotation, matrix):
dx -= 1
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
# Move right
elif event.key == K_RIGHT:
if not is_rightedge(dx, dy, mino, rotation, matrix):
dx += 1
draw_mino(dx, dy, mino, rotation, matrix)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
pygame.display.update()
# Start_multi screen
elif start_multi:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
screen.fill(ui_variables.black)
background_image_alpha(screen, background_file, screen_width, screen_height)
if not multi_over:
keys_pressed = pygame.key.get_pressed()
if keys_pressed[K_DOWN]:
pygame.time.set_timer(pygame.USEREVENT, framerate*1)
else:
pygame.time.set_timer(pygame.USEREVENT, framerate*10)
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
# Erase a mino
if not multi_over:
erase_mino(dx, dy, mino, rotation, matrix)
# Move mino down
if not is_bottom(dx, dy, mino, rotation, matrix):
dy += 1
# Create new mino
else:
if hard_drop or bottom_count == 6:
hard_drop = False
bottom_count = 0
score += 10 * level
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
if is_stackable(next_mino, matrix):
mino = next_mino
next_mino = randint(1, 7)
dx, dy = 3, 0
rotation = 0
hold = False
else:
start_multi = False
multi_over = True
player = 1
single = False
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count += 1
# Erase line
erase_count = 0
for j in range(21):
is_full = True
for i in range(10):
if matrix[i][j] == 0:
is_full = False
if is_full:
erase_count += 1
k = j
while k > 0:
for i in range(10):
matrix[i][k] = matrix[i][k - 1]
k -= 1
if erase_count == 1:
score += 50 * level
elif erase_count == 2:
score += 150 * level
elif erase_count == 3:
score += 350 * level
elif erase_count == 4:
score += 1000 * level
# Increase level
goal -= erase_count
if goal < 1 and level < 15:
level += 1
goal += level * 5
framerate = int(framerate * 0.8)
level_2 = level
draw_mino(dp, dq, mino_n, rotation_n ,matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
if not multi_over:
erase_mino(dp, dq, mino_n, rotation_n, matrix_n)
# Move mino down
if not is_bottom(dp, dq, mino_n, rotation_n, matrix_n):
dq += 1
else:
if hard_drop_n or bottom_count_n == 6:
hard_drop_n = False
bottom_count_n = 0
score_n+=10*level_n
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
if is_stackable(next_mino_n, matrix_n):
mino_n = next_mino_n
next_mino_n = randint(1,7)
dp, dq = 3, 0
rotation_n = 0
hold_n = False
else:
start_multi = False
multi_over= True
player = 2
single = False
pygame.time.set_timer(pygame.USEREVENT, 1)
else:
bottom_count_n += 1
erase_count_n = 0
for j in range(21):
is_full_n = True
for i in range(10):
if matrix_n[i][j] == 0:
is_full_n = False
if is_full_n:
erase_count_n += 1
k = j
while k > 0:
for i in range(10):
matrix_n[i][k] = matrix_n[i][k-1]
k -= 1
if erase_count_n == 1:
score_n += 50 * level_n
elif erase_count_n == 2:
score_n += 150 * level_n
elif erase_count_n == 3:
score_n += 350 * level_n
elif erase_count_n == 4:
score_n += 1000 * level_n
# Increase level
goal_n -= erase_count_n
if goal_n < 1 and level_n < 15:
level_n += 1
goal_n += level_n * 5
framerate_n = int(framerate_n * 0.8)
level1 = level_n
elif event.type == KEYDOWN:
erase_mino(dx, dy, mino, rotation, matrix)
erase_mino(dp, dq, mino_n, rotation_n, matrix_n)
if event.key == K_ESCAPE:
pause = True
#Q
elif event.key == K_q:
done = True
# Hard drop
elif event.key == K_SPACE:
while not is_bottom(dx, dy, mino, rotation, matrix):
dy += 1
hard_drop = True
pygame.time.set_timer(pygame.USEREVENT, framerate)
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_LCTRL:
while not is_bottom(dp, dq, mino_n, rotation_n, matrix_n):
dq += 1
hard_drop_n = True
pygame.time.set_timer(pygame.USEREVENT, framerate_n)
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
# Hold
elif event.key == K_RSHIFT:
if hold == False:
if hold_mino == -1:
hold_mino = mino
mino = next_mino
next_mino = randint(1, 7)
else:
hold_mino, mino = mino, hold_mino
dx, dy = 3, 0
rotation = 0
hold = True
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_LSHIFT:
if hold_n == False:
if hold_mino_n == -1:
hold_mino_n = mino_n
mino_n = next_mino_n
next_mino_n = randint(1,7)
else:
hold_mino_n, mino_n = mino_n, hold_mino_n
dp, dq = 3, 0
rotation_n = 0
hold_n = True
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
# Turn right
elif event.key == K_UP :
if is_turnable_r(dx, dy, mino, rotation, matrix):
rotation += 1
# Kick
elif is_turnable_r(dx, dy - 1, mino, rotation, matrix):
dy -= 1
rotation += 1
elif is_turnable_r(dx + 1, dy, mino, rotation, matrix):
dx += 1
rotation += 1
elif is_turnable_r(dx - 1, dy, mino, rotation, matrix):
dx -= 1
rotation += 1
elif is_turnable_r(dx, dy - 2, mino, rotation, matrix):
dy -= 2
rotation += 1
elif is_turnable_r(dx + 2, dy, mino, rotation, matrix):
dx += 2
rotation += 1
elif is_turnable_r(dx - 2, dy, mino, rotation, matrix):
dx -= 2
rotation += 1
if rotation == 4:
rotation = 0
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_w:
if is_turnable_r(dp, dq, mino_n, rotation_n, matrix_n):
rotation_n += 1
# Kick
elif is_turnable_r(dp, dq - 1, mino_n, rotation_n, matrix_n):
dq -= 1
rotation_n += 1
elif is_turnable_r(dp + 1, dq, mino_n,rotation_n, matrix_n):
dp += 1
rotation_n += 1
elif is_turnable_r(dp - 1, dq, mino_n, rotation_n, matrix_n):
dp -= 1
rotation_n += 1
elif is_turnable_r(dp, dq - 2, mino_n, rotation_n, matrix_n):
dq -= 2
rotation_n+= 1
elif is_turnable_r(dp + 2, dq, mino_n,rotation_n, matrix_n):
dp += 2
rotation_n+= 1
elif is_turnable_r(dp - 2, dq, mino_n, rotation_n, matrix_n):
dp -= 2
rotation_n += 1
if rotation_n == 4:
rotation_n = 0
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
# Move left
elif event.key == K_LEFT:
if not is_leftedge(dx, dy, mino, rotation, matrix):
dx -= 1
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_a:
if not is_leftedge(dp, dq, mino_n, rotation_n, matrix_n):
dp -= 1
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
# Move right
elif event.key == K_RIGHT:
if not is_rightedge(dx, dy, mino, rotation, matrix):
dx += 1
draw_mino(dx, dy, mino, rotation, matrix)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
elif event.key == K_d:
if not is_rightedge(dp, dq, mino_n, rotation_n, matrix_n):
dp += 1
draw_mino(dp, dq, mino_n, rotation_n, matrix_n)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
pygame.display.update()
# Game over screen
elif game_over:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
over_text_1 = ui_variables.DG_70.render("GAME OVER", 1, ui_variables.white)
over_start = ui_variables.DG_v_small.render("Press return to continue", 1, ui_variables.white)
draw_single_board(next_mino, hold_mino, score, level, goal, matrix)
#pause
over_surface = screen.convert_alpha()
over_surface.fill((0, 0, 0, 0))
pygame.draw.rect(over_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)])
screen.blit(over_surface, (0, 0))
name_1 = ui_variables.DGM40.render(chr(name[0]), 1, ui_variables.white)
name_2 = ui_variables.DGM40.render(chr(name[1]), 1, ui_variables.white)
name_3 = ui_variables.DGM40.render(chr(name[2]), 1, ui_variables.white)
underbar_1 = ui_variables.DGM40.render("_", 1, ui_variables.white)
underbar_2 = ui_variables.DGM40.render("_", 1, ui_variables.white)
underbar_3 = ui_variables.DGM40.render("_", 1, ui_variables.white)
screen.blit(over_text_1, (int(screen_width*0.37), int(screen_height*0.2)))
screen.blit(name_1, (int(screen_width*0.4), int(screen_height*0.5)))
screen.blit(name_2, (int(screen_width*0.5), int(screen_height*0.5)))
screen.blit(name_3, (int(screen_width*0.6), int(screen_height*0.5)))
if blink:
screen.blit(over_start, (int(screen_width*0.38), int(screen_height*0.7)))
blink = False
else:
if name_location == 0:
screen.blit(underbar_1, (int(screen_width*0.4), int(screen_height*0.52)))
elif name_location == 1:
screen.blit(underbar_2, (int(screen_width*0.5), int(screen_height*0.52)))
elif name_location == 2:
screen.blit(underbar_3, (int(screen_width*0.6), int(screen_height*0.52)))
blink = True
pygame.display.update()
elif event.type == KEYDOWN:
if event.key == K_RETURN:
outfile = open('leaderboard.txt','a')
outfile.write(chr(name[0]) + chr(name[1]) + chr(name[2]) + ' ' + str(score) + '\n')
outfile.close()
pygame.time.set_timer(pygame.USEREVENT, 1)
sys.exit()
game_over = False
hold = False
dx, dy = 3, 0
dp, dq = 3, 0
rotation = 0
rotation_n =0
mino = randint(1, 7)
mino_n = randint(1,7)
next_mino = randint(1, 7)
next_mino_n = randint(1,7)
hold_mino = -1
hold_mino_n = -1
framerate = 30
framerate_n = 30
score = 0
score_n = 0
level = 1
level_n = 1
goal = level * 5
goal_n = level_n*5
bottom_count = 0
bottom_count_n = 0
hard_drop = False
hard_drop_n = False
if event.key == K_RIGHT:
if name_location != 2:
name_location += 1
else:
name_location = 0
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_LEFT:
if name_location != 0:
name_location -= 1
else:
name_location = 2
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_UP:
if name[name_location] != 90:
name[name_location] += 1
else:
name[name_location] = 65
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_DOWN:
if name[name_location] != 65:
name[name_location] -= 1
else:
name[name_location] = 90
pygame.time.set_timer(pygame.USEREVENT, 1)
elif event.key == K_q:
done = True
elif multi_over:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
title = "ACOTRIS"
winner_text = "{}P win".format(player)
title_text_1 = ui_variables.DG_big.render(title, 1, ui_variables.white)
over_text_1 = ui_variables.DG_70.render(winner_text, 1, ui_variables.white)
draw_multi_board_1(next_mino_n, hold_mino_n, score_n, level_n, goal_n, matrix_n)
draw_multi_board_2(next_mino, hold_mino, score, level, goal, matrix)
#pause
over_surface = screen.convert_alpha()
over_surface.fill((0, 0, 0, 0))
pygame.draw.rect(over_surface, ui_variables.black_t, [0, 0, int(screen_width), int(screen_height)])
screen.blit(over_surface, (0, 0))
screen.blit(title_text_1,(int(screen_width*0.35), int(screen_height*0.1)))
screen.blit(over_text_1, (int(screen_width*0.39), int(screen_height*0.75)))
insert_image(image_winner, screen_width*0.25, screen_height*0.12, int(screen_width*0.55), int(screen_height*0.65))
insert_image(image_trophy, screen_width*0.21, screen_height*0.13, int(screen_width*0.1), int(screen_height*0.18))
insert_image(image_trophy, screen_width*0.7, screen_height*0.13, int(screen_width*0.1), int(screen_height*0.18))
pygame.display.update()
if event.type == KEYDOWN:
if event.key == K_q:
done = True
elif event.key == K_RETURN:
done = True
elif game_mode:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
keys = pygame.key.get_pressed()
# Q
if event.key == K_q:
done = True
elif keys[pygame.K_s] and keys[pygame.K_e]:
start_single = True
level = 1
goal = level * 5
type = 1
elif keys[pygame.K_s] and keys[pygame.K_r]:
level = 5
start_single = True
goal = level * 5
type = 2
elif keys[pygame.K_s] and keys[pygame.K_t]:
level = 10
start_single = True
goal = level * 5
type = 3
elif keys[pygame.K_m] and keys[pygame.K_e]:
level = 1
goal = level * 5
level_n = 1
goal_n = level_n*5
start_multi= True
type = 1
elif keys[pygame.K_m] and keys[pygame.K_r]:
level = 5
goal = level * 5
level_n = 5
goal_n = level_n*5
start_multi = True
type = 2
elif keys[pygame.K_m] and keys[pygame.K_t]:
level = 10
start_multi = True
goal = level * 5
level_n = 10
goal_n = level_n*5
type = 3
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
screen.fill(ui_variables.black)
background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2))
game_mode_title = ui_variables.DG_small.render("( !)", 1, ui_variables.white)
game_mode_choice = ui_variables.DG_v_small.render("", 1, ui_variables.white)
game_mode_speed = ui_variables.DG_v_small.render("", 1, ui_variables.white)
game_mode_single = ui_variables.DG_v_small.render(" Single (S)", 1, ui_variables.white)
game_mode_single_des = ui_variables.DG_v_small.render(" !!", 1, ui_variables.white)
game_mode_multi = ui_variables.DG_v_small.render(" Multi (M)", 1, ui_variables.white)
game_mode_multi_des = ui_variables.DG_v_small.render(" !!", 1, ui_variables.white)
game_speed_easy = ui_variables.DG_v_small.render(" (E)", 1, ui_variables.white)
game_speed_normal = ui_variables.DG_v_small.render(" (R)", 1, ui_variables.white)
game_speed_hard = ui_variables.DG_v_small.render(" (T)", 1, ui_variables.white)
game_speed_easy_des = ui_variables.DG_v_small.render("EASY !", 1, ui_variables.white)
game_speed_normal_des = ui_variables.DG_v_small.render("NORMAL !!", 1, ui_variables.white)
game_speed_hard_des = ui_variables.DG_v_small.render("HARD !!!", 1, ui_variables.white)
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.055)],
[screen_width,int(screen_height*0.055)],2)
screen.blit(game_mode_title, (int(screen_width*0.1)+int(int(screen_width*0.3)*0.4), int(screen_height*0.065)))
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.125)],
[screen_width,int(screen_height*0.125)],2)
pygame.draw.rect(screen, ui_variables.white, [int(screen_width*0.175), int(screen_height*0.2), int(screen_width*0.2), int(screen_height*0.075)], 2)
pygame.draw.rect(screen, ui_variables.white, [int(screen_width*0.625), int(screen_height*0.2), int(screen_width*0.2), int(screen_height*0.075)], 2)
screen.blit(game_mode_choice, (int(screen_width*0.198), int(screen_height*0.215)))
screen.blit(game_mode_speed, (int(screen_width*0.655), int(screen_height*0.215)))
screen.blit(game_mode_single, (int(screen_width*0.15), int(screen_height*0.35)))
screen.blit(game_mode_multi, (int(screen_width*0.15), int(screen_height*0.55)))
screen.blit(game_mode_single_des, (int(screen_width*0.179), int(screen_height*0.4)))
screen.blit(game_mode_multi_des, (int(screen_width*0.179), int(screen_height*0.6)))
screen.blit(game_speed_easy, (int(screen_width*0.6), int(screen_height*0.3)))
screen.blit(game_speed_normal, (int(screen_width*0.6), int(screen_height*0.45)))
screen.blit(game_speed_hard, (int(screen_width*0.6), int(screen_height*0.6)))
screen.blit(game_speed_easy_des, (int(screen_width*0.65), int(screen_height*0.35)))
screen.blit(game_speed_normal_des, (int(screen_width*0.65), int(screen_height*0.5)))
screen.blit(game_speed_hard_des, (int(screen_width*0.65), int(screen_height*0.65)))
insert_image(image_aco1, int(screen_width*0.79), int(screen_height*0.295), int(screen_width*0.1), int(screen_height*0.1))
insert_image(image_aco2, int(screen_width*0.8), int(screen_height*0.445), int(screen_width*0.1), int(screen_height*0.1))
insert_image(image_aco3, int(screen_width*0.8), int(screen_height*0.595), int(screen_width*0.1), int(screen_height*0.1))
pygame.display.update()
# Manual screen
elif show_manual:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
if event.key == K_SPACE:
game_mode = True
elif event.key == K_q:
done = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
screen.fill(ui_variables.black)
background_image('../assets/images/manual.png', screen_width, screen_height, 0)
show_score_manual = ui_variables.DG_small.render("Manual", 1, ui_variables.white)
show_desc1_manual = ui_variables.DGM23.render("Pytris 7 ", 1, ui_variables.white)
show_desc2_manual = ui_variables.DGM23.render(" , , ", 1, ui_variables.white)
show_desc3_manual = ui_variables.DGM23.render(" .", 1, ui_variables.white)
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.055)],
[screen_width,int(screen_height*0.055)],2)
screen.blit(show_score_manual, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.5), int(screen_height*0.06)))
screen.blit(show_desc1_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.15)))
screen.blit(show_desc2_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.2)))
screen.blit(show_desc3_manual, (int(screen_width*0.05)+int(int(screen_width*0.1)*0.5), int(screen_height*0.25)))
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.125)],
[screen_width,int(screen_height*0.125)],2)
title_start = ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white)
screen.blit(title_start, (screen_width*0.37, screen_height*0.75))
pygame.display.update()
# Show score
elif show_score:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
# Q
if event.key == K_q:
done = True
#space
elif event.key == K_SPACE:
show_manual = True
elif event.type == USEREVENT:
pygame.time.set_timer(pygame.USEREVENT, 300)
screen.fill(ui_variables.black)
background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2))
show_score_list = list()
i = 0
try:
while i<10:
j=0
temp = ui_variables.DG_small.render('%2d' % ((i+1))+' '+'{:>6s}'.format(leaders[i][j]) + ' ' + '{:<8s}'.format(str(leaders[i][j+1])), 1, ui_variables.white)
show_score_list.append(temp)
i+=1
except:
show_manual = True
show_name_y = int(screen_height*0.17)
prop = (show_name_y*0.3)
for element in show_score_list:
screen.blit(element, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.25), show_name_y))
show_name_y += prop
show_button_right = ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white)
show_score_title = ui_variables.DG_small.render("Ranking", 1, ui_variables.white)
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.055)],
[screen_width,int(screen_height*0.055)],2)
screen.blit(show_score_title, (int(screen_width*0.3)+int(int(screen_width*0.3)*0.5), int(screen_height*0.065)))
pygame.draw.line(screen, ui_variables.white,
[0, int(screen_height*0.125)],
[screen_width,int(screen_height*0.125)],2)
screen.blit(show_button_right, (int(screen_width*0.33)+int(int(screen_width*0.33)*0.2), show_name_y+prop))
pygame.display.flip()
# Start screen
else:
for event in pygame.event.get():
if event.type == QUIT:
done = True
elif event.type == KEYDOWN:
if event.key == K_SPACE:
show_score=True
#Q
elif event.key == K_q:
done = True
screen.fill(ui_variables.white)
background_image(background_file, screen_width, int(screen_height/2), int(screen_height/2))
insert_image(image_aco1, screen_width*0.52, screen_height*0.29, 150, 130)
insert_image(image_aco2, screen_width*0.65, screen_height*0.22, 180, 180)
insert_image(image_aco3, screen_width*0.8, screen_height*0.18, 210, 210)
title = ui_variables.DG_big.render("ACOTRIS", 1, ui_variables.black)
title_uni = ui_variables.DG_small.render("in DGU", 1, ui_variables.black)
title_start = ui_variables.DGM23.render("<Press space to start>", 1, ui_variables.white)
title_info = ui_variables.DGM13.render("Copyright (c) 2017 Jason Kim All Rights Reserved.", 1, ui_variables.white)
if blink:
screen.blit(title_start, (91, 195))
blink = False
else:
blink = True
screen.blit(title, (screen_width*0.028, screen_height*0.3))
screen.blit(title_uni, (screen_width*0.37, screen_height*0.3))
screen.blit(title_start, (screen_width*0.37, screen_height*0.55))
screen.blit(title_info, (screen_width*0.35, screen_height*0.93))
if not show_score:
pygame.display.update()
clock.tick(3)
pygame.quit()
| 41.313908 | 179 | 0.514134 |
9da1a92cdcf88a9e292d7bdc3fb0eeb027139777 | 2,305 | py | Python | chemex/experiments/cpmg/fast/liouvillian.py | marcuscangussu/chemex_bouvignies | ce9ec20a42604eb5995abb0f8a84094b29747651 | [
"BSD-3-Clause"
] | null | null | null | chemex/experiments/cpmg/fast/liouvillian.py | marcuscangussu/chemex_bouvignies | ce9ec20a42604eb5995abb0f8a84094b29747651 | [
"BSD-3-Clause"
] | null | null | null | chemex/experiments/cpmg/fast/liouvillian.py | marcuscangussu/chemex_bouvignies | ce9ec20a42604eb5995abb0f8a84094b29747651 | [
"BSD-3-Clause"
] | null | null | null | """
Created on Sep 1, 2011
@author: guillaume
"""
from scipy import zeros
from chemex.bases.two_states.fast import R_IXY, DR_IXY, DW, KAB, KBA
def compute_liouvillians(pb=0.0, kex=0.0, dw=0.0,
                         r_ixy=5.0, dr_ixy=0.0):
    """
    Compute the exchange matrix (Liouvillian).
    The function assumes a 2-site (A <-> B) exchanging system.
    NOTE(review): the original docstring described a 6x6 {Nx, Ny, Nz}{a,b}
    basis and parameters (r_nz, r_nxy, dr_nxy, cs_offset) that do not exist
    in this signature. The basis matrices come from
    chemex.bases.two_states.fast and the companion functions below operate
    on 4x1 vectors, so the basis appears to be 4x4 ({Ix, Iy}{a,b}) --
    confirm against chemex.bases.two_states.fast.
    Here the thermal equilibrium is assumed to be 0. This is justified because of
    the +/- phase cycling of the first 90 degree pulse at the beginning of the
    cpmg block.
    Parameters
    ----------
    pb : float
        Fractional population of state B.
        0.0 for 0%, 1.0 for 100%.
    kex : float
        Exchange rate between state A and B in /s.
    dw : float
        Chemical shift difference between states A and B in rad/s.
    r_ixy : float
        Transverse relaxation rate of state a in /s.
    dr_ixy : float
        Transverse relaxation rate difference between states a and b in /s.
    Returns
    -------
    out: numpy.matrix
        Liouvillian describing free precession of one
        isolated spin in presence of two-site exchange.
    """
    # forward (A -> B) and backward (B -> A) exchange rates from pb and kex
    kab = kex * pb
    kba = kex - kab
    # Liouvillian = weighted sum of the constant basis matrices imported above
    l_free = R_IXY * r_ixy
    l_free += DR_IXY * dr_ixy
    l_free += DW * dw
    l_free += KAB * kab
    l_free += KBA * kba
    return l_free
def compute_iy_eq(pb):
    """
    Build the 4x1 equilibrium magnetization vector.
    The Iy component of state A carries the population 1 - pb (index 1),
    and the Iy component of state B carries pb (index 3); the Ix entries
    stay at zero.
    Parameters
    ----------
    pb : float
        Fractional population of state B.
        0.0 for 0%, 1.0 for 100%.
    Returns
    -------
    out: ndarray
        (4, 1) magnetization vector at equilibrium.
    """
    pa = 1.0 - pb
    mag_eq = zeros((4, 1))
    mag_eq[1, 0] = pa
    mag_eq[3, 0] = pb
    return mag_eq
def get_iy(mag):
    """
    Extract the Iy magnetization of states a and b.
    (The original docstring said "along z", but the indices read here are
    the Iy entries of the 4x1 {Ix, Iy}{a, b} vector built by
    compute_iy_eq, as the local names magy_a/magy_b indicated.)
    Parameters
    ----------
    mag : ndarray
        (4, 1) magnetization vector.
    Returns
    -------
    magy_a, magy_b : float
        Iy magnetization of state a and state b, respectively.
    """
    return mag[1, 0], mag[3, 0]
| 21.745283 | 81 | 0.59436 |
9da1d621b03730a6eb8d7bba6dfd398419916f66 | 7,261 | py | Python | test/nba/test_fzrs.py | jgershen/sportsball | 8aa2a599091fb14d1897f2e4b77384e9ee6b0eed | [
"MIT"
] | 21 | 2016-03-12T00:59:04.000Z | 2022-03-01T21:32:51.000Z | test/nba/test_fzrs.py | jgershen/sportsball | 8aa2a599091fb14d1897f2e4b77384e9ee6b0eed | [
"MIT"
] | 1 | 2017-04-17T04:39:46.000Z | 2017-04-17T04:39:46.000Z | test/nba/test_fzrs.py | jgershen/sportsball | 8aa2a599091fb14d1897f2e4b77384e9ee6b0eed | [
"MIT"
] | 4 | 2016-07-25T11:55:52.000Z | 2019-06-19T20:55:53.000Z |
import tempfile
import shutil
import os
import pandas
import numpy as np
import datetime
import pkg_resources
from unittest import TestCase
from dfs.nba.featurizers import feature_generators
from dfs.nba.featurizers import fantasy_points_fzr, last5games_fzr, nf_stats_fzr, vegas_fzr, \
opp_ffpg_fzr, salary_fzr | 49.060811 | 115 | 0.450764 |
9da1ed6becdb22c4f8292e530b55e6268710e72f | 1,346 | py | Python | tests/test_status.py | ehdgua01/blocksync | da0198dde87d284ea3c9472c10f51028e05014a0 | [
"MIT"
] | 5 | 2020-06-03T09:30:15.000Z | 2021-12-14T23:48:47.000Z | tests/test_status.py | ehdgua01/blocksync | da0198dde87d284ea3c9472c10f51028e05014a0 | [
"MIT"
] | 2 | 2021-03-19T07:37:57.000Z | 2021-06-18T11:54:46.000Z | tests/test_status.py | ehdgua01/blocksync | da0198dde87d284ea3c9472c10f51028e05014a0 | [
"MIT"
] | null | null | null | from blocksync._consts import ByteSizes
from blocksync._status import Blocks
| 30.590909 | 80 | 0.724368 |
9da20747a22e24702a7eb51c79e588aff84309dd | 275 | py | Python | tests/helpers.py | hawkfish/sudoku | eaae1aa3080032266db0fcfc8a6520a9cb5690fe | [
"MIT"
] | null | null | null | tests/helpers.py | hawkfish/sudoku | eaae1aa3080032266db0fcfc8a6520a9cb5690fe | [
"MIT"
] | null | null | null | tests/helpers.py | hawkfish/sudoku | eaae1aa3080032266db0fcfc8a6520a9cb5690fe | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import os
| 21.153846 | 57 | 0.643636 |
9da26db5109dcd203a39bfcab1fbaa5c755f0368 | 33,787 | py | Python | Software/python/config_dialog.py | edavalosanaya/SKORE | 72e742611ba96b0df542781ded0685f525bea82b | [
"MIT"
] | 1 | 2020-09-20T19:00:17.000Z | 2020-09-20T19:00:17.000Z | Software/python/config_dialog.py | MrCodingRobot/SKORE | 72e742611ba96b0df542781ded0685f525bea82b | [
"MIT"
] | null | null | null | Software/python/config_dialog.py | MrCodingRobot/SKORE | 72e742611ba96b0df542781ded0685f525bea82b | [
"MIT"
] | null | null | null | # General Utility Libraries
import sys
import os
import warnings
# PyQt5, GUI Library
from PyQt5 import QtCore, QtGui, QtWidgets
# Serial and Midi Port Library
import rtmidi
import serial
import serial.tools.list_ports
# SKORE Library
from lib_skore import read_config, update_config
import globals
#-------------------------------------------------------------------------------
# Classes
#-------------------------------------------------------------------------------
# Main Code
if __name__ == "__main__":
    # Launch the configuration dialog as a standalone Qt application.
    app = QtWidgets.QApplication(sys.argv)
    # NOTE(review): ConfigDialog is not defined in this excerpt; it is
    # presumably declared in the "Classes" section above -- confirm.
    config_dialog = ConfigDialog()
    config_dialog.show()
    # Hand control to the Qt event loop; propagate its exit status.
    sys.exit(app.exec_())
| 48.336195 | 184 | 0.671797 |
9da270a879210ead826c86bdc8c185c7e2c0effa | 1,814 | py | Python | valorant/caller.py | frissyn/valorant.py | 49abceab5cc1f3af016ce0b1d253d10089aeb0b4 | [
"MIT"
] | 56 | 2021-01-22T01:48:23.000Z | 2022-03-31T20:44:23.000Z | valorant/caller.py | Tominous/valorant.py | b462441ab4ab403123ad245cab30f3abbd891a66 | [
"MIT"
] | 20 | 2021-02-03T10:40:37.000Z | 2022-03-24T11:23:57.000Z | valorant/caller.py | Tominous/valorant.py | b462441ab4ab403123ad245cab30f3abbd891a66 | [
"MIT"
] | 15 | 2021-03-24T01:17:58.000Z | 2022-02-01T02:10:27.000Z | import requests
from .values import ROUTES
from .values import LOCALES
from .values import REGIONS
from .values import ENDPOINTS
| 27.484848 | 85 | 0.555678 |
9da470ea36af0b767f746d020e41a7f0c5dba94a | 153 | py | Python | python/niveau1/2-Repetitions/6.py | ThomasProg/France-IOI | 03ea502e03f686d74ecf31a17273aded7b8e8a1f | [
"MIT"
] | 2 | 2022-02-13T13:35:13.000Z | 2022-03-31T21:02:11.000Z | python/niveau1/2-Repetitions/6.py | ThomasProg/France-IOI | 03ea502e03f686d74ecf31a17273aded7b8e8a1f | [
"MIT"
] | null | null | null | python/niveau1/2-Repetitions/6.py | ThomasProg/France-IOI | 03ea502e03f686d74ecf31a17273aded7b8e8a1f | [
"MIT"
] | 1 | 2020-11-15T15:21:24.000Z | 2020-11-15T15:21:24.000Z | for i in range(30):
print("a_", end="")
print()
for i in range(30):
print("b_", end="")
print()
for i in range(30):
print("c_", end="")
| 15.3 | 23 | 0.51634 |
9da846794dabe811239a290251111e03ccfb593a | 1,256 | py | Python | test_LearnSubtitles.py | heitor31415/LearnSubtitles | 153178ea11d700a49a1f3692de39e8fc81e3cc4e | [
"MIT"
] | 8 | 2020-02-13T03:08:25.000Z | 2021-01-11T20:28:39.000Z | test_LearnSubtitles.py | heitor31415/LearnSubtitles | 153178ea11d700a49a1f3692de39e8fc81e3cc4e | [
"MIT"
] | 1 | 2020-04-28T19:48:16.000Z | 2020-04-29T12:28:15.000Z | test_LearnSubtitles.py | heitor31415/LearnSubtitles | 153178ea11d700a49a1f3692de39e8fc81e3cc4e | [
"MIT"
] | 1 | 2020-03-14T00:46:36.000Z | 2020-03-14T00:46:36.000Z | import os
import pytest
from typing import Any, Callable, Dict, List
import LearnSubtitles as ls
def prepare(language: str) -> List:
    """Build a LearnSubtitles object for every file under
    ``testfiles/<language>``."""
    test_dir = "testfiles/" + language
    subs = []
    for entry in os.listdir(test_dir):
        full_path = os.path.abspath(os.path.join(test_dir, entry))
        subs.append(ls.LearnSubtitles(full_path, language))
    return subs
languages = ["de", "en", "pt"] # supported languages
| 26.723404 | 85 | 0.648089 |
9daad46c18973b22ab6ea33d444cd0187d68fcac | 2,455 | py | Python | programs/graduation-project/featureselection.py | Dilmuratjan/MyProject | 26f4ee708eb4a7ceef780842ad737fef64a39d7e | [
"WTFPL"
] | 2 | 2017-02-19T15:11:06.000Z | 2017-02-22T18:34:10.000Z | programs/graduation-project/featureselection.py | Dilmuratjan/MyProject | 26f4ee708eb4a7ceef780842ad737fef64a39d7e | [
"WTFPL"
] | null | null | null | programs/graduation-project/featureselection.py | Dilmuratjan/MyProject | 26f4ee708eb4a7ceef780842ad737fef64a39d7e | [
"WTFPL"
] | 4 | 2017-02-26T08:10:30.000Z | 2017-05-02T10:02:03.000Z | import pandas as pd
import numpy as np
from time import time
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
# Load train/test sheets from a single workbook.
# NOTE(review): relies on `pd` (pandas), imported on the line just above
# this section.
train = pd.read_excel('stats.xls', sheet_name='train')
test = pd.read_excel('stats.xls', sheet_name='test')
array_train = train.values
array_test = test.values
# Features are columns 1..10 of each sheet.
X = array_train[0:, 1:11]
# NOTE(review): the label column name is an empty string here -- presumably
# a non-ASCII header lost in transcription; verify against stats.xls.
y = np.asarray(train[''], dtype="|S6")
X_test = array_test[0:, 1:11]
# Build a forest and compute the pixel importances
# NOTE(review): `n_jobs` is never defined in this script -- this line raises
# NameError as written (copied from the scikit-learn faces example).
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
                              max_features=128,
                              random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
# NOTE(review): `data` is never defined either -- this line also raises
# NameError; in the sklearn example it is the fetched faces dataset.
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
#
# X_indices = np.arange(X.shape[-1])
#
# # #############################################################################
# # Univariate feature selection with F-test for feature scoring
# # We use the default selection function: the 10% most significant features
# selector = SelectPercentile(f_classif, percentile=10)
# selector.fit(X, y)
# scores = -np.log10(selector.pvalues_)
# scores /= scores.max()
# plt.bar(X_indices - .45, scores, width=.2,
# label=r'Univariate score ($-Log(p_{value})$)', color='darkorange',
# edgecolor='black')
#
# # #############################################################################
# # Compare to the weights of an SVM
# clf = svm.SVC(kernel='linear')
# clf.fit(X, y)
#
# svm_weights = (clf.coef_ ** 2).sum(axis=0)
# svm_weights /= svm_weights.max()
#
# plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight',
# color='navy', edgecolor='black')
#
# clf_selected = svm.SVC(kernel='linear')
# clf_selected.fit(selector.transform(X), y)
#
# svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
# svm_weights_selected /= svm_weights_selected.max()
#
# plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
# width=.2, label='SVM weights after selection', color='c',
# edgecolor='black')
#
#
# plt.title("Comparing feature selection")
# plt.xlabel('Feature number')
# plt.yticks(())
# plt.axis('tight')
# plt.legend(loc='upper right')
# plt.show() | 27.277778 | 81 | 0.638697 |
9dabb9b1903ea38cf40c186f6bcbd195fb25dff0 | 618 | py | Python | logpot/admin/file.py | moremorefor/logpot | 26a48766dc764f93aa29f6d949af8a05de5d9152 | [
"MIT"
] | 4 | 2016-08-31T08:03:09.000Z | 2019-03-15T07:11:49.000Z | logpot/admin/file.py | moremorefor/logpot | 26a48766dc764f93aa29f6d949af8a05de5d9152 | [
"MIT"
] | 4 | 2021-05-10T00:34:14.000Z | 2022-03-11T23:22:06.000Z | logpot/admin/file.py | moremorefor/logpot | 26a48766dc764f93aa29f6d949af8a05de5d9152 | [
"MIT"
] | 1 | 2017-08-08T22:51:13.000Z | 2017-08-08T22:51:13.000Z | #-*- coding: utf-8 -*-
from logpot.admin.base import AuthenticateView
from logpot.utils import ImageUtil
from flask import flash, redirect
from flask_admin import expose
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin.babel import gettext
import os
import os.path as op
from operator import itemgetter
from datetime import datetime
| 24.72 | 53 | 0.775081 |
9dabcfa6524e1e4a0e2b51dbe24a327024815ea3 | 24 | py | Python | emailutil/__init__.py | cityofaustin/atd-utils-email | bcf2c55fe770745a2ed6da22e44971ef6ceaae37 | [
"CC0-1.0"
] | null | null | null | emailutil/__init__.py | cityofaustin/atd-utils-email | bcf2c55fe770745a2ed6da22e44971ef6ceaae37 | [
"CC0-1.0"
] | null | null | null | emailutil/__init__.py | cityofaustin/atd-utils-email | bcf2c55fe770745a2ed6da22e44971ef6ceaae37 | [
"CC0-1.0"
] | null | null | null | from .emailutil import * | 24 | 24 | 0.791667 |
9dacec32c244293fcf0c09720725cd6c562e10da | 4,888 | py | Python | fast_downloader_mt/main.py | Kirozen/fast-downloader | febdcc8b6a6ad3b8d263a8923b8f24e8402df618 | [
"MIT"
] | null | null | null | fast_downloader_mt/main.py | Kirozen/fast-downloader | febdcc8b6a6ad3b8d263a8923b8f24e8402df618 | [
"MIT"
] | null | null | null | fast_downloader_mt/main.py | Kirozen/fast-downloader | febdcc8b6a6ad3b8d263a8923b8f24e8402df618 | [
"MIT"
] | null | null | null | from __future__ import annotations
import multiprocessing
import os
import re
import sys
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from urllib.parse import urlparse
import click
import requests
from requests.models import HTTPError
from rich.progress import (
BarColumn,
DownloadColumn,
Progress,
TextColumn,
TimeRemainingColumn,
TransferSpeedColumn,
)
BUFFER_SIZE = 32768
progress = Progress(
TextColumn("[bold blue]{task.fields[filename]}", justify="right"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"",
DownloadColumn(),
"",
TransferSpeedColumn(),
"",
TimeRemainingColumn(),
)
if __name__ == "__main__":
fast_downloader()
| 28.091954 | 88 | 0.625818 |
9dad12fdcaa78561145c587bd080d424b377a384 | 1,060 | py | Python | backend/app/core/security.py | rufusnufus/BTSParking | 3bb6e7fd20943f258e297428ab1624c4f2786444 | [
"MIT"
] | 2 | 2021-11-13T08:05:14.000Z | 2021-12-02T11:36:11.000Z | backend/app/core/security.py | rufusnufus/BTSParking | 3bb6e7fd20943f258e297428ab1624c4f2786444 | [
"MIT"
] | 44 | 2021-11-23T10:06:11.000Z | 2021-12-18T07:23:22.000Z | backend/app/core/security.py | rufusnufus/BTSParking | 3bb6e7fd20943f258e297428ab1624c4f2786444 | [
"MIT"
] | null | null | null | import os
import time
from hashlib import sha256
import requests
from dotenv import load_dotenv
from fastapi.security import OAuth2PasswordBearer
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
load_dotenv(os.path.join(BASE_DIR, "../.env"))
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="api/v1/activate-login-code")
| 30.285714 | 84 | 0.724528 |
9dad8057a50b53867020fcecaeb0676d2cfff102 | 4,362 | py | Python | sitch/sitchlib/geo_correlator.py | codecuisine/sensor | 06fb0908178af1ab673b95e7f435b873cc62e61b | [
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 68 | 2016-08-08T17:28:59.000Z | 2021-11-26T09:31:52.000Z | sitch/sitchlib/geo_correlator.py | codecuisine/sensor | 06fb0908178af1ab673b95e7f435b873cc62e61b | [
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 61 | 2016-08-20T21:01:01.000Z | 2020-07-22T06:10:45.000Z | sitch/sitchlib/geo_correlator.py | codecuisine/sensor | 06fb0908178af1ab673b95e7f435b873cc62e61b | [
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 40 | 2017-01-28T23:06:22.000Z | 2021-08-13T15:09:43.000Z | """Correlate based on geograpgic information."""
from alert_manager import AlertManager
from utility import Utility
| 44.969072 | 208 | 0.570381 |
9dadf1bb28dc34ec81f4c906780d3dcd3137e862 | 1,697 | py | Python | grid_search_results_v1/get_vals_heatmap.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | 1 | 2020-11-29T12:42:30.000Z | 2020-11-29T12:42:30.000Z | grid_search_results_v1/get_vals_heatmap.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | null | null | null | grid_search_results_v1/get_vals_heatmap.py | malfarasplux/pnet2019 | ae34d5c84fb4d3985634b237a14dfb69e98b8339 | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt

# Hyper-parameter axes of the grid search; result-file keys are built as
# "<N>_<scale>_<mem>_<sigexp>".
N = [20, 40, 50, 75, 100, 150, 200]
scale = [0.0001, 0.001, 0.005, 0.01, 0.1, 1, 10]
mem = [0.001, 0.01, 0.1, 0.13, 0.25, 0.5, 1]
sigexp = [0.01, 0.1, 0.5, 1, 2, 5, 10]

# Parse the F1 report: each line's first token looks like
# "<prefix><N>_<scale>_<mem>_<sigexp>.txt:<F1>"; the [7:] slice drops a
# fixed 7-character prefix (presumably the file-name stem -- confirm
# against F1_report.txt).
val_key = {}
with open("./grid_search_results_v1/F1_report.txt") as f:
    for i, line in enumerate(f):
        lineval = line.split()[0]
        print("line {0} = {1}".format(i, lineval))
        val_key[lineval.split(".txt:")[0][7:]] = float(lineval.split(".txt:")[1])

# Fixed-N, fixed-sigexp slice of the grid: scale (rows) x mem (columns).
# `np.float` was an alias of the builtin `float` and was removed in
# NumPy 1.24; using the builtin keeps the dtype (float64) identical.
F1_matrix = np.zeros((len(scale), len(mem)), dtype=float)
N_i = str(200)
sigexp_i = str(0.1)
for i in range(len(scale)):
    scale_i = str(scale[i])
    for j in range(len(mem)):
        mem_i = str(mem[j])
        key_i = N_i + "_" + scale_i + "_" + mem_i + "_" + sigexp_i
        F1_matrix[i, j] = val_key[key_i]

# Heatmap of F1 over the scale/mem grid, annotated with the cell values.
fig, ax = plt.subplots()
im = ax.imshow(F1_matrix)
ax.set_title("Grid search F1 opt")
ax.set_xticks(np.arange(len(mem)))
ax.set_yticks(np.arange(len(scale)))
ax.set_xticklabels(mem)
ax.set_yticklabels(scale)
ax.set_xlabel('mem')
ax.set_ylabel('scale')
cbar = ax.figure.colorbar(im, ax=ax)
# Loop over data dimensions and create text annotations.
for i in range(len(scale)):
    for j in range(len(mem)):
        text = ax.text(j, i, F1_matrix[i, j],
                       ha="center", va="center", color="w")
| 38.568182 | 160 | 0.476134 |
9daef14a7cdf5e935df51508fb1293fad577407c | 72 | py | Python | build/scripts-3.5/mooc_anon.py | acheamponge/mooc_anon | b06dec9c4c47011f69ff4f6e21a0f5862e2ffd5c | [
"MIT"
] | 3 | 2019-07-08T01:16:57.000Z | 2021-09-23T12:44:02.000Z | build/scripts-3.5/mooc_anon.py | acheamponge/mooc_anon | b06dec9c4c47011f69ff4f6e21a0f5862e2ffd5c | [
"MIT"
] | null | null | null | build/scripts-3.5/mooc_anon.py | acheamponge/mooc_anon | b06dec9c4c47011f69ff4f6e21a0f5862e2ffd5c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
print("hey there, this is my first pip package")
| 18 | 48 | 0.708333 |
9daf2e07854e8ace58237146dcb7ca501dc5a1ae | 111 | py | Python | odata_query/django/__init__.py | itd-fsc/odata-query | 7d5239b775633594ce52d4eda5754c2ad078eb75 | [
"MIT"
] | 26 | 2021-06-11T07:42:08.000Z | 2022-02-16T04:42:45.000Z | odata_query/django/__init__.py | itd-fsc/odata-query | 7d5239b775633594ce52d4eda5754c2ad078eb75 | [
"MIT"
] | 13 | 2021-08-07T21:38:22.000Z | 2022-03-28T17:25:47.000Z | odata_query/django/__init__.py | itd-fsc/odata-query | 7d5239b775633594ce52d4eda5754c2ad078eb75 | [
"MIT"
] | 6 | 2021-07-28T04:46:14.000Z | 2022-03-15T08:22:19.000Z | from .django_q import AstToDjangoQVisitor
from .django_q_ext import *
from .shorthand import apply_odata_query
| 27.75 | 41 | 0.855856 |
9dafa0a196d3c478e9ef8c55c4f9dd2dd56b60ad | 1,457 | py | Python | _snippets/scrape_RAND_pdfs.py | vashu1/data_snippets | b0ae5230d60c2054c7b9278093533b7f71f3758b | [
"MIT"
] | 1 | 2021-02-10T20:33:43.000Z | 2021-02-10T20:33:43.000Z | _snippets/scrape_RAND_pdfs.py | vashu1/data_snippets | b0ae5230d60c2054c7b9278093533b7f71f3758b | [
"MIT"
] | null | null | null | _snippets/scrape_RAND_pdfs.py | vashu1/data_snippets | b0ae5230d60c2054c7b9278093533b7f71f3758b | [
"MIT"
] | null | null | null | # scrape articles from RAND site, see https://vashu11.livejournal.com/20523.html
import re
import requests
from bs4 import BeautifulSoup
import os
# NOTE(review): `get_articles` and `get_pdfs` are called below but are not
# defined in this excerpt -- confirm they exist elsewhere in the script
# before running.
# Index page plus paginated listing pages 2..107 of RAND "papers".
content = ['https://www.rand.org/pubs/papers.html'] + ['https://www.rand.org/pubs/papers.{}.html'.format(i) for i in range(2, 108)]
# NOTE(review): os.mkdir raises FileExistsError if 'pdfs' already exists.
os.mkdir('pdfs')
# [11:] skips the first 11 listing pages -- presumably a manual resume point.
for page in content[11:]:
    print('PAGE', page)
    articles = get_articles(page)
    for article in articles:
        print('ARTICLE', article)
        c = 0  # per-article counter to de-duplicate output file names
        for d in get_pdfs(article):
            name, link = d
            if c > 0:
                name += '_{}'.format(c)
            print('NAME', name)
            r = requests.get(link)
            l = len(r.content)
            print('LEN', l)
            # sanitize the name: anything outside word chars, '-', '_',
            # '.' and space becomes '_'
            with open('./pdfs/' + re.sub('[^\w\-_\. ]', '_', name) + '.pdf', 'wb') as f:
                f.write(r.content)
            c += 1
| 38.342105 | 180 | 0.577213 |
9db042c12b1460a61eed0c0cb77f85501b0f72a1 | 215 | py | Python | plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py | FHoffmannCode/dbnd | 82beee1a8c752235bf21b4b0ceace5ab25410e52 | [
"Apache-2.0"
] | null | null | null | plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py | FHoffmannCode/dbnd | 82beee1a8c752235bf21b4b0ceace5ab25410e52 | [
"Apache-2.0"
] | null | null | null | plugins/dbnd-snowflake/src/dbnd_snowflake/__init__.py | FHoffmannCode/dbnd | 82beee1a8c752235bf21b4b0ceace5ab25410e52 | [
"Apache-2.0"
] | null | null | null | from dbnd._core.commands.metrics import log_snowflake_table
from dbnd_snowflake.snowflake_resources import log_snowflake_resource_usage
__all__ = [
"log_snowflake_resource_usage",
"log_snowflake_table",
]
| 23.888889 | 75 | 0.827907 |
9db66809b3f7cfe04fff2e0d4fd9725d23130f54 | 2,422 | py | Python | inputs/fino2_dats.py | a2edap/WE-Validate | 6e4be8228c9b4f66fb1a056f7566030b79441f2e | [
"BSD-3-Clause"
] | 1 | 2022-01-21T08:09:03.000Z | 2022-01-21T08:09:03.000Z | inputs/fino2_dats.py | a2edap/WE-Validate | 6e4be8228c9b4f66fb1a056f7566030b79441f2e | [
"BSD-3-Clause"
] | null | null | null | inputs/fino2_dats.py | a2edap/WE-Validate | 6e4be8228c9b4f66fb1a056f7566030b79441f2e | [
"BSD-3-Clause"
] | 1 | 2021-06-14T09:32:36.000Z | 2021-06-14T09:32:36.000Z | # A parser for multiple FINO2 .dat files in a directory.
import os
import pathlib
import pandas as pd
import numpy as np
import glob
import sys
| 31.051282 | 78 | 0.514038 |
9db67e536e2a5337dee11670942d6aa03db5b908 | 2,481 | py | Python | bin/ess/dependencies.py | clu3bot/cora | de4d1af983c135184ebaf557271fa14c7c0e1849 | [
"MIT"
] | null | null | null | bin/ess/dependencies.py | clu3bot/cora | de4d1af983c135184ebaf557271fa14c7c0e1849 | [
"MIT"
] | null | null | null | bin/ess/dependencies.py | clu3bot/cora | de4d1af983c135184ebaf557271fa14c7c0e1849 | [
"MIT"
] | null | null | null | import subprocess as sp
import os
import time
import platform
from os.path import exists
#colar vars
permissions()
getos()
check_file()
#dependencies
| 20.675 | 84 | 0.584442 |
9db6de217e5adf7d8e64871e558fa7b849812773 | 3,880 | py | Python | calculate_Total-Hetero.py | evodify/population-genetic-analyses | 5295f9d68736ac02fc5f3ece43dadd5bf4e98e6f | [
"MIT"
] | 3 | 2018-01-31T09:57:10.000Z | 2021-02-03T18:34:01.000Z | calculate_Total-Hetero.py | evodify/population-genetic-analyses | 5295f9d68736ac02fc5f3ece43dadd5bf4e98e6f | [
"MIT"
] | null | null | null | calculate_Total-Hetero.py | evodify/population-genetic-analyses | 5295f9d68736ac02fc5f3ece43dadd5bf4e98e6f | [
"MIT"
] | 1 | 2019-09-02T06:13:29.000Z | 2019-09-02T06:13:29.000Z | #! /usr/bin/env python
'''
This script calculates total heterozygosity.
#Example input:
CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8
chr_1 1 A W N N A N N N N
chr_1 2 C Y Y N C C N C N
chr_1 3 C N C N C C C C C
chr_1 4 T T T N T T T T T
chr_2 1 A A A N A A A A A
chr_2 2 C C C N C C C C C
chr_2 3 C N N N N N N N N
chr_2 4 C C T C C C C C C
chr_2 5 T T C T Y T Y T T
chr_3 1 G G N N G N N N N
chr_3 2 C S C N C C N C N
chr_3 3 N N N N N N N N N
chr_3 4 N T T N T T T T N
chr_3 5 G - N N G G G C G
#Example input2:
CHROM POS REF sample1 sample2 sample3 sample4 sample5 sample6 sample7 sample8
chr_1 1 A/A A/T ./. ./. A/A ./. ./. ./. ./.
chr_1 2 C/C T/C T/C ./. C/C C/C ./. C/C ./.
chr_1 3 C/C ./. C/C ./. C/C C/C C/C C/C C/C
chr_1 4 T/T T/T T/T ./. T/T T/T T/T T/T T/T
chr_2 1 A/A A/A A/A ./. A/A A/A A/A A/A A/A
chr_2 2 C/C C/C C/C ./. C/C C/C C/C C/C C/C
chr_2 3 C/C ./. ./. ./. ./. ./. ./. ./. ./.
chr_2 4 C/C C/C T/T C/C C/C C/C C/C C/C C/C
chr_2 5 T/T T/T C/C T/T T/C T/T T/C T/T T/T
chr_3 1 G/G G/G ./. ./. G/G ./. ./. ./. ./.
chr_3 2 C/C G/C C/C ./. C/C C/C ./. C/C ./.
chr_3 3 ./. ./. ./. ./. ./. ./. ./. ./. ./.
chr_3 4 ./. T/T T/T ./. T/T T/T T/T T/T ./.
chr_3 5 G/G -/- ./. ./. G/G G/G G/G C/C G/G
#Example output:
test.tab 0.1125
#command:
$ python calculate_Total-Hetero.py -i input.tab -o output.tab -s "sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8"
#contact:
Dmytro Kryvokhyzha dmytro.kryvokhyzha@evobio.eu
'''
############################# modules #############################
import calls  # my custom module
import numpy as np

############################# options #############################
parser = calls.CommandLineParser()
parser.add_argument('-i', '--input', help = 'name of the input file', type=str, required=True)
parser.add_argument('-o', '--output', help = 'name of the output file', type=str, required=True)
parser.add_argument('-s', '--samples', help = 'column names of the samples to process (optional)', type=str, required=False)
args = parser.parse_args()

# check if samples names are given and if all sample names are present in a header
sampleNames = calls.checkSampleNames(args.samples, args.input)

############################# functions #############################

############################# program #############################
print('Opening the file...')
counter = 0  # number of data lines processed, for progress reporting

with open(args.input) as datafile:
    header_line = datafile.readline()
    header_words = header_line.split()

    # index samples: map requested sample names to their column positions
    sampCol = calls.indexSamples(sampleNames, header_words)

    # count number of samples
    nSample = len(sampleNames)

    ############################## perform counting ####################
    print('Counting heterozygots ...')

    # per-site heterozygosity fractions; averaged over all sites at the end
    Hcount = []
    for line in datafile:
        words = line.split()

        # select samples
        sample_charaters = calls.selectSamples(sampCol, words)

        # check if one- or two-character code; convert two-character
        # genotypes (e.g. "T/C") to the one-character code
        if any(["/" in gt for gt in sample_charaters]):
            sample_charaters = calls.twoToOne(sample_charaters)

        # count heterozygotes; sites where every sample is missing ('N')
        # are skipped so we never divide by zero
        Nmising = calls.countPerPosition(sample_charaters, 'N')
        nHeter = calls.countHeteroPerPosition(sample_charaters)
        nTotal = float(nSample - Nmising)
        if nTotal != 0:
            Hcount.append(float(nHeter / nTotal))

        # track progress
        counter += 1
        if counter % 1000000 == 0:
            # fixed: this line used a Python 2 print statement
            # ("print str(counter), ...") while the rest of the script uses
            # the print() function; this form works under both 2 and 3
            print('%d lines processed' % counter)

# write the output: input file name and the mean per-site heterozygosity
outputFile = open(args.output, 'w')
heteroT = round(np.mean(Hcount), 4)
outputFile.write("%s\t%s\n" % (args.input, heteroT))
datafile.close()
outputFile.close()
print('Done!')
| 30.077519 | 130 | 0.549227 |
9db72ff4ce32323ddaf8107b708ab0ac40987bfc | 2,748 | py | Python | src/bfh.py | Pella86/Snake4d | cdf3773b42efc888affa33dd22ebe56a48f6d979 | [
"MIT"
] | 79 | 2018-05-23T09:39:00.000Z | 2021-11-29T02:26:07.000Z | src/bfh.py | Pella86/Snake4d | cdf3773b42efc888affa33dd22ebe56a48f6d979 | [
"MIT"
] | 1 | 2020-06-13T17:57:14.000Z | 2020-06-16T15:53:40.000Z | src/bfh.py | Pella86/Snake4d | cdf3773b42efc888affa33dd22ebe56a48f6d979 | [
"MIT"
] | 6 | 2018-06-28T13:03:38.000Z | 2021-03-06T14:24:32.000Z | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 27 17:24:58 2018
@author: Mauro
"""
#==============================================================================
# Imports
#==============================================================================
import struct
#==============================================================================
# Helpers
#==============================================================================
#==============================================================================
# Constants
#==============================================================================
# little conversion table for the supported files
type_to_size = {}
type_to_size['I'] = 4
type_to_size['d'] = 8
type_to_size['c'] = 1
#==============================================================================
# Binary file class
#==============================================================================
| 26.941176 | 79 | 0.409025 |
9db736834f35ad283117ff978c76815cc0ba771c | 8,726 | py | Python | bin/read_analysis.py | louperelo/longmetarg | 026b66c3621a4bcc71f5bc8a73955faf57978985 | [
"MIT"
] | null | null | null | bin/read_analysis.py | louperelo/longmetarg | 026b66c3621a4bcc71f5bc8a73955faf57978985 | [
"MIT"
] | null | null | null | bin/read_analysis.py | louperelo/longmetarg | 026b66c3621a4bcc71f5bc8a73955faf57978985 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pandas as pd
from scipy import stats
import numpy as np
#import seaborn as sns
#import matplotlib.pyplot as plt
import math
from Bio import SeqIO
import io
import re
import pysam
from functools import reduce
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument("--bam_file", metavar="<BAM>", dest="bam", help="enter the path to the alignment.bam file. By default 'aln_F4.bam' will be used",
type=str, default="aln_F4.bam")
parser.add_argument("--reads_fasta", metavar="<FASTA>", dest="fasta", help="enter the path to the original fasta file being analysed. By default 'reads.fasta' will be used",
type=str, default="reads.fasta")
parser.add_argument("--ident", metavar="<IDENT>", dest="ident", help="enter the int value for minimum identity. By default 80 will be used",
type=int, default= 80)
parser.add_argument("--cov_length", metavar="<COV>", dest="cov", help="enter the int value for minimum coverage length. By default 95 will be used",
type=int, default= 95)
parser.add_argument("--folder_out", metavar="<OUT>", dest="out", help="enter name for output files. By default 'arg_results' will be used",
type=str, default="../out_dir/")
parser.add_argument("--aro_idx", metavar="<IDX>", dest="idx", help="enter the path to the aro_index.csv file. By default 'aro_index.tsv' will be used",
type=str, default="aro_index.tsv")
# print help message for user
parser.print_help()
# get command line arguments
args = parser.parse_args()
# read files from path
bam = args.bam
fasta = args.fasta
ident = args.ident
covlen = args.cov
folder = args.out
idx = args.idx
#read list of cigar tuples and get number of matches (0), insertions (1) or deletions (2)
#auxiliary function in parse_bam()
#Joins information from BAM file in pandas dataframe
#query sequence: query_name, query_length
#reference sequence: reference_name (gives one string, is split into ARO, ID, gene name and NCBI reference id), reference_start, reference_length
#alignment: query_alignment_length, number of mismatches and gaps (tag 'NM)
#calculates sequence identity % (identity(A,B)=100*(identical nucleotides / min(length(A),length(B)))), with identical nucleotides = query_alignment_length - NM
#calculates cover length % (query_alignment_length*100 / reference_length)
pd.options.mode.chained_assignment = None
#Filter df for highest identity and coverlength rates
#Filter assembly fasta for contigs of interest (data) and save to out_name.fasta
#for taxonomic analysis
#check for and eliminate less significant (lower cover identity) overlaps
#generate list of index numbers of non-overlapping hits from df sorted by coverage identity (highest first)
#in case of overlaps, keep the hit with the highest coverage identity
if __name__ == "__main__":
#extract data of interest from bam file, filter best hits and eliminate overlaps
result_df = overlaps(filter_best(parse_bam(bam), ident, covlen))
#add corresponding drug class from CARD aro_index.tsv to result_df
rgdrug_dict = pd.read_csv(idx, sep='\t').set_index('ARO Name').to_dict()['Drug Class']
result_df['drug_class'] = result_df['ref_genename'].map(rgdrug_dict)
#save result_df as tsv
result_df.to_csv("argHitsDf.tsv", sep='\t')
#save reads/contigs of hits in result_df in 'result.fasta' for further analysis with PlasFlow or Blast/Diamond
arg_contigs(result_df, fasta, "argHits.fasta")
| 47.68306 | 180 | 0.655168 |
9db737d0aa2bbc9904ff5f6209cdc235a2493a9c | 6,315 | py | Python | parkinglot/admin.py | YangWanjun/areaparking | b08bc9b8f8d5f602d823115263b9d040edb9f245 | [
"Apache-2.0"
] | 1 | 2018-08-02T04:00:44.000Z | 2018-08-02T04:00:44.000Z | parkinglot/admin.py | YangWanjun/areaparking | b08bc9b8f8d5f602d823115263b9d040edb9f245 | [
"Apache-2.0"
] | null | null | null | parkinglot/admin.py | YangWanjun/areaparking | b08bc9b8f8d5f602d823115263b9d040edb9f245 | [
"Apache-2.0"
] | null | null | null | import datetime
from django.contrib import admin
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Max
from . import models, forms
from address.biz import geocode
from utils import common
from utils.django_base import BaseAdmin
# Register your models here.
# @admin.register(models.LeaseManagementCompany)
# class LeaseManagementCompanyAdmin(BaseAdmin):
# list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
#
#
# @admin.register(models.BuildingManagementCompany)
# class BuildingManagementCompanyAdmin(BaseAdmin):
# list_display = ('name', 'department', 'position', 'staff', 'address', 'tel', 'email')
| 33.951613 | 133 | 0.62977 |
9db76eb5840b9b7ac5d4ffae358c55f69c7c5da4 | 965 | py | Python | graficas.py | dianuchitop/el26 | e84bb35ca9d6a603d515a624a85dae27cd4d10f2 | [
"MIT"
] | null | null | null | graficas.py | dianuchitop/el26 | e84bb35ca9d6a603d515a624a85dae27cd4d10f2 | [
"MIT"
] | null | null | null | graficas.py | dianuchitop/el26 | e84bb35ca9d6a603d515a624a85dae27cd4d10f2 | [
"MIT"
] | null | null | null | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
filenames=["euler.dat","rk4.dat","leapfrog.dat"]
fig, axs = plt.subplots(nrows=3, ncols=3)
ax=axs[0][0]
ax.set_title('Euler')
ax=axs[0][1]
ax.set_title('RK4')
ax=axs[0][2]
ax.set_title('Leap_frog')
for i in range(3):
f=open(filenames[i],"r")
s=list(map(float,f.readline().split()))
s1=list(map(float,f.readline().split()))
time=list(map(float,f.readline().split()))
ax=axs[0][i]
ax.set_xlabel("time")
ax.set_ylabel("posistion")
ax.plot(time,s )
ax.set_ylim(-1.5,1.5)
ax.set_xlim(0,15)
ax=axs[1][i]
ax.plot(time, s1)
ax.set_ylim(-1.5,1.5)
ax.set_xlim(0,15)
ax.set_xlabel("time")
ax.set_ylabel("velocity")
ax=axs[2][i]
ax.plot(s, s1)
ax.set_ylim(-2.0,2.0)
ax.set_xlim(-2.0,2.0)
ax.set_xlabel("position")
ax.set_ylabel("velocity")
fig.subplots_adjust(hspace=1, wspace=1)
plt.savefig('graficas.png')
plt.show()
| 24.74359 | 48 | 0.635233 |
9db821a6f16092b02b4cd4951deab910f4dfd292 | 565 | py | Python | __scraping__/zipnet.in - requests/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | __scraping__/zipnet.in - requests/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | __scraping__/zipnet.in - requests/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | import requests
from bs4 import BeautifulSoup
from time import sleep
url = "http://zipnet.in/index.php?page=missing_person_search&criteria=browse_all&Page_No=1"
r = requests.get(url)
soup = BeautifulSoup(r.content, 'html.parser')
all_tables = soup.findAll('table')
for table in all_tables:
print('--- table ---')
all_rows = table.findAll('tr')
for row in all_rows:
all_cols = row.findAll('td')
if len(all_cols) > 1:
fields = all_cols[0].string
details = all_cols[1].string
print(fields, details)S
| 26.904762 | 91 | 0.660177 |
9dbc6591cdea251b119f8bcead36767b18ac8b75 | 4,654 | py | Python | mailpile/plugins/contacts.py | k0nsl/Mailpile | 556f5f9040c4e01b005b4d633f3213668a474936 | [
"Apache-2.0"
] | null | null | null | mailpile/plugins/contacts.py | k0nsl/Mailpile | 556f5f9040c4e01b005b4d633f3213668a474936 | [
"Apache-2.0"
] | null | null | null | mailpile/plugins/contacts.py | k0nsl/Mailpile | 556f5f9040c4e01b005b4d633f3213668a474936 | [
"Apache-2.0"
] | null | null | null | import mailpile.plugins
from mailpile.commands import Command
from mailpile.mailutils import Email, ExtractEmails
from mailpile.util import *
mailpile.plugins.register_command('C:', 'contact=', Contact)
mailpile.plugins.register_command('_vcard', 'vcard=', VCard)
| 30.220779 | 84 | 0.613666 |
9dbe26545533c7c7d397d2847ba2a1eeca8ad8ef | 1,663 | py | Python | hw2/codes/plot.py | Trinkle23897/Artificial-Neural-Network-THU-2018 | 3326ed131298caaaf3fd0b6af80de37fd1ff9526 | [
"MIT"
] | 38 | 2019-01-23T07:14:19.000Z | 2022-03-07T06:03:21.000Z | hw2/codes/plot.py | ywythu/Artificial-Neural-Network-THU-2018 | 3326ed131298caaaf3fd0b6af80de37fd1ff9526 | [
"MIT"
] | null | null | null | hw2/codes/plot.py | ywythu/Artificial-Neural-Network-THU-2018 | 3326ed131298caaaf3fd0b6af80de37fd1ff9526 | [
"MIT"
] | 17 | 2019-03-30T06:33:06.000Z | 2021-12-24T10:42:39.000Z | import numpy as np
from pylab import *
D = 10
acc1 = np.load('res/small/acc.npy').reshape(D, -1).mean(axis=0)
loss1 = np.load('res/small/loss.npy').reshape(D, -1).mean(axis=0)
acc2 = np.load('res/large/acc.npy').reshape(D, -1).mean(axis=0)
loss2 = np.load('res/large/loss.npy').reshape(D, -1).mean(axis=0)
cut = int(acc1.shape[0] / 10 * 4)
print(' 1: %.2f %.6f'%(100*acc1[:cut].max(), loss1[:cut].min()))
print(' 2: %.2f %.6f'%(100*acc2[:cut].max(), loss2[:cut].min()))
iter_ = np.arange(acc1.shape[0]) * D
print(acc1.shape, iter_.shape[0])
figure()
p = subplot(111)
p.plot(iter_[:cut], loss1[:cut], '-', label='Original CNN')
p.plot(iter_[:cut], loss2[:cut], '-', label='Designed CNN')
p.set_ylim((0, .4))
p.set_xlabel(r'# of Iterations')
p.set_ylabel(r'Loss')
p.legend(loc='upper right')
tight_layout()
savefig("loss.pdf")
figure()
p = subplot(111)
p.plot(iter_[:cut], acc1[:cut], '-', label='Original CNN')
p.plot(iter_[:cut], acc2[:cut], '-', label='Designed CNN')
p.set_ylim((.9, 1))
p.set_xlabel(r'# of Iterations')
p.set_ylabel(r'Accuracy')
p.legend(loc='lower right')
tight_layout()
savefig("acc.pdf")
# 1: 23:24:44.414 Testing, total mean loss 0.019417, total acc 0.863300 - 23:24:33.131
# 2s: 20:20:39.807 Testing, total mean loss 0.003224, total acc 0.967700 - 20:18:21.597
# 2r: 20:48:01.448 Testing, total mean loss 0.002306, total acc 0.981300 - 20:45:16.709
#-2r: 20:38:47.940 Testing, total mean loss 0.002271, total acc 0.981500 - 20:35:59.910
# 3s: 00:38:10.865 Testing, total mean loss 0.001759, total acc 0.980098 - 00:33:01.622
# 3r: 21:24:04.253 Testing, total mean loss 0.001675, total acc 0.980588 - 21:19:28.262 | 41.575 | 91 | 0.654841 |
9dbe2a0458905fed950a4384ff34ad0dc77f394d | 696 | py | Python | app/helpers/__init__.py | jaywonder20/Flask_Api_Starter | d3cf69f4742923737e826261f5e737f00d1c6270 | [
"MIT"
] | 1 | 2020-07-28T13:28:42.000Z | 2020-07-28T13:28:42.000Z | app/helpers/__init__.py | jaywonder20/Flask_Api_Starter | d3cf69f4742923737e826261f5e737f00d1c6270 | [
"MIT"
] | null | null | null | app/helpers/__init__.py | jaywonder20/Flask_Api_Starter | d3cf69f4742923737e826261f5e737f00d1c6270 | [
"MIT"
] | null | null | null | from flask_restful import reqparse
parser = reqparse.RequestParser()
parser.add_argument('email_address', help='field cannot be blank.')
| 33.142857 | 86 | 0.616379 |
9dc09ed0aa1f145f5e2a90e86cf3072696bbd4e9 | 3,435 | py | Python | tests/fakedb.py | justinfay/dbkit | 2aef6376a60965d7820c91692046f4bcf7d43640 | [
"MIT"
] | 4 | 2016-02-08T05:43:39.000Z | 2020-08-25T21:37:55.000Z | tests/fakedb.py | justinfay/dbkit | 2aef6376a60965d7820c91692046f4bcf7d43640 | [
"MIT"
] | 8 | 2015-04-24T13:39:42.000Z | 2016-04-07T01:58:53.000Z | tests/fakedb.py | justinfay/dbkit | 2aef6376a60965d7820c91692046f4bcf7d43640 | [
"MIT"
] | null | null | null | """
A fake DB-API 2 driver.
"""
# DB names used to trigger certain behaviours.
INVALID_DB = 'invalid-db'
INVALID_CURSOR = 'invalid-cursor'
HAPPY_OUT = 'happy-out'

# Standard DB-API 2 module globals (PEP 249): API level, thread-safety
# level (2 = threads may share the module and connections), and the
# parameter placeholder style ('qmark' = '?' placeholders).
apilevel = '2.0'
threadsafety = 2
paramstyle = 'qmark'
| 22.598684 | 71 | 0.604076 |
9dc60e93e26c2a9f12204a366a70cced0bf9b339 | 4,081 | py | Python | chapter_3_featurization/text_features.py | fancyerii/voicebook | def82da8577086d0361643a05fec2463006533a9 | [
"Apache-2.0"
] | 1 | 2020-03-05T01:19:17.000Z | 2020-03-05T01:19:17.000Z | chapter_3_featurization/text_features.py | fancyerii/voicebook | def82da8577086d0361643a05fec2463006533a9 | [
"Apache-2.0"
] | null | null | null | chapter_3_featurization/text_features.py | fancyerii/voicebook | def82da8577086d0361643a05fec2463006533a9 | [
"Apache-2.0"
] | null | null | null | '''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: js@neurolex.co
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under a Apache 2.0 license license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of enterprises - small businesses,
researchers, enterprises, and/or independent developers.
If you would like to work with us let us know @ js@neurolex.co.
================================================
## TEXT_FEATURES.PY ##
================================================
extract all text features:
nltk_features()
spacy_features()
gensim_features()
'''
import transcribe as ts
import sounddevice as sd
import soundfile as sf
import nltk_features as nf
import spacy_features as spf
import gensim_features as gf
import numpy as np
import os, json
# # record and get transcript
# if 'test.wav' not in os.listdir():
# sync_record('test.wav', 10, 44100, 2)
# # now extract all text features
# data=text_featurize('test.wav', True)
| 34.584746 | 121 | 0.639304 |
9dc760639ffd67ca1391d622bcca50ed7b1b5700 | 5,178 | py | Python | neurotin/logs/scores.py | mscheltienne/neurotin-analysis | 841b7d86c0c990169cceb02b40d9eb6bd0d07612 | [
"MIT"
] | null | null | null | neurotin/logs/scores.py | mscheltienne/neurotin-analysis | 841b7d86c0c990169cceb02b40d9eb6bd0d07612 | [
"MIT"
] | null | null | null | neurotin/logs/scores.py | mscheltienne/neurotin-analysis | 841b7d86c0c990169cceb02b40d9eb6bd0d07612 | [
"MIT"
] | null | null | null | from typing import List, Tuple, Union
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from ..utils._checks import (
_check_participant,
_check_participants,
_check_type,
)
from ..utils._docs import fill_doc
def _check_scores_idx(scores: Union[int, list, tuple]) -> List[int]:
    """Validate *scores* and return it as a list of ints.

    A bare int is wrapped into a one-element list and a tuple is
    converted to a list; a list is passed through unchanged. Every
    entry must be an int between 1 and 10 (inclusive).
    """
    _check_type(scores, ("int", list, tuple), item_name="scores")
    # normalize to a list, without copying an input that already is one
    if isinstance(scores, tuple):
        scores = list(scores)
    elif isinstance(scores, int):
        scores = [scores]
    # first pass: every entry must itself be an int
    for score in scores:
        _check_type(score, ("int",), item_name="score")
    # second pass: every entry must fall in the 1-10 score range
    assert all(1 <= score <= 10 for score in scores)
    return scores
| 28.295082 | 79 | 0.609888 |
9dcae389894300bd7f91c57ac11fc79ac0e2fd30 | 14,770 | py | Python | backend/core/migrations/0001_initial.py | mashuq/academia | 571b3db58de4a70210ebd9d92c0f152016aec861 | [
"Unlicense"
] | null | null | null | backend/core/migrations/0001_initial.py | mashuq/academia | 571b3db58de4a70210ebd9d92c0f152016aec861 | [
"Unlicense"
] | null | null | null | backend/core/migrations/0001_initial.py | mashuq/academia | 571b3db58de4a70210ebd9d92c0f152016aec861 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.6 on 2021-02-25 05:46
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 47.491961 | 207 | 0.561882 |
9dcd01c7a81f81cad912ec87f997c4e5ba58f9bb | 2,448 | py | Python | minifold/log.py | nokia/minifold | 3687d32ab6119dc8293ae370c8c4ba9bbbb47deb | [
"BSD-3-Clause"
] | 15 | 2018-09-03T09:40:59.000Z | 2021-07-16T16:14:46.000Z | src/log.py | Infinite-Blue-1042/minifold | cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16 | [
"BSD-3-Clause"
] | null | null | null | src/log.py | Infinite-Blue-1042/minifold | cd0aa9207f9e1819ed2ecbb24373cdcfe27abd16 | [
"BSD-3-Clause"
] | 8 | 2019-01-25T07:18:59.000Z | 2021-04-07T17:54:54.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of the minifold project.
# https://github.com/nokia/minifold
__author__ = "Marc-Olivier Buob"
__maintainer__ = "Marc-Olivier Buob"
__email__ = "marc-olivier.buob@nokia-bell-labs.com"
__copyright__ = "Copyright (C) 2018, Nokia"
__license__ = "BSD-3"
import sys
from pprint import pformat
# Log severity levels, ordered from least to most severe.
DEBUG = 0
INFO = 1
WARNING = 2
ERROR = 3

# Shell colors
DEFAULT = 0
RED = 1
GREEN = 2
YELLOW = 3
BLUE = 4
PINK = 5
CYAN = 6
GRAY = 7

# Shell style
# NOTE(review): DEFAULT is rebound here with the same value as the color
# DEFAULT above -- presumably both mean "no formatting"; confirm intended.
DEFAULT = 0
BOLD = 1
UNDERLINED = 4
BLINKING = 5
HIGHLIGHTED = 7
| 24 | 114 | 0.562908 |
9dcdcb702db69a33b8fb22a29cccef585723a801 | 4,515 | py | Python | cardgame_channels_app/migrations/0001_initial.py | cyface/cardgame_channels | 22f2bef190ee20999eae27e6aa9ce138a78ae47f | [
"MIT"
] | null | null | null | cardgame_channels_app/migrations/0001_initial.py | cyface/cardgame_channels | 22f2bef190ee20999eae27e6aa9ce138a78ae47f | [
"MIT"
] | null | null | null | cardgame_channels_app/migrations/0001_initial.py | cyface/cardgame_channels | 22f2bef190ee20999eae27e6aa9ce138a78ae47f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-18 11:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 43.834951 | 169 | 0.575858 |
9dce2d32fa35d3b007796ab403b5019d5baeeffb | 2,820 | py | Python | data_collection/omscs_website/omscs_cleaner.py | yashchitalia/jack-holmes | 1ce3c65c1477390fb15d99a14f608f62745548b1 | [
"Apache-2.0"
] | 1 | 2017-03-30T02:25:18.000Z | 2017-03-30T02:25:18.000Z | data_collection/omscs_website/omscs_cleaner.py | yashchitalia/jack-holmes | 1ce3c65c1477390fb15d99a14f608f62745548b1 | [
"Apache-2.0"
] | null | null | null | data_collection/omscs_website/omscs_cleaner.py | yashchitalia/jack-holmes | 1ce3c65c1477390fb15d99a14f608f62745548b1 | [
"Apache-2.0"
] | null | null | null | from bs4 import BeautifulSoup
import re
import urllib
import pickle as pkl
unclean_dat = pkl.load(open('omscs_website_data.p', 'rb'))
clean_dat = {}
for course_number in unclean_dat.keys():
curr_unclean_dat = unclean_dat[course_number]
curr_clean_dat = {}
for attribute in curr_unclean_dat.keys():
if attribute == 'Instructor':
try:
instructor_name = str(curr_unclean_dat[attribute][0])
except:
continue
curr_clean_dat[attribute] = instructor_name
elif attribute == 'Name':
try:
class_name = str(curr_unclean_dat[attribute])
except:
continue
curr_clean_dat[attribute] = class_name
elif attribute in ['Overview', 'Prerequisites', 'Grading', 'Technical', 'Reading']:
final_string= ''
unclean_list = curr_unclean_dat[attribute]
unclean_list.pop(0)
for item in unclean_list:
try:
if str(type(item)) == "<class 'bs4.element.NavigableString'>":
item = item.encode('ascii', errors='backslashreplace')
if str(item) == '\n':
continue
final_string = final_string+ ' ' + str(item)
elif str(type(item)) == "<class 'bs4.element.Tag'>":
if item.next == '\n':
continue
final_string = final_string+ ' '+ str(item.next)
except UnicodeEncodeError:
item = item.encode('ascii', errors='backslashreplace')
if str(item) == '\n':
continue
final_string = final_string+ ' ' + str(item)
html_cleaned_string = cleanhtml(final_string)
curr_clean_dat[attribute] = html_cleaned_string
continue
clean_dat[course_number] = curr_clean_dat
pkl.dump(clean_dat, open('omscs_cleaned_data.p', 'wb'))
| 40.285714 | 91 | 0.575887 |
9dce34cc1f5685467f230a6aaddab0a3ca10dd09 | 1,116 | py | Python | testinfra/test_hypervisor-runc.py | devbox-tools/sfc | 0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1 | [
"Apache-2.0"
] | 1 | 2019-02-26T13:25:17.000Z | 2019-02-26T13:25:17.000Z | testinfra/test_hypervisor-runc.py | devbox-tools/sfc | 0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1 | [
"Apache-2.0"
] | null | null | null | testinfra/test_hypervisor-runc.py | devbox-tools/sfc | 0a5a9c3db165b35506f84d4c2dbfc1dace3fcea1 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import utils
import yaml
| 39.857143 | 75 | 0.713262 |
9dcee3a8fc687322519c4ee6dd19ea787ec8d273 | 280 | py | Python | Frameworks/urls.py | MiniJez/TP_Django | e7540f3178d44efeab69a8c8bea14a70fdaa9b4e | [
"MIT"
] | null | null | null | Frameworks/urls.py | MiniJez/TP_Django | e7540f3178d44efeab69a8c8bea14a70fdaa9b4e | [
"MIT"
] | null | null | null | Frameworks/urls.py | MiniJez/TP_Django | e7540f3178d44efeab69a8c8bea14a70fdaa9b4e | [
"MIT"
] | null | null | null | from django.urls import path
from .views import index, create, delete, update
urlpatterns = [
path('', index, name='index'),
path('create/', create, name='create'),
path('delete/<int:pk>', delete, name='delete'),
path('update/<int:pk>', update, name='update'),
] | 28 | 51 | 0.639286 |
9dd02fb84f2d21edf2c3f482fb528f7ff864783d | 1,831 | py | Python | scrape.py | valvoda/holjplus | 6a214911b477adf1253b43e46f7f5afc3076a86a | [
"MIT"
] | null | null | null | scrape.py | valvoda/holjplus | 6a214911b477adf1253b43e46f7f5afc3076a86a | [
"MIT"
] | null | null | null | scrape.py | valvoda/holjplus | 6a214911b477adf1253b43e46f7f5afc3076a86a | [
"MIT"
] | null | null | null | """
Adapted from https://realpython.com/python-web-scraping-practical-introduction/
for the purpose of scraping https://publications.parliament.uk/pa/ld/ldjudgmt.HTML
to create an expanded HOLJ+ corpus
"""
import requests
from requests import get
from requests.exceptions import RequestException
from contextlib import closing
if __name__ == "__main__":
sc = Scrape()
print("Testing the scaper:")
raw_html = sc.simple_get('https://realpython.com/blog/')
assert (len(raw_html) > 0), "Error, does not get"
no_html = sc.simple_get("https://doesnotexist.com/thereshouldbenothing/")
assert (no_html == None), "Error, does get"
print("Working")
| 30.516667 | 84 | 0.616057 |
9dd06c5c9ed12f49b25dc9756a8a419ae3530b18 | 1,881 | py | Python | emotional_ai/model.py | fuluny/Emotional-AI | 1372933ec410f72cd500513ea560f43167382e34 | [
"MIT"
] | null | null | null | emotional_ai/model.py | fuluny/Emotional-AI | 1372933ec410f72cd500513ea560f43167382e34 | [
"MIT"
] | null | null | null | emotional_ai/model.py | fuluny/Emotional-AI | 1372933ec410f72cd500513ea560f43167382e34 | [
"MIT"
] | null | null | null | # #!/usr/bin/python
import os
import numpy as np
import pandas as pd
from keras.models import load_model
from keras.models import Sequential
from keras.utils import np_utils
from keras.layers.core import Dense, Activation, Dropout
from keras import optimizers
from matplotlib import pyplot as plt
print('Loading data...')
data = pd.read_csv('fer2013.csv')
#data = pd.read_csv('testdata.csv')
im = data['pixels']
im_list = []
print('Pre-processing data...')
for i in range(len(im)):
im_list.append(list(map(int,im[i].split())))
X_train = np.asarray(im_list).astype('float32')
y_train = np_utils.to_categorical(np.asarray(data['emotion']))
X_train *= 2.0/255
X_train -= 1
input_dim = X_train.shape[1]
nb_classes = y_train.shape[1]
# Parameters were chosen from most commonly used and sometimes at random
# Further development of the model may be needed
print('Making model')
model = Sequential()
# Dense define number of nodes
model.add(Dense(1000, input_dim=input_dim))
# Activation defines the output
model.add(Activation('relu'))
# Dropout to avoid overfitting.
model.add(Dropout(0.15))
model.add(Dense(500))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dropout(0.15))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
print(model.summary())
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])
print("Training...")
model.fit(X_train, y_train, epochs=100, validation_split=0.1, verbose=2)
scores = model.evaluate(X_train, y_train, verbose=0)
print(scores)
# save model to HDF5
model.save('model.h5')
print("Saved model to disk")
| 25.767123 | 81 | 0.747475 |
9dd27bec72ba1ef4b5afcb916eaaa9109718bd5c | 2,487 | py | Python | detect_port_services.py | amir78729/penetration-test-project | c85376303ce0451e2e3a3150617484d5e6837168 | [
"MIT"
] | 1 | 2022-02-04T19:29:18.000Z | 2022-02-04T19:29:18.000Z | detect_port_services.py | amir78729/penetration-test-project | c85376303ce0451e2e3a3150617484d5e6837168 | [
"MIT"
] | null | null | null | detect_port_services.py | amir78729/penetration-test-project | c85376303ce0451e2e3a3150617484d5e6837168 | [
"MIT"
] | null | null | null | from socket import socket, gaierror, getservbyport, AF_INET, SOCK_STREAM, setdefaulttimeout
from tqdm import tqdm
from datetime import datetime
if __name__ == '__main__':
    # Interactive entry point: prompt for the target IP and the port range,
    # then run the scan (detect_port_services is defined earlier in this file).
    detect_port_services(
        ip=input('TARGET IP ADDRESS: '),
        range_start=int(input('START OF RANGE : ')),
        range_end=int(input('END OF RANGE : ')),
    )
| 38.859375 | 115 | 0.556494 |
9dd2a344fe4c04f0564d9da26c93b7f70200954e | 14,829 | py | Python | zvdata/apps/data_app.py | freedom6xiaobai/zvt | f4ba510a30f1014cc0e48b85370b0d3936bd851a | [
"MIT"
] | 1 | 2019-10-28T08:03:26.000Z | 2019-10-28T08:03:26.000Z | zvdata/apps/data_app.py | freedom6xiaobai/zvt | f4ba510a30f1014cc0e48b85370b0d3936bd851a | [
"MIT"
] | null | null | null | zvdata/apps/data_app.py | freedom6xiaobai/zvt | f4ba510a30f1014cc0e48b85370b0d3936bd851a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from collections import OrderedDict
from typing import List
import dash_core_components as dcc
import dash_html_components as html
import dash_table
import pandas as pd
from dash import dash
from dash.dependencies import Input, Output, State
from zvdata import IntervalLevel
from zvdata.app import app
from zvdata.chart import Drawer
from zvdata.domain import global_providers, get_schemas, get_schema_by_name, get_schema_columns
from zvdata.normal_data import NormalData, IntentType
from zvdata.reader import DataReader
from zvdata.utils.pd_utils import df_is_not_null
from zvdata.utils.time_utils import now_pd_timestamp, TIME_FORMAT_DAY
# Module-level holder for the currently loaded data
# (presumably assigned by the Dash callbacks elsewhere in this module — TODO confirm).
current_df = None
# Static Dash page layout: data-source selectors, filters, a "load data"
# button, a table container and a chart area. The containers with
# children=None are filled in dynamically by callbacks.
layout = html.Div(
    [
        html.Div(
            [
                # provider selector
                dcc.Dropdown(
                    id='provider-selector',
                    placeholder='select provider',
                    options=[{'label': provider, 'value': provider} for provider in
                             global_providers]),
                # schema selector
                dcc.Dropdown(id='schema-selector', placeholder='select schema'),
                # level selector
                dcc.Dropdown(id='level-selector', placeholder='select level',
                             options=[{'label': level.value, 'value': level.value} for level in
                                      IntervalLevel],
                             value=IntervalLevel.LEVEL_1DAY.value),
                # column selector
                html.Div(id='schema-column-selector-container', children=None),
                # selected (provider, schema, column) properties; starts with a
                # single placeholder option
                dcc.Dropdown(
                    id='properties-selector',
                    options=[
                        {'label': 'undefined', 'value': 'undefined'}
                    ],
                    value='undefined',
                    multi=True
                ),
                # codes filter
                dcc.Input(id='input-code-filter', type='text', placeholder='input codes',
                          style={'width': '400px'}),
                # time range filter
                dcc.DatePickerRange(
                    id='date-picker-range',
                    start_date='2009-01-01',
                    end_date=now_pd_timestamp(),
                    display_format=TIME_FORMAT_DAY
                ),
                # load data for table
                html.Button('load data', id='btn-load-data', n_clicks_timestamp=0),
                # table container
                html.Div(id='data-table-container', children=None),
                # selected properties
                html.Label('setting y_axis and chart type for the columns:'),
                # col setting container: per-column choice of y-axis and chart type
                html.Div(id='col-setting-container', children=dash_table.DataTable(
                    id='col-setting-table',
                    columns=[
                        {'id': 'property', 'name': 'property', 'editable': False},
                        {'id': 'y_axis', 'name': 'y_axis', 'presentation': 'dropdown'},
                        {'id': 'chart', 'name': 'chart', 'presentation': 'dropdown'}
                    ],
                    dropdown={
                        'y_axis': {
                            'options': [
                                {'label': i, 'value': i}
                                for i in ['y1', 'y2', 'y3', 'y4', 'y5']
                            ]
                        },
                        'chart': {
                            'options': [
                                {'label': chart_type.value, 'value': chart_type.value}
                                for chart_type in NormalData.get_charts_by_intent(IntentType.compare_self)
                            ]
                        }
                    },
                    editable=True
                ), ),
                html.Div(id='table-type-label', children=None),
                # intent / chart selectors side by side (50% width each)
                html.Div(
                    [
                        html.Div([dcc.Dropdown(id='intent-selector')],
                                 style={'width': '50%', 'display': 'inline-block'}),
                        html.Div([dcc.Dropdown(id='chart-selector')],
                                 style={'width': '50%', 'display': 'inline-block'})
                    ]
                ),
                html.Div(id='chart-container', children=None)
            ])
    ]
)
def properties_to_readers(properties, level, codes, start_date, end_date) -> List[DataReader]:
    """Build one :class:`DataReader` per distinct (provider, schema) pair.

    The requested columns are grouped by their (provider, schema) origin so
    that each data source is read only once, with exactly the columns that
    were asked for.

    :param properties: list of dicts, each with keys ``'provider'``,
        ``'schema'`` and ``'column'`` describing one requested data column
    :param level: interval level passed through to every reader
    :param codes: entity codes used to filter the loaded data
    :param start_date: start timestamp passed to every reader
    :param end_date: end timestamp passed to every reader
    :return: one configured ``DataReader`` per (provider, schema) pair
    """
    # group the requested columns by (provider, schema)
    provider_schema_map_cols = {}
    for prop in properties:
        key = (prop['provider'], prop['schema'])
        # setdefault avoids the explicit membership check before appending
        provider_schema_map_cols.setdefault(key, []).append(prop['column'])

    readers = []
    for (provider, schema_name), columns in provider_schema_map_cols.items():
        schema = get_schema_by_name(schema_name)
        readers.append(DataReader(data_schema=schema, provider=provider, codes=codes, level=level,
                                  columns=columns, start_timestamp=start_date, end_timestamp=end_date,
                                  time_field=schema.time_field()))
    return readers
# Operator tables used to translate dash_table filter-query expressions.
# Each entry is [query-token, symbol]; the single-element entries
# ('contains ', 'datestartswith ') have no symbolic equivalent.
# Tokens used by pandas-style filtering (ge/le/lt/gt/ne/eq).
operators_df = [['ge ', '>='],
                ['le ', '<='],
                ['lt ', '<'],
                ['gt ', '>'],
                ['ne ', '!='],
                ['eq ', '='],
                ['contains '],
                ['datestartswith ']]
# Tokens written with symbolic comparison operators (SQL-like style).
operators_sql = [['>= ', '>='],
                 ['<= ', '<='],
                 ['< ', '<'],
                 ['> ', '>'],
                 ['!= ', '!='],
                 ['== ', '='],
                 ['contains '],
                 ['datestartswith ']]
| 36.796526 | 120 | 0.52458 |
9dd308c092689ec19be480b950fd1043adb5873d | 1,139 | py | Python | api-gateway/fcgi/handwritten/python/fcgi_codec.py | intel/cloud-client-ai-service-framework | 01676b08878f7a58201854aedb181134eafef7a2 | [
"Apache-2.0"
] | 3 | 2022-03-25T17:28:53.000Z | 2022-03-29T03:30:25.000Z | api-gateway/fcgi/handwritten/python/fcgi_codec.py | intel/cloud-client-ai-service-framework | 01676b08878f7a58201854aedb181134eafef7a2 | [
"Apache-2.0"
] | null | null | null | api-gateway/fcgi/handwritten/python/fcgi_codec.py | intel/cloud-client-ai-service-framework | 01676b08878f7a58201854aedb181134eafef7a2 | [
"Apache-2.0"
] | 1 | 2022-03-27T12:44:19.000Z | 2022-03-27T12:44:19.000Z | import numpy as np
| 30.783784 | 127 | 0.600527 |
9dd3506fa61a6efdbedcfd729d5128ff929686bf | 4,333 | py | Python | src/hmmmr/non_batched_functions.py | carojasq/HMMMR | f94846d8f02fe8993a0e5fb55e936dd1c1596187 | [
"MIT"
] | null | null | null | src/hmmmr/non_batched_functions.py | carojasq/HMMMR | f94846d8f02fe8993a0e5fb55e936dd1c1596187 | [
"MIT"
] | 1 | 2019-11-01T08:32:04.000Z | 2019-11-01T08:32:04.000Z | src/hmmmr/non_batched_functions.py | carojasq/HMMMR | f94846d8f02fe8993a0e5fb55e936dd1c1596187 | [
"MIT"
] | 1 | 2019-04-05T00:06:31.000Z | 2019-04-05T00:06:31.000Z | from common_libs import *
from cublas_functions import *
linalg.init()
# Matrix product, there is a batch equivalent for this function too
# Make sure it has 2 dimensions (use reshape in the case is 1d)
def cublas_matrix_product_gemm_non_batched(handle, a_gpu, b_gpu):
    """Compute the matrix product C = A @ B on the GPU with one cuBLAS GEMM call.

    cuBLAS works on column-major (Fortran-order) matrices while the gpuarrays
    here are row-major, so the operands are passed swapped (B first) with the
    'n'/'n' transpose flags — the column-major result of B^T @ A^T is exactly
    the row-major A @ B.

    :param handle: cuBLAS context handle
    :param a_gpu: 2-d GPUArray of shape (n, l); for the regression use case
        this is X (be careful not to pass Xt here)
    :param b_gpu: 2-d GPUArray of shape (k, m); Xt in the regression use case
    :return: 2-d GPUArray of shape (n, m) holding A @ B
    :raises ValueError: if either input is not 2-dimensional, or if the inner
        dimensions do not match
    """
    # pick the dtype-appropriate gemm (e.g. Sgemm/Dgemm) based on b_gpu
    cublas_dot = get_single_dot_function(b_gpu)
    # BUG FIX: the original tested len(a_gpu.shape) twice and never
    # validated b_gpu's dimensionality.
    if len(a_gpu.shape) != 2 or len(b_gpu.shape) != 2:
        raise ValueError('Make sure the arrays are 2 dimensional')
    n, l = a_gpu.shape
    k, m = b_gpu.shape
    if l != k:
        raise ValueError('Inner dimensions do not match: %d != %d' % (l, k))
    c_gpu = gpuarray.empty((n, m), b_gpu.dtype)
    # leading dimensions in elements (strides are in bytes), at least 1
    lda = max(1, a_gpu.strides[0] // a_gpu.dtype.itemsize)
    ldb = max(1, b_gpu.strides[0] // b_gpu.dtype.itemsize)
    ldc = max(1, c_gpu.strides[0] // c_gpu.dtype.itemsize)
    alpha = np.float32(1.0)
    beta = np.float32(0.0)
    transa = transb = 'n'
    # operands swapped (B first) to account for row-major storage; see docstring
    cublas_dot(handle, transb, transa, m, n, k, alpha, b_gpu.gpudata, ldb, a_gpu.gpudata, lda, beta, c_gpu.gpudata, ldc)
    return c_gpu
"TODO: Fix this function, like linalg.inv"
| 41.663462 | 120 | 0.686591 |
9dd7404e8264756d1a9d92df88241f2bdb03e559 | 793 | py | Python | tools/run/mrcnnalt.py | MartinPlantinga/TomatoNet | 52f3f993665865d1e74b24c43bf4a722c470eac1 | [
"BSD-2-Clause"
] | 1 | 2022-03-13T23:52:22.000Z | 2022-03-13T23:52:22.000Z | tools/run/mrcnnalt.py | MartinPlantinga/TomatoNet | 52f3f993665865d1e74b24c43bf4a722c470eac1 | [
"BSD-2-Clause"
] | null | null | null | tools/run/mrcnnalt.py | MartinPlantinga/TomatoNet | 52f3f993665865d1e74b24c43bf4a722c470eac1 | [
"BSD-2-Clause"
] | null | null | null | import os
from time import localtime, strftime
# NOTE(review): the "print exec_all" statement below is Python 2 syntax,
# so this launcher must be run with a Python 2 interpreter.
pwd = os.curdir
# repository root, one level above the current (tools/run) directory
root_dir = pwd + './../'
# pretrained ImageNet VGG16 weights used to initialise the network
weights_path = '{}data/imagenet_models/VGG16.v2.caffemodel'.format(root_dir)
# experiment configuration for the alternating-optimisation training
cfg_path = '{}experiments/cfgs/mask_rcnn_alt_opt.yml'.format(root_dir)
# timestamped log path so successive runs do not overwrite each other
log_file="{}experiments/logs/mask_rcnn_alt_opt_{}".format(root_dir, strftime("%d-%m-%Y_%H_%M", localtime()))
#print log_file
# bash snippet that redirects all shell output through tee into the log file
exec_log_file = "exec &> >(tee -a \"{}\")".format(log_file)
#echo Logging output to "$LOG"
#os.system(exec &> >(tee -a "$LOG")
# training command: Mask R-CNN alt-opt with VGG16 on VOC 2012 train, GPU 0
exec_python = "python ../train_mask_rcnn_alt_opt.py --gpu 0 --net_name 'VGG16' --weights {} --imdb 'voc_2012_train' --cfg {}".format(weights_path, cfg_path)
# combine the logging redirection and the training command into one shell line
exec_all = "'/bin/bash -c {}' ; {}".format(exec_log_file, exec_python)
#os.system(exec_all)
print exec_all
os.system(exec_all)
| 41.736842 | 158 | 0.696091 |
9dd7a2e49e2ed72a4a6612efc5a036e4272aa367 | 1,325 | py | Python | toontown/ai/DistributedTrashcanZeroMgr.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 99 | 2019-11-02T22:25:00.000Z | 2022-02-03T03:48:00.000Z | toontown/ai/DistributedTrashcanZeroMgr.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 42 | 2019-11-03T05:31:08.000Z | 2022-03-16T22:50:32.000Z | toontown/ai/DistributedTrashcanZeroMgr.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | [
"BSD-3-Clause"
] | 57 | 2019-11-03T07:47:37.000Z | 2022-03-22T00:41:49.000Z | from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from toontown.ai import DistributedPhaseEventMgr
| 42.741935 | 87 | 0.768302 |
9dd862d583434b6ed73a9e6519551c5f6c54561e | 1,575 | py | Python | examples/run_fieldtrip_IF.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 18 | 2018-04-18T12:14:52.000Z | 2022-02-25T19:31:44.000Z | examples/run_fieldtrip_IF.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 106 | 2017-12-09T13:34:30.000Z | 2022-03-12T01:02:17.000Z | examples/run_fieldtrip_IF.py | annapasca/ephypype | 6dbacdd6913234a28b690b401862ff062accecc7 | [
"BSD-3-Clause"
] | 13 | 2017-05-28T20:38:56.000Z | 2022-03-06T15:58:02.000Z | """
.. _ft_seeg_example:
=========================================
Apply bipolar montage to depth electrodes
=========================================
This scripts shows a very simple example on how to create an Interface wrapping
a desired function of a Matlab toolbox (|FieldTrip|).
.. |FieldTrip| raw:: html
<a href="http://www.fieldtriptoolbox.org/" target="_blank">FieldTrip</a>
The **input** data should be a **.mat** file containing a FieldTrip data struct
"""
# Authors: Annalisa Pascarella <a.pascarella@iac.cnr.it>
# License: BSD (3-clause)
import os.path as op
import ephypype
from ephypype.nodes.FT_tools import Reference
from ephypype.datasets import fetch_ieeg_dataset
###############################################################################
# Let us fetch the data first. It is around 675 MB download.
# Example data lives in the 'examples' directory next to the ephypype package.
base_path = op.join(op.dirname(ephypype.__file__), '..', 'examples')
# Download (if needed) and locate the iEEG example dataset (~675 MB).
data_path = fetch_ieeg_dataset(base_path)
# Local FieldTrip (Matlab toolbox) installation path — adjust to your machine.
ft_path = '/usr/local/MATLAB/R2018a/toolbox/MEEG/fieldtrip-20200327/'
# Re-referencing scheme applied to the depth (sEEG) electrodes.
refmethod = 'bipolar'
# Matlab cell-array literal selecting channel groups by electrode-name prefix.
channels_name = '{\'RAM*\', \'RHH*\', \'RTH*\', \'ROC*\', \'LAM*\',\'LHH*\', \'LTH*\'}' # noqa
# Now we call the interface Reference to apply a bipolar montage to sEEG data
reference_if = Reference()
reference_if.inputs.data_file = op.join(data_path, 'SubjectUCI29_data.mat')
reference_if.inputs.channels = channels_name
reference_if.inputs.ft_path = ft_path
reference_if.inputs.refmethod = refmethod
# No additional Matlab code to execute beyond the re-referencing itself.
reference_if.inputs.script = ''
# Run the interface; the re-referenced data path is exposed on its outputs.
out = reference_if.run()
print('Rereferenced data saved at {}'.format(out.outputs.data_output))
| 32.8125 | 95 | 0.665397 |
9dd8b07faafc812e62e163fe5ae0d1616164fd3e | 2,224 | py | Python | tree.py | korbi98/TicTacToeGo_Zero | b8ea4562f3ddf914a53fc380f2266f13ab887e04 | [
"MIT"
] | null | null | null | tree.py | korbi98/TicTacToeGo_Zero | b8ea4562f3ddf914a53fc380f2266f13ab887e04 | [
"MIT"
] | null | null | null | tree.py | korbi98/TicTacToeGo_Zero | b8ea4562f3ddf914a53fc380f2266f13ab887e04 | [
"MIT"
] | 1 | 2021-12-20T12:03:49.000Z | 2021-12-20T12:03:49.000Z | # Simple tree structure
import numpy as np
import math | 35.870968 | 87 | 0.616007 |
9dd8bbfb2717a06b4b3ec45eb064716d069fb7b0 | 269 | py | Python | vibrant_frequencies/cli.py | garstka/vibrant-frequencies | e237bf97089c87ca3e9335ba0d2abd09756b98fc | [
"MIT"
] | 2 | 2019-01-31T15:13:37.000Z | 2020-11-19T03:24:12.000Z | vibrant_frequencies/cli.py | garstka/vibrant-frequencies | e237bf97089c87ca3e9335ba0d2abd09756b98fc | [
"MIT"
] | null | null | null | vibrant_frequencies/cli.py | garstka/vibrant-frequencies | e237bf97089c87ca3e9335ba0d2abd09756b98fc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Console script for vibrant_frequencies."""
import logging
import click
from .prototype import visualize
if __name__ == "__main__":
    # Script entry point; main() is defined elsewhere in this module
    # (not visible in this excerpt).
    main()
| 14.944444 | 48 | 0.67658 |
9dda3faed30d9ee945694fcad8f057ec177bc507 | 6,568 | py | Python | rak_net/protocol/handler.py | L0RD-ZER0/aio-rak-net | 0ec0b6ac4daf6a4b146ac94ac2d0313c13975363 | [
"MIT"
] | 1 | 2021-12-02T04:37:08.000Z | 2021-12-02T04:37:08.000Z | rak_net/protocol/handler.py | L0RD-ZER0/aio-rak-net | 0ec0b6ac4daf6a4b146ac94ac2d0313c13975363 | [
"MIT"
] | null | null | null | rak_net/protocol/handler.py | L0RD-ZER0/aio-rak-net | 0ec0b6ac4daf6a4b146ac94ac2d0313c13975363 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import TYPE_CHECKING
from .packet import (
ConnectionRequest,
ConnectionRequestAccepted,
NewIncomingConnection,
OfflinePing,
OfflinePong,
OnlinePing,
OnlinePong,
OpenConnectionRequest1,
OpenConnectionReply1,
OpenConnectionRequest2,
OpenConnectionReply2,
IncompatibleProtocolVersion,
)
from .protocol_info import ProtocolInfo
from ..utils import InternetAddress
if TYPE_CHECKING:
from ..server import Server
__all__ = 'Handler',
| 40.294479 | 134 | 0.676309 |
9ddc3d1e0254e6926c024e8ba5ff8037971f9673 | 5,434 | py | Python | software/pynguin/pynguin/testcase/execution/monkeytypeexecutor.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
] | 3 | 2020-08-20T10:27:13.000Z | 2021-11-02T20:28:16.000Z | software/pynguin/pynguin/testcase/execution/monkeytypeexecutor.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
] | null | null | null | software/pynguin/pynguin/testcase/execution/monkeytypeexecutor.py | se2p/artifact-pynguin-ssbse2020 | 32b5f4d27ef1b81e5c541471e98fa6e50f5ce8a6 | [
"CC-BY-4.0"
] | null | null | null | # This file is part of Pynguin.
#
# Pynguin is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pynguin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pynguin. If not, see <https://www.gnu.org/licenses/>.
"""An executor that executes a test under the inspection of the MonkeyType tool."""
import contextlib
import logging
import os
import sys
from typing import Any, Dict, Iterable, List, Optional
import astor
from monkeytype.config import DefaultConfig
from monkeytype.db.base import CallTraceStore, CallTraceThunk
from monkeytype.encoding import CallTraceRow, serialize_traces
from monkeytype.tracing import CallTrace, CallTraceLogger, CallTracer
import pynguin.configuration as config
import pynguin.testcase.execution.executioncontext as ctx
import pynguin.testcase.testcase as tc
# pylint:disable=too-few-public-methods
| 37.219178 | 88 | 0.636916 |