hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9ddca262545e263f1aa26d015f1d96948d664c84 | 7,778 | py | Python | testproject/testapp/tests/test_history_entries.py | innovationinit/django-wicked-historian | bef0011639791e2275c6bf2272b57542174b4cf0 | [
"BSD-2-Clause"
] | null | null | null | testproject/testapp/tests/test_history_entries.py | innovationinit/django-wicked-historian | bef0011639791e2275c6bf2272b57542174b4cf0 | [
"BSD-2-Clause"
] | null | null | null | testproject/testapp/tests/test_history_entries.py | innovationinit/django-wicked-historian | bef0011639791e2275c6bf2272b57542174b4cf0 | [
"BSD-2-Clause"
] | 1 | 2022-03-15T07:29:58.000Z | 2022-03-15T07:29:58.000Z | "Test history entries for migrated, obsolete fields"
from datetime import (
time,
timedelta,
)
from decimal import Decimal
from typing import (
Any,
Dict,
)
from django.contrib.auth.models import User
from django.db import models
from wicked_historian.usersmuggler import usersmuggler
from wicked_historian.utils import FieldDescription
from testapp.factories import BookFactory
from testapp.models import (
Author,
Book,
BookEditHistory,
Language,
OBSOLETE_BOOK_FIELD_CHOICES,
)
from .base import FreezeTimeTestCase
| 36.862559 | 125 | 0.613525 |
9ddcc75effef9735c94806309f3290c831fcccf5 | 219 | py | Python | UnityEngine/HideFlags/__init__.py | Grim-es/udon-pie-auto-completion | c2cd86554ed615cdbbb01e19fa40665eafdfaedc | [
"MIT"
] | null | null | null | UnityEngine/HideFlags/__init__.py | Grim-es/udon-pie-auto-completion | c2cd86554ed615cdbbb01e19fa40665eafdfaedc | [
"MIT"
] | null | null | null | UnityEngine/HideFlags/__init__.py | Grim-es/udon-pie-auto-completion | c2cd86554ed615cdbbb01e19fa40665eafdfaedc | [
"MIT"
] | null | null | null | from UdonPie import UnityEngine
from UdonPie.Undefined import *
| 18.25 | 37 | 0.611872 |
9ddcd002e78260764fc62fd401536244bf8603dd | 536 | py | Python | solicitudes/migrations/0006_auto_20191101_0122.py | jmartipu/CrearSolicitudBack | e05c30d81eee3277c2e28e8e3e8991fa723dd770 | [
"MIT"
] | null | null | null | solicitudes/migrations/0006_auto_20191101_0122.py | jmartipu/CrearSolicitudBack | e05c30d81eee3277c2e28e8e3e8991fa723dd770 | [
"MIT"
] | 7 | 2019-10-10T16:57:29.000Z | 2022-02-10T08:14:25.000Z | solicitudes/migrations/0006_auto_20191101_0122.py | jmartipu/CrearSolicitudBack | e05c30d81eee3277c2e28e8e3e8991fa723dd770 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.5 on 2019-11-01 06:22
from django.db import migrations, models
| 23.304348 | 73 | 0.600746 |
9ddcebb4e8c7a0186684b52cc9c2d36af16dce87 | 12,639 | py | Python | mmdetection/third_party/text_perceptron/mmdet/models/seg_heads/tp_head.py | chengzhanzhan/DAVAR-Lab-OCR | 79776915c616731698d452d935e7b599b1ce46f0 | [
"Apache-2.0"
] | 4 | 2021-07-08T03:08:16.000Z | 2022-03-20T02:53:29.000Z | mmdetection/third_party/text_perceptron/mmdet/models/seg_heads/tp_head.py | chengzhanzhan/DAVAR-Lab-OCR | 79776915c616731698d452d935e7b599b1ce46f0 | [
"Apache-2.0"
] | null | null | null | mmdetection/third_party/text_perceptron/mmdet/models/seg_heads/tp_head.py | chengzhanzhan/DAVAR-Lab-OCR | 79776915c616731698d452d935e7b599b1ce46f0 | [
"Apache-2.0"
] | null | null | null | """
####################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : tp_head.py
# Abstract : Text Perceptron head structure, mainly including losses for segmentation part and regression part
# Current Version: 1.0.0
# Author : Liang Qiao
# Date : 2020-05-31
# Modified Date : 2020-11-26
# Modified by : inusheng
# Comments : Code and comment standardized
######################################################################################################
"""
import numpy as np
import torch
import torch.nn as nn
from mmdet.models.builder import build_loss
from mmdet.models.registry import HEADS
from mmdet.ops import ConvModule
from mmdet.core import force_fp32, auto_fp16
def make_one_hot(input_tensor, num_classes):
"""
Description:
convert a feature map of shape [N, 1, H, W] into its one-hot encoding version of shape [N, C, H, W],
where C is the number of classes.
Arguments:
input_tensor: input tensor, [N, 1, *]
num_classes : the number of classes of feature maps
Returns:
one-hot encoding of input tensor, [N, num_classes, *]
"""
input_tensor = input_tensor[:, np.newaxis, ::]
shape = np.array(input_tensor.shape)
shape[1] = num_classes
shape = tuple(shape)
result = torch.zeros(shape)
result = result.scatter_(1, input_tensor.cpu(), 1).to(input_tensor.device)
return result
| 48.240458 | 131 | 0.600047 |
9ddd09288fc0047903028aaf720e265008e2c882 | 5,757 | py | Python | MyHDLSim/Wrappers/SignalWrapper.py | mattsnowboard/msu-myhdlsim | 5d0d881b682a72b77d9597cb84b8fdd4bd444afc | [
"BSD-2-Clause"
] | 1 | 2021-03-30T10:20:06.000Z | 2021-03-30T10:20:06.000Z | MyHDLSim/Wrappers/SignalWrapper.py | mattsnowboard/msu-myhdlsim | 5d0d881b682a72b77d9597cb84b8fdd4bd444afc | [
"BSD-2-Clause"
] | null | null | null | MyHDLSim/Wrappers/SignalWrapper.py | mattsnowboard/msu-myhdlsim | 5d0d881b682a72b77d9597cb84b8fdd4bd444afc | [
"BSD-2-Clause"
] | null | null | null | import wx, wx.lib.newevent
import wx.lib.ogl as ogl
from myhdl import Signal, always, intbv
from MyHDLSim.sequential import ClkDriver
# OGL object to draw a signal
| 34.473054 | 94 | 0.589717 |
9ddd849ad1a926498070335095fce2ee85d5a45b | 1,403 | py | Python | Modular Chatbot/frankenbot-framework/frankenbot_console.py | MAhsanShahid/Frankenbot | d9425446fb3754591aeea7ab7e562199f937bd52 | [
"MIT"
] | 2 | 2021-04-27T12:51:35.000Z | 2021-08-16T12:43:50.000Z | Modular Chatbot/frankenbot-framework/frankenbot_console.py | MAhsanShahid/Frankenbot | d9425446fb3754591aeea7ab7e562199f937bd52 | [
"MIT"
] | null | null | null | Modular Chatbot/frankenbot-framework/frankenbot_console.py | MAhsanShahid/Frankenbot | d9425446fb3754591aeea7ab7e562199f937bd52 | [
"MIT"
] | 1 | 2021-08-16T12:43:51.000Z | 2021-08-16T12:43:51.000Z | import json
import sys
from frankenbot.bot import Bot
from frankenbot.console_chat_interface import ConsoleChatInterface
from frankenbot.persistence.json.json_unserializer import JSONUnserializer
bot_def = "bot_def/restaurantsearch.json"
unserializer = JSONUnserializer()
#simpleResponseGenerator object
bot = unserializer.load_from_file(bot_def)
chat = ConsoleChatInterface(bot)
chat.start_chat()
# 'show me chines restaurants in the north'
# ________________________________
# from rasa_nlu.training_data import load_data
# from rasa_nlu.config import RasaNLUModelConfig
# from rasa_nlu.model import Trainer, Interpreter
# from rasa_nlu import config
#
# train_data = load_data('../intent_categorization_example_rasa/rasa_dataset.json')
# trainer = Trainer(config.load("../intent_categorization_example_rasa/config_spacy.yaml"))
#
# # train the model and save it to a folder
# trainer.train(train_data)
# model_directory = trainer.persist('../intent_categorization_example_rasa/projects/')
#
# # load the model in the interpreter
# interpreter = Interpreter.load(model_directory)
#
#
# # run two utterances through the interpreter
# def nlu(utterance):
# nlu_result = interpreter.parse(utterance)
# print("utterance: " + utterance)
# print("nlu resutl: " + str(nlu_result["intent"]))
#
#
# nlu(u"i'm looking for a place in the north of town")
# nlu(u"Good Morning World")
# nlu(u"vegg") | 34.219512 | 91 | 0.784034 |
9dde6bfc80676d7c23483dae2cdebeb48e518e09 | 6,801 | py | Python | spinel_wisun_utils.py | LaudateCorpus1/ti-wisunfan-pyspinel | 5b911ef8319115fb2ef20a57358dd44733bed30a | [
"Apache-2.0"
] | 2 | 2021-03-22T21:42:03.000Z | 2021-09-01T09:12:43.000Z | spinel_wisun_utils.py | LaudateCorpus1/ti-wisunfan-pyspinel | 5b911ef8319115fb2ef20a57358dd44733bed30a | [
"Apache-2.0"
] | 1 | 2021-11-11T16:18:51.000Z | 2021-11-11T16:18:51.000Z | spinel_wisun_utils.py | LaudateCorpus1/ti-wisunfan-pyspinel | 5b911ef8319115fb2ef20a57358dd44733bed30a | [
"Apache-2.0"
] | 5 | 2021-08-18T03:15:32.000Z | 2022-01-20T05:19:41.000Z | #!/usr/bin/env python
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import ipaddress
# Helper util function to parse received PROP_ROUTING_TABLE_UPDATE property info
def parse_routingtable_property(propRoutingTableAddrInfo):
"""
Internal utility function to convert Routing Table Addr Info into structure
Returns changed_type and dictionary entry
"""
routingTableEntry = {}
update_type = -1
dst_ipv6_addr = ""
try:
# 2 bytes = length of structure; 1 byte = change type; 16 bytes Dest IPv6 address;
# 1 byte = prefix len ; 16 bytes = next hop IPv6 address; 4 bytes = lifetime
routingTableStruct = propRoutingTableAddrInfo[0:len(propRoutingTableAddrInfo)]
changed_info = routingTableStruct[2:3] # C
dst_addr = routingTableStruct[3:19] # 6
prefix_length = routingTableStruct[19:20] # C
next_hop_addr = routingTableStruct[20:36] # 6
lifetime = routingTableStruct[36:40] # L
update_type = int.from_bytes(changed_info, "little", signed=False)
dst_ipv6_addr = ipaddress.IPv6Address(dst_addr)
routingTableEntry["prefixLen"] = int.from_bytes(prefix_length, "little", signed=False)
routingTableEntry["nextHopAddr"] = ipaddress.IPv6Address(next_hop_addr)
routingTableEntry["lifetime"] = int.from_bytes(lifetime, "little", signed=False)
except Exception as es:
print("Exception raised during Parsing Routing Table")
print(es)
return(update_type, dst_ipv6_addr, routingTableEntry)
| 32.54067 | 94 | 0.642847 |
9ddef4eb6e5502bd565a0db158fed8bdc6d939f1 | 1,383 | py | Python | src/ModularChess/movements/EnPassant.py | ferranSanchezLlado/ModularChess | 896fa192fd49f86062ea79dd0d3cbe7e5cdc9d6b | [
"MIT"
] | null | null | null | src/ModularChess/movements/EnPassant.py | ferranSanchezLlado/ModularChess | 896fa192fd49f86062ea79dd0d3cbe7e5cdc9d6b | [
"MIT"
] | null | null | null | src/ModularChess/movements/EnPassant.py | ferranSanchezLlado/ModularChess | 896fa192fd49f86062ea79dd0d3cbe7e5cdc9d6b | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING, Optional
from ModularChess.movements.Movement import Movement, MovementData
if TYPE_CHECKING:
from ModularChess.pieces.Piece import Piece
from ModularChess.utils.Position import Position
| 44.612903 | 103 | 0.642805 |
9ddfecdb8db0a77645be766083a1ef5b0e142f16 | 2,387 | py | Python | pytemperaturectrl/julabo.py | jenrei/pytemperaturectrl | eabfbf8a6d732cda72c5cd8397a85b0d8960da78 | [
"MIT"
] | null | null | null | pytemperaturectrl/julabo.py | jenrei/pytemperaturectrl | eabfbf8a6d732cda72c5cd8397a85b0d8960da78 | [
"MIT"
] | null | null | null | pytemperaturectrl/julabo.py | jenrei/pytemperaturectrl | eabfbf8a6d732cda72c5cd8397a85b0d8960da78 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
julabo.py
Contains Julabo temperature control
see documentation http://www.julabo.com/sites/default/files/downloads/manuals/french/19524837-V2.pdf at section 10.2.
:copyright: (c) 2015 by Maxime DAUPHIN
:license: MIT, see LICENSE for details
"""
import serial
import time
from .pytemperaturectrl import TemperatureControl
| 26.820225 | 118 | 0.714286 |
9de15ad674c58a2528581460d79721fcb5a0883c | 1,143 | py | Python | algorithms/matrix.py | rkistner/contest-algorithms | 8133f8ddce4f257386c7bcf55589d559854c1955 | [
"Apache-2.0"
] | 4 | 2015-03-08T15:38:45.000Z | 2018-04-08T02:13:54.000Z | algorithms/matrix.py | rkistner/contest-algorithms | 8133f8ddce4f257386c7bcf55589d559854c1955 | [
"Apache-2.0"
] | 1 | 2017-11-29T01:15:55.000Z | 2017-11-29T01:17:40.000Z | algorithms/matrix.py | rkistner/contest-algorithms | 8133f8ddce4f257386c7bcf55589d559854c1955 | [
"Apache-2.0"
] | 4 | 2015-11-08T03:39:54.000Z | 2020-11-06T10:42:53.000Z | """
Some basic matrix-related functionality.
"""
def cumulative2d(grid):
"""
>>> cumulative2d([[2, 5, 4], [3, 8, 1]])
[[0, 0, 0, 0], [0, 2, 7, 11], [0, 5, 18, 23]]
"""
rows = []
for row in grid:
rrr = [0]
last = 0
for col in row:
last += col
rrr.append(last)
rows.append(rrr)
blocks = []
last = [0]*len(rows[0])
blocks.append(last)
for row in rows:
last = list(map(sum, zip(last, row)))
blocks.append(last)
return blocks
def transpose(grid):
"""
Switches rows and columns.
>>> transpose([[1, 2, 3], [4, 5, 6]])
[[1, 4], [2, 5], [3, 6]]
"""
R = len(grid)
C = len(grid[0])
inverted = []
for r in range(C):
row = [c[r] for c in grid]
inverted.append(row)
return inverted
def moment(array):
"""
>>> moment([5, 6, 7, 2, 4])
[0, 6, 14, 6, 16]
"""
return list(map(lambda i_v: i_v[0]*i_v[1], enumerate(array)))
def moment2d(grid):
"""
>>> moment2d([[5, 6, 7, 2, 4]])
[[0, 6, 14, 6, 16]]
"""
return list(map(moment, grid)) | 21.166667 | 65 | 0.464567 |
9de39c8afb68ea723afe9738f2762762838a1e06 | 5,361 | py | Python | douyu_spider/douyu_spider_v2.py | DH-JQ/WebSpider-DH | 71603c85cc5327ce7a0a864db145f3c650fa13a5 | [
"MIT"
] | null | null | null | douyu_spider/douyu_spider_v2.py | DH-JQ/WebSpider-DH | 71603c85cc5327ce7a0a864db145f3c650fa13a5 | [
"MIT"
] | null | null | null | douyu_spider/douyu_spider_v2.py | DH-JQ/WebSpider-DH | 71603c85cc5327ce7a0a864db145f3c650fa13a5 | [
"MIT"
] | null | null | null | from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from lxml import etree
import json
if __name__ == '__main__':
dou_spider = DouyuSpider()
dou_spider.run()
| 43.233871 | 112 | 0.564447 |
9de3f58e34c1bda2a31dcb16e2481f3de5ab6ad2 | 960 | py | Python | setup.py | fungibit/bitcoinscript | ced6fb37dfa40eac7341826c758842e0ed7e7475 | [
"MIT"
] | 1 | 2017-10-25T17:11:44.000Z | 2017-10-25T17:11:44.000Z | setup.py | fungibit/bitcoinscript | ced6fb37dfa40eac7341826c758842e0ed7e7475 | [
"MIT"
] | 3 | 2017-03-10T05:27:29.000Z | 2017-04-07T16:06:28.000Z | setup.py | fungibit/bitcoinscript | ced6fb37dfa40eac7341826c758842e0ed7e7475 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# Read version info from bitcoinscript/version.py
version_vars = {}
with open("bitcoinscript/version.py") as fp:
exec(fp.read(), version_vars)
version_string = version_vars['__version_string__']
setup(
name='bitcoinscript',
description='Bitcoin Script Debugger and Interactive Shell',
long_description=long_description,
version=version_string,
author='fungibit',
author_email='fungibit@yandex.com',
url='https://github.com/fungibit/bitcoinscript',
license='MIT',
packages=find_packages(exclude=['tests*', 'bin']),
platforms = ["POSIX", "Windows"],
keywords='bitcoin, script, bitcoin-script, blockchain',
)
| 28.235294 | 64 | 0.726042 |
9de4af217717d87a01fd3c8f160faa869464539b | 29,850 | py | Python | discretize/tree_mesh.py | ngodber/discretize | 2329c9e9552b5c05f40ebf62f0bb207267bd2f92 | [
"MIT"
] | 123 | 2017-01-09T04:59:25.000Z | 2022-03-29T08:06:43.000Z | discretize/tree_mesh.py | ngodber/discretize | 2329c9e9552b5c05f40ebf62f0bb207267bd2f92 | [
"MIT"
] | 246 | 2017-01-09T17:20:12.000Z | 2022-03-01T22:05:20.000Z | discretize/tree_mesh.py | ngodber/discretize | 2329c9e9552b5c05f40ebf62f0bb207267bd2f92 | [
"MIT"
] | 26 | 2018-03-27T19:24:46.000Z | 2021-11-11T20:28:09.000Z | # ___ ___ ___ ___ ___ ___
# /\ \ /\ \ /\ \ /\ \ /\ \ /\ \
# /::\ \ /::\ \ \:\ \ /::\ \ /::\ \ /::\ \
# /:/\:\ \ /:/\:\ \ \:\ \ /:/\:\ \ /:/\:\ \ /:/\:\ \
# /:/ \:\ \ /:/ \:\ \ /::\ \ /::\~\:\ \ /::\~\:\ \ /::\~\:\ \
# /:/__/ \:\__\/:/__/ \:\__\ /:/\:\__\/:/\:\ \:\__\/:/\:\ \:\__\/:/\:\ \:\__\
# \:\ \ /:/ /\:\ \ \/__//:/ \/__/\/_|::\/:/ /\:\~\:\ \/__/\:\~\:\ \/__/
# \:\ /:/ / \:\ \ /:/ / |:|::/ / \:\ \:\__\ \:\ \:\__\
# \:\/:/ / \:\ \ \/__/ |:|\/__/ \:\ \/__/ \:\ \/__/
# \::/ / \:\__\ |:| | \:\__\ \:\__\
# \/__/ \/__/ \|__| \/__/ \/__/
#
#
#
# .----------------.----------------.
# /| /| /|
# / | / | / |
# / | 6 / | 7 / |
# / | / | / |
# .----------------.----+-----------. |
# /| . ---------/|----.----------/|----.
# / | /| / | /| / | /|
# / | / | 4 / | / | 5 / | / |
# / | / | / | / | / | / |
# . -------------- .----------------. |/ |
# | . ---+------|----.----+------|----. |
# | /| .______|___/|____.______|___/|____.
# | / | / 2 | / | / 3 | / | /
# | / | / | / | / | / | /
# . ---+---------- . ---+---------- . | /
# | |/ | |/ | |/ z
# | . ----------|----.-----------|----. ^ y
# | / 0 | / 1 | / | /
# | / | / | / | /
# | / | / | / o----> x
# . -------------- . -------------- .
#
#
# Face Refinement:
#
# 2_______________3 _______________
# | | | | |
# ^ | | | 2 | 3 |
# | | | | | |
# | | x | ---> |-------+-------|
# t1 | | | | |
# | | | 0 | 1 |
# |_______________| |_______|_______|
# 0 t0--> 1
#
#
# Face and Edge naming conventions:
#
# fZp
# |
# 6 ------eX3------ 7
# /| | / |
# /eZ2 . / eZ3
# eY2 | fYp eY3 |
# / | / fXp|
# 4 ------eX2----- 5 |
# |fXm 2 -----eX1--|---- 3 z
# eZ0 / | eY1 ^ y
# | eY0 . fYm eZ1 / | /
# | / | | / | /
# 0 ------eX0------1 o----> x
# |
# fZm
#
#
# fX fY
# 2___________3 2___________3
# | e1 | | e1 |
# | | | |
# e0 | x | e2 z e0 | x | e2 z
# | | ^ | | ^
# |___________| |___> y |___________| |___> x
# 0 e3 1 0 e3 1
# fZ
# 2___________3
# | e1 |
# | |
# e0 | x | e2 y
# | | ^
# |___________| |___> x
# 0 e3 1
from discretize.base import BaseTensorMesh
from discretize.operators import InnerProducts, DiffOperators
from discretize.mixins import InterfaceMixins, TreeMeshIO
from discretize.utils import as_array_n_by_dim
from discretize._extensions.tree_ext import _TreeMesh, TreeCell
import numpy as np
import scipy.sparse as sp
import warnings
from discretize.utils.code_utils import deprecate_property
def cell_levels_by_index(self, indices):
"""Fast function to return a list of levels for the given cell indices
Parameters
----------
index: (N) array_like
Cell indexes to query
Returns
-------
(N) numpy.ndarray of int
Levels for the cells.
"""
return self._cell_levels_by_indexes(indices)
def get_interpolation_matrix(
self, locs, location_type="CC", zeros_outside=False, **kwargs
):
"""Produces interpolation matrix
Parameters
----------
loc : (N, dim) array_like
Location of points to interpolate to
location_type: str, optional
What to interpolate
location_type can be:
- 'CC' -> scalar field defined on cell centers
- 'Ex' -> x-component of field defined on edges
- 'Ey' -> y-component of field defined on edges
- 'Ez' -> z-component of field defined on edges
- 'Fx' -> x-component of field defined on faces
- 'Fy' -> y-component of field defined on faces
- 'Fz' -> z-component of field defined on faces
- 'N' -> scalar field defined on nodes
Returns
-------
(N, n_loc_type) scipy.sparse.csr_matrix
the interpolation matrix
"""
if "locType" in kwargs:
warnings.warn(
"The locType keyword argument has been deprecated, please use location_type. "
"This will be removed in discretize 1.0.0",
DeprecationWarning,
)
location_type = kwargs["locType"]
if "zerosOutside" in kwargs:
warnings.warn(
"The zerosOutside keyword argument has been deprecated, please use zeros_outside. "
"This will be removed in discretize 1.0.0",
DeprecationWarning,
)
zeros_outside = kwargs["zerosOutside"]
locs = as_array_n_by_dim(locs, self.dim)
if location_type not in ["N", "CC", "Ex", "Ey", "Ez", "Fx", "Fy", "Fz"]:
raise Exception(
"location_type must be one of N, CC, Ex, Ey, Ez, Fx, Fy, or Fz"
)
if self.dim == 2 and location_type in ["Ez", "Fz"]:
raise Exception("Unable to interpolate from Z edges/face in 2D")
locs = np.require(np.atleast_2d(locs), dtype=np.float64, requirements="C")
if location_type == "N":
Av = self._getNodeIntMat(locs, zeros_outside)
elif location_type in ["Ex", "Ey", "Ez"]:
Av = self._getEdgeIntMat(locs, zeros_outside, location_type[1])
elif location_type in ["Fx", "Fy", "Fz"]:
Av = self._getFaceIntMat(locs, zeros_outside, location_type[1])
elif location_type in ["CC"]:
Av = self._getCellIntMat(locs, zeros_outside)
return Av
def validate(self):
return self.finalized
def equals(self, other):
try:
if self.finalized and other.finalized:
return super().equals(other)
except AttributeError:
pass
return False
def __reduce__(self):
return TreeMesh, (self.h, self.origin), self.__getstate__()
cellGrad = deprecate_property("cell_gradient", "cellGrad", removal_version="1.0.0", future_warn=False)
cellGradx = deprecate_property(
"cell_gradient_x", "cellGradx", removal_version="1.0.0", future_warn=False
)
cellGrady = deprecate_property(
"cell_gradient_y", "cellGrady", removal_version="1.0.0", future_warn=False
)
cellGradz = deprecate_property(
"cell_gradient_z", "cellGradz", removal_version="1.0.0", future_warn=False
)
cellGradStencil = deprecate_property(
"cell_gradient_stencil", "cellGradStencil", removal_version="1.0.0", future_warn=False
)
nodalGrad = deprecate_property(
"nodal_gradient", "nodalGrad", removal_version="1.0.0", future_warn=False
)
nodalLaplacian = deprecate_property(
"nodal_laplacian", "nodalLaplacian", removal_version="1.0.0", future_warn=False
)
faceDiv = deprecate_property("face_divergence", "faceDiv", removal_version="1.0.0", future_warn=False)
faceDivx = deprecate_property(
"face_x_divergence", "faceDivx", removal_version="1.0.0", future_warn=False
)
faceDivy = deprecate_property(
"face_y_divergence", "faceDivy", removal_version="1.0.0", future_warn=False
)
faceDivz = deprecate_property(
"face_z_divergence", "faceDivz", removal_version="1.0.0", future_warn=False
)
edgeCurl = deprecate_property("edge_curl", "edgeCurl", removal_version="1.0.0", future_warn=False)
maxLevel = deprecate_property("max_used_level", "maxLevel", removal_version="1.0.0", future_warn=False)
vol = deprecate_property("cell_volumes", "vol", removal_version="1.0.0", future_warn=False)
areaFx = deprecate_property("face_x_areas", "areaFx", removal_version="1.0.0", future_warn=False)
areaFy = deprecate_property("face_y_areas", "areaFy", removal_version="1.0.0", future_warn=False)
areaFz = deprecate_property("face_z_areas", "areaFz", removal_version="1.0.0", future_warn=False)
area = deprecate_property("face_areas", "area", removal_version="1.0.0", future_warn=False)
edgeEx = deprecate_property("edge_x_lengths", "edgeEx", removal_version="1.0.0", future_warn=False)
edgeEy = deprecate_property("edge_y_lengths", "edgeEy", removal_version="1.0.0", future_warn=False)
edgeEz = deprecate_property("edge_z_lengths", "edgeEz", removal_version="1.0.0", future_warn=False)
edge = deprecate_property("edge_lengths", "edge", removal_version="1.0.0", future_warn=False)
permuteCC = deprecate_property(
"permute_cells", "permuteCC", removal_version="1.0.0", future_warn=False
)
permuteF = deprecate_property("permute_faces", "permuteF", removal_version="1.0.0", future_warn=False)
permuteE = deprecate_property("permute_edges", "permuteE", removal_version="1.0.0", future_warn=False)
faceBoundaryInd = deprecate_property(
"face_boundary_indices", "faceBoundaryInd", removal_version="1.0.0", future_warn=False
)
cellBoundaryInd = deprecate_property(
"cell_boundary_indices", "cellBoundaryInd", removal_version="1.0.0", future_warn=False
)
_aveCC2FxStencil = deprecate_property(
"average_cell_to_total_face_x", "_aveCC2FxStencil", removal_version="1.0.0", future_warn=False
)
_aveCC2FyStencil = deprecate_property(
"average_cell_to_total_face_y", "_aveCC2FyStencil", removal_version="1.0.0", future_warn=False
)
_aveCC2FzStencil = deprecate_property(
"average_cell_to_total_face_z", "_aveCC2FzStencil", removal_version="1.0.0", future_warn=False
)
_cellGradStencil = deprecate_property(
"stencil_cell_gradient", "_cellGradStencil", removal_version="1.0.0", future_warn=False
)
_cellGradxStencil = deprecate_property(
"stencil_cell_gradient_x", "_cellGradxStencil", removal_version="1.0.0", future_warn=False
)
_cellGradyStencil = deprecate_property(
"stencil_cell_gradient_y", "_cellGradyStencil", removal_version="1.0.0", future_warn=False
)
_cellGradzStencil = deprecate_property(
"stencil_cell_gradient_z", "_cellGradzStencil", removal_version="1.0.0", future_warn=False
)
| 38.665803 | 115 | 0.514807 |
9de60149d3083eff4951fd6c5511316e856edce5 | 4,053 | py | Python | BLOX/Examples/DayTrader/data_downloader.py | linearlabstech/blox | 6a5c8a28fcfcb17731be89939284e7ac13a047d7 | [
"Apache-2.0"
] | 17 | 2019-03-31T18:37:35.000Z | 2020-08-17T18:14:40.000Z | BLOX/Examples/DayTrader/data_downloader.py | linearlabstech/blox | 6a5c8a28fcfcb17731be89939284e7ac13a047d7 | [
"Apache-2.0"
] | null | null | null | BLOX/Examples/DayTrader/data_downloader.py | linearlabstech/blox | 6a5c8a28fcfcb17731be89939284e7ac13a047d7 | [
"Apache-2.0"
] | 1 | 2019-04-02T07:02:08.000Z | 2019-04-02T07:02:08.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019, Linear Labs Technology
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from yahoo_finance_api2 import share
import torch.tensor as tt
import torch,random
def get_targets(rows):
"""
This is where you can build your target.
For the tutorial we're only concerned about whether we should buy or sell.
So we'll create a rule for this, we'll need to set this up as regression ( on the open set (-1,1) ). Our rule is as follows:
If the stock closes below the open, we should have sold (t-1 = sell at close, t = buy at close).
If the stock closes above the open, we should have bought (t-1 = buy at close, t = sell at close).
While under the constraint of maximizing our profit
We can obviously make this more complex, even with the data we have, but for this tutorial,
If you want to create your own targets, this is where you should do it. Below is the accessible data structure passed to this function.
rows = {
"timestamp": [
1557149400000,
],
"open": [
126.38999938964844,
],
"high": [
128.55999755859375,
],
"low": [
126.11000061035156,
],
"close": [
128.14999389648438,
],
"volume": [
24239800,
]
}
"""
# targets: sell = -1, buy = +1
# set to sell at beginning of the trading day
# we assume that unless the it's going down, buy.
# later we'll add some business logic to determine the actual action of purchasing
# return [ tt([0.]) ] + [ tt([ 0 if (rows['close'][i-2] > rows['open'][i-2]) and (rows['close'][i] > rows['open'][i]) else (1 if random.random() > .7 else 2 )]) for i in range(2,len(rows['open'])) ]
return [ tt( [ [ [ rows['high'][i] ] ] ] ) for i in range(1,len(rows['open'])) ]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-t','--ticker',help="enter the stock ticker symbol",required=True)
parser.add_argument('-s','--start',help="start date of data to grab. default is 75 days ago",default=75,type=int)
parser.add_argument('-o','--output_file',help="name of the output file to save the dataset",default='trader.ds')
parser.add_argument('-f','--frequency',help='how frequent to sample each day of trading (in hourly fractions)',type=int,default=1)
parser.add_argument('--csv',help='the csv file to load instead of downloading fresh data',default=None)
main( parser.parse_args() ) | 40.939394 | 203 | 0.606464 |
9de667e4365b4429b64b9267a959ad26b53a85c3 | 790 | py | Python | services/db-api/project/config.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | [
"Apache-2.0"
] | 2 | 2018-05-30T08:40:26.000Z | 2018-09-06T15:37:25.000Z | services/db-api/project/config.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | [
"Apache-2.0"
] | 1 | 2021-06-01T22:37:55.000Z | 2021-06-01T22:37:55.000Z | services/db-api/project/config.py | JoshPrim/EVA-Projekt | 94e4f594519eda676e0f5f2787f8643831f346df | [
"Apache-2.0"
] | 2 | 2018-05-31T14:55:04.000Z | 2018-08-29T09:38:31.000Z | import os
from project import app, db
| 27.241379 | 65 | 0.734177 |
9de764b4415129ff93630dd93ad535a3c634a3e9 | 625 | py | Python | webapp/models/steps.py | PratikMahajan/Recipe-Management-Webapp-And-Infrastructure | 234b0685ee0367a3e37401af3bd77d6fc6cde6ce | [
"Apache-2.0"
] | null | null | null | webapp/models/steps.py | PratikMahajan/Recipe-Management-Webapp-And-Infrastructure | 234b0685ee0367a3e37401af3bd77d6fc6cde6ce | [
"Apache-2.0"
] | null | null | null | webapp/models/steps.py | PratikMahajan/Recipe-Management-Webapp-And-Infrastructure | 234b0685ee0367a3e37401af3bd77d6fc6cde6ce | [
"Apache-2.0"
] | null | null | null | from sqlalchemy import Column,Integer,String, Float
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from config.envvar import *
Base = declarative_base()
engine = create_engine('mysql+pymysql://'+db_config["DB_USER"]+':'+db_config["DB_PASSWORD"]+'@'+db_config["DB_HOST"]+'/'
+ db_config["DB_NAME"])
Base.metadata.create_all(engine)
| 31.25 | 120 | 0.7184 |
9de9e23fa22dfbdc836f630d94dbe82b7f2350bd | 1,259 | py | Python | src/sample.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | [
"MIT"
] | 14 | 2021-01-28T07:13:25.000Z | 2022-02-10T06:41:32.000Z | src/sample.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | [
"MIT"
] | 2 | 2021-04-14T15:24:30.000Z | 2021-05-06T07:02:08.000Z | src/sample.py | xiajing10/akec | 239fdda923c8a0743f56dbf0a009fa2235b85451 | [
"MIT"
] | 1 | 2021-07-09T02:52:59.000Z | 2021-07-09T02:52:59.000Z | # -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 11:17:21 2020
@author: eilxaix
"""
import pandas as pd
import re
# =============================================================================
# data = read_csv_data(pd.read_csv('../../dataset/ieee_xai/ieee_xai.csv'))
# =============================================================================
| 38.151515 | 155 | 0.576648 |
9deb0ef9427cc365d5801578cfd0695ec1ae527c | 17,705 | py | Python | scicopia/app/parser/ScicopiaParser.py | pikatech/Scicopia | dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985 | [
"MIT"
] | null | null | null | scicopia/app/parser/ScicopiaParser.py | pikatech/Scicopia | dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985 | [
"MIT"
] | 9 | 2021-07-24T16:12:03.000Z | 2021-07-24T16:58:19.000Z | scicopia/app/parser/ScicopiaParser.py | pikatech/Scicopia | dcdb3b4f55b9111fa3b4fe78afdb07bb2ceb9985 | [
"MIT"
] | 1 | 2021-06-18T16:00:06.000Z | 2021-06-18T16:00:06.000Z | # Generated from Scicopia.g4 by ANTLR 4.9.2
# encoding: utf-8
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
| 33.468809 | 471 | 0.560859 |
9dec02cc3a734c80b921decedb5baec89b783ae0 | 259 | py | Python | scripts/init_db.py | LucienZhang/werewolf | c0b4baf2a0f7904978a26a533fe8a695fc274407 | [
"MIT"
] | null | null | null | scripts/init_db.py | LucienZhang/werewolf | c0b4baf2a0f7904978a26a533fe8a695fc274407 | [
"MIT"
] | 6 | 2020-02-10T10:01:03.000Z | 2020-04-01T06:19:55.000Z | scripts/init_db.py | LucienZhang/werewolf | c0b4baf2a0f7904978a26a533fe8a695fc274407 | [
"MIT"
] | null | null | null | from pathlib import Path
import sys
sys.path.append(str(Path(__file__).resolve().parents[1]))
from werewolf import create_app
from werewolf.database import db
# Build an app instance configured for database work so the SQLAlchemy
# extension (db) is bound to a live application context.
app = create_app('db')
with app.app_context():
    # Destructive reset: drop every table, then recreate the full schema.
    db.drop_all()
    db.create_all()
| 19.923077 | 58 | 0.710425 |
9deccc7336bb4388cafa2a33d6c4aebd562a78e9 | 936 | py | Python | tests/test_estimators/test_probweight_regression.py | janvdvegt/scikit-lego | 774e557c4d19f67ef54f3f0d1622c64ef9903b63 | [
"MIT"
] | null | null | null | tests/test_estimators/test_probweight_regression.py | janvdvegt/scikit-lego | 774e557c4d19f67ef54f3f0d1622c64ef9903b63 | [
"MIT"
] | null | null | null | tests/test_estimators/test_probweight_regression.py | janvdvegt/scikit-lego | 774e557c4d19f67ef54f3f0d1622c64ef9903b63 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from sklego.common import flatten
from sklego.linear_model import ProbWeightRegression
from tests.conftest import nonmeta_checks, regressor_checks, general_checks
| 36 | 95 | 0.794872 |
9ded0421224a68f8c7f744c1d9f09e3afc552f04 | 854 | py | Python | Python/problem0294.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | Python/problem0294.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | Python/problem0294.py | 1050669722/LeetCode-Answers | c8f4d1ccaac09cda63b60d75144335347b06dc81 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Jun 29 19:44:28 2019
@author: Administrator
"""
solu = Solution()
s = "++++"
s = "+++++"
s = "++++++"
print(solu.canWin(s)) | 23.081081 | 60 | 0.432084 |
9def3bfc70e4954b3590123b603ffa0f9d2385b3 | 79 | py | Python | wmtexe/__init__.py | csdms/wmt-exe | d1e21537135a2d2c442ae6244427b076803d1254 | [
"MIT"
] | null | null | null | wmtexe/__init__.py | csdms/wmt-exe | d1e21537135a2d2c442ae6244427b076803d1254 | [
"MIT"
] | 10 | 2016-02-04T17:55:34.000Z | 2019-03-19T21:10:39.000Z | wmtexe/__init__.py | csdms/wmt-exe | d1e21537135a2d2c442ae6244427b076803d1254 | [
"MIT"
] | 1 | 2015-07-29T22:22:46.000Z | 2015-07-29T22:22:46.000Z | """The CSDMS Web Modeling Tool (WMT) execution server."""
__version__ = '0.3'
| 19.75 | 57 | 0.683544 |
9df0774506aa365c6756ee8a870b647ac6699146 | 8,284 | py | Python | GEM/plt_resukts_GEM.py | Webbah/sec-for-reinforcement-learning | 19db622dce4963d25cb1b6e4ae12ddf98b6d27d2 | [
"MIT"
] | 2 | 2021-12-16T12:49:26.000Z | 2022-01-28T19:18:43.000Z | GEM/plt_resukts_GEM.py | Webbah/sec-for-reinforcement-learning | 19db622dce4963d25cb1b6e4ae12ddf98b6d27d2 | [
"MIT"
] | null | null | null | GEM/plt_resukts_GEM.py | Webbah/sec-for-reinforcement-learning | 19db622dce4963d25cb1b6e4ae12ddf98b6d27d2 | [
"MIT"
] | null | null | null | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
save_results = False
plot_stored_GEM_reults() | 49.017751 | 185 | 0.543457 |
9df19b2f9979610a8ed9bef79a44747496f8dd2a | 3,725 | py | Python | Adhesion/Interactions/PowerLaw.py | ContactEngineering/Adhesion | acc46ad9bfe49fec667cb9a116ebde426faa38c4 | [
"MIT"
] | null | null | null | Adhesion/Interactions/PowerLaw.py | ContactEngineering/Adhesion | acc46ad9bfe49fec667cb9a116ebde426faa38c4 | [
"MIT"
] | 4 | 2021-08-18T07:30:57.000Z | 2022-03-05T11:05:09.000Z | Adhesion/Interactions/PowerLaw.py | ContactEngineering/Adhesion | acc46ad9bfe49fec667cb9a116ebde426faa38c4 | [
"MIT"
] | null | null | null | #
# Copyright 2020 Antoine Sanner
# 2020 Lars Pastewka
#
# ### MIT license
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import numpy as np
from NuMPI import MPI
from Adhesion.Interactions import Potential, SoftWall
| 32.391304 | 79 | 0.628725 |
9df469a37713b98c57237b63b551cab2396ffeab | 4,687 | py | Python | theory/func/bardell/print_bardell_integrals_to_C.py | mrosemeier/compmech | f18f6d0471c72b26a3b014d2df41df3463505eae | [
"BSD-3-Clause"
] | 4 | 2019-02-05T06:12:12.000Z | 2022-03-25T14:41:18.000Z | theory/func/bardell/print_bardell_integrals_to_C.py | saullocastro/panels | 5387daab1ec656b065f7a9be820e2c234e53764e | [
"BSD-3-Clause"
] | null | null | null | theory/func/bardell/print_bardell_integrals_to_C.py | saullocastro/panels | 5387daab1ec656b065f7a9be820e2c234e53764e | [
"BSD-3-Clause"
] | 2 | 2019-06-05T07:19:35.000Z | 2020-12-29T00:22:18.000Z | import os
import glob
from ast import literal_eval
import numpy as np
import sympy
from sympy import pi, sin, cos, var
from sympy.printing import ccode
from compmech.conecyl.sympytools import mprint_as_sparse, pow2mult
# Symbolic boundary coefficients of the Bardell shape functions
# (x/y direction, ends 1/2, translation t / rotation r), plus the
# integration variables and the c0/c1 constants used by some integrals.
var('x1t, x1r, x2t, x2r')
var('y1t, y1r, y2t, y2r')
var('xi1, xi2')
var('c0, c1')
# Substitution map applied to the symbolic expressions (currently empty).
subs = {
       }
# Boilerplate prepended to every generated C source file; EXPORTIT marks
# the functions for DLL export on Windows.
header_c = """
#include <stdlib.h>
#include <math.h>
#if defined(_WIN32) || defined(__WIN32__)
#define EXPORTIT __declspec(dllexport)
#else
#define EXPORTIT
#endif
"""
# Accumulators for the three generated source files.
printstr_full = header_c
printstr_12 = header_c
printstr_c0c1 = header_c
# Boilerplate prepended to every generated C header; IMPORTIT marks the
# prototypes for DLL import on Windows.
header_h = """
#if defined(_WIN32) || defined(__WIN32__)
#define IMPORTIT __declspec(dllimport)
#else
#define IMPORTIT
#endif
"""
# Accumulators for the three generated header files.
printstr_full_h = header_h
printstr_12_h = header_h
printstr_c0c1_h = header_h
# Convert each Mathematica-exported integral table into a C lookup function
# made of nested switch statements over the (i, j) indices.
# (The original bound an unused `enumerate` index that was later shadowed by
# the matrix row index; a plain `for` avoids that.)
for filepath in glob.glob(r'.\bardell_integrals_mathematica\fortran_*.txt'):
    print(filepath)
    with open(filepath) as f:
        filename = os.path.basename(filepath)
        names = filename[:-4].split('_')
        lines = [line.strip() for line in f.readlines()]
        string = ''.join(lines)
        string = string.replace('\\','')
        # NOTE(review): eval() of file contents is unsafe for untrusted data;
        # ast.literal_eval (imported above) cannot parse these symbolic
        # expressions, so the exported files must come from a trusted source.
        tmp = eval(string)
        # print() call form keeps the script valid under Python 3 as well
        # (the original used the Python 2 print statement here).
        print('\tfinished eval')
    printstr = ''
    # Three integral families, distinguished by the file name, each with a
    # slightly different C signature.
    if '_12' in filepath:
        name = '_'.join(names[1:3])
        printstr += 'EXPORTIT double integral_%s(double xi1, double xi2, int i, int j,\n' % name
        printstr += ' double x1t, double x1r, double x2t, double x2r,\n'
        printstr += ' double y1t, double y1r, double y2t, double y2r) {\n'
    elif '_c0c1' in filepath:
        name = '_'.join(names[1:3])
        printstr += 'EXPORTIT double integral_%s(double c0, double c1, int i, int j,\n' % name
        printstr += ' double x1t, double x1r, double x2t, double x2r,\n'
        printstr += ' double y1t, double y1r, double y2t, double y2r) {\n'
    else:
        name = names[1]
        printstr += 'EXPORTIT double integral_%s(int i, int j,\n' % name
        printstr += ' double x1t, double x1r, double x2t, double x2r,\n'
        printstr += ' double y1t, double y1r, double y2t, double y2r) {\n'
    # Matching header: same prototype with IMPORTIT linkage, include-guarded.
    printstr_h = '\n'
    printstr_h += '#ifndef BARDELL_%s_H\n' % name.upper()
    printstr_h += '#define BARDELL_%s_H\n' % name.upper()
    printstr_h += printstr.replace(' {', ';').replace('EXPORTIT', 'IMPORTIT')
    printstr_h += '#endif /** BARDELL_%s_H */\n' % name.upper()
    printstr_h += '\n'
    # Emit one "case i: switch(j) {...}" group per non-empty matrix row;
    # zero entries fall through to the default "return 0.".
    matrix = sympy.Matrix(np.atleast_2d(tmp))
    for i in range(matrix.shape[0]):
        activerow = False
        for j in range(matrix.shape[1]):
            if matrix[i, j] == 0:
                continue
            if not activerow:
                activerow = True
                if i == 0:
                    printstr += ' switch(i) {\n'
                else:
                    printstr += ' default:\n'
                    printstr += ' return 0.;\n'
                    printstr += ' }\n'
                printstr += ' case %d:\n' % i
                printstr += ' switch(j) {\n'
            printstr += ' case %d:\n' % j
            printstr += ' return %s;\n' % ccode(matrix[i, j].evalf())
        printstr += ' default:\n'
        printstr += ' return 0.;\n'
        printstr += ' }\n'
    printstr += ' default:\n'
    printstr += ' return 0.;\n'
    printstr += ' }\n'
    printstr += '}\n'
    # Route the generated code to the matching output files; out_path avoids
    # shadowing the loop variable (the original reassigned `filepath`).
    if '_12' in filepath:
        printstr_12_h += printstr_h
        out_path = r'..\..\..\compmech\lib\src\bardell_integral_%s_12.c' % name[:-3]
        with open(out_path, 'w') as g:
            g.write(printstr_12 + printstr)
    elif '_c0c1' in filepath:
        printstr_c0c1_h += printstr_h
        out_path = r'..\..\..\compmech\lib\src\bardell_integral_%s_c0c1.c' % name[:-5]
        with open(out_path, 'w') as g:
            g.write(printstr_c0c1 + printstr)
    else:
        printstr_full += printstr
        printstr_full_h += printstr_h
# Aggregate headers and the combined source are written once at the end.
with open(r'..\..\..\compmech\include\bardell.h', 'w') as g:
    g.write(printstr_full_h)
with open(r'..\..\..\compmech\lib\src\bardell.c', 'w') as g:
    g.write(printstr_full)
with open(r'..\..\..\compmech\include\bardell_12.h', 'w') as g:
    g.write(printstr_12_h)
with open(r'..\..\..\compmech\include\bardell_c0c1.h', 'w') as g:
    g.write(printstr_c0c1_h)
| 34.211679 | 100 | 0.53019 |
9df4e6c48971da6fc4b9921811db567e82ddd1ec | 6,277 | py | Python | src/distributeddictionarylearning.py | att/Distributed-Dictionary-Learning | c6915477dac1ffe7d9a732dd31333d540f5bd00b | [
"MIT"
] | 5 | 2020-07-25T18:44:54.000Z | 2022-03-27T14:14:20.000Z | src/distributeddictionarylearning.py | att/Distributed-Dictionary-Learning | c6915477dac1ffe7d9a732dd31333d540f5bd00b | [
"MIT"
] | null | null | null | src/distributeddictionarylearning.py | att/Distributed-Dictionary-Learning | c6915477dac1ffe7d9a732dd31333d540f5bd00b | [
"MIT"
] | 3 | 2019-09-04T08:04:09.000Z | 2022-03-27T14:14:26.000Z | import time
import numpy as np
from sparsecoding import sparse_encode_omp
from sparsecoding import sparse_encode_nnmp
from distributedpowermethod import distri_powermethod
| 44.204225 | 135 | 0.463916 |
9df5471ca3ddddaa94bd6982c624b686b6a66f95 | 677 | py | Python | Python3/Books/Douson/chapter09/simple_game.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | null | null | null | Python3/Books/Douson/chapter09/simple_game.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | null | null | null | Python3/Books/Douson/chapter09/simple_game.py | neon1ks/Study | 5d40171cf3bf5e8d3a95539e91f5afec54d1daf3 | [
"MIT"
] | 2 | 2018-07-31T23:25:43.000Z | 2019-07-03T14:26:18.000Z | # Simple Game
# Demonstrates importing modules
import games, random
print("Welcome to the world's simplest game!\n")
again = None
while again != "n":
players = []
num = games.ask_number(question = "How many players? (2 - 5): ",
low = 2, high = 5)
for i in range(num):
name = input("Player name: ")
score = random.randrange(100) + 1
player = games.Player(name, score)
players.append(player)
print("\nHere are the game results:")
for player in players:
print(player)
again = games.ask_yes_no("\nDo you want to play again? (y/n): ")
input("\n\nPress the enter key to exit.")
| 26.038462 | 68 | 0.589365 |
9df56584ef8b053ce1af3875d1ad5df2cd5671e4 | 189 | py | Python | src/1.foundation/1.embedding_python/1.4.create_custom_module/assets/scripts/game.py | Chukobyte/learn-engine-dev | 3f4437ed4abab9011d584bdc0ab4eff921393f00 | [
"CC-BY-4.0",
"CC0-1.0"
] | 5 | 2021-08-13T01:53:59.000Z | 2022-01-23T18:50:17.000Z | src/1.foundation/1.embedding_python/1.4.create_custom_module/assets/scripts/game.py | Chukobyte/learn-engine-dev | 3f4437ed4abab9011d584bdc0ab4eff921393f00 | [
"CC-BY-4.0",
"CC0-1.0"
] | null | null | null | src/1.foundation/1.embedding_python/1.4.create_custom_module/assets/scripts/game.py | Chukobyte/learn-engine-dev | 3f4437ed4abab9011d584bdc0ab4eff921393f00 | [
"CC-BY-4.0",
"CC0-1.0"
] | null | null | null | import engine
| 23.625 | 70 | 0.68254 |
9df570a8cc43113443b6b213e6ab3adba4fc6941 | 964 | py | Python | yomikatawa/run.py | tensorknower69/yomikatawa-py | 3e49e5e5d2ebfc49ddabe5a20e486ea0927b362a | [
"MIT"
] | null | null | null | yomikatawa/run.py | tensorknower69/yomikatawa-py | 3e49e5e5d2ebfc49ddabe5a20e486ea0927b362a | [
"MIT"
] | null | null | null | yomikatawa/run.py | tensorknower69/yomikatawa-py | 3e49e5e5d2ebfc49ddabe5a20e486ea0927b362a | [
"MIT"
] | null | null | null | import argparse
import yomikatawa as yomi
if __name__ == "__main__":
main()
| 41.913043 | 140 | 0.706432 |
9df5c3a1c0529a4f203b0c8d4d096dd4cd43ed68 | 10,873 | py | Python | izzoLambertSolver.py | tylera277/voyagerTrajectoryCalculator | fded6356e670fbc2b182cac2bfcc98e7223e2b80 | [
"MIT"
] | null | null | null | izzoLambertSolver.py | tylera277/voyagerTrajectoryCalculator | fded6356e670fbc2b182cac2bfcc98e7223e2b80 | [
"MIT"
] | null | null | null | izzoLambertSolver.py | tylera277/voyagerTrajectoryCalculator | fded6356e670fbc2b182cac2bfcc98e7223e2b80 | [
"MIT"
] | null | null | null | """ A module hosting all algorithms devised by Izzo """
import time
import numpy as np
from numpy import cross, pi
from numpy.linalg import norm
from scipy.special import hyp2f1
def izzo2015(
    mu,
    r1,
    r2,
    tof,
    M=0,
    prograde=True,
    low_path=True,
    maxiter=35,
    atol=1e-5,
    rtol=1e-7,
    full_output=False,
):
    r"""
    Solves Lambert problem using Izzo's devised algorithm.

    Parameters
    ----------
    mu: float
        Gravitational parameter, equivalent to :math:`GM` of attractor body.
    r1: numpy.array
        Initial position vector.
    r2: numpy.array
        Final position vector.
    tof: float
        Time of flight between ``r1`` and ``r2``.
    M: int
        Number of revolutions. Must be equal or greater than 0 value.
    prograde: bool
        If `True`, specifies prograde motion. Otherwise, retrograde motion is imposed.
    low_path: bool
        If two solutions are available, it selects between high or low path.
    maxiter: int
        Maximum number of iterations.
    atol: float
        Absolute tolerance.
    rtol: float
        Relative tolerance.
    full_output: bool
        If True, the number of iterations and time per iteration are also
        returned.

    Returns
    -------
    v1: numpy.array
        Initial velocity vector.
    v2: numpy.array
        Final velocity vector.
    numiter: int
        Number of iterations (only when ``full_output`` is True).
    tpi: float
        Time per iteration in seconds (only when ``full_output`` is True).

    Notes
    -----
    This is the algorithm devised by Dario Izzo[1] in 2015. It inherits from
    the one developed by Lancaster[2] during the 60s, following the universal
    formulae approach. It is one of the most modern solvers, being a complete
    Lambert's problem solver (zero and Multiple-revolution solutions). It shows
    high performance and robustness while requiring no more than four iterations
    to reach a solution.

    All credits of the implementation go to Juan Luis Cano Rodriguez and the
    poliastro development team, from which this routine inherits. Some changes
    were made to adapt it to `lamberthub` API. In addition, the hypergeometric
    function is the one from SciPy.

    Copyright (c) 2012-2021 Juan Luis Cano Rodriguez and the poliastro development team

    References
    ----------
    [1] Izzo, D. (2015). Revisiting Lambert's problem. Celestial Mechanics
        and Dynamical Astronomy, 121(1), 1-15.

    [2] Lancaster, E. R., & Blanchard, R. C. (1969). A unified form of
        Lambert's theorem (Vol. 5368). National Aeronautics and Space
        Administration.

    """
    # Check that input parameters are safe
    #assert_parameters_are_valid(mu, r1, r2, tof, M)
    # Chord
    c = r2 - r1
    c_norm, r1_norm, r2_norm = norm(c), norm(r1), norm(r2)
    # Semiperimeter
    s = (r1_norm + r2_norm + c_norm) * 0.5
    # Versors (unit vectors along r1, r2 and the normal to the transfer plane)
    i_r1, i_r2 = r1 / r1_norm, r2 / r2_norm
    i_h = cross(i_r1, i_r2)
    i_h = i_h / norm(i_h)
    # Geometry of the problem: ll is Izzo's lambda transfer-angle parameter
    ll = np.sqrt(1 - min(1.0, c_norm / s))
    # Compute the fundamental tangential directions; the sign of the plane
    # normal's z-component fixes the sense of the transfer
    if i_h[2] < 0:
        ll = -ll
        i_t1, i_t2 = cross(i_r1, i_h), cross(i_r2, i_h)
    else:
        i_t1, i_t2 = cross(i_h, i_r1), cross(i_h, i_r2)
    # Correct transfer angle parameter and tangential vectors regarding orbit's
    # inclination
    ll, i_t1, i_t2 = (-ll, -i_t1, -i_t2) if prograde is False else (ll, i_t1, i_t2)
    # Non dimensional time of flight
    T = np.sqrt(2 * mu / s ** 3) * tof
    # Find solutions and filter them
    x, y, numiter, tpi = _find_xy(ll, T, M, maxiter, atol, rtol, low_path)
    # Reconstruct
    gamma = np.sqrt(mu * s / 2)
    rho = (r1_norm - r2_norm) / c_norm
    sigma = np.sqrt(1 - rho ** 2)
    # Compute the radial and tangential components at initial and final
    # position vectors
    V_r1, V_r2, V_t1, V_t2 = _reconstruct(x, y, r1_norm, r2_norm, ll, gamma, rho, sigma)
    # Solve for the initial and final velocity
    v1 = V_r1 * (r1 / r1_norm) + V_t1 * i_t1
    v2 = V_r2 * (r2 / r2_norm) + V_t2 * i_t2
    return (v1, v2, numiter, tpi) if full_output is True else (v1, v2)
def _reconstruct(x, y, r1, r2, ll, gamma, rho, sigma):
"""Reconstruct solution velocity vectors."""
V_r1 = gamma * ((ll * y - x) - rho * (ll * y + x)) / r1
V_r2 = -gamma * ((ll * y - x) + rho * (ll * y + x)) / r2
V_t1 = gamma * sigma * (y + ll * x) / r1
V_t2 = gamma * sigma * (y + ll * x) / r2
return [V_r1, V_r2, V_t1, V_t2]
def _find_xy(ll, T, M, maxiter, atol, rtol, low_path):
    """Compute the free variable x (and y) for the requested revolutions."""
    # The derivative of the TOF curve is discontinuous at |ll| == 1.
    assert abs(ll) < 1

    max_revs = np.floor(T / pi)
    # Non-dimensional time at x = 0 (T_xM in the paper).
    T_00 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2)

    # When T is short the topmost multi-revolution branch may be infeasible;
    # refine the bound with the actual minimum of that branch.
    if max_revs > 0 and T < T_00 + max_revs * pi:
        _, shortest = _compute_T_min(ll, max_revs, maxiter, atol, rtol)
        if T < shortest:
            max_revs = max_revs - 1

    # Unlike the original paper, only the requested M is solved for.
    if M > max_revs:
        raise ValueError("No feasible solution, try lower M!")

    # Seed the root-finder, then polish with Householder iterations.
    seed = _initial_guess(T, ll, M, low_path)
    x, numiter, tpi = _householder(seed, T, ll, M, atol, rtol, maxiter)

    return x, _compute_y(x, ll), numiter, tpi
def _compute_y(x, ll):
"""Computes y."""
return np.sqrt(1 - ll ** 2 * (1 - x ** 2))
def _compute_psi(x, y, ll):
"""Computes psi.
"The auxiliary angle psi is computed using Eq.(17) by the appropriate
inverse function"
"""
if -1 <= x < 1:
# Elliptic motion
# Use arc cosine to avoid numerical errors
return np.arccos(x * y + ll * (1 - x ** 2))
elif x > 1:
# Hyperbolic motion
# The hyperbolic sine is bijective
return np.arcsinh((y - x * ll) * np.sqrt(x ** 2 - 1))
else:
# Parabolic motion
return 0.0
def _tof_equation(x, T0, ll, M):
    """Time of flight equation as a function of x alone (y derived internally)."""
    y = _compute_y(x, ll)
    return _tof_equation_y(x, y, T0, ll, M)
def _tof_equation_y(x, y, T0, ll, M):
"""Time of flight equation with externally computated y."""
if M == 0 and np.sqrt(0.6) < x < np.sqrt(1.4):
eta = y - ll * x
S_1 = (1 - ll - x * eta) * 0.5
Q = 4 / 3 * hyp2f1(3, 1, 5 / 2, S_1)
T_ = (eta ** 3 * Q + 4 * ll * eta) * 0.5
else:
psi = _compute_psi(x, y, ll)
T_ = np.divide(
np.divide(psi + M * pi, np.sqrt(np.abs(1 - x ** 2))) - x + ll * y,
(1 - x ** 2),
)
return T_ - T0
def _compute_T_min(ll, M, maxiter, atol, rtol):
    """Locate the minimum of the TOF curve, returning ``[x_T_min, T_min]``."""
    if ll == 1:
        # Degenerate geometry: the minimum sits exactly at x = 0.
        return [0.0, _tof_equation(0.0, 0.0, ll, M)]
    if M == 0:
        # The zero-revolution curve decreases monotonically towards x -> inf.
        return [np.inf, 0.0]
    # Multi-revolution branch: minimise numerically, starting at x > 0 so
    # that the ll = -1 limit does not cause trouble.
    x_start = 0.1
    T_start = _tof_equation(x_start, 0.0, ll, M)
    x_T_min = _halley(x_start, T_start, ll, atol, rtol, maxiter)
    return [x_T_min, _tof_equation(x_T_min, 0.0, ll, M)]
def _initial_guess(T, ll, M, low_path):
"""Initial guess."""
if M == 0:
# Single revolution
T_0 = np.arccos(ll) + ll * np.sqrt(1 - ll ** 2) + M * pi # Equation 19
T_1 = 2 * (1 - ll ** 3) / 3 # Equation 21
if T >= T_0:
x_0 = (T_0 / T) ** (2 / 3) - 1
elif T < T_1:
x_0 = 5 / 2 * T_1 / T * (T_1 - T) / (1 - ll ** 5) + 1
else:
# This is the real condition, which is not exactly equivalent
# elif T_1 < T < T_0
x_0 = (T_0 / T) ** (np.log2(T_1 / T_0)) - 1
return x_0
else:
# Multiple revolution
x_0l = (((M * pi + pi) / (8 * T)) ** (2 / 3) - 1) / (
((M * pi + pi) / (8 * T)) ** (2 / 3) + 1
)
x_0r = (((8 * T) / (M * pi)) ** (2 / 3) - 1) / (
((8 * T) / (M * pi)) ** (2 / 3) + 1
)
# Filter out the solution
x_0 = np.max([x_0l, x_0r]) if low_path is True else np.min([x_0l, x_0r])
return x_0
def _halley(p0, T0, ll, atol, rtol, maxiter):
    """Minimise the TOF curve using Halley's method on its first derivative.

    Note
    ----
    Private helper with a calling convention specific to this module; it
    relies on the TOF derivative functions and is not meant for reuse.
    """
    current = p0
    for _ in range(maxiter):
        y = _compute_y(current, ll)
        first = _tof_equation_p(current, y, T0, ll)
        second = _tof_equation_p2(current, y, T0, first, ll)
        if second == 0:
            raise RuntimeError("Derivative was zero")
        third = _tof_equation_p3(current, y, T0, first, second, ll)

        # Cubic (Halley) correction towards the stationary point.
        trial = current - 2 * first * second / (2 * second ** 2 - first * third)

        if abs(trial - current) < rtol * np.abs(current) + atol:
            return trial
        current = trial

    raise RuntimeError("Failed to converge")
def _householder(p0, T0, ll, M, atol, rtol, maxiter):
    """Solve the TOF equation for x via third-order Householder iterations.

    Returns ``(x, numiter, tpi)`` where ``tpi`` is the measured wall-clock
    time per iteration.

    Note
    ----
    Private helper with a calling convention specific to this module; it
    relies on the TOF derivative functions and is not meant for reuse.
    """
    # The clock starts together with the iteration.
    start = time.perf_counter()
    x = p0
    for numiter in range(1, maxiter + 1):
        y = _compute_y(x, ll)
        residual = _tof_equation_y(x, y, T0, ll, M)
        T = residual + T0
        d1 = _tof_equation_p(x, y, T, ll)
        d2 = _tof_equation_p2(x, y, T, d1, ll)
        d3 = _tof_equation_p3(x, y, T, d1, d2, ll)

        # Quartic-convergence Householder correction.
        numerator = d1 ** 2 - residual * d2 / 2
        denominator = d1 * (d1 ** 2 - residual * d2) + d3 * residual ** 2 / 6
        x_next = x - residual * (numerator / denominator)

        if abs(x_next - x) < rtol * np.abs(x) + atol:
            # Converged: report the average wall-clock cost per iteration.
            tpi = (time.perf_counter() - start) / numiter
            return x_next, numiter, tpi
        x = x_next

    raise RuntimeError("Failed to converge")
9df6b818788dd513c66a7c66d4ffd98206ba31ae | 2,688 | py | Python | integrationtest/vm/virtualrouter/regression/delete_sg_with_2_attached_nics.py | sherry546/zstack-woodpecker | 54a37459f2d72ce6820974feaa6eb55772c3d2ce | [
"Apache-2.0"
] | 2 | 2016-03-23T08:45:44.000Z | 2017-06-26T02:40:46.000Z | integrationtest/vm/virtualrouter/regression/delete_sg_with_2_attached_nics.py | KevinDavidMitnick/zstack-woodpecker | 96257faaf3c362168d008bdb47002025ad669b24 | [
"Apache-2.0"
] | null | null | null | integrationtest/vm/virtualrouter/regression/delete_sg_with_2_attached_nics.py | KevinDavidMitnick/zstack-woodpecker | 96257faaf3c362168d008bdb47002025ad669b24 | [
"Apache-2.0"
] | 2 | 2020-03-12T03:11:28.000Z | 2021-07-26T01:57:58.000Z | '''
Test deleting SG with 2 attached NICs.
@author: Youyk
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.zstack_test.zstack_test_security_group as test_sg_header
import zstackwoodpecker.zstack_test.zstack_test_sg_vm as test_sg_vm_header
import apibinding.inventory as inventory
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
Port = test_state.Port
def test():
    '''
    Delete a security group while two NICs are still attached to it.

    Guest image requirements:
    1. have nc to check the network port
    2. have "nc" to open any port
    3. it doesn't include a default firewall
    The VR image is a good candidate for the guest image.
    '''
    test_util.test_dsc("Create 3 VMs with vlan VR L3 network and using VR image.")
    first_vm = test_stub.create_sg_vm()
    test_obj_dict.add_vm(first_vm)
    second_vm = test_stub.create_sg_vm()
    test_obj_dict.add_vm(second_vm)
    first_vm.check()
    second_vm.check()

    test_util.test_dsc("Create security groups.")
    sg1 = test_stub.create_sg()
    sg_vm = test_sg_vm_header.ZstackTestSgVm()
    test_obj_dict.set_sg_vm(sg_vm)

    l3_uuid = first_vm.vm.vmNics[0].l3NetworkUuid
    vr_vm = test_lib.lib_find_vr_by_vm(first_vm.vm)[0]
    peer_ip = test_lib.lib_get_vm_nic_by_l3(second_vm.vm, l3_uuid).ip

    # Generate all TCP ingress rules first, then register them in order, so
    # the external call sequence matches rule1, rule2, rule3.
    rules = [
        test_lib.lib_gen_sg_rule(ports, inventory.TCP, inventory.INGRESS, peer_ip)
        for ports in (Port.rule1_ports, Port.rule2_ports, Port.rule3_ports)
    ]
    for rule in rules:
        sg1.add_rule([rule])
    sg_vm.check()

    attach_targets = [
        (first_vm.vm.vmNics[0].uuid, first_vm),
        (second_vm.vm.vmNics[0].uuid, second_vm),
    ]
    test_util.test_dsc("Add nic to security group 1.")
    test_util.test_dsc("Allowed ingress ports: %s" % test_stub.rule1_ports)
    sg_vm.attach(sg1, attach_targets)
    sg_vm.check()

    # Deleting the SG with both NICs still attached is the scenario under
    # test; the environment must stay consistent afterwards.
    sg_vm.delete_sg(sg1)
    sg_vm.check()

    for vm in (first_vm, second_vm):
        vm.destroy()
        test_obj_dict.rm_vm(vm)
    test_util.test_pass('Delete Security Group with 2 attached NICs Success')
#Will be called only if exception happens in test().
| 33.6 | 97 | 0.696801 |
9df90abf6f95f0cc5563b0534b8331a0e2b2223e | 15,365 | py | Python | examples/deep_architect.py | negrinho/sane_tikz | fd6f291d9815613594d724678cb91ac9d412fbb7 | [
"MIT"
] | 274 | 2020-02-13T20:24:50.000Z | 2022-03-23T01:51:20.000Z | examples/deep_architect.py | negrinho/sane_tikz | fd6f291d9815613594d724678cb91ac9d412fbb7 | [
"MIT"
] | null | null | null | examples/deep_architect.py | negrinho/sane_tikz | fd6f291d9815613594d724678cb91ac9d412fbb7 | [
"MIT"
] | 19 | 2020-02-14T01:07:42.000Z | 2022-02-28T11:42:36.000Z | # Figure 5 in https://arxiv.org/pdf/1909.13404.pdf (towards modular and programmable architecture search)
import sane_tikz.core as stz
import sane_tikz.formatting as fmt
frame_height = 9.5
frame_width = 10.0
frame_spacing = 0.2
frame_roundness = 0.6
frame_line_width = 4.5 * fmt.standard_line_width
module_height = 1.6
module_width = 2.8
io_height = 0.40
io_long_side = 0.9
io_short_side = 1.0 * io_long_side
io_spacing = 0.12
p_height = 1.2 * io_height
p_width = 1.2
p_spacing = io_spacing / 2.0
h_width = 1 * p_width
h_height = 1.3 * p_height
h_spacing = io_spacing / 2.0
io_corner_roundness = 0.0
module_roundness = 0.0
line_width = 2.0 * fmt.standard_line_width
module_inner_vertical_spacing = 0.1
delta_increment = 0.0
horizontal_module_spacing = 0.2
vertical_module_spacing = 0.2
spacing_between_module_and_hyperp = 0.8
spacing_between_hyperp_and_hyperp = 0.4
arrow_length = vertical_module_spacing
name2color = fmt.google_slides_named_colors()
connect_s_fmt = fmt.combine_tikz_strs(
[fmt.arrow_heads("end"), fmt.line_width(line_width)])
input_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
output_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
property_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
module_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
hyperp_s_fmt = fmt.combine_tikz_strs([
fmt.line_width(line_width),
])
frame_s_fmt = fmt.combine_tikz_strs([
fmt.rounded_corners(frame_roundness),
fmt.line_width(frame_line_width),
])
unassigned_h_s_fmt = fmt.combine_tikz_strs([
fmt.anchor("left_center"),
])
assigned_h_s_fmt = fmt.combine_tikz_strs([
fmt.anchor("left_center"),
])
search_space_transition()
| 33.918322 | 105 | 0.615164 |
9dfa3001c3ff293c70ee1d697f313a0584e7ea7e | 25,801 | py | Python | pytests/epengine/basic_ops.py | pavithra-mahamani/TAF | ff854adcc6ca3e50d9dc64e7756ca690251128d3 | [
"Apache-2.0"
] | null | null | null | pytests/epengine/basic_ops.py | pavithra-mahamani/TAF | ff854adcc6ca3e50d9dc64e7756ca690251128d3 | [
"Apache-2.0"
] | null | null | null | pytests/epengine/basic_ops.py | pavithra-mahamani/TAF | ff854adcc6ca3e50d9dc64e7756ca690251128d3 | [
"Apache-2.0"
] | null | null | null | import time
import json
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import doc_generator
from couchbase_helper.durability_helper import DurabilityHelper, \
DurableExceptions
from couchbase_helper.tuq_generators import JsonGenerator
from membase.api.rest_client import RestConnection
from mc_bin_client import MemcachedClient, MemcachedError
from remote.remote_util import RemoteMachineShellConnection
from table_view import TableView
"""
Capture basic get, set operations, also the meta operations.
This is based on some 4.1.1 test which had separate
bugs with incr and delete with meta and I didn't see an obvious home for them.
This is small now but we will reactively add things
These may be parameterized by:
- full and value eviction
- DGM and non-DGM
"""
| 46.155635 | 138 | 0.609744 |
9dfb7758cdce3c78cd800cea3cdddc3f4635fbfc | 1,025 | py | Python | plot/different_optimal_classifier_scale_for_different_classes.py | ZGCTroy/guided-diffusion | af987bb2b65db2875148a5466df79736ea5ae6a1 | [
"MIT"
] | null | null | null | plot/different_optimal_classifier_scale_for_different_classes.py | ZGCTroy/guided-diffusion | af987bb2b65db2875148a5466df79736ea5ae6a1 | [
"MIT"
] | null | null | null | plot/different_optimal_classifier_scale_for_different_classes.py | ZGCTroy/guided-diffusion | af987bb2b65db2875148a5466df79736ea5ae6a1 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import yaml
import os
# Plot FID vs. classifier guidance scale for a few ImageNet classes, reading
# the per-scale result YAML files produced by the sampling runs.
workspace = "/workspace/mnt/storage/guangcongzheng/zju_zgc/guided-diffusion"
num_samples = 192
log = os.path.join(workspace, 'log/imagenet1000_classifier256x256_channel128_upperbound/predict/model500000_imagenet1000_stepsddim25_sample{}_selectedClass'.format(num_samples))

legends = []
scales = list(range(1, 21))
plt.figure()
for class_id in range(3):
    fid = []
    for scale in scales:
        result_name = 'result_scale{}.0_class{}_stepsddim25_sample{}.yaml'.format(scale, class_id, num_samples)
        result_path = os.path.join(log, result_name)
        with open(result_path, "r") as stream:
            try:
                result_dict = yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                print(exc)
        fid.append(result_dict['fid'])
        print(result_dict)
    # Plot against the actual scale values so the x axis matches its label
    # (previously this plotted against the 0-based index).
    plt.plot(scales, fid)
    legends.append('sample{}_class{}'.format(num_samples, class_id))

# Axis labels only need setting once; ylabel previously received the fid
# *list* instead of a label string, which is a bug.
plt.xlabel('classifier scale')
plt.ylabel('fid')
plt.legend(legends)
plt.show()
| 25 | 177 | 0.68878 |
9dfef6c55764a02f7c38cb42e6e52c30df77aaec | 2,882 | py | Python | main.py | swapmali/WalliScrapper | b7853f7d25da594045039847ad76eddd8d1204d8 | [
"MIT"
] | null | null | null | main.py | swapmali/WalliScrapper | b7853f7d25da594045039847ad76eddd8d1204d8 | [
"MIT"
] | null | null | null | main.py | swapmali/WalliScrapper | b7853f7d25da594045039847ad76eddd8d1204d8 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import urllib.request
from datetime import datetime
import time
from PIL import Image, ImageDraw, ImageFont
import ctypes
import os
import shutil
import socket
import sys
if __name__ == "__main__":
    # Main loop: wait for connectivity, then fetch, brand and apply today's
    # wallpaper once; the loop terminates via sys.exit() at the bottom.
    while True:
        # Retry every 30 seconds until an internet connection is available.
        # NOTE(review): is_connected() is defined outside this excerpt —
        # presumably a socket-based reachability probe; confirm.
        if not is_connected("www.google.com"):
            print("@author: Swapnil Mali \nPlease check your internet connection, will try again after 30 seconds..")
            time.sleep(30)
            continue
        # Install a shortcut to this program into the user's Startup folder
        # so it runs at login; silently skipped when the shortcut is absent.
        try:
            # get user name
            user = os.getlogin()
            path = r'C:\Users\{}\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup\main - Shortcut.lnk'.format(user)
            # print(path)
            shutil.move(r'main - Shortcut.lnk', path)
        except FileNotFoundError:
            pass
        # just credit and copyright stuff
        print('@author: Swapnil Mali \n\n(Note: New wallpaper is available everyday after 2.00 pm)')
        print("Downloading Today's Wallpaper...please wait!!")
        # Scrape today's wallpaper URL from the gallery page.
        res = requests.get('https://bing.wallpaper.pics/')
        soup = BeautifulSoup(res.text, 'lxml')
        image_box = soup.find('a', {'class': 'cursor_zoom'})
        image = image_box.find('img')
        link = image['src']
        # Download and save the image named after today's date (dd-mm-yy).
        filename = datetime.now().strftime('%d-%m-%y')
        urllib.request.urlretrieve(link, '{}.jpg'.format(filename))
        # Overlay the author credit text onto the downloaded image.
        image = Image.open('{}.jpg'.format(filename))
        font_type = ImageFont.truetype('fonts/Quicksand-Bold.otf', 44)
        draw = ImageDraw.Draw(image)
        draw.text(xy=(800, 1000), text=' Swapnil Mali', fill=(0, 0, 0), font=font_type)
        # image.show()
        image.save('{}.jpg'.format(filename))
        print("\n\n-------------------------------------------\nDone..New wallpaper saved as '{}.jpg'\n-------------------------------------------".format(filename))
        time.sleep(1)
        # Apply as the Windows desktop background via SystemParametersInfoW
        # (action 20 is SPI_SETDESKWALLPAPER in the Win32 API).
        directory = os.getcwd()
        image_path = '{}\{}.jpg'.format(directory, filename)
        print("\nSetting new Wallpaper..".format(filename))
        ctypes.windll.user32.SystemParametersInfoW(20, 0, image_path, 3)
        time.sleep(2)
        print("Done..Closing this window")
        time.sleep(2)
        sys.exit()
| 34.722892 | 165 | 0.597155 |
9dff88c39b7da4ce4056ad2977600b1620da0183 | 9,401 | py | Python | model_rnn_attention.py | zhzhx2008/keras_text_classification | e10565fb82ffbfa8b1d685be8b162c26f1429784 | [
"MIT"
] | 2 | 2019-07-11T17:01:17.000Z | 2019-07-11T17:01:19.000Z | model_rnn_attention.py | zhzhx2008/keras_text_classification | e10565fb82ffbfa8b1d685be8b162c26f1429784 | [
"MIT"
] | null | null | null | model_rnn_attention.py | zhzhx2008/keras_text_classification | e10565fb82ffbfa8b1d685be8b162c26f1429784 | [
"MIT"
] | 1 | 2019-12-24T01:03:47.000Z | 2019-12-24T01:03:47.000Z | # coding=utf-8
# @Author : zhzhx2008
# @Time : 18-10-9
import os
import warnings
import jieba
import numpy as np
from keras import Input
from keras import Model
from keras import backend as K
from keras import initializers, regularizers, constraints
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.engine.topology import Layer
from keras.layers import Dropout, Bidirectional
from keras.layers import Embedding, Dense
from keras.layers import LSTM, SpatialDropout1D
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
# Silence library warnings so the training log stays readable.
warnings.filterwarnings("ignore")
# Fix the NumPy RNG so shuffling/initialisation are reproducible.
seed = 2019
np.random.seed(seed)
# Feed-Forward Networks with Attention Can Solve Some Long-Term Memory Problems
# [https://arxiv.org/abs/1512.08756]
# https://www.kaggle.com/qqgeogor/keras-lstm-attention-glove840b-lb-0-043
# --- Load the THUCNews corpus and build the label <-> id mappings. ---
# NOTE(review): get_labels_datas, get_label_id_map and the Attention layer are
# defined elsewhere in this file (not visible in this excerpt).
input_dir = './data/THUCNews'
labels, datas_word, datas_char = get_labels_datas(input_dir)
id_label_map, label_id_map = get_label_id_map(labels)
# First split: 70% train+dev / 30% test, stratified by label.  The train+dev
# portion is re-assigned back into labels / datas_word / datas_char.
labels, labels_test, datas_word, datas_word_test, datas_char, datas_char_test = train_test_split(labels, datas_word, datas_char, test_size=0.3, shuffle=True, stratify=labels)
# Second split: carve 10% of the remaining train+dev portion off as the dev set.
labels_train, labels_dev, datas_word_train, datas_word_dev, datas_char_train, datas_char_dev = train_test_split(labels, datas_word, datas_char, test_size=0.1, shuffle=True, stratify=labels)
# Map label strings to integer ids, then one-hot encode them for training.
y_train = [label_id_map.get(x) for x in labels_train]
y_dev = [label_id_map.get(x) for x in labels_dev]
y_test = [label_id_map.get(x) for x in labels_test]
num_classes = len(set(y_train))
y_train_index = to_categorical(y_train, num_classes)
y_dev_index = to_categorical(y_dev, num_classes)
y_test_index = to_categorical(y_test, num_classes)
# keras extract feature
# The vocabulary is fitted on the training split only (no test-set leakage).
tokenizer = Tokenizer()
tokenizer.fit_on_texts(datas_word_train)
# feature5: word index for deep learning
x_train_word_index = tokenizer.texts_to_sequences(datas_word_train)
x_dev_word_index = tokenizer.texts_to_sequences(datas_word_dev)
x_test_word_index = tokenizer.texts_to_sequences(datas_word_test)
# Pad every sequence to the longest training sequence length.
max_word_length = max([len(x) for x in x_train_word_index])
x_train_word_index = pad_sequences(x_train_word_index, maxlen=max_word_length)
x_dev_word_index = pad_sequences(x_dev_word_index, maxlen=max_word_length)
x_test_word_index = pad_sequences(x_test_word_index, maxlen=max_word_length)
# --- Model: Embedding -> BiLSTM -> Attention -> Dense softmax. ---
# NOTE(review): `input` shadows the Python builtin of the same name.
input = Input(shape=(max_word_length,))
embedding = Embedding(len(tokenizer.word_index) + 1, 128)(input)
embedding = SpatialDropout1D(0.2)(embedding)
# Commented alternatives below record earlier experiments with other RNN cells
# and pooling heads (scores noted inline where measured).
# rnn = SimpleRNN(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(SimpleRNN(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = GRU(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(GRU(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = CuDNNGRU(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(CuDNNGRU(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = LSTM(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
rnn = Bidirectional(LSTM(100, return_sequences=True))(embedding)
rnn = Attention(max_word_length)(rnn) # metrics value=0.38647342980771826
# rnn = GlobalMaxPool1D()(rnn)# 0.33816425149567464
# rnn = GlobalAvgPool1D()(rnn)# 0.20772946881499268
# rnn = Flatten()(rnn) # 0.3140096618357488
# rnn = concatenate([GlobalMaxPool1D()(rnn), GlobalAvgPool1D()(rnn)])# 0.24396135280097742
# rnn = CuDNNLSTM(100, return_sequences=True)(embedding)
# rnn = Attention(max_word_length)(rnn)
# rnn = Bidirectional(CuDNNLSTM(100, return_sequences=True))(embedding)
# rnn = Attention(max_word_length)(rnn)
drop = Dropout(0.2)(rnn)
output = Dense(num_classes, activation='softmax')(drop)
model = Model(inputs=input, outputs=output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
model_weight_file = './model_rnn_attention.h5'
model_file = './model_rnn_attention.model'
# Stop after 5 epochs without val_loss improvement; keep only the best weights.
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
model_checkpoint = ModelCheckpoint(model_weight_file, save_best_only=True, save_weights_only=True)
model.fit(x_train_word_index,
          y_train_index,
          batch_size=32,
          epochs=1000,
          verbose=2,
          callbacks=[early_stopping, model_checkpoint],
          validation_data=(x_dev_word_index, y_dev_index),
          shuffle=True)
# Reload the checkpointed (best) weights before saving and evaluating.
model.load_weights(model_weight_file)
model.save(model_file)
evaluate = model.evaluate(x_test_word_index, y_test_index, batch_size=32, verbose=2)
print('loss value=' + str(evaluate[0]))
print('metrics value=' + str(evaluate[1]))
# loss value=1.562715420647273
# metrics value=0.2936507960160573 | 37.454183 | 189 | 0.688118 |
9dfff168d101cb9f78868b0ee56c24261cd170c9 | 73 | py | Python | 01-sample-instance/settings.py | diodonfrost/pulumi-aws-examples | 2fa07f3219dc01d00051559eb207c547d3554232 | [
"Apache-2.0"
] | null | null | null | 01-sample-instance/settings.py | diodonfrost/pulumi-aws-examples | 2fa07f3219dc01d00051559eb207c547d3554232 | [
"Apache-2.0"
] | null | null | null | 01-sample-instance/settings.py | diodonfrost/pulumi-aws-examples | 2fa07f3219dc01d00051559eb207c547d3554232 | [
"Apache-2.0"
] | null | null | null | # coding: utf8
# Address block for the VPC (a /16: 65,536 addresses).
vpc_cidr = "192.168.0.0/16"
# Address block for a subnet inside the VPC (a /24: 256 addresses);
# the name suggests it carries HTTP traffic -- confirm against the Pulumi code.
http_cidr = "192.168.1.0/24"
| 14.6 | 28 | 0.643836 |
3b00d75cd611416080f44811a3c1f126a3ad61da | 6,731 | py | Python | fastfold/model/fastnn/ops.py | hpcaitech/FastFold | a65d5009279ef84c1518081344db5c02213c387a | [
"Apache-2.0"
] | 303 | 2022-03-03T01:59:47.000Z | 2022-03-31T07:46:42.000Z | fastfold/model/fastnn/ops.py | hpcaitech/FastFold | a65d5009279ef84c1518081344db5c02213c387a | [
"Apache-2.0"
] | 6 | 2022-03-03T22:17:03.000Z | 2022-03-17T06:09:11.000Z | fastfold/model/fastnn/ops.py | hpcaitech/FastFold | a65d5009279ef84c1518081344db5c02213c387a | [
"Apache-2.0"
] | 35 | 2022-03-03T01:58:56.000Z | 2022-03-29T21:21:06.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from fastfold.model.fastnn.kernel import scale_mask_softmax, scale_mask_bias_softmax
from fastfold.model.fastnn.kernel import LayerNorm
from .initializer import glorot_uniform_af
from fastfold.model.fastnn.kernel import bias_sigmod_ele
from fastfold.distributed import gather, scatter
from fastfold.distributed.comm_async import gather_async, gather_async_opp
| 34.875648 | 106 | 0.592334 |
3b031f123e10590a23278a8471646c084f1f967a | 1,093 | py | Python | src/input/__init__.py | huyingjun/PyAgent | ff7096634aa8deb617d2fe9d47fd2c6fbf8ff9a4 | [
"MIT"
] | 1 | 2021-12-23T11:56:19.000Z | 2021-12-23T11:56:19.000Z | src/input/__init__.py | huyingjun/PyAgent | ff7096634aa8deb617d2fe9d47fd2c6fbf8ff9a4 | [
"MIT"
] | null | null | null | src/input/__init__.py | huyingjun/PyAgent | ff7096634aa8deb617d2fe9d47fd2c6fbf8ff9a4 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
__init__.py
~~~~~~~~
input
:author: Fufu, 2021/6/7
"""
from abc import abstractmethod
from asyncio import create_task, sleep
from typing import Any
from loguru import logger
from ..libs.plugin import BasePlugin
| 22.770833 | 76 | 0.610247 |
3b03f07ac42f24043a890f0020944e25aecce786 | 1,933 | py | Python | repositorybots/bots/Librarian.py | conda/conda-bots | a68cff7b0318093328e355e18871518c050f5493 | [
"BSD-3-Clause"
] | 2 | 2021-09-27T02:29:26.000Z | 2021-10-20T19:10:39.000Z | repositorybots/bots/Librarian.py | conda/conda-bots | a68cff7b0318093328e355e18871518c050f5493 | [
"BSD-3-Clause"
] | 14 | 2021-09-09T21:16:05.000Z | 2022-03-28T09:31:09.000Z | repositorybots/bots/Librarian.py | conda/conda-bots | a68cff7b0318093328e355e18871518c050f5493 | [
"BSD-3-Clause"
] | 2 | 2021-09-09T12:11:48.000Z | 2022-01-28T20:25:26.000Z | import yaml
import re
from .SummonableBot import SummonableBot
| 42.021739 | 114 | 0.621314 |
3b04c0970a5c74618f4b5ea5a958ded0e0f252eb | 8,201 | py | Python | word_game_helper.py | avendesora/wordle-helper | 651c1eddca14f56be798e0fe242c1f2cf98ae7ba | [
"MIT"
] | null | null | null | word_game_helper.py | avendesora/wordle-helper | 651c1eddca14f56be798e0fe242c1f2cf98ae7ba | [
"MIT"
] | null | null | null | word_game_helper.py | avendesora/wordle-helper | 651c1eddca14f56be798e0fe242c1f2cf98ae7ba | [
"MIT"
] | null | null | null | import pprint
import statistics
from contextlib import suppress
from dataclasses import dataclass
from enum import Enum
from typing import Optional
class WordGameHelper:
    # NOTE(review): purposes below are inferred from the attribute names; the
    # methods that maintain these sets live outside this excerpt -- confirm there.
    # Characters proven absent from the answer.
    _eliminated_characters: set[str]
    # Characters known to appear, keyed by the character; ValidCharacter is
    # declared elsewhere in this file (presumably position constraints).
    _included_characters: dict[str, ValidCharacter]
    # Snapshot of the common-word list before any filtering.
    _original_possible_common_words: set[str]
    # Candidate words still consistent with the guesses so far.
    possible_words: set[str]
    # Subset of possible_words drawn from the common-word list.
    possible_common_words: set[str]
| 34.603376 | 90 | 0.573954 |
d178683893b91fd9a85c22e3c3785427e4b51812 | 2,249 | py | Python | 876.middle-of-the-linked-list.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 876.middle-of-the-linked-list.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | 876.middle-of-the-linked-list.py | windard/leeeeee | 0107a5f95746592ca4fe78d2b5875cf65b1910e7 | [
"MIT"
] | null | null | null | # coding=utf-8
#
# @lc app=leetcode id=876 lang=python
#
# [876] Middle of the Linked List
#
# https://leetcode.com/problems/middle-of-the-linked-list/description/
#
# algorithms
# Easy (64.97%)
# Likes: 593
# Dislikes: 42
# Total Accepted: 76.4K
# Total Submissions: 117.5K
# Testcase Example: '[1,2,3,4,5]'
#
# Given a non-empty, singly linked list with head node head, return a middle
# node of the linked list.
#
# If there are two middle nodes, return the second middle node.
#
#
#
#
# Example 1:
#
#
# Input: [1,2,3,4,5]
# Output: Node 3 from this list (Serialization: [3,4,5])
# The returned node has value 3. (The judge's serialization of this node is
# [3,4,5]).
# Note that we returned a ListNode object ans, such that:
# ans.val = 3, ans.next.val = 4, ans.next.next.val = 5, and ans.next.next.next
# = NULL.
#
#
#
# Example 2:
#
#
# Input: [1,2,3,4,5,6]
# Output: Node 4 from this list (Serialization: [4,5,6])
# Since the list has two middle nodes with values 3 and 4, we return the second
# one.
#
#
#
#
# Note:
#
#
# The number of nodes in the given list will be between 1 and 100.
#
#
#
#
#
# Definition for singly-linked list.
# if __name__ == '__main__':
# s = Solution()
# print s.middleNode(None)
# head = ListNode(1)
# print s.middleNode(head)
# head.next = ListNode(2)
# print s.middleNode(head)
# head.next.next = ListNode(3)
# print s.middleNode(head)
# head.next.next.next = ListNode(4)
# print s.middleNode(head)
# head.next.next.next.next = ListNode(5)
# print s.middleNode(head)
| 21.834951 | 79 | 0.595376 |
d17e6c29b97301453dbf67266605a0471b95c7b0 | 3,442 | py | Python | ava_asd/vis.py | tuanchien/asd | 190c1c6d155b16a27717596d6350598e5cd4ffac | [
"Apache-2.0",
"MIT"
] | 18 | 2020-06-19T01:18:13.000Z | 2022-03-21T10:42:13.000Z | ava_asd/vis.py | tuanchien/asd | 190c1c6d155b16a27717596d6350598e5cd4ffac | [
"Apache-2.0",
"MIT"
] | 8 | 2020-12-17T06:09:59.000Z | 2021-07-10T02:07:41.000Z | ava_asd/vis.py | tuanchien/asd | 190c1c6d155b16a27717596d6350598e5cd4ffac | [
"Apache-2.0",
"MIT"
] | 4 | 2020-06-20T01:05:01.000Z | 2021-08-05T13:45:48.000Z | # New BSD License
#
# Copyright (c) 2007-2019 The scikit-learn developers.
# All rights reserved.
#
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Scikit-learn Developers nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues, dpi=70):
    """Render the confusion matrix of *y_true* vs *y_pred* as a heatmap.

    Set ``normalize=True`` to show per-row rates instead of raw counts.
    Returns the matplotlib ``Axes`` that holds the plot.
    """
    np.set_printoptions(precision=2)

    # Fall back to a default title that reflects the normalization mode.
    if not title:
        title = ('Normalized confusion matrix' if normalize
                 else 'Confusion matrix, without normalization')

    matrix = confusion_matrix(y_true, y_pred)
    if normalize:
        # Divide each row by its total so cells become rates in [0, 1].
        matrix = matrix.astype('float') / matrix.sum(axis=1)[:, np.newaxis]

    figure, axes = plt.subplots()
    figure.set_dpi(dpi)
    heatmap = axes.imshow(matrix, interpolation='nearest', cmap=cmap)
    axes.figure.colorbar(heatmap, ax=axes)

    # One tick per class on each axis, labelled with the class names.
    axes.set(xticks=np.arange(matrix.shape[1]),
             yticks=np.arange(matrix.shape[0]),
             xticklabels=classes, yticklabels=classes,
             title=title,
             ylabel='True label',
             xlabel='Predicted label')

    # Slant the x labels so long class names stay readable.
    plt.setp(axes.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    # Write each cell's value on the heatmap, switching the text colour to
    # white on dark (above-midpoint) cells.
    cell_format = '.2f' if normalize else 'd'
    midpoint = matrix.max() / 2.
    for row, col in np.ndindex(*matrix.shape):
        axes.text(col, row, format(matrix[row, col], cell_format),
                  ha="center", va="center",
                  color="white" if matrix[row, col] > midpoint else "black")

    figure.tight_layout()
    return axes
| 40.494118 | 107 | 0.691458 |
d1802643daf10062a7ef847447ff5fef65abb757 | 1,757 | py | Python | tests/state_tests.py | Alasdair-Macindoe/TuringMachineEmulator | 4c2639876bd94209b170232b2f33ea1409a61a45 | [
"MIT"
] | null | null | null | tests/state_tests.py | Alasdair-Macindoe/TuringMachineEmulator | 4c2639876bd94209b170232b2f33ea1409a61a45 | [
"MIT"
] | null | null | null | tests/state_tests.py | Alasdair-Macindoe/TuringMachineEmulator | 4c2639876bd94209b170232b2f33ea1409a61a45 | [
"MIT"
] | null | null | null | import pytest
import sys
sys.path.append('.')
from turingmachine import Transition, Direction, State
| 31.375 | 94 | 0.650541 |
d18168c2e3ac9ceaadcf572633e69461bbc92841 | 294 | py | Python | default/modules/cwd.py | AshlynnInWonderland/zsh-powerline | e6f3326b3e15d8a89a0ea959314ea0ea5768ea86 | [
"MIT"
] | null | null | null | default/modules/cwd.py | AshlynnInWonderland/zsh-powerline | e6f3326b3e15d8a89a0ea959314ea0ea5768ea86 | [
"MIT"
] | null | null | null | default/modules/cwd.py | AshlynnInWonderland/zsh-powerline | e6f3326b3e15d8a89a0ea959314ea0ea5768ea86 | [
"MIT"
] | null | null | null | import os
| 22.615385 | 53 | 0.547619 |
d1819b0206e55ddd28666cb193356c3dd4977602 | 1,850 | py | Python | openwisp_users/tests/test_adapter.py | ShreeshaRelysys/openwisp-users | af5f95e89656cbf3dd32f2392ba6d0f1b2c4df96 | [
"BSD-3-Clause"
] | null | null | null | openwisp_users/tests/test_adapter.py | ShreeshaRelysys/openwisp-users | af5f95e89656cbf3dd32f2392ba6d0f1b2c4df96 | [
"BSD-3-Clause"
] | 1 | 2022-01-24T16:44:03.000Z | 2022-01-24T16:44:03.000Z | openwisp_users/tests/test_adapter.py | ShreeshaRelysys/openwisp-users | af5f95e89656cbf3dd32f2392ba6d0f1b2c4df96 | [
"BSD-3-Clause"
] | null | null | null | from unittest import mock
from django.contrib.auth import get_user_model
from django.core import mail
from django.template import TemplateDoesNotExist
from django.test import TestCase
from django.urls import reverse
from ..accounts.adapter import EmailAdapter
from .utils import TestOrganizationMixin
User = get_user_model()
| 40.217391 | 87 | 0.718919 |
d18388ef51dc04c9d268500afd15bf62ce17867a | 382 | py | Python | setup.py | ashtonwebster/vuln_toolkit | 74792341104b2806e224bde4c2e704003d685a54 | [
"MIT"
] | 3 | 2021-05-13T14:20:12.000Z | 2021-06-20T18:58:22.000Z | setup.py | ashtonwebster/vuln_toolkit | 74792341104b2806e224bde4c2e704003d685a54 | [
"MIT"
] | null | null | null | setup.py | ashtonwebster/vuln_toolkit | 74792341104b2806e224bde4c2e704003d685a54 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='vuln_toolkit',
version='0.1',
description='Transfer Learning Toolkit',
url='https://para.cs.umd.edu/purtilo/vulnerability-detection-tool-set/tree/master',
author='Ashton Webster',
author_email='ashton.webster@gmail.com',
license='MIT',
packages=['vuln_toolkit', 'vuln_toolkit.common'],
zip_safe=False
)
| 27.285714 | 87 | 0.701571 |
d185040fe764c47e88800452a2deca88a3ec3079 | 13,255 | py | Python | edna2/tasks/Is4aTasks.py | gsantoni/edna2 | 0aad63a3ea8091ce62118f0b2c8ac78a2286da9e | [
"CC0-1.0",
"MIT"
] | null | null | null | edna2/tasks/Is4aTasks.py | gsantoni/edna2 | 0aad63a3ea8091ce62118f0b2c8ac78a2286da9e | [
"CC0-1.0",
"MIT"
] | 2 | 2020-04-06T10:39:50.000Z | 2021-04-14T19:24:37.000Z | edna2/tasks/Is4aTasks.py | gsantoni/edna2 | 0aad63a3ea8091ce62118f0b2c8ac78a2286da9e | [
"CC0-1.0",
"MIT"
] | 5 | 2019-06-14T07:28:38.000Z | 2021-04-28T13:10:39.000Z | #
# Copyright (c) European Synchrotron Radiation Facility (ESRF)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__authors__ = ["O. Svensson"]
__license__ = "MIT"
__date__ = "10/05/2019"
import json
import shutil
import pprint
from edna2.tasks.AbstractTask import AbstractTask
from edna2.tasks.ISPyBTasks import GetListAutoprocessingResults
| 40.045317 | 106 | 0.485704 |
d187fe47d5524b63a0f74b45076d6dc9c23a3d02 | 1,556 | py | Python | predict.py | SuperbTUM/RAW-image-denoising | 9f81be8da6a576f641022707d98b8c37f5c599ab | [
"MIT"
] | 4 | 2021-10-18T04:13:52.000Z | 2022-03-10T14:10:46.000Z | predict.py | SuperbTUM/computational-photography | 9f81be8da6a576f641022707d98b8c37f5c599ab | [
"MIT"
] | 2 | 2021-12-10T02:59:30.000Z | 2022-03-10T03:32:09.000Z | predict.py | SuperbTUM/computational-photography | 9f81be8da6a576f641022707d98b8c37f5c599ab | [
"MIT"
] | 1 | 2021-12-10T02:57:34.000Z | 2021-12-10T02:57:34.000Z | import numpy as np
from tqdm import *
from utils import DataLoaderX
from dataset import collate
from math import *
if __name__ == '__main__':
    # Quick shape sanity check: dropping the last row and column of every
    # 3x3 plane in a (4, 3, 3) array leaves shape (4, 2, 2).
    demo = np.zeros((4, 3, 3))
    print(demo[:, :-1, :-1].shape)
| 32.416667 | 93 | 0.561697 |
d188ece94a0b8fbdf8e8a6257addd7cf8fc804b8 | 3,318 | py | Python | token_shift_gpt/autoregressive_wrapper.py | fwcwmc/token-shift-gpt | 58c946a8a59976681a90424be5db85ed9a034a59 | [
"MIT"
] | null | null | null | token_shift_gpt/autoregressive_wrapper.py | fwcwmc/token-shift-gpt | 58c946a8a59976681a90424be5db85ed9a034a59 | [
"MIT"
] | null | null | null | token_shift_gpt/autoregressive_wrapper.py | fwcwmc/token-shift-gpt | 58c946a8a59976681a90424be5db85ed9a034a59 | [
"MIT"
] | null | null | null | import torch
from torch import nn
from tqdm import tqdm
from entmax import entmax_bisect
import torch.nn.functional as F
# helper function
# top k filtering
# topk
# top_a
ENTMAX_ALPHA = 1.3
entmax = entmax_bisect
| 30.440367 | 171 | 0.622664 |
d18af7b94f4f32850b9d3a02c85ca36d568510f0 | 1,686 | py | Python | Contest/LeetCode/BiweeklyContest28/4.py | WatsonWangZh/CodingPractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 11 | 2019-09-01T22:36:00.000Z | 2021-11-08T08:57:20.000Z | Contest/LeetCode/BiweeklyContest28/4.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | null | null | null | Contest/LeetCode/BiweeklyContest28/4.py | WatsonWangZh/LeetCodePractice | dc057dd6ea2fc2034e14fd73e07e73e6364be2ae | [
"MIT"
] | 2 | 2020-05-27T14:58:52.000Z | 2020-05-27T15:04:17.000Z | # 1478. Allocate Mailboxes
# User Accepted:342
# User Tried:594
# Total Accepted:364
# Total Submissions:1061
# Difficulty:Hard
# Given the array houses and an integer k. where houses[i] is the location of the ith house along a street, your task is to allocate k mailboxes in the street.
# Return the minimum total distance between each house and its nearest mailbox.
# The answer is guaranteed to fit in a 32-bit signed integer.
# Example 1:
# Input: houses = [1,4,8,10,20], k = 3
# Output: 5
# Explanation: Allocate mailboxes in position 3, 9 and 20.
# Minimum total distance from each houses to nearest mailboxes is |3-1| + |4-3| + |9-8| + |10-9| + |20-20| = 5
# Example 2:
# Input: houses = [2,3,5,12,18], k = 2
# Output: 9
# Explanation: Allocate mailboxes in position 3 and 14.
# Minimum total distance from each houses to nearest mailboxes is |2-3| + |3-3| + |5-3| + |12-14| + |18-14| = 9.
# Example 3:
# Input: houses = [7,4,6,1], k = 1
# Output: 8
# Example 4:
# Input: houses = [3,6,14,10], k = 4
# Output: 0
# Constraints:
# n == houses.length
# 1 <= n <= 100
# 1 <= houses[i] <= 10^4
# 1 <= k <= n
# Array houses contain unique integers.
| 28.576271 | 159 | 0.580664 |
d18b16204dc6401e0872226dc90166eab90caa4c | 990 | py | Python | Full/Python/01_Python_Basico/04_operadoresLogicos.py | ramonabreu-cdev/Revisao-JS | 9a44ea0e7d3c22945b2ee6364d5c292d8be6a762 | [
"RSA-MD"
] | 1 | 2021-06-29T14:51:41.000Z | 2021-06-29T14:51:41.000Z | Full/Python/01_Python_Basico/04_operadoresLogicos.py | ramonabreu-cdev/Revisao-WD | 9a44ea0e7d3c22945b2ee6364d5c292d8be6a762 | [
"RSA-MD"
] | null | null | null | Full/Python/01_Python_Basico/04_operadoresLogicos.py | ramonabreu-cdev/Revisao-WD | 9a44ea0e7d3c22945b2ee6364d5c292d8be6a762 | [
"RSA-MD"
] | null | null | null | """
Operadores lgicos
Para agrupar operaes com lgica booleana, utilizaremos operadores lgicos.
Python suporta trs operadores bsicos: not (no), and (e), or (ou). Esses operadores
podem ser traduzidos como no ( negao), e ( conjuno) e ou (V disjuno).
"""
# Operador not
"""
>>> not True
False
>>> not False
True
"""
# Operador and
"""
O operador and (e) tem sua tabela verdade representada na tabela 3.5. O operador
and (e) resulta verdadeiro apenas quando seus dois operadores forem verdadeiros.
>>> True and True
True
>>> True and False
False
>>> False and True
False
>>> False and False
False
"""
# Operador or
"""
O operador or (ou) é tal que ele resulta em falso apenas se seus dois operadores também forem falsos. Se apenas um de seus operadores for verdadeiro, ou
se os dois forem, o resultado da operação será verdadeiro.
>>> True or True
True
>>> True or False
True
>>> False or True
True
>>> False or False
False
"""
"""
isso é um comentário
"""
"""
comen
"""
| 18 | 149 | 0.710101 |
d18b904208aa3a3c863151a2c83c482991eec4aa | 944 | py | Python | nfv/nfv-vim/nfv_vim/alarm/__init__.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2020-02-07T19:01:36.000Z | 2022-02-23T01:41:46.000Z | nfv/nfv-vim/nfv_vim/alarm/__init__.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 1 | 2021-01-14T12:02:25.000Z | 2021-01-14T12:02:25.000Z | nfv/nfv-vim/nfv_vim/alarm/__init__.py | SidneyAn/nfv | 5f0262a5b6ea4be59f977b9c587c483cbe0e373d | [
"Apache-2.0"
] | 2 | 2021-01-13T08:39:21.000Z | 2022-02-09T00:21:55.000Z | #
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from nfv_common.alarm import * # noqa: F401,F403
from nfv_vim.alarm._general import clear_general_alarm # noqa: F401
from nfv_vim.alarm._general import raise_general_alarm # noqa: F401
from nfv_vim.alarm._host import host_clear_alarm # noqa: F401
from nfv_vim.alarm._host import host_raise_alarm # noqa: F401
from nfv_vim.alarm._instance import instance_clear_alarm # noqa: F401
from nfv_vim.alarm._instance import instance_manage_alarms # noqa: F401
from nfv_vim.alarm._instance import instance_raise_alarm # noqa: F401
from nfv_vim.alarm._instance_group import clear_instance_group_alarm # noqa: F401
from nfv_vim.alarm._instance_group import raise_instance_group_policy_alarm # noqa: F401
from nfv_vim.alarm._sw_update import clear_sw_update_alarm # noqa: F401
from nfv_vim.alarm._sw_update import raise_sw_update_alarm # noqa: F401
| 49.684211 | 89 | 0.814619 |
d18d78e18bf3386dbb4d67d5c7497d3685c7a3b5 | 101 | py | Python | src/libsrv/lib/lib__PROJECT_/__init__.py | tecnickcom/pygen | 9c969137b472c100fe8815ec5a52beabd08e179b | [
"MIT"
] | 7 | 2018-04-23T08:03:12.000Z | 2021-01-28T20:44:19.000Z | src/lib/_PROJECT_/__init__.py | tecnickcom/pygen | 9c969137b472c100fe8815ec5a52beabd08e179b | [
"MIT"
] | 1 | 2018-07-09T09:44:54.000Z | 2018-07-09T09:44:54.000Z | src/libapp/lib/lib__PROJECT_/__init__.py | tecnickcom/pygen | 9c969137b472c100fe8815ec5a52beabd08e179b | [
"MIT"
] | 1 | 2018-07-09T09:33:43.000Z | 2018-07-09T09:33:43.000Z | """~#SHORTDESCRIPTION#~"""
__version__ = "1.0.0"
__release__ = "1"
__program_name__ = "~#PROJECT#~"
| 16.833333 | 32 | 0.643564 |
d18def4fbac29199069d5db3991e15a8d8b23343 | 2,225 | py | Python | rl_multi_agent/experiments/furnmove_grid_marginal_nocl_rot_config.py | allenai/cordial-sync | 4005fdc4816c86f6489e5f4b9252fa66b79602be | [
"MIT"
] | 28 | 2020-07-07T16:21:10.000Z | 2021-11-15T11:15:20.000Z | rl_multi_agent/experiments/furnmove_grid_marginal_nocl_rot_config.py | allenai/cordial-sync | 4005fdc4816c86f6489e5f4b9252fa66b79602be | [
"MIT"
] | 5 | 2020-09-29T07:54:43.000Z | 2022-01-04T22:33:02.000Z | rl_multi_agent/experiments/furnmove_grid_marginal_nocl_rot_config.py | allenai/cordial-sync | 4005fdc4816c86f6489e5f4b9252fa66b79602be | [
"MIT"
] | 2 | 2022-02-01T19:50:27.000Z | 2022-03-21T12:23:16.000Z | from typing import Optional
from torch import nn
from rl_multi_agent.experiments.furnmove_grid_marginal_nocl_base_config import (
FurnMoveExperimentConfig,
)
from rl_multi_agent.models import A3CLSTMNStepComCoordinatedActionsEgoGridsEmbedCNN
| 38.362069 | 86 | 0.621573 |
d18eb208e3b4bdd661680b8efa65073500d31974 | 728 | py | Python | relations/controllers.py | poad42/arxiv-external-links | a60941d93f20d85e8e524240631f6f3234c03e02 | [
"MIT"
] | null | null | null | relations/controllers.py | poad42/arxiv-external-links | a60941d93f20d85e8e524240631f6f3234c03e02 | [
"MIT"
] | null | null | null | relations/controllers.py | poad42/arxiv-external-links | a60941d93f20d85e8e524240631f6f3234c03e02 | [
"MIT"
] | null | null | null | """
Request controllers for the external links service.
These may be used handle requests originating from the :mod:`.routes.api`
and/or the :mod:`.routes.ui`.
If the behavior of these controllers diverges along the UI/API lines, then we
can split this into ``controllers/api.py`` and ``controllers/ui.py``.
"""
from typing import Tuple, Any, Dict
from http import HTTPStatus
from werkzeug.datastructures import MultiDict
Response = Tuple[Dict[str, Any], HTTPStatus, Dict[str, str]]
def service_status(params: MultiDict) -> Response:
    """
    Handle requests for the service status endpoint.

    Returns ``200 OK`` if the service is up and ready to handle requests.
    """
    payload = {'iam': 'ok'}
    headers: Dict[str, str] = {}
    return payload, HTTPStatus.OK, headers
d191008bf7777b6c542e5bf7e0d000e40eac38e6 | 3,101 | py | Python | apps/roster/views.py | dulrich15/spot | 5fa57dbb9c0c9a010b4dc153f832b2d130bc8f73 | [
"MIT"
] | null | null | null | apps/roster/views.py | dulrich15/spot | 5fa57dbb9c0c9a010b4dc153f832b2d130bc8f73 | [
"MIT"
] | null | null | null | apps/roster/views.py | dulrich15/spot | 5fa57dbb9c0c9a010b4dc153f832b2d130bc8f73 | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import unicode_literals
import re
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import RequestContext
from django.template import loader
from models import *
from apps.core.views import get_bg_color
| 28.449541 | 84 | 0.653015 |
d191a9aab35b5cf9693c2d15e10ff5f31d5411f3 | 6,635 | py | Python | mac-changer.py | Hiiirad/Mac-Changer | df23de01dde3f55b45a8f0bacb065cf2170feb06 | [
"MIT"
] | 1 | 2020-08-06T13:39:50.000Z | 2020-08-06T13:39:50.000Z | mac-changer.py | Hiiirad/Mac-Changer | df23de01dde3f55b45a8f0bacb065cf2170feb06 | [
"MIT"
] | null | null | null | mac-changer.py | Hiiirad/Mac-Changer | df23de01dde3f55b45a8f0bacb065cf2170feb06 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from subprocess import call
from re import search
from random import sample, choice
from csv import reader
from os import popen
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
'''
The strings, input and output of this program is in lowercase. => case-insensitive
List of standard OUI:
http://standards-oui.ieee.org/oui/oui.txt
http://standards-oui.ieee.org/oui/oui.csv
'''
# Validating mac address
# Validating Interface
# Hex digits used to assemble randomly generated MAC address octets.
hex_characters = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f"]
# Checking if user wants to choose new mac address randomly or not
random_or_not = prompt("Do you want your mac address to change randomly? [(Y)es or (N)o]\nOr\nDo you want to choose first part of your mac address based on other manufacturers mac address? [(O)UI]\nOr\nDo you want your mac address back to original one? [(R)everse]\nYour answer: ").lower()
# Ask which NIC to operate on; interface_validation is defined elsewhere in
# this file -- presumably it rejects names that are not wlanN/ethN. TODO confirm.
interface = prompt("Please insert name of the interface you want to change its mac: [wlan* or eth*] ").lower()
interface_validation(interface)
if random_or_not == "y" or random_or_not == "yes":
# random mac
random_mac = []
for i in range(6):
random_mac.append("".join(sample(hex_characters, 2)))
random_mac = ":".join(random_mac)
print("Your new mac address will be {0}".format(random_mac))
elif random_or_not == "n" or random_or_not == "no":
# user's new mac
mac = prompt("Please insert your new mac: ").lower()
mac_validation(mac)
elif random_or_not == "r" or random_or_not == "reverse":
# back to normal
if search(string=interface, pattern=r"^eth\d{1}$"):
with open(file="/tmp/eth-old-mac.txt", mode="r", encoding="utf-8") as old_mac:
mac = old_mac.readline()
elif search(string=interface, pattern=r"^wlan\d{1}$"):
with open(file="/tmp/wlan-old-mac.txt", mode="r", encoding="utf-8") as old_mac:
mac = old_mac.readline()
elif random_or_not == "o" or random_or_not == "oui":
oui = {}
# Creating Template of our dictionary (OUI)
with open(file="oui.csv", mode="r", encoding="utf-8") as csvfile:
csvreader = reader(csvfile)
next(csvreader) # ignore first row of csv which is header
for row in csvreader:
oui[str(row[2]).replace(" ", " ")] = []
# Fill values of dictionary (OUI)
with open(file="oui.csv", mode="r", encoding="utf-8") as csvfile:
csvreader = reader(csvfile)
next(csvreader) # ignore first row of csv which is header
for row in csvreader:
value = oui[str(row[2]).replace(" ", " ")]
if len(str(row[1])) > 6:
continue
else:
value.append(str(row[1]))
oui[str(row[2])] = value
# Deleting keys with empty values []
# 273 keys were deleted from list.
for key, value in list(oui.items()):
if value == []:
del oui[key]
random_organization = prompt("Do you want to choose your mac address from specific manufacturer? [(Y)es or (N)o] ").lower()
if random_organization == "y" or random_organization == "yes":
organizations = WordCompleter(list(oui.keys()), ignore_case=True)
organization = prompt("Please select an organization name: ", completer=organizations)
print("You will be using mac address of '{0}' organization.".format(organization))
random_oui = choice(oui.get("{0}".format(organization)))
character_need = 12 - len(random_oui)
mac_without_colon = random_oui + str("".join(sample(hex_characters, character_need)))
mac = mac_without_colon[0:2] + ":" + mac_without_colon[2:4] + ":" + mac_without_colon[4:6] + ":" + mac_without_colon[6:8] + ":" + mac_without_colon[8:10] + ":" + mac_without_colon[10:12]
mac = mac.lower()
print("Your new mac address will be {0}".format(mac))
elif random_organization == "n" or random_organization == "no":
organization = choice(list(oui.keys()))
print("You will be using mac address of '{0}' organization.".format(organization))
random_oui = choice(oui.get("{0}".format(organization)))
character_need = 12 - len(random_oui)
mac_without_colon = random_oui + str("".join(sample(hex_characters, character_need)))
mac = mac_without_colon[0:2] + ":" + mac_without_colon[2:4] + ":" + mac_without_colon[4:6] + ":" + mac_without_colon[6:8] + ":" + mac_without_colon[8:10] + ":" + mac_without_colon[10:12]
mac = mac.lower()
print("Your new mac address will be {0}".format(mac))
else:
print("Please choose your answer correctly!")
quit()
else:
print("Please check your answer!")
quit()
# Saving old mac addresses | delete text files in reverse mode
if random_or_not == "r" or random_or_not == "reverse":
delete = prompt("Do you want to delete files related to your old mac address? [(Y)es or (N)o] ").lower()
if delete == "y" or delete =="yes":
call("rm /tmp/eth-old-mac.txt /tmp/wlan-old-mac.txt", shell=True)
elif delete == "n" or delete =="no":
pass
else:
print("Please check your answer! What do you want to do with old mac address text files?!")
quit()
else:
call("ip addr | grep -E 'ether' | cut --delimiter=' ' -f 6 | sed -n '1p' > /tmp/eth-old-mac.txt", shell=True)
call("ip addr | grep -E 'ether' | cut --delimiter=' ' -f 6 | sed -n '2p' > /tmp/wlan-old-mac.txt", shell=True)
# Checking kernel version to call different commands
kernel_version = popen("uname -r").read()
if float(".".join(kernel_version.split(".")[:2])) < 4.15:
# Start changing mac address for kernel versions lower than 4.15
call("ifconfig {0} down".format(interface), shell=True)
call("ifconfig {0} hw ether {1}".format(interface, mac), shell=True)
call("ifconfig {0} up".format(interface), shell=True)
else:
# Start changing mac address for kernel versions higher than 4.15
call("ip link set {0} down".format(interface), shell=True)
call("ip link set {0} address {1}".format(interface, mac), shell=True)
call("ip link set {0} up".format(interface), shell=True)
print("Done :)")
| 46.398601 | 289 | 0.64009 |
d192362b3381c7ad2fe4d69f9810feb1a55a0fba | 4,775 | py | Python | core/tests/test_StenciledFile.py | mxmlnkn/ratarmount | f63a5f6af64ccb43c1075884626ea61ba0202f83 | [
"MIT"
] | 208 | 2019-02-21T08:10:54.000Z | 2022-03-20T12:07:31.000Z | core/tests/test_StenciledFile.py | mxmlnkn/ratarmount | f63a5f6af64ccb43c1075884626ea61ba0202f83 | [
"MIT"
] | 72 | 2019-04-18T08:54:26.000Z | 2022-03-27T11:41:58.000Z | core/tests/test_StenciledFile.py | mxmlnkn/ratarmount | f63a5f6af64ccb43c1075884626ea61ba0202f83 | [
"MIT"
] | 19 | 2019-03-15T18:23:10.000Z | 2021-09-10T14:17:16.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=wrong-import-position
# pylint: disable=protected-access
import concurrent.futures
import io
import os
import sys
import tempfile
import threading
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from ratarmountcore import StenciledFile # noqa: E402
testData = b"1234567890"
tmpFile = tempfile.TemporaryFile()
tmpFile.write(testData)
randomTestData = os.urandom(128 * 1024)
randomTmpFile = tempfile.TemporaryFile()
randomTmpFile.write(randomTestData)
| 40.811966 | 103 | 0.595602 |
d194460e4175a7a303de85ee742fccf7806780cb | 3,029 | py | Python | scripts/check_status.py | frangiz/walter-server | 0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639 | [
"MIT"
] | null | null | null | scripts/check_status.py | frangiz/walter-server | 0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639 | [
"MIT"
] | 21 | 2019-09-16T08:08:17.000Z | 2020-05-27T06:49:34.000Z | scripts/check_status.py | frangiz/walter-server | 0c9ab88a9cc6cf446ba86b1b06bcf9f8c64cf639 | [
"MIT"
] | 1 | 2019-10-16T11:23:38.000Z | 2019-10-16T11:23:38.000Z | import datetime
import json
import os
import requests
import smtplib
import ssl
if __name__ == "__main__":
with open(os.path.join("scripts", "check_status_config.json"), "r") as f:
data = f.read()
config = json.loads(data)
check_status(config)
| 30.908163 | 85 | 0.665896 |
d19579492873b16f25e4b138c45496b98a9c1bd3 | 5,340 | py | Python | mngSettings.py | guidanoli/fibonaccibot | fead3a151835648f7140945b94afdd0f32aa55ce | [
"MIT"
] | null | null | null | mngSettings.py | guidanoli/fibonaccibot | fead3a151835648f7140945b94afdd0f32aa55ce | [
"MIT"
] | null | null | null | mngSettings.py | guidanoli/fibonaccibot | fead3a151835648f7140945b94afdd0f32aa55ce | [
"MIT"
] | null | null | null | # Settings Manager
# guidanoli
DEFAULT_STGS = {
"commas": "true",
"comments": "true",
"tknlistpath": "tknlist.tk",
"tokenpath": "token.tk"
}
SETTINGS_PATH = "fibonacci.cfg"
TYPE_STR = type("")
TYPE_LIST = type([])
| 31.597633 | 78 | 0.618914 |
d19832a8ebc406b607f9daf11bbd5483f8a533f1 | 632 | py | Python | core/commands/public/staff.py | Smashulica/nebula8 | 010df165e3cc61e0154d20310fa972482ec0e7be | [
"Apache-2.0"
] | null | null | null | core/commands/public/staff.py | Smashulica/nebula8 | 010df165e3cc61e0154d20310fa972482ec0e7be | [
"Apache-2.0"
] | null | null | null | core/commands/public/staff.py | Smashulica/nebula8 | 010df165e3cc61e0154d20310fa972482ec0e7be | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright SquirrelNetwork
from core import decorators
from telegram.utils.helpers import mention_markdown | 31.6 | 85 | 0.705696 |
d19be2f76f9b868d04aab5ea3ee253005cbeb0af | 762 | py | Python | pytglib/api/types/users.py | iTeam-co/pytglib | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 6 | 2019-10-30T08:57:27.000Z | 2021-02-08T14:17:43.000Z | pytglib/api/types/users.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 1 | 2021-08-19T05:44:10.000Z | 2021-08-19T07:14:56.000Z | pytglib/api/types/users.py | iTeam-co/python-telegram | e5e75e0a85f89b77762209b32a61b0a883c0ae61 | [
"MIT"
] | 5 | 2019-12-04T05:30:39.000Z | 2021-05-21T18:23:32.000Z |
from ..utils import Object
| 20.594595 | 56 | 0.56168 |
d19cf55d1eed29f5e017627cc562825403fd7101 | 2,937 | py | Python | 方法二/無LM算capacity.py | jell0213/MUNIT_DataHiding | 75cb80a7ee5175c0a2235336e230ce3759f5b296 | [
"Unlicense"
] | null | null | null | 方法二/無LM算capacity.py | jell0213/MUNIT_DataHiding | 75cb80a7ee5175c0a2235336e230ce3759f5b296 | [
"Unlicense"
] | null | null | null | 方法二/無LM算capacity.py | jell0213/MUNIT_DataHiding | 75cb80a7ee5175c0a2235336e230ce3759f5b296 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
#######################################################
'''
input
MOD
1.
2.()xlsx
3.-xlsx
4.mod
5.
output
xlsx(xlsx)
'''
#######################################################
from skimage import io
from openpyxl import Workbook
import openpyxl
import os
import math
import time
#---------------------------------------------------------------------------
in_dir="D:\\108resercher\\====######RESEARCH######====\\GAN-research\\12.8\\LM\\100%MOD3"
num_image = 5000
num_mod = 3
embed_ratio= 100
#---------------------------------------------------------------------------
tStart = time.time() #
cal_capacity(in_dir,num_image,num_mod,embed_ratio) #
tEnd = time.time() #
wb = openpyxl.load_workbook(in_dir+"/NLM-mod{:d}_capacity".format(num_mod)+"({:d}%).xlsx".format(embed_ratio))
ws = wb['Sheet']
ws.append(["total time",str(round(tEnd-tStart,2))+" s"])
wb.save(in_dir+"/NLM-mod{:d}_capacity".format(num_mod)+"({:d}%).xlsx".format(embed_ratio)) #
| 40.232877 | 138 | 0.405856 |
d19e24dc55adbb4574f0c6afd0acb289ae9f8abb | 183 | py | Python | foo.py | Lnvictor/i_am_learning_shellscript | 7ee0ebcbdf41fb94ff0ad0210b580f29583444bb | [
"Apache-2.0"
] | null | null | null | foo.py | Lnvictor/i_am_learning_shellscript | 7ee0ebcbdf41fb94ff0ad0210b580f29583444bb | [
"Apache-2.0"
] | null | null | null | foo.py | Lnvictor/i_am_learning_shellscript | 7ee0ebcbdf41fb94ff0ad0210b580f29583444bb | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
my_dict = OrderedDict()
if __name__ == "__main__":
print(populate({})) | 14.076923 | 35 | 0.693989 |
d19f00e9b0507a59fe36f2031d52bc840d7e8792 | 2,592 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/test_hashi_vault_option_group_base.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/test_hashi_vault_option_group_base.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/community/hashi_vault/tests/unit/plugins/module_utils/test_hashi_vault_option_group_base.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021 Brian Scholer (@briantist)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
from ansible_collections.community.hashi_vault.tests.unit.compat import mock
from ansible_collections.community.hashi_vault.plugins.module_utils._hashi_vault_common import (
HashiVaultOptionGroupBase,
HashiVaultOptionAdapter,
)
PREREAD_OPTIONS = {
'opt1': 'val1',
'opt2': None,
'opt3': 'val3',
'opt4': None,
# no opt5
'opt6': None,
}
LOW_PREF_DEF = {
'opt1': dict(env=['_ENV_1A'], default='never'),
'opt2': dict(env=['_ENV_2A', '_ENV_2B']),
'opt4': dict(env=['_ENV_4A', '_ENV_4B', '_ENV_4C']),
'opt5': dict(env=['_ENV_5A']),
'opt6': dict(env=['_ENV_6A'], default='mosdefault'),
}
class TestHashiVaultOptionGroupBase(object):
| 31.609756 | 129 | 0.669367 |
d19f0f44e314141fc23b7283d9ad1c7b8685c068 | 384 | py | Python | iniciante/python/1113-crescente-e-decrescente.py | tfn10/beecrowd | 1ebf19ca9a253eb326160f03145d20be33064969 | [
"MIT"
] | null | null | null | iniciante/python/1113-crescente-e-decrescente.py | tfn10/beecrowd | 1ebf19ca9a253eb326160f03145d20be33064969 | [
"MIT"
] | null | null | null | iniciante/python/1113-crescente-e-decrescente.py | tfn10/beecrowd | 1ebf19ca9a253eb326160f03145d20be33064969 | [
"MIT"
] | null | null | null |
crescente_e_decrescente()
| 19.2 | 48 | 0.544271 |
d19f23d69e8c497f2703e0ce9519ea14add2903f | 2,695 | py | Python | app/client.py | akakou/privacy-enhanced-antivirus | 4cd32b27374016dd489eb13ac196c2c044912933 | [
"MIT"
] | null | null | null | app/client.py | akakou/privacy-enhanced-antivirus | 4cd32b27374016dd489eb13ac196c2c044912933 | [
"MIT"
] | null | null | null | app/client.py | akakou/privacy-enhanced-antivirus | 4cd32b27374016dd489eb13ac196c2c044912933 | [
"MIT"
] | null | null | null | from kivy.lang import Builder
import array
import scipy
import os
import syft as sy
import tensorflow as tf
import numpy
import time
import scipy
import sys
from dataset import get_dataset
from cluster import get_cluster
from PIL import Image
import leargist
from skimage import transform
from imageio import imsave
from kivy.app import App
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.core.window import Window
from kivy.uix.label import Label
# from kivy.uix.label import Label
from kivy.uix.screenmanager import CardTransition
THRESHOLD = 0
MAX_PATH_SIZE = 22
# Builder.load_file('assets/main.kv')
AntivirusApp().run()
| 21.910569 | 72 | 0.638219 |
d1a1b2cdca7fb838822d102ce1eb3031bc813ef4 | 5,910 | py | Python | network/vgg16.py | CamilaAlvarez/tensorflow-demo | 57f576bafe97054046610ded7a9ce39caa7e84b4 | [
"MIT"
] | null | null | null | network/vgg16.py | CamilaAlvarez/tensorflow-demo | 57f576bafe97054046610ded7a9ce39caa7e84b4 | [
"MIT"
] | null | null | null | network/vgg16.py | CamilaAlvarez/tensorflow-demo | 57f576bafe97054046610ded7a9ce39caa7e84b4 | [
"MIT"
] | null | null | null | from network.network import Network
import tensorflow as tf
import numpy as np
| 57.941176 | 111 | 0.644501 |
d1a20a252ae1bd2bda733e48649244f828568be5 | 2,840 | py | Python | S4/S4 Library/generated/protocolbuffers/ResourceKey_pb2.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Library/generated/protocolbuffers/ResourceKey_pb2.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Library/generated/protocolbuffers/ResourceKey_pb2.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
from google.protobuf import descriptor_pb2
DESCRIPTOR = descriptor.FileDescriptor(name='ResourceKey.proto', package='EA.Sims4.Network', serialized_pb='\n\x11ResourceKey.proto\x12\x10EA.Sims4.Network"<\n\x0bResourceKey\x12\x0c\n\x04type\x18\x01 \x02(\r\x12\r\n\x05group\x18\x02 \x02(\r\x12\x10\n\x08instance\x18\x03 \x02(\x04"G\n\x0fResourceKeyList\x124\n\rresource_keys\x18\x01 \x03(\x0b2\x1d.EA.Sims4.Network.ResourceKeyB\x0eB\x0cResourceKeys')
_RESOURCEKEY = descriptor.Descriptor(name='ResourceKey', full_name='EA.Sims4.Network.ResourceKey', filename=None, file=DESCRIPTOR, containing_type=None, fields=[descriptor.FieldDescriptor(name='type', full_name='EA.Sims4.Network.ResourceKey.type', index=0, number=1, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='group', full_name='EA.Sims4.Network.ResourceKey.group', index=1, number=2, type=13, cpp_type=3, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), descriptor.FieldDescriptor(name='instance', full_name='EA.Sims4.Network.ResourceKey.instance', index=2, number=3, type=4, cpp_type=4, label=2, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=39, serialized_end=99)
_RESOURCEKEYLIST = descriptor.Descriptor(name='ResourceKeyList', full_name='EA.Sims4.Network.ResourceKeyList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[descriptor.FieldDescriptor(name='resource_keys', full_name='EA.Sims4.Network.ResourceKeyList.resource_keys', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None)], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, extension_ranges=[], serialized_start=101, serialized_end=172)
_RESOURCEKEYLIST.fields_by_name['resource_keys'].message_type = _RESOURCEKEY
DESCRIPTOR.message_types_by_name['ResourceKey'] = _RESOURCEKEY
DESCRIPTOR.message_types_by_name['ResourceKeyList'] = _RESOURCEKEYLIST
| 167.058824 | 1,189 | 0.817254 |
d1a59054375691b39ba6ea30c594eff643b8391c | 5,787 | py | Python | scripts/elfw-plotResults.py | multimedia-eurecat/ELFW | 98a6eca7ab9152a7cf8c447ee9f4a62b5629e3b2 | [
"Apache-2.0"
] | 15 | 2020-07-10T08:19:13.000Z | 2022-02-24T08:52:24.000Z | scripts/elfw-plotResults.py | multimedia-eurecat/ELFW | 98a6eca7ab9152a7cf8c447ee9f4a62b5629e3b2 | [
"Apache-2.0"
] | 7 | 2020-08-11T06:26:54.000Z | 2021-04-23T08:32:21.000Z | scripts/elfw-plotResults.py | multimedia-eurecat/ELFW | 98a6eca7ab9152a7cf8c447ee9f4a62b5629e3b2 | [
"Apache-2.0"
] | 5 | 2020-08-11T09:09:42.000Z | 2020-11-25T12:02:54.000Z | import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
# from matplotlib.ticker import MaxNLocator
# Rafael Redondo (c) Eurecat 2020
colors = np.array([
[[247, 197, 188],[242, 151, 136],[242, 120, 99],[237, 85, 59]],
[[255, 242, 196], [247, 232, 176], [250, 225, 135], [246, 213, 93]],
[[180, 224, 220],[123, 209, 201],[83, 194, 183],[60, 174, 163]],
[[159, 189, 214],[118, 160, 194],[72, 125, 168],[32, 99, 155]]
]) / 255.0
class_colors = np.array([
[[184, 184, 184],[125, 125, 125],[71, 71, 71], [000, 000, 000]],
[[196, 255, 196],[178, 255, 178],[126, 255, 126],[000, 255, 000]],
[[252, 189, 189],[255, 133, 133],[255, 77, 77], [255, 000, 000]],
[[207, 255, 255],[176, 255, 245],[144, 255, 255],[000, 255, 255]],
[[212, 212, 255],[149, 149, 255],[94, 94, 255],[000, 000, 255]],
[[255, 209, 255],[255, 156, 255],[255, 101, 255],[255, 000, 255]]
]) / 255.0
# ----------------------------------------------------------------------------------------------
fcn = np.array([
[94.86221, 89.94708, 78.54365, 90.62491, 94.86221, 89.94708, 78.54365, 90.62491, 94.86221, 89.94708, 78.54365, 90.62491],
[94.87768, 89.34935, 78.5738, 90.65072, 94.91543, 90.04007, 78.89132, 90.71592, 94.87198, 90.01351, 79.12107, 90.64796],
[94.82212, 89.07311, 78.57936, 90.55048, 95.01015, 89.30039, 79.2808, 90.85555, 94.92132, 89.81723, 79.51949, 90.71459],
[94.91106, 88.96342, 79.1459, 90.67938, 94.94046, 89.36422, 79.07776, 90.75119, 94.9023, 90.05563, 79.41762, 90.69509]
])
fcn_classes = np.array([
[94.75722, 86.38252, 71.85863, 61.34205, 72.44731, 84.47418, 94.75722, 86.38252, 71.85863, 61.34205, 72.44731, 84.47418, 94.75722, 86.38252, 71.85863, 61.34205, 72.44731, 84.47418],
[94.74529, 86.52661, 71.92953, 60.08587, 73.81507, 84.34044, 94.78213, 86.77009, 71.78261, 61.24385, 74.51008, 84.25919, 94.74461, 86.72296, 71.41357, 63.01671, 74.86192, 83.96667],
[94.70561, 86.4855, 71.23186, 60.26997, 74.76319, 84.02004, 94.91729, 86.89263, 72.04419, 62.55301, 75.33231, 83.94535, 94.789, 86.70316, 71.52878, 63.19029, 75.79578, 85.10994],
[94.74795, 86.68139, 71.68878, 62.33461, 74.78171, 84.64096, 94.79637, 86.70211, 71.9773, 61.76077, 73.98104, 85.249, 94.72738, 86.72993, 71.767, 62.76649, 75.47534, 85.03957]
])
deeplab = np.array([
[94.6848, 89.71417, 77.94909, 90.37054, 94.6848, 89.71417, 77.94909, 90.37054, 94.6848, 89.71417, 77.94909, 90.37054],
[94.78537, 89.59541, 78.56921, 90.51187, 94.86725, 89.7494, 78.62243, 90.63049, 94.81017, 89.91979, 78.41131, 90.55676],
[94.82899, 90.35099, 79.05202, 90.57593, 94.86047, 90.18027, 78.72145, 90.63608, 94.90303, 90.12334, 79.14438, 90.70572],
[94.89329, 90.06537, 79.38813, 90.67735, 94.9435, 90.07861, 78.87746, 90.75484, 94.89794, 90.37945, 79.37854, 90.70328]
])
deeplab_classes = np.array([
[94.51156, 86.31067, 71.33108, 60.57315, 71.34837, 83.61973, 94.51156, 86.31067, 71.33108, 60.57315, 71.34837, 83.61973, 94.51156, 86.31067, 71.33108, 60.57315, 71.34837, 83.61973],
[94.63511, 86.41761, 71.65853, 61.14348, 74.20494, 83.35558, 94.77132, 86.44033, 71.84831, 62.07319, 72.95129, 83.65015, 94.66345, 86.50839, 71.66187, 59.54201, 74.23029, 83.86187],
[94.66677, 86.3675, 71.90078, 61.5994, 75.30071, 84.47695, 94.73285, 86.59962, 71.78432, 62.6076, 72.75062, 83.8537, 94.75528, 86.66354, 71.85329, 61.44883, 74.97043, 85.17492],
[94.75281, 86.6263, 71.72533, 63.26911, 75.43251, 84.52274, 94.81422, 86.6394, 72.28983, 61.98376, 73.2231, 84.31447, 94.72211, 86.83926, 71.83235, 63.31498, 75.25782, 84.30474]
])
fontsizeSmall = 12
fontsizeMedium = 16
fontsizeLarge = 18
font = {'family':'normal', 'weight':'normal', 'size': fontsizeSmall}
plt.rc('font', **font)
metrics_fig, metrics_axis = plt.subplots(2, 3, sharey=True, sharex=True)
draw(0, metrics_axis, fcn, 'FCN', ('Pixel Acc.', 'Mean Acc.', 'Mean IU', 'Freq.W. IU'), colors, 1.8, 0.4, 0.1)
draw(1, metrics_axis, deeplab, 'DeepLabV3', ('Pixel Acc.', 'Mean Acc.', 'Mean IU', 'Freq.W. IU'), colors, 1.8, 0.4, 0.1)
# class_fig, class_axis = plt.subplots(2, 3, sharey=True)
# draw(0, class_axis, fcn_classes, 'FCN', ('Bkgnd', 'Skin', 'Hair', 'Beard', 'Snglss', 'Wear'), class_colors, 4.5, 0.5, 0.15)
# draw(1, class_axis, deeplab_classes,'DeepLabV3',('Bkgnd', 'Skin', 'Hair', 'Beard', 'Snglss', 'Wear'), class_colors, 4.5, 0.5, 0.15)
plt.show() | 49.887931 | 183 | 0.637809 |
d1a66059d6aa2f43be85ef5e0f0969dc1f348e3f | 4,299 | py | Python | preprocessing/gen_clustering.py | HaowenWeiJohn/CV_Project | 8e2414796f60a8c3fe452f3721e4a6ef7edfdb11 | [
"MIT"
] | null | null | null | preprocessing/gen_clustering.py | HaowenWeiJohn/CV_Project | 8e2414796f60a8c3fe452f3721e4a6ef7edfdb11 | [
"MIT"
] | null | null | null | preprocessing/gen_clustering.py | HaowenWeiJohn/CV_Project | 8e2414796f60a8c3fe452f3721e4a6ef7edfdb11 | [
"MIT"
] | null | null | null | import yaml
import os
import sys
import yaml
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
from utils import load_poses, load_calib, load_files, load_vertex
from preprocessing.utils import *
from example.laserscan import *
from PC_cluster.ScanLineRun_cluster.build import ScanLineRun_Cluster
# data_path = '../data/sequences/08/velodyne/000030.bin'
# label_path = '../data/sequences/08/labels/000030.label'
CFG = yaml.safe_load(open('../config/semantic-kitti-mos.yaml', 'r'))
config_filename = '../config/mask_preparing.yaml'
if len(sys.argv) > 1:
config_filename = sys.argv[1]
if yaml.__version__ >= '5.1':
config = yaml.load(open(config_filename), Loader=yaml.FullLoader)
else:
config = yaml.load(open(config_filename))
# ground truth info
color_dict = CFG["color_map"]
label_transfer_dict = CFG["learning_map"]
nclasses = len(color_dict)
# mask config
data_folder = config['data_folder']
debug = config['debug']
visualize = config['visualize']
range_image_params = config['range_image']
sequences = config['sequences']
sem_scan = LaserScan(project=True,
flip_sign=False,
H=range_image_params['height'],
W=range_image_params['width'],
fov_up=range_image_params['fov_up'],
fov_down=range_image_params['fov_down'])
cluster=ScanLineRun_Cluster.ScanLineRun_Cluster(0.5, 1)
# create mask folder
for sequence in sequences:
sequence_folder = os.path.join(data_folder, sequence)
visualization_folder = config['visualization_folder']
scan_folder = config['scan_folder']
label_folder = config['label_folder']
mask_image_folder = config['mask_image_folder']
visualization_folder = os.path.join(sequence_folder, visualization_folder)
scan_folder = os.path.join(sequence_folder, scan_folder)
label_folder = os.path.join(sequence_folder, label_folder)
mask_image_folder = os.path.join(sequence_folder, mask_image_folder)
# if not os.path.exists(mask_image_folder):
# os.makedirs(mask_image_folder)
#
# # create mask image visualization folder
# if visualize:
# if not os.path.exists(visualization_folder):
# os.makedirs(visualization_folder)
# load labels
scan_paths = load_files(scan_folder)
# label_paths = load_files(label_folder)
# create scan object
# index_range = list(range(0,len(scan_paths)))
print('Clustering:', sequence, 'Frames: ', str(len(scan_paths)))
for frame_idx in tqdm(range(len(scan_paths))):
cluster_file_name = os.path.join(mask_image_folder, str(frame_idx).zfill(6))
sem_scan.open_scan(scan_paths[frame_idx])
# x_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
# y_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
# z_img = sem_scan.proj_xyz[:,:,0]*sem_scan.proj_mask
instance_label = cluster.ScanLineRun_cluster(sem_scan.proj_xyz[:,:,0],
sem_scan.proj_xyz[:,:,1],
sem_scan.proj_xyz[:,:,2],
sem_scan.proj_mask,
range_image_params['height'],
range_image_params['width']
)
instance_label = np.array(instance_label)
# ground removal
# clustering
# if visualize:
# fig = plt.figure(frameon=False, figsize=(16, 10))
# fig.set_size_inches(20.48, 0.64)
# ax = plt.Axes(fig, [0., 0., 1., 1.])
# ax.set_axis_off()
# fig.add_axes(ax)
# img = label_new.copy()
# img[img<2]=0
# ax.imshow(img, vmin=0, vmax=1)
# image_name = os.path.join(visualization_folder, str(frame_idx).zfill(6))
# plt.savefig(image_name)
# plt.close()
#
# # save to npy file
# label_new_one_hot = depth_onehot(matrix=label_new, category=[0, 1, 2], on_value=1, off_value=0, channel_first=True)
#
# np.save(mask_file_name, [label_new, label_new_one_hot, sem_scan.proj_idx])
| 34.119048 | 125 | 0.623168 |
d1a8c0ef8c78390a7389441f044f0c0970746917 | 529 | py | Python | src/Python/701-800/744.NextGreatestLetter.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | 2 | 2018-05-03T07:50:03.000Z | 2018-06-17T04:32:13.000Z | src/Python/701-800/744.NextGreatestLetter.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | null | null | null | src/Python/701-800/744.NextGreatestLetter.py | Peefy/PeefyLeetCode | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | [
"Apache-2.0"
] | 3 | 2018-11-09T14:18:11.000Z | 2021-11-17T15:23:52.000Z |
if __name__ == '__main__':
solution = Solution()
print(solution.nextGreatestLetter(["c", "f", "j"], "a"))
print(solution.nextGreatestLetter(["c", "f", "j"], "c"))
print(solution.nextGreatestLetter(["c", "f", "j"], "k"))
else:
pass
| 23 | 60 | 0.529301 |
d1a9642bf9111e3ce2efbfc78ec14600b2219939 | 592 | py | Python | codecademy_scripts/estimate_pi.py | Faraaz54/python_training_problems | 24c7b42daaf54366759e1d7c4b42f9936316e94b | [
"MIT"
] | null | null | null | codecademy_scripts/estimate_pi.py | Faraaz54/python_training_problems | 24c7b42daaf54366759e1d7c4b42f9936316e94b | [
"MIT"
] | null | null | null | codecademy_scripts/estimate_pi.py | Faraaz54/python_training_problems | 24c7b42daaf54366759e1d7c4b42f9936316e94b | [
"MIT"
] | null | null | null | from math import sqrt
def factorial(n):
"""Computes factorial of n."""
if n == 0:
return 1
else:
recurse = factorial(n-1)
result = n * recurse
return result
pi = estimate_pi()
print pi
| 20.413793 | 48 | 0.469595 |
d1aa0e60389e9105536a41fff2a976838f0e6c36 | 1,202 | py | Python | pywinrt/winsdk/windows/graphics/printing/printticket/__init__.py | pywinrt/python-winsdk | 1e2958a712949579f5e84d38220062b2cec12511 | [
"MIT"
] | 3 | 2022-02-14T14:53:08.000Z | 2022-03-29T20:48:54.000Z | pywinrt/winsdk/windows/graphics/printing/printticket/__init__.py | pywinrt/python-winsdk | 1e2958a712949579f5e84d38220062b2cec12511 | [
"MIT"
] | 4 | 2022-01-28T02:53:52.000Z | 2022-02-26T18:10:05.000Z | pywinrt/winsdk/windows/graphics/printing/printticket/__init__.py | pywinrt/python-winsdk | 1e2958a712949579f5e84d38220062b2cec12511 | [
"MIT"
] | null | null | null | # WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.4
import enum
import winsdk
_ns_module = winsdk._import_ns_module("Windows.Graphics.Printing.PrintTicket")
try:
import winsdk.windows.data.xml.dom
except Exception:
pass
try:
import winsdk.windows.foundation
except Exception:
pass
try:
import winsdk.windows.foundation.collections
except Exception:
pass
PrintTicketCapabilities = _ns_module.PrintTicketCapabilities
PrintTicketFeature = _ns_module.PrintTicketFeature
PrintTicketOption = _ns_module.PrintTicketOption
PrintTicketParameterDefinition = _ns_module.PrintTicketParameterDefinition
PrintTicketParameterInitializer = _ns_module.PrintTicketParameterInitializer
PrintTicketValue = _ns_module.PrintTicketValue
WorkflowPrintTicket = _ns_module.WorkflowPrintTicket
WorkflowPrintTicketValidationResult = _ns_module.WorkflowPrintTicketValidationResult
| 26.130435 | 86 | 0.811148 |
d1ac4c4b62c7a69033fe73553cd10cf79ee11495 | 638 | py | Python | MyThread.py | hectorpadin1/Computer-Vision-Algorithms | 4ef66353f2453ec1be764787e23260f6ef402e0f | [
"MIT"
] | null | null | null | MyThread.py | hectorpadin1/Computer-Vision-Algorithms | 4ef66353f2453ec1be764787e23260f6ef402e0f | [
"MIT"
] | null | null | null | MyThread.py | hectorpadin1/Computer-Vision-Algorithms | 4ef66353f2453ec1be764787e23260f6ef402e0f | [
"MIT"
] | null | null | null | import threading
import sys
| 31.9 | 99 | 0.617555 |
d1aca30c7404605a4e877890f728a3f9b0615ca4 | 623 | py | Python | Python/Perfect-Python/12/1_4_2_echoclient.py | hrntsm/study-language | 922578a5321d70c26b935e6052f400125e15649c | [
"MIT"
] | 1 | 2022-02-06T10:50:42.000Z | 2022-02-06T10:50:42.000Z | Python/Perfect-Python/12/1_4_2_echoclient.py | hrntsm/study-language | 922578a5321d70c26b935e6052f400125e15649c | [
"MIT"
] | null | null | null | Python/Perfect-Python/12/1_4_2_echoclient.py | hrntsm/study-language | 922578a5321d70c26b935e6052f400125e15649c | [
"MIT"
] | null | null | null | import asyncio
loop = asyncio.get_event_loop()
coro = loop.create_connection(lambda: EchoClient("", loop), "127.0.0.1", 8192)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
| 23.961538 | 83 | 0.670947 |
d1aca4dd340ce95cab59972197c8c8400eb12629 | 482 | py | Python | Tinify.dzbundle/tinify/result_meta.py | F-Pseudonym/dropzone4-actions | d36f581a6be081ff303324bf3ca35daa5133c36a | [
"Ruby"
] | 1 | 2021-02-01T22:44:36.000Z | 2021-02-01T22:44:36.000Z | Tinify.dzbundle/tinify/result_meta.py | F-Pseudonym/dropzone4-actions | d36f581a6be081ff303324bf3ca35daa5133c36a | [
"Ruby"
] | null | null | null | Tinify.dzbundle/tinify/result_meta.py | F-Pseudonym/dropzone4-actions | d36f581a6be081ff303324bf3ca35daa5133c36a | [
"Ruby"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
| 21.909091 | 82 | 0.639004 |
d1ad2457b9a62c330bcd5b7334696d9a9143b52a | 855 | py | Python | app.py | wataxiwaX/OSCspider | 7f0635c49cca6c14ffac3a37cbb9c10cc2c15cc0 | [
"MIT"
] | null | null | null | app.py | wataxiwaX/OSCspider | 7f0635c49cca6c14ffac3a37cbb9c10cc2c15cc0 | [
"MIT"
] | null | null | null | app.py | wataxiwaX/OSCspider | 7f0635c49cca6c14ffac3a37cbb9c10cc2c15cc0 | [
"MIT"
] | 1 | 2019-04-29T10:52:40.000Z | 2019-04-29T10:52:40.000Z | # -*- coding: UTF-8 -*-
from spider import *
# Interactive command loop for the OSC spider client.
# NOTE: this is Python 2 code (raw_input and the print statement).
app = OSCPspider()
while True:
    # Show the menu, then read a one-line command from the user.
    main_view()
    op = raw_input(":")
    if not op:
        # Empty input: just redraw the menu.
        continue
    elif op[0] == 'l':
        # Commands starting with 'l' are dispatched to app.list_p.
        flag = app.list_p(op)
    elif op[0] == 's':
        # Commands starting with 's' are dispatched to app.search_p.
        flag = app.search_p(op)
    elif op == 'c':
        flag = app.catch_p()
    elif op == 'q':
        # Quit the program.
        exit()
    else:
        flag = 'no_op'
    if flag == 'no_op':
        # Unrecognized command (or a handler reported 'no_op'): print a marker.
        print "!" | 23.75 | 67 | 0.389474 |
d1ae965b8719af361c251c7b3021070130bbaa7e | 5,653 | py | Python | LDA/lda.py | wimpykid26/Evolutionary-Classification | 0a78cbebc252c0a13703aee20dac9fa234f07b08 | [
"Apache-2.0"
] | 3 | 2019-11-10T08:51:11.000Z | 2020-08-05T14:23:27.000Z | LDA/lda.py | wimpykid26/Evolutionary-Classification | 0a78cbebc252c0a13703aee20dac9fa234f07b08 | [
"Apache-2.0"
] | null | null | null | LDA/lda.py | wimpykid26/Evolutionary-Classification | 0a78cbebc252c0a13703aee20dac9fa234f07b08 | [
"Apache-2.0"
] | 2 | 2017-12-12T13:35:41.000Z | 2017-12-28T10:00:56.000Z | import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import math
from sklearn.preprocessing import LabelEncoder

# ---------------------------------------------------------------------------
# Step-by-step Linear Discriminant Analysis (LDA) on the Iris dataset.
# FIX: removed a duplicate `from matplotlib import pyplot as plt` import.
# ---------------------------------------------------------------------------

# Map column index -> human-readable feature name.
feature_dict = {i: label for i, label in zip(
    range(4),
    ('sepal length in cm',
     'sepal width in cm',
     'petal length in cm',
     'petal width in cm'))}

# Load the Iris data straight from the UCI repository.
# FIX: use the public `pd.read_csv` instead of the private
# `pd.io.parsers.read_csv` path.  (`pd` is imported at the top of the file.)
df = pd.read_csv(
    filepath_or_buffer='https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
    header=None,
    sep=',',
)
print(feature_dict.items())
df.columns = [l for i, l in sorted(feature_dict.items())] + ['class label']
df.dropna(how="all", inplace=True)  # to drop the empty line at file-end
df.tail()

# Feature matrix X and class labels y; labels are encoded to integers 1..3.
X = df[['sepal length in cm', 'sepal width in cm', 'petal length in cm', 'petal width in cm']].values
y = df['class label'].values
enc = LabelEncoder()
label_encoder = enc.fit(y)
y = label_encoder.transform(y) + 1
label_dict = {1: 'Setosa', 2: 'Versicolor', 3: 'Virginica'}

# --- Per-feature histograms, one subplot per feature ------------------------
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12, 6))
for ax, cnt in zip(axes.ravel(), range(4)):
    # Bin edges spanning the observed range of this feature.
    min_b = math.floor(np.min(X[:, cnt]))
    max_b = math.ceil(np.max(X[:, cnt]))
    bins = np.linspace(min_b, max_b, 25)
    # Overlay one translucent histogram per class.
    for lab, col in zip(range(1, 4), ('blue', 'red', 'green')):
        ax.hist(X[y == lab, cnt],
                color=col,
                label='class %s' % label_dict[lab],
                bins=bins,
                alpha=0.5)
    ylims = ax.get_ylim()
    # Plot annotation.
    leg = ax.legend(loc='upper right', fancybox=True, fontsize=8)
    leg.get_frame().set_alpha(0.5)
    ax.set_ylim([0, max(ylims) + 2])
    ax.set_xlabel(feature_dict[cnt])
    ax.set_title('Iris histogram #%s' % str(cnt + 1))
    # Hide axis ticks.  FIX: tick_params visibility arguments must be
    # booleans; the "on"/"off" strings were removed in matplotlib 3.x.
    ax.tick_params(axis="both", which="both", bottom=False, top=False,
                   labelbottom=True, left=False, right=False, labelleft=True)
    # Remove axis spines.
    for side in ("top", "right", "bottom", "left"):
        ax.spines[side].set_visible(False)
axes[0][0].set_ylabel('count')
axes[1][0].set_ylabel('count')
fig.tight_layout()
plt.show()

np.set_printoptions(precision=4)

# --- Class mean vectors ------------------------------------------------------
mean_vectors = []
for cl in range(1, 4):
    mean_vectors.append(np.mean(X[y == cl], axis=0))
    print('Mean Vector class %s: %s\n' % (cl, mean_vectors[cl - 1]))

# --- Within-class scatter matrix S_W -----------------------------------------
S_W = np.zeros((4, 4))
for cl, mv in zip(range(1, 4), mean_vectors):
    class_sc_mat = np.zeros((4, 4))  # scatter matrix for every class
    for row in X[y == cl]:
        row, mv = row.reshape(4, 1), mv.reshape(4, 1)  # make column vectors
        class_sc_mat += (row - mv).dot((row - mv).T)
    S_W += class_sc_mat  # sum class scatter matrices
print('within-class Scatter Matrix:\n', S_W)

# --- Between-class scatter matrix S_B ----------------------------------------
overall_mean = np.mean(X, axis=0)
S_B = np.zeros((4, 4))
for i, mean_vec in enumerate(mean_vectors):
    n = X[y == i + 1, :].shape[0]  # number of samples in this class
    mean_vec = mean_vec.reshape(4, 1)  # make column vector
    overall_mean = overall_mean.reshape(4, 1)  # make column vector
    S_B += n * (mean_vec - overall_mean).dot((mean_vec - overall_mean).T)
print('between-class Scatter Matrix:\n', S_B)

# --- Eigen-decomposition of inv(S_W) . S_B -----------------------------------
eig_vals, eig_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B))
for i in range(len(eig_vals)):
    eigvec_sc = eig_vecs[:, i].reshape(4, 1)
    print('\nEigenvector {}: \n{}'.format(i + 1, eigvec_sc.real))
    print('Eigenvalue {:}: {:.2e}'.format(i + 1, eig_vals[i].real))
# Sanity check: each pair satisfies A v = lambda v up to 6 decimals.
for i in range(len(eig_vals)):
    eigv = eig_vecs[:, i].reshape(4, 1)
    np.testing.assert_array_almost_equal(np.linalg.inv(S_W).dot(S_B).dot(eigv),
                                         eig_vals[i] * eigv,
                                         decimal=6, err_msg='', verbose=True)
print('ok')

# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_vals[i]), eig_vecs[:, i]) for i in range(len(eig_vals))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs = sorted(eig_pairs, key=lambda k: k[0], reverse=True)
# Visually confirm that the list is correctly sorted by decreasing eigenvalues
print('Eigenvalues in decreasing order:\n')
for i in eig_pairs:
    print(i[0])

print('Variance explained:\n')
eigv_sum = sum(eig_vals)
for i, j in enumerate(eig_pairs):
    print('eigenvalue {0:}: {1:.2%}'.format(i + 1, (j[0] / eigv_sum).real))

# Projection matrix W (4x2) from the two leading eigenvectors.
W = np.hstack((eig_pairs[0][1].reshape(4, 1), eig_pairs[1][1].reshape(4, 1)))
print('Matrix W:\n', W.real)

# Project the data onto the new 2-D LDA subspace.
X_lda = X.dot(W)
assert X_lda.shape == (150, 2), "The matrix is not 150x2 dimensional."
plot_step_lda()
| 30.722826 | 101 | 0.613833 |
d1ae9c9c1b63d403d1996a3a61d581f139b38fa2 | 1,509 | py | Python | js_services/migrations/0013_auto_20190709_2315.py | evgeny-dmi3ev/js-services | 8d4960ec49a44b0eda537713af9f4976456e0f5e | [
"BSD-3-Clause"
] | null | null | null | js_services/migrations/0013_auto_20190709_2315.py | evgeny-dmi3ev/js-services | 8d4960ec49a44b0eda537713af9f4976456e0f5e | [
"BSD-3-Clause"
] | null | null | null | js_services/migrations/0013_auto_20190709_2315.py | evgeny-dmi3ev/js-services | 8d4960ec49a44b0eda537713af9f4976456e0f5e | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.22 on 2019-07-09 23:15
from __future__ import unicode_literals
from django.db import migrations, models
import js_color_picker.fields
| 35.928571 | 288 | 0.612326 |
d1b01bba827b8c38a0f0739fb791912ffc9c1b74 | 29,968 | py | Python | gentex/texmeas.py | NPann/GenTex | 8a2c7cc746abefd252613f4ddf0d7f70d7ff26f8 | [
"BSD-3-Clause"
] | 3 | 2019-04-26T00:48:01.000Z | 2020-07-06T19:10:17.000Z | gentex/texmeas.py | NPann/GenTex | 8a2c7cc746abefd252613f4ddf0d7f70d7ff26f8 | [
"BSD-3-Clause"
] | null | null | null | gentex/texmeas.py | NPann/GenTex | 8a2c7cc746abefd252613f4ddf0d7f70d7ff26f8 | [
"BSD-3-Clause"
] | 2 | 2019-01-10T18:38:05.000Z | 2021-05-19T16:54:01.000Z | """ gentex.texmeas package
"""
import numpy as np
| 45.613394 | 154 | 0.525894 |
d1b22c20857895713f38d86719437c73c6f5f5b7 | 3,373 | py | Python | AutoSketcher/utils/dataio.py | D1anaGreen/essaykiller | 75311a23dc1f5dc8b5040114fdeda67248700f7a | [
"Apache-2.0"
] | 4,551 | 2020-09-29T14:50:03.000Z | 2022-03-31T00:40:45.000Z | AutoSketcher/utils/dataio.py | D1anaGreen/essaykiller | 75311a23dc1f5dc8b5040114fdeda67248700f7a | [
"Apache-2.0"
] | 28 | 2020-10-01T08:03:23.000Z | 2022-03-30T15:40:40.000Z | AutoSketcher/utils/dataio.py | D1anaGreen/essaykiller | 75311a23dc1f5dc8b5040114fdeda67248700f7a | [
"Apache-2.0"
] | 809 | 2020-10-01T05:34:58.000Z | 2022-03-31T00:40:48.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zk
@contact: kun.zhang@nuance.com
@file: dataio.py
@time: 8/27/2019 4:31 PM
@desc:
"""
import os
def load_txt_data(path, mode='utf-8-sig', origin=False):
    """
    Read a text file and return its (stripped) lines.

    :param path: path where the file is stored
    :type path: str
    :param mode: encoding used to decode the raw bytes (undecodable bytes
        are ignored rather than raising UnicodeDecodeError)
    :param origin: if True, keep empty lines (stripped); otherwise drop them
    :return: list of stripped lines
    :rtype: list
    :raises TypeError: if ``path`` is not a str
    """
    if type(path) != str:
        raise TypeError
    res = []
    # Context manager guarantees the handle is closed even if decoding or
    # list building raises (the original leaked the handle on error).
    with open(path, 'rb') as file:
        lines = file.read().decode(mode, 'ignore')
    for line in lines.split('\n'):
        line = line.strip()
        # With origin=True every (stripped) line is kept, including empties.
        if origin or line:
            res.append(line)
    return res
def load_excel_data(path):
    """
    Load an Excel workbook into a pandas DataFrame.

    :param path: path where the file is stored
    :type path: str
    :return: sheet contents
    :rtype: pandas.DataFrame
    :raises TypeError: if ``path`` is not a str
    """
    if type(path) != str:
        raise TypeError
    import pandas as pd
    frame = pd.read_excel(path)
    return frame.loc[:]
def load_variable(path):
    """
    Load a pickled Python object from ``path``.

    :param path: file the object was serialized to
    :return: the unpickled object
    """
    import pickle
    # Close the handle deterministically; the original passed an anonymous
    # open() result and relied on garbage collection to close it.
    with open(path, 'rb') as f:
        return pickle.load(f)
def save_txt_file(data, path, end='\n'):
    """
    Save an iterable of items to a text file, one item per line.

    Supported data types:
        list: fully supported
        dict: only the keys are written (iteration yields keys)
        str: each character is written on its own line
        tuple: fully supported
        set: fully supported

    :param data: data to write
    :param path: destination path; any existing content is discarded
    :type path: str
    :param end: terminator appended after each item
    :type end: str
    :return: None
    :raises TypeError: if ``data`` or ``path`` has an unsupported type
    """
    if type(data) not in [list, dict, str, tuple, set] or type(path) != str:
        raise TypeError
    # Opening in 'w' truncates any previous file in one step, replacing the
    # old remove_old_file() + append-mode sequence (same resulting content,
    # no delete/recreate race).
    with open(path, 'w', encoding='utf-8') as f:
        for item in data:
            f.write(str(item) + end)
def save_variable(variable, path):
    """
    Pickle ``variable`` to ``path``.

    :param variable: object to serialize
    :param path: destination file path
    :return: None (``pickle.dump`` returns None)
    """
    import pickle
    # Context manager flushes and closes the file deterministically; the
    # original left the anonymous handle open until garbage collection,
    # which can delay or lose the write on some interpreters.
    with open(path, 'wb') as f:
        return pickle.dump(variable, f)
def load_file_name(path):
    """
    Return the top-level (root, dirs, files) triple for ``path``.

    :param path: directory to inspect
    :type path: str
    :return: the first tuple produced by ``os.walk`` — or None when the
        walk yields nothing (e.g. a non-existent path)
    """
    return next(os.walk(path), None)
def load_all_file_name(path, list_name, suffix='', not_include='.py'):
    """
    Recursively collect file paths under ``path`` (including sub-folders),
    appending matches to ``list_name`` in place.

    :param path: root directory to scan
    :param list_name: list that matching paths are appended to (mutated)
    :param suffix: extension (e.g. '.txt') a file must have to be collected
    :param not_include: directories whose full path contains this substring
        are not descended into
    :return: None
    """
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        descend = os.path.isdir(entry_path) and not_include not in entry_path
        if descend:
            load_all_file_name(entry_path, list_name, suffix, not_include)
        elif os.path.splitext(entry_path)[1] == suffix:
            list_name.append(entry_path)
def check_dir(path):
    """
    Check whether a filesystem path exists.

    Note: despite the name, this returns True for ANY existing path
    (file or directory), since it delegates to ``os.path.exists``.

    :param path: path to test
    :type path: str
    :return: True if ``path`` exists, False otherwise
    :rtype: bool
    """
    return os.path.exists(path)
def mkdir(path):
    """
    Create directory ``path`` (including missing parents) if it does not
    already exist.

    :param path: directory path; surrounding whitespace is stripped first
    :type path: str
    :return: None
    """
    path = path.strip()
    # exist_ok=True makes creation idempotent and removes the
    # check-then-create race of the previous check_dir() + makedirs()
    # sequence.
    os.makedirs(path, exist_ok=True)
def remove_old_file(path):
    """
    Delete the file at ``path`` if it exists; do nothing otherwise.

    :param path: path of the file to remove
    :type path: str
    :return: None
    """
    # EAFP: attempting the removal and ignoring FileNotFoundError avoids
    # the check-then-remove race of the original check_dir() + os.remove()
    # sequence.
    try:
        os.remove(path)
    except FileNotFoundError:
        pass
if __name__ == '__main__':
pass
| 20.319277 | 76 | 0.590276 |
d1b6e00f1b7c8a15539c5d29a89c356e88a3f73c | 20,511 | py | Python | music_maker.py | kenanbit/loopsichord | d02e021a68333c52adff38cc869bf217deebfc5c | [
"MIT"
] | null | null | null | music_maker.py | kenanbit/loopsichord | d02e021a68333c52adff38cc869bf217deebfc5c | [
"MIT"
] | null | null | null | music_maker.py | kenanbit/loopsichord | d02e021a68333c52adff38cc869bf217deebfc5c | [
"MIT"
] | null | null | null | from constants import *
import pygame as pg
from time import sleep
from metronome import *
import math
import numpy as np
from copy import deepcopy
from audio import *
from instructions_panel import *
from loop import *
| 49.305288 | 211 | 0.610502 |
d1b7d1521d980a52988abbf6e1742ba50379f867 | 10,084 | py | Python | danmu/danmaku/egame.py | simplecelery/zhibo | f1b69dabfde6cd2fc8a8a7fc4112da99feaf778f | [
"Apache-2.0"
] | 4 | 2021-11-21T15:30:32.000Z | 2022-03-11T02:49:30.000Z | danmu/danmaku/egame.py | simplecelery/zhibo | f1b69dabfde6cd2fc8a8a7fc4112da99feaf778f | [
"Apache-2.0"
] | 1 | 2021-11-11T15:44:44.000Z | 2021-11-11T15:44:44.000Z | danmu/danmaku/egame.py | simplecelery/zhibo | f1b69dabfde6cd2fc8a8a7fc4112da99feaf778f | [
"Apache-2.0"
] | 9 | 2021-09-24T03:26:21.000Z | 2022-03-23T01:32:15.000Z | import aiohttp
import struct
import json
import re
| 28.485876 | 380 | 0.416501 |
d1b947c30d3ec40d21821c8ac176cfd383593e74 | 832 | py | Python | minimal_rating_system/views.py | dvischi/minimal-rating-system | 25b42f49dbc36a8dfc912368273face49f9a4d97 | [
"MIT"
] | null | null | null | minimal_rating_system/views.py | dvischi/minimal-rating-system | 25b42f49dbc36a8dfc912368273face49f9a4d97 | [
"MIT"
] | null | null | null | minimal_rating_system/views.py | dvischi/minimal-rating-system | 25b42f49dbc36a8dfc912368273face49f9a4d97 | [
"MIT"
] | null | null | null | from django.views.generic import DetailView, TemplateView
from star_ratings.models import Rating
from .models import Beer, Snack
| 33.28 | 78 | 0.72476 |
d1b9a244a6632704e7c6e345d736e70ca933a771 | 335 | py | Python | AwardsApp/migrations/0005_remove_userprofile_bio.py | josphat-otieno/project-reviews | 5eaf9334fbd15b95726aee922f936d83e6f3d56f | [
"MIT"
] | null | null | null | AwardsApp/migrations/0005_remove_userprofile_bio.py | josphat-otieno/project-reviews | 5eaf9334fbd15b95726aee922f936d83e6f3d56f | [
"MIT"
] | null | null | null | AwardsApp/migrations/0005_remove_userprofile_bio.py | josphat-otieno/project-reviews | 5eaf9334fbd15b95726aee922f936d83e6f3d56f | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-20 15:33
from django.db import migrations
| 18.611111 | 52 | 0.6 |
d1b9c4876479aca8264e006843673c2f7fdb84e7 | 8,530 | py | Python | app.py | tpirate/SchoolPortal | ca4c7739126b0223b360de5b4927b3a8c9bda218 | [
"MIT"
] | 1 | 2020-12-07T07:40:33.000Z | 2020-12-07T07:40:33.000Z | app.py | tpirate/SchoolPortal | ca4c7739126b0223b360de5b4927b3a8c9bda218 | [
"MIT"
] | null | null | null | app.py | tpirate/SchoolPortal | ca4c7739126b0223b360de5b4927b3a8c9bda218 | [
"MIT"
] | null | null | null | # Imports
from flask import Flask, render_template, session, redirect, request, flash, url_for, abort
from flask_session import Session
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from cs50 import SQL
import os
import markdown
from time import sleep
from new import login_required
# App Config
app = Flask(__name__)
db = SQL("sqlite:///school.db")
app.debug = True
app.secret_key = b'\xb3\xaaS\xdf\xc0\x1fBc\x9b\x84\x9a\xfaCd\xc3\xd9'
app.static_folder = 'static'
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Session Config
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)
app.config.from_object(__name__)
# Routes
if __name__ == "__main__":
app.run(debug=True)
| 40.813397 | 252 | 0.62966 |
d1bae04489d859e76db95e43fda5086ae3c123aa | 621 | py | Python | tests/test_rot13.py | onlykood/pycipher | 8f1d7cf3cba4e12171e27d9ce723ad890194de19 | [
"MIT"
] | 196 | 2015-01-16T19:09:19.000Z | 2022-03-13T16:19:21.000Z | tests/test_rot13.py | rafaelmessias/pycipher | 787eb947a173138869ddd388b5331559e5cd3a5a | [
"MIT"
] | 9 | 2015-10-09T18:07:32.000Z | 2021-12-22T12:04:00.000Z | tests/test_rot13.py | rafaelmessias/pycipher | 787eb947a173138869ddd388b5331559e5cd3a5a | [
"MIT"
] | 76 | 2015-02-08T23:17:43.000Z | 2021-12-27T04:15:30.000Z | from pycipher import Rot13
import unittest
| 36.529412 | 74 | 0.723027 |
d1bd24d999e751c41059b172e51cf7f0d7f8e82a | 246 | py | Python | tests/functional_tests/identity_tests/test_definition.py | lycantropos/lz | 632baaffc1c62cd644f6e67f0bcd7971ae6580da | [
"MIT"
] | 7 | 2019-05-26T15:30:03.000Z | 2022-03-07T16:00:31.000Z | tests/functional_tests/identity_tests/test_definition.py | lycantropos/lz | 632baaffc1c62cd644f6e67f0bcd7971ae6580da | [
"MIT"
] | 29 | 2018-11-12T11:45:56.000Z | 2021-05-04T17:24:45.000Z | tests/functional_tests/identity_tests/test_definition.py | lycantropos/lz | 632baaffc1c62cd644f6e67f0bcd7971ae6580da | [
"MIT"
] | null | null | null | from typing import Any
from hypothesis import given
from lz.functional import identity
from tests import strategies
| 17.571429 | 37 | 0.772358 |
d1bd46a35b1176540180e5d836f7a6d20314a7dc | 3,703 | py | Python | lib/cogs/reactionpolls.py | pille1842/gerfroniabot | 291dc8f3cf9fb00f3f5e89e36b066660a410026f | [
"MIT"
] | null | null | null | lib/cogs/reactionpolls.py | pille1842/gerfroniabot | 291dc8f3cf9fb00f3f5e89e36b066660a410026f | [
"MIT"
] | null | null | null | lib/cogs/reactionpolls.py | pille1842/gerfroniabot | 291dc8f3cf9fb00f3f5e89e36b066660a410026f | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from discord import Embed
from discord.ext.commands import Cog
from discord.ext.commands import command
import logging
| 44.614458 | 163 | 0.654604 |
d1be71acaff6d8c302bc2e4dd7fae486925372c6 | 5,975 | py | Python | scripts/visualize_image_dataset.py | Sergio5714/pybf | bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc | [
"Apache-2.0"
] | 1 | 2021-11-02T09:54:41.000Z | 2021-11-02T09:54:41.000Z | scripts/visualize_image_dataset.py | Sergio5714/pybf | bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc | [
"Apache-2.0"
] | null | null | null | scripts/visualize_image_dataset.py | Sergio5714/pybf | bf56b353cd715c1bdb16d6cbb79aef44e3ef49bc | [
"Apache-2.0"
] | 2 | 2020-04-17T10:50:06.000Z | 2021-11-02T09:54:47.000Z | """
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Sergei Vostrikov, ETH Zurich
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Basic libraries
import argparse
import numpy as np
import sys
from os.path import dirname, abspath
from pybf.pybf.io_interfaces import ImageLoader
from pybf.pybf.visualization import plot_image
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--path_to_img_dataset',
type=str,
default='',
help='Path to the image dataset file.')
# Parameters for visualization
parser.add_argument(
'--save_visualized_images',
type=str2bool,
nargs='?',
const=True,
default=False,
help='Flag to save visualized images.')
parser.add_argument(
'--frames_to_plot',
type=int,
nargs="+",
default=None,
help='Space separated list of frames to plot.\
"[]" - plot all frames. "None" - plot none.')
parser.add_argument(
'--low_res_img_to_plot',
type=int,
nargs="+",
default=None,
help='Space separated list of low resolution images to plot.\
"[]" - plot all frames. "None" - plot none.')
parser.add_argument(
'--db_range',
type=float,
default=None,
help='Decibels range for log compression of images ')
FLAGS, unparsed = parser.parse_known_args()
# Run main function
visualize_image_dataset(FLAGS.path_to_img_dataset,
FLAGS.save_visualized_images,
FLAGS.frames_to_plot,
FLAGS.low_res_img_to_plot,
FLAGS.db_range) | 33.757062 | 75 | 0.59364 |
d1bef0f9641ed3b8503a1d2834c347e28d936599 | 4,725 | py | Python | tests/ut/python/dataset/test_datasets_get_dataset_size.py | unseenme/mindspore | 4ba052f0cd9146ac0ccc4880a778706f1b2d0af8 | [
"Apache-2.0"
] | 7 | 2020-05-24T03:19:26.000Z | 2020-05-24T03:20:00.000Z | tests/ut/python/dataset/test_datasets_get_dataset_size.py | liyong126/mindspore | 930a1fb0a8fa9432025442c4f4732058bb7af592 | [
"Apache-2.0"
] | 7 | 2020-03-30T08:31:56.000Z | 2020-04-01T09:54:39.000Z | tests/ut/python/dataset/test_datasets_get_dataset_size.py | liyong126/mindspore | 930a1fb0a8fa9432025442c4f4732058bb7af592 | [
"Apache-2.0"
] | 1 | 2020-03-30T17:07:43.000Z | 2020-03-30T17:07:43.000Z | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import mindspore.dataset as ds
IMAGENET_RAWDATA_DIR = "../data/dataset/testImageNetData2/train"
IMAGENET_TFFILE_DIR = ["../data/dataset/test_tf_file_3_images2/train-0000-of-0001.data",
"../data/dataset/test_tf_file_3_images2/train-0000-of-0002.data",
"../data/dataset/test_tf_file_3_images2/train-0000-of-0003.data",
"../data/dataset/test_tf_file_3_images2/train-0000-of-0004.data"]
MNIST_DATA_DIR = "../data/dataset/testMnistData"
MANIFEST_DATA_FILE = "../data/dataset/testManifestData/test.manifest"
CIFAR10_DATA_DIR = "../data/dataset/testCifar10Data"
CIFAR100_DATA_DIR = "../data/dataset/testCifar100Data"
| 41.447368 | 90 | 0.748995 |
d1bfe6581b046ee9479ce7089c84c5e5bea00961 | 4,651 | py | Python | tobiko/shell/iperf/_interface.py | FedericoRessi/tobiko | 188825386dc30197a37b7fe8be03318c73abbc48 | [
"Apache-2.0"
] | 1 | 2022-01-11T20:50:06.000Z | 2022-01-11T20:50:06.000Z | tobiko/shell/iperf/_interface.py | FedericoRessi/tobiko | 188825386dc30197a37b7fe8be03318c73abbc48 | [
"Apache-2.0"
] | null | null | null | tobiko/shell/iperf/_interface.py | FedericoRessi/tobiko | 188825386dc30197a37b7fe8be03318c73abbc48 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 Red Hat, Inc.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from oslo_log import log
import tobiko
from tobiko.shell import sh
LOG = log.getLogger(__name__)
| 31.425676 | 78 | 0.641797 |
d1c1735ef2cb4649ea44c8972cfcfb01cf792d82 | 512 | py | Python | Tensorflow_official/cnn/.ipynb_checkpoints/test.py | starkidstory/OmegaTensor | 2a80d38236a7ce6d6460be59528b33227d98b93b | [
"MIT"
] | 2 | 2020-04-07T03:01:03.000Z | 2020-04-16T14:33:21.000Z | Tensorflow_official/cnn/.ipynb_checkpoints/test.py | starkidstory/OmegaTensor | 2a80d38236a7ce6d6460be59528b33227d98b93b | [
"MIT"
] | null | null | null | Tensorflow_official/cnn/.ipynb_checkpoints/test.py | starkidstory/OmegaTensor | 2a80d38236a7ce6d6460be59528b33227d98b93b | [
"MIT"
] | null | null | null | import tensorflow as tf
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Scratch script exploring basic TensorFlow tensor/dataset conversions.
#print(np.version.version)
#np.set_printoptions(precision=4)
# Build a tf.data.Dataset from an in-memory list of scalars.
dataset=tf.data.Dataset.from_tensor_slices([8,3,0,8,2,1])
# Convert a NumPy int array to a tensor, then cast it to float32.
num=np.arange(5)
numT=tf.convert_to_tensor(num)
numF=tf.cast(numT,dtype=tf.float32)
print(numT)
print(numF)
print(dataset)
# Convert a 3x3 NumPy zero matrix (float64 by default) to a tensor.
mat=tf.convert_to_tensor(np.zeros([3,3]))
print(mat)
# Conversion from a plain Python list with an explicit dtype.
small_list=tf.convert_to_tensor([1,2,3],dtype=tf.float64)
print(small_list)
d1c367c3ef3880ac8d8c1957203e2309d51cb673 | 315 | py | Python | 378.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | 378.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | 378.py | geethakamath18/Leetcode | 8e55e0a47ee35ed100b30dda6682c7ce1033d4b2 | [
"MIT"
] | null | null | null | #LeetCode problem 378: Kth Smallest Element in a Sorted Matrix | 39.375 | 66 | 0.584127 |
d1c48b5c347d67ffd046cdeddd8592dec9342813 | 1,926 | py | Python | tests/unit_tests/crud/test_base.py | Georgi2704/vidpit-authentication | 521505a3b4e77db512227688558dab5605822626 | [
"Apache-2.0"
] | 16 | 2021-06-08T05:47:59.000Z | 2022-01-29T22:39:16.000Z | tests/unit_tests/crud/test_base.py | Georgi2704/vidpit-authentication | 521505a3b4e77db512227688558dab5605822626 | [
"Apache-2.0"
] | 6 | 2021-09-30T14:45:37.000Z | 2022-01-28T13:27:00.000Z | tests/unit_tests/crud/test_base.py | Georgi2704/vidpit-authentication | 521505a3b4e77db512227688558dab5605822626 | [
"Apache-2.0"
] | 3 | 2021-06-24T14:13:36.000Z | 2022-01-29T22:39:32.000Z | from server import crud
| 39.306122 | 114 | 0.711838 |
d1c51e9ef39f4d3feeb1e7c57ea1abdeb37eef20 | 18,815 | py | Python | src/data/SatelitteSolarPowerSystemV4.py | j1996/EPM_UCC | cf2218c7681966963a179aea043328a2343f92fb | [
"MIT"
] | null | null | null | src/data/SatelitteSolarPowerSystemV4.py | j1996/EPM_UCC | cf2218c7681966963a179aea043328a2343f92fb | [
"MIT"
] | null | null | null | src/data/SatelitteSolarPowerSystemV4.py | j1996/EPM_UCC | cf2218c7681966963a179aea043328a2343f92fb | [
"MIT"
] | null | null | null | import numpy as np
import trimesh
try:
from Satellite_Panel_Solar import Panel_Solar
from SatelitteActitud import SatelitteActitud
except:
from src.data.Satellite_Panel_Solar import Panel_Solar
from src.data.SatelitteActitud import SatelitteActitud
# noinspection SpellCheckingInspection
"""Satelitte Solar Power System
Es una clase donde se incluye todo el sistema de potencia del satelitte, permite incluir un modelo en CAD
y realizar su analisis de potencia dependiento de un vector utilizado como la direccion del sol hacia el satelite
Example:
Para llamar a esta clase solo hace falta, una linea como la de abajo:
$ Sat = SatelitteSolarPowerSystem(direccion='models/12U.stl')
Esta clase tiene varios atributos incluidos como la caracteristica de cada panel solar
para ello solo se necesita llamar a la clase con:
$ from Satellite_Panel_Solar import Panel_Solar
$ Sat.caracteristicas_panel_solar=[Panel_solar()]
Para ver como se configura cada Panel_solar hay que remitirse a su documentacion
Finalmente notar que el atributo mesh incluye todos aquellos del paquete trimesh
"""
if __name__ == '__main__':
    # Demo: load a 12U CubeSat mesh, attach a spin attitude, rotate the
    # model, and evaluate the solar-panel power for one sun direction.
    filename = '12Unuv.stl'
    actitud = SatelitteActitud(eje_de_spin=[0, 1, 0], control=True)
    d = SatelitteSolarPowerSystem(
        filename, actitud, Despegables_orientables=True)
    # Rotate the satellite pi/2 about the y-axis, pivoting at the origin.
    d.apply_transform(trimesh.transformations.rotation_matrix(
        np.pi/2, [0, 1, 0], [0, 0, 0]))
    # Direction from the satellite toward the sun (per the module
    # docstring above); presumably a unit vector — confirm.
    Sun_vector = np.array([-0.10486044, 0.91244007, 0.39554696])
    print(d.mesh.facets_normal)
    W, area_potencia, ang, angulo_giro = d.power_panel_con_actitud(
        Sun_vector, 1)
| 39.116424 | 160 | 0.557906 |
d1c5a27d9a9982b68feaf9dde43f823499bf62ab | 744 | py | Python | odoo-13.0/addons/l10n_in/models/res_partner.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/l10n_in/models/res_partner.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | odoo-13.0/addons/l10n_in/models/res_partner.py | VaibhavBhujade/Blockchain-ERP-interoperability | b5190a037fb6615386f7cbad024d51b0abd4ba03 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
| 41.333333 | 131 | 0.706989 |
d1c5cfefd7363489dfaa63b0a0dd5fcfd287ee0f | 5,037 | py | Python | Corrfunc/bases.py | dfm/suave | 51c192f450821d9ebb0f3e7eef7461dfb1b2af5f | [
"MIT"
] | 7 | 2021-03-03T15:44:35.000Z | 2021-03-21T09:01:12.000Z | Corrfunc/bases.py | dfm/suave | 51c192f450821d9ebb0f3e7eef7461dfb1b2af5f | [
"MIT"
] | 3 | 2020-07-17T01:06:48.000Z | 2021-01-20T02:59:26.000Z | Corrfunc/bases.py | dfm/suave | 51c192f450821d9ebb0f3e7eef7461dfb1b2af5f | [
"MIT"
] | 2 | 2021-03-20T00:47:51.000Z | 2021-03-21T09:01:03.000Z | import numpy as np
from scipy.interpolate import BSpline
from colossus.cosmology import cosmology
"""
Helper routines for basis functions for the continuous-function estimator.
"""
################
# Spline basis #
################
def spline_bases(rmin, rmax, projfn, ncomponents, ncont=2000, order=3):
    '''
    Build a set of spline basis functions of the given order and write
    them to disk.

    Parameters
    ----------
    rmin : double
        Minimum r-value for basis functions
    rmax : double
        Maximum r-value for basis functions
    projfn : string
        Path of the projection file to write
    ncomponents : int
        Number of components (basis functions); must be >= 2*order
    ncont : int, default=2000
        Number of continuous r-values at which to evaluate the bases
    order : int, default=3
        Order of spline to use; default is cubic spline

    Returns
    -------
    bases: array-like, double
        2-d array of basis function values; first column is r-values
    '''
    if ncomponents < order * 2:
        raise ValueError("ncomponents must be at least twice the order")

    knot_vectors = _get_knot_vectors(rmin, rmax, ncomponents, order)
    r_values = np.linspace(rmin, rmax, ncont)

    bases = np.empty((ncont, ncomponents + 1))
    bases[:, 0] = r_values
    for idx in range(ncomponents):
        knots = knot_vectors[idx]
        spline = BSpline.basis_element(knots)
        lo, hi = knots[0], knots[-1]
        # Each basis function has compact support [lo, hi]; it is zero
        # outside its own knot span.
        bases[:, idx + 1] = [spline(r) if lo <= r <= hi else 0
                             for r in r_values]

    np.savetxt(projfn, bases)
    return bases
#############
# BAO basis #
#############
def bao_bases(rmin, rmax, projfn, cosmo_base=None, ncont=2000,
              redshift=0.0, alpha_guess=1.0, dalpha=0.001, bias=1.0,
              k0=0.1, k1=10.0, k2=0.1, k3=0.001):
    '''
    Compute the BAO basis functions based on a cosmological model and
    linearized around the scale dilation parameter alpha, then write
    them to disk.

    Parameters
    ----------
    rmin : double
        Minimum r-value for basis functions
    rmax : double
        Maximum r-value for basis functions
    projfn : string
        Path of the projection file to write
    cosmo_base : colossus cosmology object, default=None
        Cosmology for the BAO model; when None, colossus' 'planck15'
        cosmology is used (the docstring previously claimed an nbodykit
        default, which did not match the code).
    ncont : int, default=2000
        Number of continuous r-values at which to write the basis function file
    redshift : double, default=0.0
        Redshift at which to compute the power spectrum.
        NOTE(review): currently not forwarded to the correlation
        function — confirm intended usage.
    alpha_guess : double, default=1.0
        The alpha (scale dilation parameter) at which to compute the model
        (alpha=1.0 is no scale shift)
    dalpha : double, default=0.001
        The change in alpha used to calculate the numerical partial derivative
    bias : double, default=1.0
        The bias parameter by which to scale the model amplitude.
        NOTE(review): currently unused in this function — confirm.
    k0 : double, default=0.1
        The initial magnitude of the derivative term
    k1 : double, default=10.0
        The initial magnitude of the s^2 nuisance parameter term
    k2 : double, default=0.1
        The initial magnitude of the s nuisance parameter term
    k3 : double, default=0.001
        The initial magnitude of the constant nuisance parameter term

    Returns
    -------
    bases: array-like, double
        2-d array of basis function values; first column is r-values
    '''
    if cosmo_base is None:
        print("cosmo_base not provided, defaulting to Planck 2015 cosmology ('planck15')")
        cosmo_base = cosmology.setCosmology('planck15')

    cf = cosmo_base.correlationFunction

    rcont = np.linspace(rmin, rmax, ncont)
    # BUG FIX: the previous code passed the undefined name `cf_model`
    # (NameError at runtime — it is defined nowhere in this module);
    # pass the cosmology's correlation function instead.
    bs = _get_bao_components(rcont, cf, dalpha, alpha_guess,
                             k0=k0, k1=k1, k2=k2, k3=k3)

    nbases = len(bs)
    bases = np.empty((ncont, nbases + 1))
    bases[:, 0] = rcont
    bases[:, 1:nbases + 1] = np.array(bs).T

    np.savetxt(projfn, bases)
    return bases
| 29.115607 | 120 | 0.638674 |