Schema (each record below lists its values in this column order; "content" is the file's source text):

hexsha: stringlengths, 40 to 40
size: int64, 5 to 2.06M
ext: stringclasses, 11 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3 to 251
max_stars_repo_name: stringlengths, 4 to 130
max_stars_repo_head_hexsha: stringlengths, 40 to 78
max_stars_repo_licenses: listlengths, 1 to 10
max_stars_count: int64, 1 to 191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24 to 24
max_stars_repo_stars_event_max_datetime: stringlengths, 24 to 24
max_issues_repo_path: stringlengths, 3 to 251
max_issues_repo_name: stringlengths, 4 to 130
max_issues_repo_head_hexsha: stringlengths, 40 to 78
max_issues_repo_licenses: listlengths, 1 to 10
max_issues_count: int64, 1 to 116k
max_issues_repo_issues_event_min_datetime: stringlengths, 24 to 24
max_issues_repo_issues_event_max_datetime: stringlengths, 24 to 24
max_forks_repo_path: stringlengths, 3 to 251
max_forks_repo_name: stringlengths, 4 to 130
max_forks_repo_head_hexsha: stringlengths, 40 to 78
max_forks_repo_licenses: listlengths, 1 to 10
max_forks_count: int64, 1 to 105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24 to 24
max_forks_repo_forks_event_max_datetime: stringlengths, 24 to 24
content: stringlengths, 1 to 1.05M
avg_line_length: float64, 1 to 1.02M
max_line_length: int64, 3 to 1.04M
alphanum_fraction: float64, 0 to 1
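As a minimal, hedged sketch only: the storage format and file name below are assumptions (this dump does not say how the rows are serialized), but any table with the columns above could be filtered like this in pandas:

import pandas as pd

# Hypothetical file name; the dump does not state how the rows are stored.
df = pd.read_parquet("code_rows.parquet")

# Select small, starred Python files using columns from the schema above.
subset = df[
    (df["lang"] == "Python")
    & (df["size"] < 10_000)
    & (df["max_stars_count"].fillna(0) >= 1)
]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "alphanum_fraction"]].head())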
6e4cf85303623618f7fb5038daec890f74903ee3
2,641
py
Python
ILSpy.ConvertedToPython/TreeNodes/Analyzer/AnalyzedTypeExtensionMethodsTreeNode.py
exyi/ILSpy
17ddfa01ff4915c4ca8461c56fb7d04d25fc591e
[ "MIT" ]
1
2021-04-26T19:46:09.000Z
2021-04-26T19:46:09.000Z
ILSpy.ConvertedToPython/TreeNodes/Analyzer/AnalyzedTypeExtensionMethodsTreeNode.py
exyi/ILSpy
17ddfa01ff4915c4ca8461c56fb7d04d25fc591e
[ "MIT" ]
null
null
null
ILSpy.ConvertedToPython/TreeNodes/Analyzer/AnalyzedTypeExtensionMethodsTreeNode.py
exyi/ILSpy
17ddfa01ff4915c4ca8461c56fb7d04d25fc591e
[ "MIT" ]
null
null
null
# Copyright (c) 2011 AlphaSierraPapa for the SharpDevelop Team
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

from System import *
from System.Collections.Generic import *
from System.Linq import *
from System.Threading import *
from Mono.Cecil import *
41.920635
99
0.775842
6e4d8cf0e65920064f1566f74432415d41b6c22a
3,187
py
Python
src/detector.py
omelchert/LEPM_1DFD
aa5ba78e557cdcd0a10a16065c0f17119f51ab78
[ "BSD-3-Clause" ]
null
null
null
src/detector.py
omelchert/LEPM_1DFD
aa5ba78e557cdcd0a10a16065c0f17119f51ab78
[ "BSD-3-Clause" ]
null
null
null
src/detector.py
omelchert/LEPM_1DFD
aa5ba78e557cdcd0a10a16065c0f17119f51ab78
[ "BSD-3-Clause" ]
null
null
null
import sys
import numpy as np

# EOF: detector.py
32.85567
81
0.569187
6e4dee90bdd936152cb862e03942c4be61d9a3e5
249
py
Python
2.datatype/1.number_typecasting.py
Tazri/Python
f7ca625800229c8a7e20b64810d6e162ccb6b09f
[ "DOC" ]
null
null
null
2.datatype/1.number_typecasting.py
Tazri/Python
f7ca625800229c8a7e20b64810d6e162ccb6b09f
[ "DOC" ]
null
null
null
2.datatype/1.number_typecasting.py
Tazri/Python
f7ca625800229c8a7e20b64810d6e162ccb6b09f
[ "DOC" ]
null
null
null
number_int = int("32")
number_float = float(32)
number_complex = complex(3222342332432435435345324435324523423)

print(type(number_int), ": ", number_int)
print(type(number_float), ": ", number_float)
print(type(number_complex), ": ", number_complex)
35.571429
64
0.767068
6e50868796b7b8940a4c3451e490a815d749a818
338
py
Python
tests/test_kitty.py
raphaelavergud/CatApp
cf0a68e2c78307684e167a748e72e068c25a6089
[ "MIT" ]
null
null
null
tests/test_kitty.py
raphaelavergud/CatApp
cf0a68e2c78307684e167a748e72e068c25a6089
[ "MIT" ]
null
null
null
tests/test_kitty.py
raphaelavergud/CatApp
cf0a68e2c78307684e167a748e72e068c25a6089
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import unittest

import kitty
from kitty import uncomfy_checker
import mock

if __name__ == '__main__':
    unittest.main()
22.533333
69
0.736686
6e52ebb7ea5d298c9a93f221c29b566a183e81fe
251
py
Python
latest/modules/physics/control/control_plots-5.py
sympy/sympy_doc
ec4f28eed09d5acb9e55874e82cc86c74e762c0d
[ "BSD-3-Clause" ]
20
2015-01-28T01:08:13.000Z
2021-12-19T04:03:28.000Z
latest/modules/physics/control/control_plots-5.py
sympy/sympy_doc
ec4f28eed09d5acb9e55874e82cc86c74e762c0d
[ "BSD-3-Clause" ]
31
2015-01-27T07:16:19.000Z
2021-11-15T10:58:15.000Z
latest/modules/physics/control/control_plots-5.py
sympy/sympy_doc
ec4f28eed09d5acb9e55874e82cc86c74e762c0d
[ "BSD-3-Clause" ]
38
2015-01-08T18:48:27.000Z
2021-12-02T13:19:43.000Z
from sympy.abc import s
from sympy.physics.control.lti import TransferFunction
from sympy.physics.control.control_plots import ramp_response_plot

tf1 = TransferFunction(s, (s+4)*(s+8), s)
ramp_response_plot(tf1, upper_limit=2)   # doctest: +SKIP
41.833333
67
0.780876
6e52fb33dd28eee7b106bc48ba5c34f08261ca0b
2,309
py
Python
src/pynorare/__main__.py
concepticon/pynorare
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
[ "MIT" ]
null
null
null
src/pynorare/__main__.py
concepticon/pynorare
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
[ "MIT" ]
5
2020-07-20T11:05:07.000Z
2022-03-11T15:51:52.000Z
src/pynorare/__main__.py
concepticon/pynorare
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
[ "MIT" ]
null
null
null
""" Main command line interface to the pynorare package. """ import sys import pathlib import contextlib from cldfcatalog import Config, Catalog from clldutils.clilib import register_subcommands, get_parser_and_subparsers, ParserError, PathType from clldutils.loglib import Logging from pyconcepticon import Concepticon from pynorare import NoRaRe import pynorare.commands if __name__ == '__main__': # pragma: no cover sys.exit(main() or 0)
32.985714
99
0.644435
6e535cb6e52945115eb6d7ac8b6103b52efc86b8
92
py
Python
app_kasir/apps.py
rizkyarwn/projectkasir
6524a052bcb52534524db1c5fba05d31a0f0d801
[ "MIT" ]
2
2018-06-28T10:52:47.000Z
2018-06-28T10:52:48.000Z
app_kasir/apps.py
rizkyarwn/projectkasir
6524a052bcb52534524db1c5fba05d31a0f0d801
[ "MIT" ]
null
null
null
app_kasir/apps.py
rizkyarwn/projectkasir
6524a052bcb52534524db1c5fba05d31a0f0d801
[ "MIT" ]
null
null
null
from django.apps import AppConfig
15.333333
33
0.76087
6e536b50d4b1d1ed9120b0881d839d4c283289b4
2,472
py
Python
evaluator.py
kavinyao/SKBPR
305aeb846ee89234d8eae3b73452c2fdad2496b4
[ "MIT" ]
null
null
null
evaluator.py
kavinyao/SKBPR
305aeb846ee89234d8eae3b73452c2fdad2496b4
[ "MIT" ]
null
null
null
evaluator.py
kavinyao/SKBPR
305aeb846ee89234d8eae3b73452c2fdad2496b4
[ "MIT" ]
1
2018-09-29T08:31:40.000Z
2018-09-29T08:31:40.000Z
""" Evaluate recommendations. """ import config from collections import defaultdict
35.314286
94
0.640372
6e53df58b8e50b1065505ed5b573aa01243270d1
12,263
py
Python
yolov3_deepsort.py
h-enes-simsek/deep_sort_pytorch
0a9ede55e53355c19455197cc8daa60336c652bb
[ "MIT" ]
1
2021-02-28T15:22:43.000Z
2021-02-28T15:22:43.000Z
yolov3_deepsort.py
h-enes-simsek/deep_sort_pytorch
0a9ede55e53355c19455197cc8daa60336c652bb
[ "MIT" ]
null
null
null
yolov3_deepsort.py
h-enes-simsek/deep_sort_pytorch
0a9ede55e53355c19455197cc8daa60336c652bb
[ "MIT" ]
null
null
null
import os
import cv2
import time
import argparse
import torch
import warnings
import numpy as np

from detector import build_detector
from deep_sort import build_tracker
from utils.draw import draw_boxes
from utils.parser import get_config
from utils.log import get_logger
from utils.io import write_results
from numpy import loadtxt  # to read gt.txt

if __name__ == "__main__":
    args = parse_args()
    cfg = get_config()
    cfg.merge_from_file(args.config_detection)
    cfg.merge_from_file(args.config_deepsort)

    with VideoTracker(cfg, args, video_path=args.VIDEO_PATH) as vdo_trk:
        vdo_trk.run()
42.432526
149
0.572698
280906641aae735ca1d3dbc649fdb86d59c81472
1,172
py
Python
aerosandbox/numpy/array.py
askprash/AeroSandbox
9e82966a25ced9ce96ca29bae45a4420278f0f1d
[ "MIT" ]
null
null
null
aerosandbox/numpy/array.py
askprash/AeroSandbox
9e82966a25ced9ce96ca29bae45a4420278f0f1d
[ "MIT" ]
null
null
null
aerosandbox/numpy/array.py
askprash/AeroSandbox
9e82966a25ced9ce96ca29bae45a4420278f0f1d
[ "MIT" ]
1
2021-09-11T03:28:45.000Z
2021-09-11T03:28:45.000Z
import numpy as onp
import casadi as cas


def length(array) -> int:
    """
    Returns the length of an 1D-array-like object.

    Args:
        array:

    Returns:

    """
    try:
        return len(array)
    except TypeError:  # array has no function len() -> either float, int, or CasADi type
        try:
            if len(array.shape) >= 1:
                return array.shape[0]
            else:
                raise AttributeError
        except AttributeError:  # array has no attribute shape -> either float or int
            return 1
25.478261
89
0.529863
280a3ff7069c05f2fa4cfad162456023976a914d
181
py
Python
states/__init__.py
EemeliSyynimaa/Pore
1eca9aa7163f1d31ae84c862790693eb3c904433
[ "MIT" ]
null
null
null
states/__init__.py
EemeliSyynimaa/Pore
1eca9aa7163f1d31ae84c862790693eb3c904433
[ "MIT" ]
null
null
null
states/__init__.py
EemeliSyynimaa/Pore
1eca9aa7163f1d31ae84c862790693eb3c904433
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

__author__ = 'eeneku'

from main_menu import MainMenu
from world_map import WorldMap
from local_map import LocalMap

# __all__ entries must be strings for star-imports to work.
__all__ = ['MainMenu', 'WorldMap', 'LocalMap']
22.625
40
0.762431
280b4458ea5ac2ac7597da9b972198f9e0db4a04
2,502
py
Python
instagram/migrations/0001_initial.py
Maxwel5/photo-app-instagram
8635346a5115dcc7e282791bd646f0a7f9dd2917
[ "MIT" ]
null
null
null
instagram/migrations/0001_initial.py
Maxwel5/photo-app-instagram
8635346a5115dcc7e282791bd646f0a7f9dd2917
[ "MIT" ]
8
2020-06-06T00:09:24.000Z
2022-02-10T10:48:10.000Z
instagram/migrations/0001_initial.py
Maxwel5/photo-app-instagram
8635346a5115dcc7e282791bd646f0a7f9dd2917
[ "MIT" ]
null
null
null
# Generated by Django 2.2.5 on 2019-10-22 07:03

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
41.7
154
0.584333
280b7ce2e2cb3f65d56ba5e4705455b1cbb3bb0e
3,283
py
Python
capspayment/api_payin.py
agorapay/python-sdk
c5b7fd6894f95e6862446248b26c16253c8fd4f4
[ "MIT" ]
null
null
null
capspayment/api_payin.py
agorapay/python-sdk
c5b7fd6894f95e6862446248b26c16253c8fd4f4
[ "MIT" ]
null
null
null
capspayment/api_payin.py
agorapay/python-sdk
c5b7fd6894f95e6862446248b26c16253c8fd4f4
[ "MIT" ]
null
null
null
""" Payin API """ from dataclasses import dataclass from typing import Union from api_payin_model import ( PayinAdjustPaymentRequest, PayinCancelRequest, PayinCancelResponse, PayinCaptureRequest, PayinCaptureResponse, PayinMandateRequest, PayinMandateResponse, PayinOrderDetailsRequest, PayinOrderDetailsResponse, PayinPaymentDetailsRequest, PayinPaymentDetailsResponse, PayinPaymentIframeRequest, PayinPaymentIframeResponse, PayinPaymentMethodsRequest, PayinPaymentMethodsResponse, PayinPaymentRequest, PayinPaymentResponse, PayinRefundRequest, PayinRefundResponse, PayinTicketRequest, PayinTicketResponse, ) from base import BaseRequest from model import Response
32.186275
82
0.687786
280b8063834de2658f477b63373426eabdf7a4f6
5,516
py
Python
tests/test_api.py
HealthByRo/fdadb
e020a902ca20cebd5999bc2dbc530375ab0922fb
[ "MIT" ]
1
2020-06-11T04:44:22.000Z
2020-06-11T04:44:22.000Z
tests/test_api.py
HealthByRo/fdadb
e020a902ca20cebd5999bc2dbc530375ab0922fb
[ "MIT" ]
8
2018-11-26T09:22:14.000Z
2019-10-23T13:17:44.000Z
tests/test_api.py
HealthByRo/fdadb
e020a902ca20cebd5999bc2dbc530375ab0922fb
[ "MIT" ]
null
null
null
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase

from fdadb.models import MedicationName, MedicationNDC, MedicationStrength
45.966667
120
0.583575
280c4e3ff6e2c8be5af4beb5882bf9b9cd5ee1c7
3,626
py
Python
script/gen_canonical_combining_class.py
CyberZHG/UChar
e59ee5e3ad166288380407df6d5e6c0fe20681cf
[ "MIT" ]
1
2020-07-15T16:16:20.000Z
2020-07-15T16:16:20.000Z
script/gen_canonical_combining_class.py
CyberZHG/UChar
e59ee5e3ad166288380407df6d5e6c0fe20681cf
[ "MIT" ]
null
null
null
script/gen_canonical_combining_class.py
CyberZHG/UChar
e59ee5e3ad166288380407df6d5e6c0fe20681cf
[ "MIT" ]
1
2020-06-01T01:15:29.000Z
2020-06-01T01:15:29.000Z
#!/usr/bin/env python
"""
Copyright 2020 Zhao HG

Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""

with open('UnicodeData.txt', 'r') as reader:
    last, indices, canonicals, classes = '', [], [], {}
    for line in reader:
        parts = line.strip().split(';')
        if parts[3] != last:
            last = parts[3]
            indices.append(parts[0])
            canonicals.append(parts[3])
            classes[parts[3]] = parts[0]

with open('include/unicode_data.h', 'a') as writer:
    writer.write('/** The total number of indices used to store the canonical combining class. */\n')
    writer.write('const int32_t CANONICAL_COMBINING_NUM = {};\n'.format(len(indices)))
    writer.write('/** The indices of the first character that have a different type. */\n')
    writer.write('extern const int32_t CANONICAL_COMBINING_INDEX[];\n')
    writer.write('/** The canonical combining class data. */\n')
    writer.write('extern const int32_t CANONICAL_COMBINING_CLASS[];\n\n')

with open('src/canonical_combining_class.cpp', 'w') as writer:
    with open('copyright.txt', 'r') as reader:
        writer.write(reader.read())
    writer.write('#include "unicode_data.h"\n\n')
    writer.write('namespace unicode {\n\n')
    writer.write('\nconst int32_t CANONICAL_COMBINING_INDEX[] = {')
    for i, index in enumerate(indices):
        if i == 0:
            writer.write('\n ')
        elif i % 8 == 0:
            writer.write(',\n ')
        else:
            writer.write(', ')
        writer.write('0x' + index)
    writer.write('\n};\n')
    writer.write('\nconst int32_t CANONICAL_COMBINING_CLASS[] = {')
    for i, canonical in enumerate(canonicals):
        if i == 0:
            writer.write('\n ')
        elif i % 8 == 0:
            writer.write(',\n ')
        else:
            writer.write(', ')
        writer.write(canonical)
    writer.write('\n};\n\n')
    writer.write('} // namespace unicode\n')

with open('tests/test_canonical_combining_class_gen.cpp', 'w') as writer:
    with open('copyright.txt', 'r') as reader:
        writer.write(reader.read())
    writer.write('#include "test.h"\n')
    writer.write('#include "unicode_char.h"\n\n')
    writer.write('namespace test {\n\n')
    writer.write('class CanonicalCombiningClassGenTest : public UnitTest {};\n\n')
    writer.write('__TEST_U(CanonicalCombiningClassGenTest, test_classes) {\n')
    for canonical, code in classes.items():
        writer.write(' __ASSERT_EQ({}, unicode::getCanonicalCombiningClass({}));\n'.format(
            canonical, '0x' + code
        ))
    writer.write('}\n\n')
    writer.write('} // namespace test\n')
40.741573
99
0.660232
280cef3837d316af797287a2c5c707f3a00a10c1
3,676
py
Python
server.py
Timothylock/twillio-buzzer-connector
9ac7e4763a5eee7d04daa054841e17332c0bac13
[ "Apache-2.0" ]
null
null
null
server.py
Timothylock/twillio-buzzer-connector
9ac7e4763a5eee7d04daa054841e17332c0bac13
[ "Apache-2.0" ]
null
null
null
server.py
Timothylock/twillio-buzzer-connector
9ac7e4763a5eee7d04daa054841e17332c0bac13
[ "Apache-2.0" ]
null
null
null
from flask import Flask, request
from twilio.twiml.voice_response import VoiceResponse, Gather
import datetime
import os
import json
import http.client

app = Flask(__name__)
allowUntil = datetime.datetime.now()

# Fetch env vars
whitelisted_numbers = os.environ['WHITELISTED_NUMBERS'].split(",")  # Numbers allowed to dial into the system
forward_number = os.environ['FORWARD_NUMBER']  # Number that will be forwarded to if not whitelisted
forward_number_from = os.environ['FORWARD_NUMBER_FROM']  # Number that will be forwarded to if not whitelisted
buzzcode = os.environ['BUZZCODE']  # Digits to dial to let them in
minutes = int(os.environ['MINUTES'])  # Number of minutes to unlock the system
slack_path = os.environ['SLACK_PATH']  # Slack path for slack message
say_message = os.environ['SAY_MESSAGE']  # The message to be said to the dialer


# Buzzer ##########################################################################
def allowed_to_buzz():
    """Fetches whether the system is allowed to buzz somebody in"""
    global allowUntil
    return allowUntil > datetime.datetime.now()


def send_message(message):
    try:
        conn = http.client.HTTPSConnection("hooks.slack.com")
        payload = "{\"text\": \"" + message + "\"}"
        headers = {
            'content-type': "application/json",
        }
        conn.request("POST", slack_path, payload, headers)
        conn.getresponse()
    except:
        print("error sending message")


if __name__ == "__main__":
    app.run(host='0.0.0.0', port=8080)
33.418182
121
0.639554
280f1650f5bc3fd7a59f3f2ae253341d13e12350
5,738
py
Python
App/GUI_Pages/LoginPage.py
TUIASI-AC-enaki/Shopping_Application
d6c6f446618937347f9c78fe3b969bc2c2ef9331
[ "Apache-2.0" ]
null
null
null
App/GUI_Pages/LoginPage.py
TUIASI-AC-enaki/Shopping_Application
d6c6f446618937347f9c78fe3b969bc2c2ef9331
[ "Apache-2.0" ]
null
null
null
App/GUI_Pages/LoginPage.py
TUIASI-AC-enaki/Shopping_Application
d6c6f446618937347f9c78fe3b969bc2c2ef9331
[ "Apache-2.0" ]
null
null
null
import tkinter as tk
from tkinter import font as tkfont, ttk
import logging as log
import sys

from cx_Oracle import DatabaseError

from GUI_Pages.BasicPage import TitlePage
from Utilities.Cipher import Cipher, get_hash

FORMAT = '[%(asctime)s] [%(levelname)s] : %(message)s'
log.basicConfig(stream=sys.stdout, level=log.DEBUG, format=FORMAT)
50.778761
140
0.682468
2810be0978f433319136f58db93ce028bbbb9a9c
8,151
py
Python
cosmos/ingestion/ingest/process/hierarchy_extractor/bert_hierarchy_extractor/train/bert_extractor_trainer.py
ilmcconnell/Cosmos
84245034727c30e20ffddee9e02c7e96f3aa115e
[ "Apache-2.0" ]
30
2019-03-14T08:24:34.000Z
2022-03-09T06:05:44.000Z
cosmos/ingestion/ingest/process/hierarchy_extractor/bert_hierarchy_extractor/train/bert_extractor_trainer.py
ilmcconnell/Cosmos
84245034727c30e20ffddee9e02c7e96f3aa115e
[ "Apache-2.0" ]
78
2019-02-07T22:14:48.000Z
2022-03-09T05:59:18.000Z
cosmos/ingestion/ingest/process/hierarchy_extractor/bert_hierarchy_extractor/train/bert_extractor_trainer.py
ilmcconnell/Cosmos
84245034727c30e20ffddee9e02c7e96f3aa115e
[ "Apache-2.0" ]
11
2019-03-02T01:20:06.000Z
2022-03-25T07:25:46.000Z
from bert_hierarchy_extractor.datasets.train_dataset import TrainHierarchyExtractionDataset
from bert_hierarchy_extractor.datasets.utils import cudafy
from bert_hierarchy_extractor.logging.utils import log_metrics
import numpy as np
from torch.utils.data import DataLoader
from transformers import AdamW, get_linear_schedule_with_warmup
import torch
import time
from tqdm import tqdm
from comet_ml import Experiment
39.567961
104
0.585818
2811f691c9df0cfa06acd32c5b53be25799129d1
786
py
Python
lvl2.py
choxner/python-challenge
3c726936027087bc38f830a758549dd68467af52
[ "Apache-2.0" ]
null
null
null
lvl2.py
choxner/python-challenge
3c726936027087bc38f830a758549dd68467af52
[ "Apache-2.0" ]
null
null
null
lvl2.py
choxner/python-challenge
3c726936027087bc38f830a758549dd68467af52
[ "Apache-2.0" ]
null
null
null
# Level 2 of pythonchallenge.com!
# Challenge: within the source code of this level, there is a
# set of jumbled characters. Within these characters, find the
# letters and join them together to find the correct url.

from solution_framework import Solution
import requests  # to view source code
import re  # to use regular expressions

url_result = ""

res = requests.get("http://www.pythonchallenge.com/pc/def/ocr.html")
# import the HTML code from the site that hosts this challenge.

text_array = res.text.split("<!--")
text_to_search = text_array[2]  # select only the text that needs to be searched

regex_results = re.findall(r'\w', text_to_search)

for item in regex_results:
    if item == '_':
        pass
    else:
        url_result += item

Solution(url_result)
26.2
69
0.722646
2812d4c9e6e9c407e500296b0bda22c042be6c3e
1,444
py
Python
saleor/social/migrations/0001_initial.py
autobotasia/saleor
e03e9f6ab1bddac308a6609d6b576a87e90ae655
[ "CC-BY-4.0" ]
1
2022-02-19T13:27:40.000Z
2022-02-19T13:27:40.000Z
saleor/social/migrations/0001_initial.py
autobotasia/saleor
e03e9f6ab1bddac308a6609d6b576a87e90ae655
[ "CC-BY-4.0" ]
null
null
null
saleor/social/migrations/0001_initial.py
autobotasia/saleor
e03e9f6ab1bddac308a6609d6b576a87e90ae655
[ "CC-BY-4.0" ]
2
2021-12-03T16:59:37.000Z
2022-02-19T13:05:42.000Z
# Generated by Django 3.1.7 on 2021-05-10 07:38

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import saleor.core.utils.json_serializer
41.257143
156
0.640582
28137bb29b2acdc147558b677e97f5e615bea160
2,900
py
Python
adduser.py
Vignesh424/Face-Recognition-Attendance-Python
5d9c33b64bd41918edc55290a320f73bc4afa4e5
[ "Apache-2.0" ]
null
null
null
adduser.py
Vignesh424/Face-Recognition-Attendance-Python
5d9c33b64bd41918edc55290a320f73bc4afa4e5
[ "Apache-2.0" ]
null
null
null
adduser.py
Vignesh424/Face-Recognition-Attendance-Python
5d9c33b64bd41918edc55290a320f73bc4afa4e5
[ "Apache-2.0" ]
null
null
null
import cv2
import os
import sqlite3
import dlib
import re, time
from playsound import playsound
import pyttsx3

cam = cv2.VideoCapture(0)
cam.set(3, 640)  # set video width
cam.set(4, 480)  # set video height
face_detector = cv2.CascadeClassifier('C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Face recognition/HaarCascade/haarcascade_frontalface_default.xml')
detector = dlib.get_frontal_face_detector()

# init function to get an engine instance for the speech synthesis
engine1 = pyttsx3.init()
engine2 = pyttsx3.init()

# For each person, enter one numeric face id
detector = dlib.get_frontal_face_detector()
regex = '^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$'

Id = int(input("Enter ID:"))
fullname = input("Enter FullName : ")
email = input("Enter Email:")
match = re.match(regex, email)
if match == None:
    print('Invalid Email')
    raise ValueError('Invalid Email')
rollno = int(input("Enter Roll Number : "))

print("\n [INFO] Initializing face capture. Look the camera and wait ...")
# say method on the engine that passing input text to be spoken
playsound('sound.mp3')
engine1.say('User Added Successfully')
# run and wait method, it processes the voice commands.
engine2.runAndWait()

connects = sqlite3.connect("C:/Users/ACER/Desktop/PROJECT ALL RESOURCE/PROJECT ALL RESOURCE/Face recognition/sqlite3/Studentdb.db")  # connecting to the database
c = connects.cursor()
c.execute('CREATE TABLE IF NOT EXISTS Student (ID INT NOT NULL UNIQUE PRIMARY KEY, FULLNAME TEXT NOT NULL, EMAIL NOT NULL, ROLLNO INT UNIQUE NOT NULL , STATUS TEXT DATE TIMESTAMP)')
c.execute("INSERT INTO Student(ID, FULLNAME, EMAIL,ROLLNO) VALUES(?,?,?,?)", (Id, fullname, email, rollno))
print('Record entered successfully')
connects.commit()  # committing to the database
c.close()
connects.close()  # closing the connection

# Initialize individual sampling face count
count = 0
while(True):
    ret, img = cam.read()
    img = cv2.flip(img, 1)  # flip video image vertically
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_detector.detectMultiScale(gray, 1.3, 5)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
        count += 1
        # Save the captured image into the datasets folder
        cv2.imwrite("dataset/User." + str(Id) + '.' + str(count) + ".jpg", gray[y:y+h, x:x+w])
        cv2.imshow('image', img)
    k = cv2.waitKey(100) & 0xff  # Press 'ESC' for exiting video
    if k == 27:
        break
    elif count >= 30:  # Take 30 face sample and stop video
        playsound('sound.mp3')
        engine2.say('DataSets Captured Successfully')
        # run and wait method, it processes the voice commands.
        engine2.runAndWait()
        break

# Doing a bit of cleanup
print("\n [INFO] Exiting Program and cleanup stuff")
cam.release()
cv2.destroyAllWindows()
43.283582
182
0.686207
2814523dd67ea38c542b435dab46056033dd5d9d
4,847
py
Python
web/app/djrq/model/ampache/__init__.py
bmillham/djrq2
c84283b75a7c15da1902ebfc32b7d75159c09e20
[ "MIT" ]
1
2016-11-23T20:50:00.000Z
2016-11-23T20:50:00.000Z
web/app/djrq/model/ampache/__init__.py
bmillham/djrq2
c84283b75a7c15da1902ebfc32b7d75159c09e20
[ "MIT" ]
15
2017-01-15T04:18:40.000Z
2017-02-25T04:13:06.000Z
web/app/djrq/model/ampache/__init__.py
bmillham/djrq2
c84283b75a7c15da1902ebfc32b7d75159c09e20
[ "MIT" ]
null
null
null
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import *
from sqlalchemy import *
from sqlalchemy.sql import func, or_
from sqlalchemy.types import TIMESTAMP
from sqlalchemy.ext.hybrid import hybrid_property
from time import time
import markupsafe
from sqlalchemy.ext.associationproxy import association_proxy

#from auth import *

Base = declarative_base()
#metadata = Base.metadata
#session = StackedObjectProxy()

#database_type = "MySQL"

account_groups = Table('account_groups', Base.metadata,
                       Column('account_id', String(32), ForeignKey('accounts.id')),
                       Column('group_id', Unicode(32), ForeignKey('groups.id'))
                       )

group_permissions = Table('group_perms', Base.metadata,
                          Column('group_id', Unicode(32), ForeignKey('groups.id')),
                          Column('permission_id', Unicode(32), ForeignKey('permissions.id'))
                          )

#def ready(sessionmaker):
#    global session
#    session = sessionmaker
#    request.environ['catalogs'] = session.query(SiteOptions).limit(1).one()
29.023952
100
0.637508
2814df1e327e7a389483fc7f28c047ef76e86e37
8,753
py
Python
conet/datasets/duke_oct_flat_sp.py
steermomo/conet
21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b
[ "MIT" ]
null
null
null
conet/datasets/duke_oct_flat_sp.py
steermomo/conet
21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b
[ "MIT" ]
null
null
null
conet/datasets/duke_oct_flat_sp.py
steermomo/conet
21d60fcb4ab9a01a00aa4d9cd0bdee79ea35cc4b
[ "MIT" ]
1
2020-05-18T10:05:24.000Z
2020-05-18T10:05:24.000Z
import multiprocessing as mp
# mp.set_start_method('spawn')
import math
import os
import pickle
import random
from glob import glob
from os import path

import albumentations as alb
import cv2
import numpy as np
import skimage
import torch
import imageio
from albumentations.pytorch import ToTensorV2
from skimage.color import gray2rgb
from torch.utils.data import Dataset

from conet.config import get_cfg

# https://github.com/albumentations-team/albumentations/pull/511
# Fix grid distortion bug. #511
# GridDistortion bug.....

train_size_aug = alb.Compose([
    # alb.RandomSizedCrop(min_max_height=(300, 500)),
    alb.PadIfNeeded(min_height=100, min_width=600, border_mode=cv2.BORDER_REFLECT101),
    alb.Rotate(limit=6),
    alb.RandomScale(scale_limit=0.05,),
    alb.ElasticTransform(),
    # alb.GridDistortion(p=1, num_steps=20, distort_limit=0.5),
    # alb.GridDistortion(num_steps=10, p=1),
    # alb.OneOf([
    #     alb.OpticalDistortion(),
    # ]),
    # alb.MaskDropout(image_fill_value=0, mask_fill_value=-1, p=0.3),
    alb.HorizontalFlip(),
    # alb.VerticalFlip(),
    # alb.RandomBrightness(limit=0.01),
    alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101),
    alb.RandomCrop(224, 512),
    # alb.Normalize(),
    # alb.pytorch.ToTensor(),
    # ToTensorV2()
])

train_content_aug = alb.Compose([
    # alb.MedianBlur(3),
    # alb.GaussianBlur(3),
    alb.RGBShift(r_shift_limit=5, g_shift_limit=5, b_shift_limit=5),
    alb.RandomBrightnessContrast(brightness_limit=0.05),
    alb.Normalize(),
    # ToTensorV2()
])

val_aug = alb.Compose([
    # alb.PadIfNeeded(512, border_mode=cv2.BORDER_REFLECT101),
    # alb.Normalize(),
    # alb.Resize(512, 512),
    alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101),
    alb.CenterCrop(224, 512),
    # ToTensorV2(),
])

val_c_aug = alb.Compose([
    alb.Normalize(),
    # ToTensorV2()
])

# train_aug_f = alb.Compose([
#     # alb.RandomSizedCrop(min_max_height=(300, 500)),
#     alb.RandomScale(),
#     # alb.HorizontalFlip(),
#     alb.VerticalFlip(),
#     alb.RandomBrightness(limit=0.01),
#     alb.Rotate(limit=30),
#     # 224 548
#     alb.PadIfNeeded(min_height=224, min_width=548, border_mode=cv2.BORDER_REFLECT101),
#     alb.RandomCrop(224, 512),
#     alb.Normalize(),
#     # alb.pytorch.ToTensor(),
#     ToTensorV2()
# ])

# val_aug_f = alb.Compose([
#     alb.PadIfNeeded(min_height=224, min_width=512, border_mode=cv2.BORDER_REFLECT101),
#     alb.Normalize(),
#     # alb.Resize(512, 512),
#     alb.CenterCrop(224, 512),
#     ToTensorV2(),
# ])

if __name__ == "__main__":
    from skimage import segmentation, color, filters, exposure
    import skimage
    import os
    from os import path
    import imageio
    from matplotlib import pyplot as plt
    from torch.utils.data import DataLoader
    import random

    np.random.seed(42)
    random.seed(42)

    save_dir = '/data1/hangli/oct/debug'
    os.makedirs(save_dir, exist_ok=True)
    cmap = plt.cm.get_cmap('jet')
    n_seg = 1200
    training_dataset = DukeOctFlatSPDataset(split='train', n_seg=n_seg)
    # val_dataset = DukeOctFlatSPDataset(split='val', n_seg=n_seg)
    data_loader = DataLoader(training_dataset, batch_size=16, shuffle=False, num_workers=8, pin_memory=False)
    # val_loader = DataLoader(val_dataset, batch_size=4, shuffle=False, num_workers=2, pin_memory=True)
    for t in range(40):
        for bidx, batch in enumerate(data_loader):
            data = batch['image']
            target = batch['mask']
            for b_i in range(len(data)):
                img = data[b_i]
                img = img.permute(1, 2, 0).cpu().numpy()
                img = (img - img.min()) / (img.max() - img.min())
                img = skimage.img_as_ubyte(img)
                mask = target[b_i]
                # mask_color = cmap(mask)
                mask_color = color.label2rgb(mask.cpu().numpy())
                mask_color = skimage.img_as_ubyte(mask_color)
                print(img.shape, mask_color.shape)
                save_img = np.hstack((img, mask_color))
                p = path.join(save_dir, f'{t}_{bidx}_{b_i}.jpg')
                print(f'=> {p}')
                imageio.imwrite(p, save_img)
30.498258
109
0.595224
28162dcf4efa8e10ec1ddcb4eb91bfa4cb4b0d83
107
py
Python
Chapter 01/fraction-type.py
arifmudi/Applying-Math-with-Python
abeb6b0a9bcfa8b21092b9793d4e691cf5a146bf
[ "MIT" ]
34
2020-07-23T14:42:42.000Z
2022-03-18T07:00:17.000Z
Chapter 01/fraction-type.py
arifmudi/Applying-Math-with-Python
abeb6b0a9bcfa8b21092b9793d4e691cf5a146bf
[ "MIT" ]
null
null
null
Chapter 01/fraction-type.py
arifmudi/Applying-Math-with-Python
abeb6b0a9bcfa8b21092b9793d4e691cf5a146bf
[ "MIT" ]
31
2020-07-22T11:09:33.000Z
2022-03-15T16:59:53.000Z
from fractions import Fraction

num1 = Fraction(1, 3)
num2 = Fraction(1, 7)

num1 * num2  # Fraction(1, 21)
17.833333
30
0.691589
281720b5fdc07905c3eb03b6c213540b162d5693
1,109
py
Python
tests/config/test_project.py
gaborbernat/toxn
1ecb1121b3e3dc30b892b0254cb5566048b5d2e7
[ "MIT" ]
4
2018-04-15T15:12:32.000Z
2019-06-03T12:41:06.000Z
tests/config/test_project.py
gaborbernat/tox3
1ecb1121b3e3dc30b892b0254cb5566048b5d2e7
[ "MIT" ]
3
2018-03-15T11:06:30.000Z
2018-04-15T15:17:29.000Z
tests/config/test_project.py
gaborbernat/tox3
1ecb1121b3e3dc30b892b0254cb5566048b5d2e7
[ "MIT" ]
1
2019-09-25T19:53:09.000Z
2019-09-25T19:53:09.000Z
from io import StringIO
from pathlib import Path

import pytest

from toxn.config import from_toml
25.790698
59
0.6844
2819b274258b4f59c03325199e582718bece2d5e
536
py
Python
edinet_baseline_hourly_module/edinet_models/pyEMIS/ConsumptionModels/__init__.py
BeeGroup-cimne/module_edinet
0cda52e9d6222a681f85567e9bf0f7e5885ebf5e
[ "MIT" ]
null
null
null
edinet_baseline_hourly_module/edinet_models/pyEMIS/ConsumptionModels/__init__.py
BeeGroup-cimne/module_edinet
0cda52e9d6222a681f85567e9bf0f7e5885ebf5e
[ "MIT" ]
13
2021-03-25T22:24:38.000Z
2022-03-12T00:56:45.000Z
edinet_baseline_hourly_module/edinet_models/pyEMIS/ConsumptionModels/__init__.py
BeeGroup-cimne/module_edinet
0cda52e9d6222a681f85567e9bf0f7e5885ebf5e
[ "MIT" ]
1
2019-03-13T09:49:56.000Z
2019-03-13T09:49:56.000Z
from constantMonthlyModel import ConstantMonthlyModel
from constantModel import ConstantModel
from twoParameterModel import TwoParameterModel
from threeParameterModel import ThreeParameterModel
from anyModel import AnyModelFactory
from schoolModel import SchoolModel, SchoolModelFactory
from recurrentModel import RecurrentModel, RecurrentModelFactory
from weeklyModel import WeeklyModel, WeeklyModelFactory
from monthlyModel import MonthlyModel, MonthlyModelFactory
from nanModel import NanModel
from profile import ConsumptionProfile
44.666667
64
0.902985
281aa6d325487ceb00b0753134cf1290afd8b2fd
326
py
Python
tinder_config_ex.py
nathan-149/tinderbot
0413fbbba0219faf4415d75fd4f23518951b03a0
[ "MIT" ]
18
2020-06-30T18:31:44.000Z
2021-12-17T05:04:58.000Z
tinder_config_ex.py
havzor1231/Tinder-Bot
189524d7c80921a47b06262bd3cd42abaad7a85d
[ "MIT" ]
2
2020-07-21T07:55:48.000Z
2020-11-20T10:02:23.000Z
tinder_config_ex.py
havzor1231/Tinder-Bot
189524d7c80921a47b06262bd3cd42abaad7a85d
[ "MIT" ]
9
2020-07-12T08:00:00.000Z
2022-03-24T03:29:40.000Z
host = 'https://api.gotinder.com'

# leave tinder_token empty if you don't use phone verification
tinder_token = "0bb19e55-5f12-4a23-99df-8e258631105b"

# Your real config file should simply be named "config.py"
# Just insert your fb_username and fb_password in string format
# and the fb_auth_token.py module will do the rest!
40.75
63
0.785276
281c7adc874167e64dc0db3f96ec79ad8d491740
1,958
py
Python
simone/test.py
ross/simone
cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4
[ "MIT" ]
null
null
null
simone/test.py
ross/simone
cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4
[ "MIT" ]
1
2021-11-04T13:47:28.000Z
2021-11-04T13:47:28.000Z
simone/test.py
ross/simone
cfee8eaa04a7ddd235f735fa6c07adac28b4c6a4
[ "MIT" ]
1
2021-10-20T14:44:19.000Z
2021-10-20T14:44:19.000Z
from django.test.runner import DiscoverRunner
from io import StringIO
from logging import StreamHandler, getLogger
from unittest import TextTestRunner, TextTestResult


class SimoneRunner(DiscoverRunner):
    test_runner = SimoneTestRunner
31.079365
78
0.670582
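The simone/test.py content above assigns test_runner = SimoneTestRunner, but that class is not in the stored excerpt. Given the TextTestRunner and TextTestResult imports, one plausible shape is the sketch below, which is purely an assumption and not the repository's recorded code:

class SimoneTestRunner(TextTestRunner):
    # Hypothetical reconstruction of the class missing from the excerpt.
    resultclass = TextTestResult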
281e79df5e1dd65bfbeca11e0d9ea108af82bb30
18
py
Python
library/tutorial/__init__.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
13
2020-01-04T07:37:38.000Z
2021-08-31T05:19:58.000Z
library/tutorial/__init__.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
3
2020-06-05T22:42:53.000Z
2020-08-24T07:18:54.000Z
library/tutorial/__init__.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
9
2020-10-19T04:53:06.000Z
2021-08-31T05:20:01.000Z
__all__ = ["fibo"]
18
18
0.611111
2820ef5bc2fdcf7913515a4a45ac8b19c189a6ce
1,340
py
Python
longest path in matrix.py
buhuhaha/python
4ff72ac711f0948ae5bcb0886d68e8df77fe515b
[ "MIT" ]
null
null
null
longest path in matrix.py
buhuhaha/python
4ff72ac711f0948ae5bcb0886d68e8df77fe515b
[ "MIT" ]
null
null
null
longest path in matrix.py
buhuhaha/python
4ff72ac711f0948ae5bcb0886d68e8df77fe515b
[ "MIT" ]
null
null
null
row = [-1, -1, -1, 0, 0, 1, 1, 1]
col = [-1, 0, 1, -1, 1, -1, 0, 1]

if __name__ == '__main__':
    mat = [
        ['D', 'E', 'H', 'X', 'B'],
        ['A', 'O', 'G', 'P', 'E'],
        ['D', 'D', 'C', 'F', 'D'],
        ['E', 'B', 'E', 'A', 'S'],
        ['C', 'D', 'Y', 'E', 'N']
    ]
    ch = 'C'
    print("The length of the longest path with consecutive characters starting from "
          "character", ch, "is", findMaximumLength(mat, ch))
20.30303
85
0.435075
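The "longest path in matrix.py" content above calls findMaximumLength, which the stored excerpt omits. Below is a hedged reconstruction inferred from the 8-direction row/col offset arrays and the "consecutive characters" comment; it is an assumption, not the repository's actual code:

def findMaximumLength(mat, ch):
    """Longest path of consecutive characters (ch, ch+1, ...) moving in the
    8 directions given by the module-level `row`/`col` offset arrays.
    Hypothetical reconstruction; the original implementation is not in the dump."""
    n = len(mat)

    def dfs(i, j, expected):
        # Stop when out of bounds or the cell does not hold the character we need next.
        if not (0 <= i < n and 0 <= j < len(mat[i])) or mat[i][j] != expected:
            return 0
        nxt = chr(ord(expected) + 1)
        best = 0
        for k in range(8):
            best = max(best, dfs(i + row[k], j + col[k], nxt))
        return 1 + best

    # Try every cell as a starting point; the expected character strictly
    # increases per step, so the recursion always terminates.
    return max((dfs(i, j, ch) for i in range(n) for j in range(len(mat[i]))), default=0)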
28226ec9ea67dad00950fa1852a66dbf14540c2c
4,653
py
Python
AnimalProfile/session/batchAnimals.py
AtMostafa/AnimalProfile
866f55659b80291f840ecacd090afada5f4de674
[ "MIT" ]
null
null
null
AnimalProfile/session/batchAnimals.py
AtMostafa/AnimalProfile
866f55659b80291f840ecacd090afada5f4de674
[ "MIT" ]
null
null
null
AnimalProfile/session/batchAnimals.py
AtMostafa/AnimalProfile
866f55659b80291f840ecacd090afada5f4de674
[ "MIT" ]
null
null
null
__all__ = ('get_session_list', 'get_animal_list', 'get_event',
           'get_tag_pattern', 'get_pattern_animalList', 'get_current_animals')

import datetime
import logging

from .. import Root
from .. import File
from .. import Profile
from ..Profile import EventProfile
from .singleAnimal import *


def get_session_list(root: Root, animalList: list = None, profile: Profile = None):
    """
    This function returns the list of sessions with a certain 'profile'
    for all the animals in animalList.
    If animalList=None, it will search all the animals.
    """
    if profile is None:
        profile = Profile(root=root)
    if animalList is None or animalList == '' or animalList == []:
        animalList = root.get_all_animals()

    profileOut = Profile(root=root)
    for animal in animalList:
        tagFile = File(root, animal)
        sessionProfile = tagFile.get_profile_session_list(profile)
        profileOut += sessionProfile
    return profileOut


def get_animal_list(root: Root, profile: Profile = None):
    """
    This function returns the list of animals with at least one session matching the "profile".
    """
    if profile is None:
        profile = Profile(root=root)
    allProfiles = get_session_list(root, animalList=None, profile=profile)
    sessionList = allProfiles.Sessions
    animalList = []
    for session in sessionList:
        animalList.append(session[:len(profile._prefix) + 3])
    animalList = list(set(animalList))
    return sorted(animalList)


def get_event(root: Root, profile1: Profile, profile2: Profile, badAnimals: list = None):
    """
    This function finds the animals that match both profile1 and profile2
    IN SUCCESSION, i.e., when the conditions changed.
    """
    if badAnimals is None:
        badAnimals = []
    animalList1 = get_animal_list(root, profile1)
    animalList2 = get_animal_list(root, profile2)
    animalList0 = set(animalList1).intersection(set(animalList2))
    animalList0 = [animal for animal in animalList0 if animal not in badAnimals]  # remove bad animals from animalList0
    animalList0.sort()

    eventProfile = EventProfile(profile1, profile2)
    for animal in animalList0:
        sessionProfile1 = get_session_list(root, animalList=[animal], profile=profile1)
        sessionProfile2 = get_session_list(root, animalList=[animal], profile=profile2)
        sessionTotal = get_session_list(root, animalList=[animal], profile=root.get_profile())
        try:
            index = sessionTotal.Sessions.index(sessionProfile1.Sessions[-1])
            if sessionProfile2.Sessions[0] == sessionTotal.Sessions[index + 1]:
                # Two profiles succeed, meaning the Event happened.
                eventProfile.append(sessionProfile1.Sessions, sessionProfile2.Sessions)
        except Exception:
            pass
    return eventProfile


def get_tag_pattern(root: Root, animalList: list = None, tagPattern: str = '*'):
    """
    Applies 'get_pattern_session_list' to a list of animals.
    """
    if animalList is None or animalList == []:
        animalList = root.get_all_animals()
    profileDict = root.get_profile()
    for animal in animalList:
        tagFile = File(root, animal)
        profileDict += tagFile.get_pattern_session_list(tagPattern=tagPattern)
    return profileDict


def get_pattern_animalList(root: Root, tagPattern: str):
    """
    This function returns the list of animals with at least one session matching the 'tagPattern'.
    """
    allProfile = get_tag_pattern(root, animalList=None, tagPattern=tagPattern)
    sessionList = allProfile.Sessions
    animalList = []
    for session in sessionList:
        animalList.append(session[:len(root.prefix) + 3])
    animalList = list(set(animalList))
    return sorted(animalList)


def get_current_animals(root: Root, days_passed: int = 4):
    """
    This function returns the list of animals with a new session
    within the last few ('days_passed') days.
    """
    now = datetime.datetime.now()
    all_animals = root.get_all_animals()
    if all_animals == []:
        logging.warning('No animal found!')
        return []
    animalList = []
    for animal in all_animals:
        animalTag = File(root, animal)
        sessionList = animalTag.get_all_sessions()
        if not sessionList:
            continue
        lastSessionDate = animalTag.get_session_date(sessionList[-1])
        if (now - lastSessionDate).days <= days_passed:
            animalList.append(animal)
    return animalList
33.47482
119
0.663873
282403dbaa1f17f6e0d6f80a9faabdc5990009bd
10,747
py
Python
IsaacAgent.py
dholmdahl/connect4-1
cdcd92ee30f45e89a9f01ebc87a8b6d797cc4a81
[ "MIT" ]
null
null
null
IsaacAgent.py
dholmdahl/connect4-1
cdcd92ee30f45e89a9f01ebc87a8b6d797cc4a81
[ "MIT" ]
null
null
null
IsaacAgent.py
dholmdahl/connect4-1
cdcd92ee30f45e89a9f01ebc87a8b6d797cc4a81
[ "MIT" ]
null
null
null
from random import choice
from copy import deepcopy

from game_data import GameData
from agents import Agent

import numpy as np
import random
import pickle
import pandas as pd
33.902208
199
0.499209
28254fc9a86cfb17a27b879bc1d9e02d48b17b76
1,288
py
Python
HTTPServer.py
dannyb648/HTTPServer
e7877646d2ee890229d5db67055abed2f3a91812
[ "MIT" ]
null
null
null
HTTPServer.py
dannyb648/HTTPServer
e7877646d2ee890229d5db67055abed2f3a91812
[ "MIT" ]
null
null
null
HTTPServer.py
dannyb648/HTTPServer
e7877646d2ee890229d5db67055abed2f3a91812
[ "MIT" ]
null
null
null
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from SocketServer import ThreadingMixIn
from os import curdir, sep
import threading
import urlparse
import mimetypes

PORT_NUMBER = 8080
VERSION_NUMBER = '1.0.0'

if __name__ == '__main__':
    server = ThreadedHTTPServer(('', PORT_NUMBER), Handler)
    print 'Starting Server on port ' + str(PORT_NUMBER)
    print 'Version Code: ' + VERSION_NUMBER
    print 'Author @dannyb648 | danbeglin.co.uk'
    server.serve_forever()
24.769231
61
0.736801
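The HTTPServer.py content above instantiates ThreadedHTTPServer with a Handler class, and neither is defined in the stored excerpt. Given the ThreadingMixIn and BaseHTTPRequestHandler imports, plausible Python 2 definitions would look like this sketch, which is an assumption rather than the recorded file:

class Handler(BaseHTTPRequestHandler):
    def do_GET(self):
        # Placeholder behaviour; the real handler body is not in the excerpt.
        self.send_response(200)
        self.end_headers()
        self.wfile.write('OK')


class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Serve each request in its own thread."""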
2826bae5797a9d9d95a636c0a99581f2619ca237
5,872
py
Python
algorand-oracle-smart-contracts/src/algorand_oracle.py
damees/algorand-oracle
f7f078f9d153341d1ba546ff66e8afbf2685f114
[ "MIT" ]
null
null
null
algorand-oracle-smart-contracts/src/algorand_oracle.py
damees/algorand-oracle
f7f078f9d153341d1ba546ff66e8afbf2685f114
[ "MIT" ]
null
null
null
algorand-oracle-smart-contracts/src/algorand_oracle.py
damees/algorand-oracle
f7f078f9d153341d1ba546ff66e8afbf2685f114
[ "MIT" ]
null
null
null
from pyteal import *

ADMIN_KEY = Bytes("admin")
WHITELISTED_KEY = Bytes("whitelisted")
REQUESTS_BALANCE_KEY = Bytes("requests_balance")
MAX_BUY_AMOUNT = Int(1000000000)
MIN_BUY_AMOUNT = Int(10000000)
REQUESTS_SELLER = Addr("N5ICVTFKS7RJJHGWWM5QXG2L3BV3GEF6N37D2ZF73O4PCBZCXP4HV3K7CY")
MARKET_EXCHANGE_NOTE = Bytes("algo-oracle-app-4")

if __name__ == "__main__":
    with open("algorand_oracle_approval.teal", "w") as f:
        compiled = compileTeal(approval_program(), mode=Mode.Application, version=5)
        f.write(compiled)

    with open("algorand_oracle_clear_state.teal", "w") as f:
        compiled = compileTeal(clear_state_program(), mode=Mode.Application, version=5)
        f.write(compiled)
35.161677
111
0.547854
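The algorand_oracle.py content above compiles approval_program() and clear_state_program(), which the stored excerpt does not include. A minimal hedged placeholder using only documented PyTeal expressions; the real contract logic is unknown, and Approve() here simply accepts every call:

def approval_program():
    # Placeholder assumption; the actual oracle logic is not in the dump.
    return Approve()


def clear_state_program():
    return Approve()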
28271eebbca12a80c721021d335930842259d168
20,198
py
Python
custom_components/shelly/__init__.py
astrandb/ShellyForHASS
f404d3007a26945f310a801c6c7d196d7fa1fe23
[ "MIT" ]
null
null
null
custom_components/shelly/__init__.py
astrandb/ShellyForHASS
f404d3007a26945f310a801c6c7d196d7fa1fe23
[ "MIT" ]
null
null
null
custom_components/shelly/__init__.py
astrandb/ShellyForHASS
f404d3007a26945f310a801c6c7d196d7fa1fe23
[ "MIT" ]
null
null
null
""" Support for Shelly smart home devices. For more details about this component, please refer to the documentation at https://home-assistant.io/components/shelly/ """ # pylint: disable=broad-except, bare-except, invalid-name, import-error from datetime import timedelta import logging import time import asyncio import voluptuous as vol from homeassistant.const import ( CONF_DEVICES, CONF_DISCOVERY, CONF_ID, CONF_NAME, CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP) from homeassistant import config_entries from homeassistant.helpers import discovery from homeassistant.helpers.dispatcher import async_dispatcher_send from homeassistant.helpers.entity import Entity from homeassistant.helpers.script import Script from homeassistant.util import slugify from .const import * from .configuration_schema import CONFIG_SCHEMA REQUIREMENTS = ['pyShelly==0.1.16'] _LOGGER = logging.getLogger(__name__) __version__ = "0.1.6.b6" VERSION = __version__ BLOCKS = {} DEVICES = {} BLOCK_SENSORS = [] DEVICE_SENSORS = [] #def _get_block_key(block): # key = block.id # if not key in BLOCKS: # BLOCKS[key] = block # return key def get_block_from_hass(hass, discovery_info): """Get block from HASS""" if SHELLY_BLOCK_ID in discovery_info: key = discovery_info[SHELLY_BLOCK_ID] return hass.data[SHELLY_BLOCKS][key] #def _get_device_key(dev): # key = _dev_key(dev) # if not key in DEVICES: # DEVICES[key] = dev # return key def get_device_from_hass(hass, discovery_info): """Get device from HASS""" device_key = discovery_info[SHELLY_DEVICE_ID] return hass.data[SHELLY_DEVICES][device_key] class ShellyInstance(): """Config instance of Shelly""" def _get_specific_config_root(self, key, *ids): item = self._get_specific_config(key, None, *ids) if item is None: item = self.conf.get(key) return item def _find_device_config(self, device_id): device_conf_list = self.conf.get(CONF_DEVICES) for item in device_conf_list: if item[CONF_ID].upper() == device_id: return item return None def _get_device_config(self, device_id, id_2=None): """Get config for device.""" item = self._find_device_config(device_id) if item is None and id_2 is not None: item = self._find_device_config(id_2) if item is None: return {} return item def _get_specific_config(self, key, default, *ids): for device_id in ids: item = self._find_device_config(device_id) if item is not None and key in item: return item[key] return default def _get_sensor_config(self, *ids): sensors = self._get_specific_config(CONF_SENSORS, None, *ids) if sensors is None: sensors = self.conf.get(CONF_SENSORS) if SENSOR_ALL in sensors: return [*SENSOR_TYPES.keys()] if sensors is None: return {} return sensors def _add_device(self, platform, dev): self.hass.add_job(self._async_add_device(platform, dev)) class ShellyBlock(Entity): """Base class for Shelly entities""" def _updated(self, _block): """Receive events when the switch state changed (by mobile, switch etc)""" if self.entity_id is not None and not self._is_removed: self.schedule_update_ha_state(True) def remove(self): self._is_removed = True self.hass.add_job(self.async_remove) class ShellyDevice(Entity): """Base class for Shelly entities""" def _updated(self, _block): """Receive events when the switch state changed (by mobile, switch etc)""" if self.entity_id is not None and not self._is_removed: self.schedule_update_ha_state(True) if self._dev.info_values is not None: for key, _value in self._dev.info_values.items(): ukey = self._dev.id + '-' + key if not ukey in DEVICE_SENSORS: DEVICE_SENSORS.append(ukey) for 
sensor in self._sensor_conf: if SENSOR_TYPES[sensor].get('attr') == key: attr = {'sensor_type':key, 'itm':self._dev} conf = self.hass.data[SHELLY_CONFIG] #discovery.load_platform(self.hass, 'sensor', # DOMAIN, attr, conf) def remove(self): self._is_removed = True self.hass.add_job(self.async_remove)
36.003565
87
0.576839
28274273d3b6e8ded8878a57fe78503427048f15
664
py
Python
examples/petstore/migrations/versions/36745fa33987_remove_unique_constraint.py
fastack-dev/fastack-migrate
1e9d3b3b1d25bec000432026b975053e5350e3da
[ "MIT" ]
1
2021-12-23T03:20:57.000Z
2021-12-23T03:20:57.000Z
examples/petstore/migrations/versions/36745fa33987_remove_unique_constraint.py
fastack-dev/fastack-migrate
1e9d3b3b1d25bec000432026b975053e5350e3da
[ "MIT" ]
1
2022-02-09T08:10:30.000Z
2022-02-09T08:10:30.000Z
examples/petstore/migrations/versions/36745fa33987_remove_unique_constraint.py
fastack-dev/fastack-migrate
1e9d3b3b1d25bec000432026b975053e5350e3da
[ "MIT" ]
null
null
null
"""remove unique constraint Revision ID: 36745fa33987 Revises: 6b7ad8fd60f9 Create Date: 2022-01-06 08:31:55.141039 """ from alembic import op # revision identifiers, used by Alembic. revision = "36745fa33987" down_revision = "6b7ad8fd60f9" branch_labels = None depends_on = None
24.592593
72
0.701807
282798301fe62d89dc92c6e1905920362da8011c
564
py
Python
addons/website_event_track_live_quiz/controllers/track_live_quiz.py
SHIVJITH/Odoo_Machine_Test
310497a9872db7844b521e6dab5f7a9f61d365a4
[ "Apache-2.0" ]
null
null
null
addons/website_event_track_live_quiz/controllers/track_live_quiz.py
SHIVJITH/Odoo_Machine_Test
310497a9872db7844b521e6dab5f7a9f61d365a4
[ "Apache-2.0" ]
null
null
null
addons/website_event_track_live_quiz/controllers/track_live_quiz.py
SHIVJITH/Odoo_Machine_Test
310497a9872db7844b521e6dab5f7a9f61d365a4
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo.addons.website_event_track_live.controllers.track_live import EventTrackLiveController
43.384615
113
0.79078
28289fb87251908aef9d071f6be27139338d9810
2,640
py
Python
strongholds-and-followers/retainer/retainer.py
kbsletten/AvraeAliases
a392881dcddccc2155d10fd4d231f1be53c54bda
[ "MIT" ]
null
null
null
strongholds-and-followers/retainer/retainer.py
kbsletten/AvraeAliases
a392881dcddccc2155d10fd4d231f1be53c54bda
[ "MIT" ]
null
null
null
strongholds-and-followers/retainer/retainer.py
kbsletten/AvraeAliases
a392881dcddccc2155d10fd4d231f1be53c54bda
[ "MIT" ]
1
2022-03-17T18:06:25.000Z
2022-03-17T18:06:25.000Z
embed
<drac2>
GVARS = load_json(get_gvar("c1ee7d0f-750d-4f92-8d87-70fa22c07a81"))
CLASSES = [load_json(get_gvar(gvar)) for gvar in GVARS]
DISPLAY = {
    "acrobatics": "Acrobatics",
    "animalhandling": "Animal Handling",
    "athletics": "Athletics",
    "arcana": "Arcana",
    "deception": "Deception",
    "dex": "Dexterity",
    "dexterity": "Dexterity",
    "cha": "Charisma",
    "charisma": "Charisma",
    "con": "Constitution",
    "constitution": "Constitution",
    "history": "History",
    "investigation": "Investigation",
    "insight": "Insight",
    "int": "Intelligence",
    "intelligence": "Intelligence",
    "intimidation": "Intimidation",
    "medicine": "Medicine",
    "nature": "Nature",
    "perception": "Perception",
    "performance": "Performance",
    "persuasion": "Persuasion",
    "religion": "Religion",
    "sleightofhand": "Sleight of Hand",
    "survival": "Survival",
    "stealth": "Stealth",
    "str": "Strength",
    "strength": "Strength",
    "wis": "Wisdom",
    "wisdom": "Wisdom"
}
char = character()
ret_name = get("_retainerName")
ret_class = get("_retainerClass")
ret_level = int(get("_retainerLevel", 0))
ret_hp = char.get_cc("Retainer HP") if char and char.cc_exists("Retainer HP") else 0
title = f"{char.name} doesn't have a retainer!"
if ret_name and ret_class and ret_level:
    title = f"{char.name} has {ret_name} a level {ret_level} {ret_class} retainer!"
cl_info = [c for c in CLASSES if c["name"] == ret_class]
cl_info = cl_info[0] if cl_info else None
fields = ""
if cl_info:
    fields += f"""-f "HP|{ret_hp}/{ret_level}|inline" """
    fields += f"""-f "AC|{cl_info["ac"]}|inline" """
    fields += f"""-f "Primary Ability|{DISPLAY[cl_info["primary"]]}|inline" """
    fields += f"""-f "Saves|{", ".join(DISPLAY[x] for x in cl_info["saves"])}|inline" """
    fields += f"""-f "Skills|{", ".join(DISPLAY[x] for x in cl_info["skills"])}|inline" """
    attack_text = [node for node in cl_info["attack"]["automation"] if node["type"] == "text"]
    fields += f"""-f "{cl_info["attack"]["name"]}|{attack_text[0]["text"] if attack_text else ""}" """
    for action in cl_info["actions"]:
        if ret_level < action["level"]:
            continue
        attack_text = [node for node in action["attack"]["automation"] if node["type"] == "text"]
        fields += f"""-f "{action["attack"]["name"]} ({action["cc_max"]}/Day)|{attack_text[0]["text"] if attack_text else ""} {char.cc_str(action["cc"]) if char and action["cc"] and char.cc_exists(action["cc"]) else ""}" """
</drac2>
-title "{{title}}" {{fields}} -footer "!retainer | kbsletten#5710" -color <color> -thumb {{get("_retainerImage")}}
36.164384
122
0.625
2829cb6a0e893f3f47a265e061c7b3ffa93b9eea
8,351
py
Python
bloomberg_functions.py
sophierubin1224/strategy_draft
410206e5679865ffa25506e733c13b5b03416586
[ "MIT" ]
null
null
null
bloomberg_functions.py
sophierubin1224/strategy_draft
410206e5679865ffa25506e733c13b5b03416586
[ "MIT" ]
null
null
null
bloomberg_functions.py
sophierubin1224/strategy_draft
410206e5679865ffa25506e733c13b5b03416586
[ "MIT" ]
4
2021-04-12T23:30:14.000Z
2021-04-13T13:19:15.000Z
################################################################################
##### For Bloomberg ------------------------------------------------------------
##### Can't use this if you're on a Mac :(
################################################################################

from __future__ import print_function
from __future__ import absolute_import

from optparse import OptionParser
import os
import platform as plat
import sys

if sys.version_info >= (3, 8) and plat.system().lower() == "windows":
    # pylint: disable=no-member
    with os.add_dll_directory(os.getenv('BLPAPI_LIBDIR')):
        import blpapi
else:
    import blpapi

from utils import date_to_str
import pandas as pd

__copyright__ = """
Copyright 2012. Bloomberg Finance L.P.

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions: The above copyright notice and this
permission notice shall be included in all copies or substantial portions of
the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""

####### End of Bloomberg Section -----------------------------------------------
################################################################################
41.341584
81
0.544246
282a2f90de76609dae1135f6d1faa97131c57a5d
3,767
py
Python
backend/histocat/worker/segmentation/processors/acquisition_processor.py
BodenmillerGroup/histocat-web
c598cd07506febf0b7c209626d4eb869761f2e62
[ "MIT" ]
4
2021-06-14T15:19:25.000Z
2022-02-09T13:17:39.000Z
backend/histocat/worker/segmentation/processors/acquisition_processor.py
BodenmillerGroup/histocat-web
c598cd07506febf0b7c209626d4eb869761f2e62
[ "MIT" ]
null
null
null
backend/histocat/worker/segmentation/processors/acquisition_processor.py
BodenmillerGroup/histocat-web
c598cd07506febf0b7c209626d4eb869761f2e62
[ "MIT" ]
1
2022-02-09T13:17:41.000Z
2022-02-09T13:17:41.000Z
import os
from typing import Sequence, Union

import numpy as np
import tifffile
from deepcell.applications import Mesmer
from imctools.io.ometiff.ometiffparser import OmeTiffParser
from skimage import measure
from sqlalchemy.orm import Session

from histocat.core.acquisition import service as acquisition_service
from histocat.core.dataset.models import DatasetModel
from histocat.core.errors import SegmentationError
from histocat.core.segmentation.dto import SegmentationSubmissionDto
from histocat.core.utils import timeit
38.835052
105
0.723653
282c0f9d07c95149eb897900350f51bc9e832909
9,800
py
Python
Fractals.py
do-it-for-coffee/fractals
6050dc72ddeed45aefafed489e07a40ee8d8dc1d
[ "Apache-2.0" ]
null
null
null
Fractals.py
do-it-for-coffee/fractals
6050dc72ddeed45aefafed489e07a40ee8d8dc1d
[ "Apache-2.0" ]
null
null
null
Fractals.py
do-it-for-coffee/fractals
6050dc72ddeed45aefafed489e07a40ee8d8dc1d
[ "Apache-2.0" ]
null
null
null
import math
import numpy as np
import random
import os
from PIL import Image
import pyttsx3

if __name__ == '__main__':
    pass
35.507246
94
0.521429
282ccb40eb876af8551ca4938f1b672da9c20208
1,189
py
Python
helpers.py
AHS-Open-Sorcery/HTNE-Project
c6caf57f1e89302c06ef0a84ddb83c645274d183
[ "MIT" ]
null
null
null
helpers.py
AHS-Open-Sorcery/HTNE-Project
c6caf57f1e89302c06ef0a84ddb83c645274d183
[ "MIT" ]
null
null
null
helpers.py
AHS-Open-Sorcery/HTNE-Project
c6caf57f1e89302c06ef0a84ddb83c645274d183
[ "MIT" ]
null
null
null
from data_retrieval import *
import emailer
from login.config import Config
from sentiment_analysis import *
import tweepy
import json
27.022727
117
0.710681
282cfc4e7936eaf93017c54214756f8505acd57a
3,291
py
Python
attack_metrics/mr.py
asplos2020/DRTest
c3de497142d9b226e518a1a0f95f7350d2f7acd6
[ "MIT" ]
1
2021-04-01T07:31:17.000Z
2021-04-01T07:31:17.000Z
attack_metrics/mr.py
Justobe/DRTest
85c3c9b2a46cafa7184130f2596c5f9eb3b20bff
[ "MIT" ]
null
null
null
attack_metrics/mr.py
Justobe/DRTest
85c3c9b2a46cafa7184130f2596c5f9eb3b20bff
[ "MIT" ]
1
2020-12-24T12:12:54.000Z
2020-12-24T12:12:54.000Z
""" This tutorial shows how to generate adversarial examples using FGSM and train a model using adversarial training with TensorFlow. It is very similar to mnist_tutorial_keras_tf.py, which does the same thing but with a dependence on keras. The original paper can be found at: https://arxiv.org/abs/1412.6572 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import sys import numpy as np import tensorflow as tf from scipy.misc import imsave, imread from tensorflow.python.platform import flags sys.path.append("../") from nmutant_data.mnist import data_mnist from nmutant_data.cifar10 import data_cifar10 from nmutant_data.svhn import data_svhn from nmutant_util.utils_tf import model_argmax, model_prediction from nmutant_model.model_operation import model_load from nmutant_attack.attacks import FastGradientMethod from nmutant_util.utils_imgproc import deprocess_image_1, preprocess_image_1, deprocess_image_1 from nmutant_data.data import get_data, get_shape from nmutant_util.utils import batch_indices from nmutant_util.utils_file import get_data_file import time import math FLAGS = flags.FLAGS def mr(datasets, model_name, attack, va, epoch=49): """ :param datasets :param sample: inputs to attack :param target: the class want to generate :param nb_classes: number of output classes :return: """ tf.reset_default_graph() X_train, Y_train, X_test, Y_test = get_data(datasets) input_shape, nb_classes = get_shape(datasets) sample=X_test sess, preds, x, y, model, feed_dict = model_load(datasets, model_name, epoch=epoch) probabilities = model_prediction(sess, x, preds, sample, feed=feed_dict, datasets=datasets) if sample.shape[0] == 1: current_class = np.argmax(probabilities) else: current_class = np.argmax(probabilities, axis=1) # only for correct: acc_pre_index=[] for i in range(0, sample.shape[0]): if current_class[i]==np.argmax(Y_test[i]): acc_pre_index.append(i) print(len(acc_pre_index)) sess.close() total=0 if attack=='fgsm': samples_path='../adv_result/'+datasets+'/'+attack+'/'+model_name+'/'+str(va) [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path) num=len(image_list) return num/len(acc_pre_index) else: total=0 for tar in range(0,nb_classes): samples_path='../adv_result/'+datasets+'/'+attack+'/'+model_name+'/'+str(va)+'_'+str(tar) [image_list, image_files, real_labels, predicted_labels] = get_data_file(samples_path) total+=len(image_list) return total/len(acc_pre_index) if __name__ == '__main__': flags.DEFINE_string('datasets', 'mnist', 'The target datasets.') #flags.DEFINE_string('sample', '../datasets/integration/mnist/0.png', 'The path to load sample.') flags.DEFINE_string('model', 'lenet4', 'The name of model.') flags.DEFINE_string('attack', 'fgsm', 'step size of fgsm') tf.app.run()
33.242424
101
0.717107
282d70a51f36fc140a29300e30c60da47fb2411b
7,434
py
Python
detect_deeplabv3plus_ascend.py
jackhanyuan/deeplabv3plus-ascend
817006a4514257aa8cd07d752b70bbff9709ba9f
[ "Apache-2.0" ]
null
null
null
detect_deeplabv3plus_ascend.py
jackhanyuan/deeplabv3plus-ascend
817006a4514257aa8cd07d752b70bbff9709ba9f
[ "Apache-2.0" ]
null
null
null
detect_deeplabv3plus_ascend.py
jackhanyuan/deeplabv3plus-ascend
817006a4514257aa8cd07d752b70bbff9709ba9f
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # by [jackhanyuan](https://github.com/jackhanyuan) 07/03/2022 import argparse import copy import glob import os import re import sys import time from pathlib import Path import cv2 import acl import torch import numpy as np from PIL import Image import torch.nn.functional as F from acl_net import check_ret, Net FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # Root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative IMG_EXT = ('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff') if __name__ == "__main__": opt = parse_opt() t0 = time.perf_counter() print("ACL Init:") ret = acl.init() check_ret("acl.init", ret) device_id = opt.device # 1.Load model print("Loading model %s." % opt.weights) model_path = str(opt.weights) net = Net(device_id, model_path) input_size = opt.imgsz output_dir = increment_path(Path(opt.output_dir) / 'exp', exist_ok=False) # increment path output_dir.mkdir(parents=True, exist_ok=True) # make dir # 2.Load label label_path = opt.labels labels = load_label(label_path) num_classes = len(labels) # 3.Start Detect print() print("Start Detect:") images_dir = opt.images_dir images = sorted(os.listdir(images_dir)) count = 0 total_count = len(images) for image_name in images: if image_name.lower().endswith(IMG_EXT): t1 = time.perf_counter() count += 1 image_path = os.path.join(images_dir, image_name) image = Image.open(image_path) # detect image org_img, pred_img = detect_image(image, num_classes=num_classes, input_shape=input_size, fp16=False) # count area for every labels s = "" for i in range(len(labels)): count_area = int(np.sum(pred_img == i)) if count_area > 0: s += f"{count_area} pixel{'s' * (count_area > 1)} {labels[i]}, " # add to string # draw imgage output_img = draw_image(org_img, pred_img, num_classes=num_classes, blend=opt.blend) # save image if opt.save_img: output_path = os.path.join(output_dir, image_name) output_img.save(output_path) t2 = time.perf_counter() t = t2 - t1 print('image {}/{} {}: {}Done. ({:.3f}s)'.format(count, total_count, image_path, s, t)) t3 = time.perf_counter() t = t3 - t0 print('This detection cost {:.3f}s.'.format(t)) print("Results saved to {}.".format(output_dir)) print()
34.738318
114
0.595776
282db7a977d5ee21b51eae4baa89a5c662e12b73
1,044
py
Python
trim2.py
AmadeusChan/GeminiGraph
893b05ee5c560ec51d41ab6a58a300baade8a9f5
[ "Apache-2.0" ]
null
null
null
trim2.py
AmadeusChan/GeminiGraph
893b05ee5c560ec51d41ab6a58a300baade8a9f5
[ "Apache-2.0" ]
null
null
null
trim2.py
AmadeusChan/GeminiGraph
893b05ee5c560ec51d41ab6a58a300baade8a9f5
[ "Apache-2.0" ]
null
null
null
import os

with open("./simple_graph.txt", "r") as f:
    N = int(f.readline())
    edges = []
    while True:
        line = f.readline().strip()
        if not line:
            break
        line = line.split(" ")
        edges.append([int(line[0]), int(line[1])])

con = []
for i in range(N):
    con.append([])
for edge in edges:
    con[edge[0]].append(edge[1])

"""
for i in range(N):
    active.append(N)
"""

trimmed_num = 0
trimmed = []
for i in range(N):
    trimmed.append(False)

it = 0
while True:
    it += 1
    degree = []
    newly_trimmed = 0
    for i in range(N):
        degree.append(0)
    for i in range(N):
        if trimmed[i] == False:
            for j in con[i]:
                degree[j] += 1
    for i in range(N):
        if trimmed[i] == False and degree[i] < 2:
            trimmed[i] = True
            newly_trimmed += 1
    if newly_trimmed == 0:
        break
    trimmed_num += newly_trimmed

print("iter =", it, " trimmed = ", trimmed_num)

print(N - trimmed_num)
18.642857
50
0.51341
282de6dc694665ce1234b3c8d5c5765ed89dc979
3,516
py
Python
Classes/Data.py
zerowsir/stock_study
ae2f3fab2b0cb3f4c980f0b229547867902415c4
[ "MIT" ]
5
2020-04-27T08:07:06.000Z
2022-01-02T14:47:21.000Z
Classes/Data.py
zerowsir/stock_study
ae2f3fab2b0cb3f4c980f0b229547867902415c4
[ "MIT" ]
null
null
null
Classes/Data.py
zerowsir/stock_study
ae2f3fab2b0cb3f4c980f0b229547867902415c4
[ "MIT" ]
3
2020-04-25T12:29:09.000Z
2021-07-09T05:47:01.000Z
# coding=utf-8 """ __title__ = '' __file__ = '' __author__ = 'tianmuchunxiao' __mtime__ = '2019/7/4' """ import requests import datetime import pandas as pd from io import StringIO TODAY = datetime.date.strftime(datetime.date.today(), '%Y%m%d')
42.878049
834
0.700512
28306d7ad64f3293bff710f9c34f8a63246637c0
4,151
py
Python
tledb/common/misc.py
rtubio/tledb
2bfb13497d4ba7c155505fa7396abd7bf837b3a5
[ "Apache-2.0" ]
null
null
null
tledb/common/misc.py
rtubio/tledb
2bfb13497d4ba7c155505fa7396abd7bf837b3a5
[ "Apache-2.0" ]
5
2020-11-09T00:24:16.000Z
2022-02-10T15:10:19.000Z
tledb/common/misc.py
rtubio/tledb
2bfb13497d4ba7c155505fa7396abd7bf837b3a5
[ "Apache-2.0" ]
1
2020-11-08T10:35:23.000Z
2020-11-08T10:35:23.000Z
import datetime
import logging

import pytz
import socket

logger = logging.getLogger('common')


def get_fqdn(ip_address):
    """
    Function that transforms a given IP address into the associated FQDN
    name for that host.
    :param ip_address: IP address of the remote host.
    :return: FQDN name for that host.
    """
    return socket.gethostbyaddr(ip_address)


# noinspection PyBroadException
def get_fqdn_ip():
    """
    Function that returns the hostname as read from the socket library and
    the IP address for that hostname.
    :return: (String with the name of the current host, IP)
    """
    hn = 'localhost'

    try:
        hn = socket.getfqdn()
    except Exception:
        pass

    return hn, socket.gethostbyname(hn)


def get_now_utc(no_microseconds=True):
    """
    This method returns now's datetime object UTC localized.
    :param no_microseconds: sets whether microseconds should be cleared.
    :return: the just created datetime object with today's date.
    """
    if no_microseconds:
        return pytz.utc.localize(datetime.datetime.utcnow()).replace(
            microsecond=0
        )
    else:
        return pytz.utc.localize(datetime.datetime.utcnow())


def get_utc_window(center=None, duration=None, no_microseconds=True):
    """Time window around an instant
    Function that returns a time window (start, end tuple) that spans
    `duration` on each side of `center`. By default the center of the window
    is the execution instant and the total length is 10 minutes (5 minutes
    on each side).

    Args:
        center: datetime.datetime object that defines the center of the window
        duration: datetime.timedelta object with the half-width of the window
        no_microseconds: flag that indicates whether the microseconds should
            be included in the window tuple or not

    Returns:
        (start, end) tuple that defines the window
    """
    if not center:
        center = get_now_utc(no_microseconds=no_microseconds)
    if not duration:
        duration = datetime.timedelta(minutes=5)

    return center - duration, center + duration


def get_now_hour_utc(no_microseconds=True):
    """
    This method returns now's hour in the UTC timezone.
    :param no_microseconds: sets whether microseconds should be cleared.
    :return: The time object within the UTC timezone.
    """
    if no_microseconds:
        return datetime.datetime.utcnow().replace(microsecond=0).time()
    else:
        return datetime.datetime.utcnow().time()


def get_today_utc():
    """
    This method returns today's date localized with the microseconds set
    to zero.
    :return: the just created datetime object with today's date.
    """
    return pytz.utc.localize(datetime.datetime.utcnow()).replace(
        hour=0, minute=0, second=0, microsecond=0
    )


def get_next_midnight():
    """
    This method returns the next midnight, i.e. 00:00 (UTC) of the
    following day.
    :return: the just created datetime object with the next midnight.
    TODO :: unit test
    """
    return pytz.utc.localize(datetime.datetime.today()).replace(
        hour=0, minute=0, second=0, microsecond=0
    ) + datetime.timedelta(days=1)


def localize_date_utc(date):
    """
    Localizes in the UTC timezone the given date object.
    :param date: The date object to be localized.
    :return: A localized datetime object in the UTC timezone.
    TODO :: unit test
    """
    return pytz.utc.localize(
        datetime.datetime.combine(
            date, datetime.time(hour=0, minute=0, second=0)
        )
    )


TIMESTAMP_0 = localize_date_utc(datetime.datetime(year=1970, month=1, day=1))


def get_utc_timestamp(utc_datetime=None):
    """
    Returns a timestamp with the number of microseconds elapsed since
    January 1st of 1970 for the given datetime object, UTC localized.
    :param utc_datetime: The datetime whose timestamp is to be calculated.
    :return: The number of microseconds since 1.1.1970, UTC localized (integer)
    """
    if utc_datetime is None:
        utc_datetime = get_now_utc()
    diff = utc_datetime - TIMESTAMP_0
    return int(diff.total_seconds() * 10**6)
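# Hedged usage sketch (added; not part of the original module). It shows how
# the helpers above compose; the printed values are illustrative only.
if __name__ == '__main__':
    start, end = get_utc_window(duration=datetime.timedelta(minutes=2))
    print(start, end)  # a 4-minute window centered on "now"
    # 240 seconds expressed in microseconds: 240000000
    print(get_utc_timestamp(end) - get_utc_timestamp(start))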
30.748148
78
0.688991
2832742f4b503c46d1f0a267a28c5b2b06f21e83
2,455
py
Python
CoC/CoC_Default_CmdSets.py
macorvalan/MyGame
29a14bcb1ffb11b158d325112d5698107d8f1188
[ "Unlicense" ]
null
null
null
CoC/CoC_Default_CmdSets.py
macorvalan/MyGame
29a14bcb1ffb11b158d325112d5698107d8f1188
[ "Unlicense" ]
null
null
null
CoC/CoC_Default_CmdSets.py
macorvalan/MyGame
29a14bcb1ffb11b158d325112d5698107d8f1188
[ "Unlicense" ]
null
null
null
""" """ from evennia import default_cmds from evennia import CmdSet
24.068627
72
0.627291
2832f2cc2b9e737de1d148d89d82f9fe93379119
6,300
py
Python
dit/inference/counts.py
leoalfonso/dit
e7d5f680b3f170091bb1e488303f4255eeb11ef4
[ "BSD-3-Clause" ]
1
2021-03-15T08:51:42.000Z
2021-03-15T08:51:42.000Z
dit/inference/counts.py
leoalfonso/dit
e7d5f680b3f170091bb1e488303f4255eeb11ef4
[ "BSD-3-Clause" ]
null
null
null
dit/inference/counts.py
leoalfonso/dit
e7d5f680b3f170091bb1e488303f4255eeb11ef4
[ "BSD-3-Clause" ]
null
null
null
""" Non-cython methods for getting counts and distributions from data. """ import numpy as np try: # cython from .pycounts import counts_from_data, distribution_from_data except ImportError: # no cython from boltons.iterutils import windowed_iter from collections import Counter, defaultdict from itertools import product from .. import modify_outcomes from ..exceptions import ditException def counts_from_data(data, hLength, fLength, marginals=True, alphabet=None, standardize=True): """ Returns conditional counts from `data`. To obtain counts for joint distribution only, use fLength=0. Parameters ---------- data : NumPy array The data used to calculate morphs. Note: `data` cannot be a generator. Also, if standardize is True, then data can be any indexable iterable, such as a list or tuple. hLength : int The maxmimum history word length used to calculate morphs. fLength : int The length of future words that defines the morph. marginals : bool If True, then the morphs for all histories words from L=0 to L=hLength are calculated. If False, only histories of length L=hLength are calculated. alphabet : list The alphabet to use when creating the morphs. If `None`, then one is obtained from `data`. If not `None`, then the provided alphabet supplements what appears in the data. So the data is always scanned through in order to get the proper alphabet. standardize : bool The algorithm requires that the symbols in data be standardized to a canonical alphabet consisting of integers from 0 to k-1, where k is the alphabet size. If `data` is already standard, then an extra pass through the data can be avoided by setting `standardize` to `False`, but note: if `standardize` is False, then data MUST be a NumPy array. Returns ------- histories : list A list of observed histories, corresponding to the rows in `cCounts`. cCounts : NumPy array A NumPy array representing conditional counts. The rows correspond to the observed histories, so this is sparse. The number of rows in this array cannot be known in advance, but the number of columns will be equal to the alphabet size raised to the `fLength` power. hCounts : NumPy array A 1D array representing the count of each history word. alphabet : tuple The ordered tuple representing the alphabet of the data. If `None`, the one is created from the data. Notes ----- This requires three complete passes through the data. One to obtain the full alphabet. Another to standardize the data. A final pass to obtain the counts. This is implemented densely. So during the course of the algorithm, we work with a large array containing a row for each possible history. Only the rows corresponding to observed histories are returned. """ try: data = list(map(tuple, data)) except TypeError: pass counts = Counter(windowed_iter(data, hLength+fLength)) cond_counts = defaultdict(lambda: defaultdict(int)) for word, count in counts.items(): cond_counts[word[:hLength]][word[hLength:]] += count histories = sorted(counts.keys()) alphabet = set(alphabet) if alphabet is not None else set() alphabet = tuple(sorted(alphabet.union(*[set(hist) for hist in histories]))) cCounts = np.empty((len(histories), len(alphabet)**fLength)) for i, hist in enumerate(histories): for j, future in enumerate(product(alphabet, repeat=fLength)): cCounts[i, j] = cond_counts[hist][future] hCounts = cCounts.sum(axis=1) return histories, cCounts, hCounts, alphabet def distribution_from_data(d, L, trim=True, base=None): """ Returns a distribution over words of length `L` from `d`. 
The returned distribution is the naive estimate of the distribution, which assigns probabilities equal to the number of times a particular word appeared in the data divided by the total number of times a word could have appeared in the data. Roughly, it corresponds to the stationary distribution of a maximum likelihood estimate of the transition matrix of an (L-1)th order Markov chain. Parameters ---------- d : list A list of symbols to be converted into a distribution. L : integer The length of the words for the distribution. trim : bool If true, then words with zero probability are trimmed from the distribution. base : int or string The desired base of the returned distribution. If `None`, then the value of `dit.ditParams['base']` is used. """ from dit import ditParams, Distribution try: d = list(map(tuple, d)) except TypeError: pass if base is None: base = ditParams['base'] words, _, counts, _ = counts_from_data(d, L, 0) # We turn the counts to probabilities pmf = counts/counts.sum() dist = Distribution(words, pmf, trim=trim) dist.set_base(base) if L == 1: try: dist = modify_outcomes(dist, lambda o: o[0]) except ditException: pass return dist def get_counts(data, length): """ Count the occurrences of all words of `length` in `data`. Parameters ---------- data : iterable The sequence of samples length : int The length to group samples into. Returns ------- counts : np.array Array with the count values. """ hists, _, counts, _ = counts_from_data(data, length, 0) mask = np.array([len(h) == length for h in hists]) counts = counts[mask] return counts
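# Hedged usage sketch (added; not part of dit itself). Assuming dit is
# installed and the functions above are in scope, this counts length-2 words
# in a binary sequence and builds the naive length-1 distribution.
if __name__ == '__main__':
    seq = [0, 1, 1, 0, 1, 1, 0, 1]
    print(get_counts(seq, 2))  # counts of the observed length-2 words
    print(distribution_from_data(seq, 1))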
35
98
0.619524
28336284c8b0c58eec05e4f7f5c39c75af17be88
1,845
py
Python
tests/test_get_meetings.py
GeorgianBadita/Dronem-gym-envirnoment
f3b488f6a4b55722c4b129051555a68d7775278c
[ "MIT" ]
5
2020-06-13T10:43:42.000Z
2022-01-25T10:37:32.000Z
tests/test_get_meetings.py
GeorgianBadita/Dronem-gym-envirnoment
f3b488f6a4b55722c4b129051555a68d7775278c
[ "MIT" ]
null
null
null
tests/test_get_meetings.py
GeorgianBadita/Dronem-gym-envirnoment
f3b488f6a4b55722c4b129051555a68d7775278c
[ "MIT" ]
null
null
null
""" @author: Badita Marin-Georgian @email: geo.badita@gmail.com @date: 21.03.2020 00:58 """ from env_interpretation import Meeting from env_interpretation.meeting import get_valid_meetings
38.4375
104
0.656369
2834130633642f13cf991cd575cca24813206c77
1,670
py
Python
python/test/environment/test_reset.py
stacyvjong/PandemicSimulator
eca906f5dc8135d7c90a1582b96621235f745c17
[ "Apache-2.0" ]
null
null
null
python/test/environment/test_reset.py
stacyvjong/PandemicSimulator
eca906f5dc8135d7c90a1582b96621235f745c17
[ "Apache-2.0" ]
null
null
null
python/test/environment/test_reset.py
stacyvjong/PandemicSimulator
eca906f5dc8135d7c90a1582b96621235f745c17
[ "Apache-2.0" ]
null
null
null
# Confidential, Copyright 2020, Sony Corporation of America, All rights reserved. import copy import numpy as np from pandemic_simulator.environment import CityRegistry, Home, GroceryStore, Office, School, Hospital, PopulationParams, \ LocationParams from pandemic_simulator.script_helpers import make_standard_locations, make_us_age_population tiny_population_params = PopulationParams( num_persons=10, location_type_to_params={ Home: LocationParams(num=3), GroceryStore: LocationParams(num=1, worker_capacity=5, visitor_capacity=30), Office: LocationParams(num=1, worker_capacity=200, visitor_capacity=0), School: LocationParams(num=1, worker_capacity=40, visitor_capacity=300), Hospital: LocationParams(num=1, worker_capacity=30, visitor_capacity=2), })
36.304348
122
0.742515
28343120a82f0ad353610fd53956f8cb3bf271dc
1,008
py
Python
Groups/Group_ID_6/SIFT_and_RESIFT/Code_files/sift.py
sonaldangi12/DataScience
3d7cd529a96f37c2ef179ee408e2c6d8744d746a
[ "MIT" ]
5
2020-12-13T07:53:22.000Z
2020-12-20T18:49:27.000Z
Groups/Group_ID_6/SIFT_and_RESIFT/Code_files/sift.py
Gulnaz-Tabassum/DataScience
1fd771f873a9bc0800458fd7c05e228bb6c4e8a0
[ "MIT" ]
null
null
null
Groups/Group_ID_6/SIFT_and_RESIFT/Code_files/sift.py
Gulnaz-Tabassum/DataScience
1fd771f873a9bc0800458fd7c05e228bb6c4e8a0
[ "MIT" ]
24
2020-12-12T11:23:28.000Z
2021-10-04T13:09:38.000Z
from libs import *
56
128
0.779762
283446254a407be87bc62c4c4206eacf19fcc853
651
py
Python
Q14__/23_Maximum_Points_You_Can_Obtain_from_Cards/Solution.py
hsclinical/leetcode
48a57f6a5d5745199c5685cd2c8f5c4fa293e54a
[ "Apache-2.0" ]
null
null
null
Q14__/23_Maximum_Points_You_Can_Obtain_from_Cards/Solution.py
hsclinical/leetcode
48a57f6a5d5745199c5685cd2c8f5c4fa293e54a
[ "Apache-2.0" ]
null
null
null
Q14__/23_Maximum_Points_You_Can_Obtain_from_Cards/Solution.py
hsclinical/leetcode
48a57f6a5d5745199c5685cd2c8f5c4fa293e54a
[ "Apache-2.0" ]
null
null
null
from typing import List
32.55
79
0.506912
28349545dacdb38c6ebe53a67d01ff333f29fa0c
1,192
py
Python
utils/editResult.py
JasonHippo/Scene_text_detection_and_recognition
c0da141d71b7b888d560296b201aecbbd735b565
[ "MIT" ]
4
2021-12-27T14:37:33.000Z
2022-03-30T10:56:57.000Z
utils/editResult.py
JasonHippo/Scene_text_detection_and_recognition
c0da141d71b7b888d560296b201aecbbd735b565
[ "MIT" ]
null
null
null
utils/editResult.py
JasonHippo/Scene_text_detection_and_recognition
c0da141d71b7b888d560296b201aecbbd735b565
[ "MIT" ]
null
null
null
import pandas as pd
import argparse

# characters to strip from predictions. The original list ended with two
# entries whose (non-ASCII) characters were lost in this dump, leaving empty
# strings that made the "any bad word present" check match every row; they
# and a duplicate "/" have been removed.
badword = ["!", "$","%","&","'","(",")","*","+",",","-",".","/",":",";","<","=",">","?","@","[","]","^","_","`","{","|","}","~"]

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--path', help='the path of file')
    opt = parser.parse_args()
    print(len(badword))
    df = pd.read_csv(opt.path)
    print(len(df))
    nan_list = list()
    for i in range(len(df)):
        row = df.loc[i]
        print(i)
        print(row)
        try:
            if any(bad_word in row['pred'] for bad_word in badword):
                ss = row['pred']
                for j in range(len(badword)):
                    ss = ss.replace(badword[j], "")
                if ss == "":
                    df.loc[i, 'pred'] = ""
                    print(df.loc[i])
                    continue
                df.loc[i, 'pred'] = ss
        except TypeError:  # non-string (e.g. NaN) predictions are dropped below
            nan_list.append(i)
    df = df.drop(nan_list)
    df = df.drop(df.loc[df['pred'] == ''].index)
    print(df.head())
    df.to_csv("{}_post.csv".format(opt.path.split('.csv')[0]), index=False, encoding="utf-8")
32.216216
141
0.428691
28360d264e9f210900fd3d3f89893b647f81343f
133
py
Python
Service/Ipv4_stun/startup.py
zlf7735268/Tenet
54005ad5d17b5d1f5ef4cc04aa6eb7939e58c2c5
[ "Apache-2.0" ]
2
2021-12-17T01:21:19.000Z
2021-12-17T14:49:42.000Z
Service/Ipv4_stun/startup.py
zlf7735268/Tenet
54005ad5d17b5d1f5ef4cc04aa6eb7939e58c2c5
[ "Apache-2.0" ]
null
null
null
Service/Ipv4_stun/startup.py
zlf7735268/Tenet
54005ad5d17b5d1f5ef4cc04aa6eb7939e58c2c5
[ "Apache-2.0" ]
null
null
null
from Ipv4_stun.transfer import Transfer #m=Transfer(address=('172.16.0.156',9080)) m=Transfer(address=('127.0.0.1', 82)) m.run()
26.6
43
0.691729
2837b7ad00fad751653116c498c45a40929e5a19
4,030
py
Python
generator/season.py
fraziermatthew/njba
acedec351543b8ecf339beb2d27c635f3377e929
[ "MIT" ]
null
null
null
generator/season.py
fraziermatthew/njba
acedec351543b8ecf339beb2d27c635f3377e929
[ "MIT" ]
null
null
null
generator/season.py
fraziermatthew/njba
acedec351543b8ecf339beb2d27c635f3377e929
[ "MIT" ]
null
null
null
"""season.py: Generates random NJBA season data.""" __author__ = "Matthew Frazier" __copyright__ = "Copyright 2019, University of Delaware, CISC 637 Database Systems" __email__ = "matthew@udel.edu" from datetime import timedelta import calendar import csv ''' Steps to run this project: 1. Create a virtual env and activate source virtualenv -p python3 . ./bin/activate 2. Install names PyPi Module - https://pypi.org/project/names/ pip install names 3. Run the project python3 generate-seasons.py ''' numOfSeasons = 50 seasonType = ["Pre", "Regular", "Post"] id = 1 cal = calendar.Calendar(firstweekday = calendar.SUNDAY) year = 2019 # Start Year # month = 10 # October # month2 = 4 # April # month3 = 6 # June with open('data/seasons2.csv', mode = 'w') as season_file: season_writer = csv.writer(season_file, delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL) for j in range(numOfSeasons): for index in range(len(seasonType)): # id, start-date, end-date, start-year, end-year, seasonType # Create the season list season = [] # monthcal = cal.monthdatescalendar(year,month) if (seasonType[index] == "Pre"): monthcal = cal.monthdatescalendar(year, 9) elif (seasonType[index] == "Regular"): monthcal = cal.monthdatescalendar(year, 10) else: monthcal = cal.monthdatescalendar(year + 1, 4) # ID season.append(id) if (seasonType[index] == "Pre"): # Pre Season # Start date is 4th Saturday of every September start_date = [day for week in monthcal for day in week if \ day.weekday() == calendar.SATURDAY][3] # Start date season.append(start_date) # End date is 3rd Monday of every October monthcal = cal.monthdatescalendar(year, 10) end_date = [day for week in monthcal for day in week if \ day.weekday() == calendar.TUESDAY][2] end_date = end_date - timedelta(days = 1) # End date season.append(end_date) if (seasonType[index] == "Regular"): # Regular Season # Start date is 3rd Tuesday of every October start_date = [day for week in monthcal for day in week if \ day.weekday() == calendar.TUESDAY][2] # Start date season.append(start_date) # End date is 2nd Wednesday of every April monthcal2 = cal.monthdatescalendar(year + 1, 4) end_date = [day for week in monthcal2 for day in week if \ day.weekday() == calendar.WEDNESDAY][1] # End date season.append(end_date) if (seasonType[index] == "Post"): # Post Season # Start date is 2nd Thursday of every April start_date = [day for week in monthcal2 for day in week if \ day.weekday() == calendar.WEDNESDAY][1] start_date = start_date + timedelta(days = 1) # Start date season.append(start_date) # End date is 3rd Tursday of every June monthcal = cal.monthdatescalendar(year + 1, 6) end_date = [day for week in monthcal for day in week if \ day.weekday() == calendar.THURSDAY][2] # End date season.append(end_date) # # Year Abbreviation # abbr = str(year + 1) # season.append(str(year) + "-" + str(year + 1)) # seasonType season.append(seasonType[index]) id += 1 season_writer.writerow(season) year += 1
33.032787
106
0.536476
2837e96df35c3511f85fcf1e1d1d5915d5541eac
2,746
py
Python
src/pace/allele_similarity.py
tmadden/pace
0be5d92579efc4e6219f5c58bb4e4ac6754e865e
[ "MIT" ]
null
null
null
src/pace/allele_similarity.py
tmadden/pace
0be5d92579efc4e6219f5c58bb4e4ac6754e865e
[ "MIT" ]
9
2019-01-16T15:13:37.000Z
2019-07-29T18:31:58.000Z
src/pace/allele_similarity.py
tmadden/pace
0be5d92579efc4e6219f5c58bb4e4ac6754e865e
[ "MIT" ]
null
null
null
import numpy as np
import pandas as pd

import pace
from pace.definitions import amino_acids, builtin_aa_encodings, builtin_allele_similarities
from pace.sklearn import create_one_hot_encoder
from pkg_resources import resource_stream


def get_allele_similarity_mat(allele_similarity_name):
    """
    Get a matrix of pre-computed allele similarities

    Parameters
    ----------
    allele_similarity_name : str
        Pre-computed allele similarity matrices are available based on
        observed peptide binding motifs ('motifs') or HLA protein binding
        pocket residues ('pockets').

    Returns
    -------
    pandas.core.frame.DataFrame
        allele similarity matrix
    """

    return load_allele_similarity(allele_similarity_name)


def get_similar_alleles(allele_similarity_name, allele, similarity_threshold):
    """
    Get the most similar alleles to a given allele, based on a specified
    allele similarity matrix and similarity threshold.

    Parameters
    ----------
    allele_similarity_name : str
        Pre-computed allele similarity matrices are available based on
        observed peptide binding motifs ('motifs') or HLA protein binding
        pocket residues ('pockets').
    allele : str
        The allele for which to determine similar alleles
    similarity_threshold
        Numerical threshold value that determines the cutoff for considering
        an allele similar to the given allele.

    Returns
    -------
    pandas.core.frame.DataFrame
        The similar alleles satisfying the specified threshold along with the
        numerical similarity values. Note that the given allele is also
        returned.
    """
    assert(allele_similarity_name in builtin_allele_similarities)

    allele_similarity = get_allele_similarity_mat(allele_similarity_name)
    similar_alleles = allele_similarity[allele]

    if allele_similarity_name == 'motifs':
        # higher values => more similar alleles
        similar_alleles_thr = similar_alleles[similar_alleles > similarity_threshold]
        similar_alleles_thr = similar_alleles_thr[(similar_alleles_thr*-1).argsort()]
    if allele_similarity_name == 'pockets':
        # higher values => less similar alleles
        similar_alleles_thr = similar_alleles[similar_alleles < similarity_threshold]
        similar_alleles_thr = similar_alleles_thr[similar_alleles_thr.argsort()]

    return similar_alleles_thr.to_frame()
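# Hedged usage sketch (added; not part of the original module). The allele
# name 'A0201' and the 0.75 cutoff are illustrative assumptions; it relies on
# the packaged matrices loading via load_allele_similarity as used above.
if __name__ == '__main__':
    sims = get_similar_alleles('motifs', 'A0201', similarity_threshold=0.75)
    print(sims)  # alleles whose motif similarity to A0201 exceeds 0.75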
36.131579
101
0.717407
2838bf018ab619624c31ab34ebae1fd0c469063d
13,884
py
Python
client/forms/reader_form.py
zhmzlzn/Network-Pj-BookReader
891d395f2db464f4d4e7b84dd03c3cddebbafd30
[ "MIT" ]
6
2019-11-28T10:47:46.000Z
2021-11-04T08:22:56.000Z
client/forms/reader_form.py
zhmzlzn/python-BookReader
891d395f2db464f4d4e7b84dd03c3cddebbafd30
[ "MIT" ]
null
null
null
client/forms/reader_form.py
zhmzlzn/python-BookReader
891d395f2db464f4d4e7b84dd03c3cddebbafd30
[ "MIT" ]
4
2019-12-17T15:29:22.000Z
2021-05-28T16:39:51.000Z
import tkinter as tk from tkinter import * from tkinter import messagebox from tkinter.simpledialog import askinteger from protocol.secure_transmission.secure_channel import establish_secure_channel_to_server from protocol.message_type import MessageType from protocol.data_conversion.from_byte import deserialize_message from client.memory import current_user import client.memory
43.118012
129
0.584054
2838cb9fb3068b931f43ec405489f94b2abb45c7
3,285
py
Python
akispy/__init__.py
ryanleland/Akispy
dbbb85a1d1b027051e11179289cc9067cb90baf6
[ "MIT" ]
null
null
null
akispy/__init__.py
ryanleland/Akispy
dbbb85a1d1b027051e11179289cc9067cb90baf6
[ "MIT" ]
2
2017-05-19T21:59:04.000Z
2021-06-25T15:28:07.000Z
akispy/__init__.py
ryanleland/Akispy
dbbb85a1d1b027051e11179289cc9067cb90baf6
[ "MIT" ]
1
2017-05-18T05:23:47.000Z
2017-05-18T05:23:47.000Z
#!/usr/bin/env python """Light weight python client for Akismet API.""" __title__ = 'akispy' __version__ = '0.2' __author__ = 'Ryan Leland' __copyright__ = 'Copyright 2012 Ryan Leland' import http.client, urllib.request, urllib.parse, urllib.error
30.990566
101
0.557991
283a11f41a96444be50edc2390d31671ac936bd1
1,599
py
Python
scripts/extract_mfcc.py
xavierfav/coala
b791ad6bb5c4f7b8f8f8fa8e0c5bd5b89b0ecbc3
[ "MIT" ]
34
2020-06-12T15:54:22.000Z
2021-12-16T08:16:45.000Z
scripts/extract_mfcc.py
xavierfav/ae-w2v-attention
8039c056ad365769bdf8d77d6292d4f3cfb957a4
[ "MIT" ]
3
2020-06-22T09:06:27.000Z
2021-07-10T09:58:30.000Z
scripts/extract_mfcc.py
xavierfav/coala
b791ad6bb5c4f7b8f8f8fa8e0c5bd5b89b0ecbc3
[ "MIT" ]
4
2020-10-23T03:29:35.000Z
2021-08-19T09:31:57.000Z
""" This script is used to compute mffc features for target task datasets. Warning: Need manual editing for switching datasets """ import os import librosa import soundfile as sf import numpy as np from tqdm import tqdm from pathlib import Path FILES_LOCATION = '../data/UrbanSound8K/audio' FILES_LOCATION = '../data/GTZAN/genres' SAVE_LOCATION = '../data/embeddings/gtzan/mfcc' SAVE_LOCATION = '../data/embeddings/nsynth/test/mfcc' if __name__ == "__main__": # p = Path(FILES_LOCATION) # filenames = p.glob('**/*.wav') # # filenames = p.glob('*') p = Path('../data/nsynth/nsynth-test/audio') filenames = p.glob('*.wav') for f in tqdm(filenames): try: y = compute_mfcc(str(f)) np.save(Path(SAVE_LOCATION, str(f.stem)+'.npy'), y) except RuntimeError as e: print(e, f)
30.75
89
0.632896
283a66142e84ec8f0ef344d826e481e7692288fc
391
py
Python
sqli/__main__.py
everilae/sqli
8a63076dc8316b38ce521b63e67bea8d2ccf2a80
[ "MIT" ]
null
null
null
sqli/__main__.py
everilae/sqli
8a63076dc8316b38ce521b63e67bea8d2ccf2a80
[ "MIT" ]
1
2017-10-22T11:13:58.000Z
2020-06-01T09:20:20.000Z
sqli/__main__.py
everilae/sqli
8a63076dc8316b38ce521b63e67bea8d2ccf2a80
[ "MIT" ]
null
null
null
import argparse import astunparse import sys from . import check parser = argparse.ArgumentParser() parser.add_argument("file", nargs="?", type=argparse.FileType("r"), default=sys.stdin) args = parser.parse_args() poisoned = check(args.file.read()) print("Possible SQL injections:") for p in poisoned: print("line {}: {}".format(p.get_lineno(), p.get_source()))
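# Added usage note (assumption: the package exposes this module as its CLI):
#   python -m sqli path/to/code.py    # scan a source file
#   cat code.py | python -m sqli      # or scan stdin (the default)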
23
67
0.682864
283aae358baff3c73b726efc887ef653ab678494
318
py
Python
rammstein.py
wildekek/rammstein-generator
fc7ef34260c4dddaba01ff4c964349e13bd4bf1a
[ "MIT" ]
3
2015-10-11T15:39:30.000Z
2019-06-18T19:20:00.000Z
rammstein.py
wildekek/rammstein-generator
fc7ef34260c4dddaba01ff4c964349e13bd4bf1a
[ "MIT" ]
null
null
null
rammstein.py
wildekek/rammstein-generator
fc7ef34260c4dddaba01ff4c964349e13bd4bf1a
[ "MIT" ]
1
2019-06-16T21:49:16.000Z
2019-06-16T21:49:16.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
j = '\n'.join  # assumed helper: the original definition of j was lost in this dump
a = 'Du hast mich'
b = 'Du, du hast' + '\n' + a
c = a + ' gefragt'
d = j([b, b, ''])
e = j(['Willst du bis der Tod euch scheidet', 'Treurig sein für alle Tage?', 'Nein, nein!', ''])
f = j([d, b, a, '', j([c, c, c, 'Und ich hab nichts gesagt', '']), e, e])
print(j([d, f, f, e]))
28.909091
91
0.566038
283bc06264c86314c6694bec3801d78f4ce49fef
799
py
Python
tests/test_06_reporting_classes.py
themperek/pyuvm
12abf6b0a631321c0fcce6ebbc04b8cc9900c6a8
[ "Apache-2.0" ]
null
null
null
tests/test_06_reporting_classes.py
themperek/pyuvm
12abf6b0a631321c0fcce6ebbc04b8cc9900c6a8
[ "Apache-2.0" ]
null
null
null
tests/test_06_reporting_classes.py
themperek/pyuvm
12abf6b0a631321c0fcce6ebbc04b8cc9900c6a8
[ "Apache-2.0" ]
null
null
null
import pyuvm_unittest from pyuvm import *
34.73913
68
0.524406
283c7a1056bf5070026e3e0618dc2649d53dd2ec
203
py
Python
sucursal_crud/sucursal_crud_api/models/puntoDeRetiro.py
cassa10/challenge-api
0bcc4f38b049f930faca45b80d869835650e2a23
[ "MIT" ]
null
null
null
sucursal_crud/sucursal_crud_api/models/puntoDeRetiro.py
cassa10/challenge-api
0bcc4f38b049f930faca45b80d869835650e2a23
[ "MIT" ]
null
null
null
sucursal_crud/sucursal_crud_api/models/puntoDeRetiro.py
cassa10/challenge-api
0bcc4f38b049f930faca45b80d869835650e2a23
[ "MIT" ]
null
null
null
from django.db import models from django.core.validators import MinValueValidator from .nodo import Nodo
33.833333
70
0.817734
283cfb68139e552afe4cbfabaafdcde926934b65
8,429
py
Python
trinity/sync/header/chain.py
g-r-a-n-t/trinity
f108b6cd34ed9aabfcf9e235badd91597650ecd5
[ "MIT" ]
1
2021-04-07T07:33:28.000Z
2021-04-07T07:33:28.000Z
trinity/sync/header/chain.py
g-r-a-n-t/trinity
f108b6cd34ed9aabfcf9e235badd91597650ecd5
[ "MIT" ]
null
null
null
trinity/sync/header/chain.py
g-r-a-n-t/trinity
f108b6cd34ed9aabfcf9e235badd91597650ecd5
[ "MIT" ]
null
null
null
import asyncio from typing import Sequence from async_service import Service, background_asyncio_service from eth.abc import BlockHeaderAPI from eth.exceptions import CheckpointsMustBeCanonical from eth_typing import BlockNumber from trinity._utils.pauser import Pauser from trinity.chains.base import AsyncChainAPI from trinity.db.eth1.chain import BaseAsyncChainDB from trinity.protocol.eth.peer import ETHPeerPool from trinity.protocol.eth.sync import ETHHeaderChainSyncer from trinity._utils.logging import get_logger from trinity.sync.common.checkpoint import Checkpoint from trinity.sync.common.constants import ( MAX_BACKFILL_HEADERS_AT_ONCE, MAX_SKELETON_REORG_DEPTH, ) from trinity.sync.common.headers import persist_headers from trinity.sync.common.strategies import ( FromCheckpointLaunchStrategy, FromGenesisLaunchStrategy, FromBlockNumberLaunchStrategy, SyncLaunchStrategyAPI, )
37.629464
97
0.626409
283d50169f9d4063fc968899a7356c0ef91c4024
2,436
py
Python
plato/clients/scaffold.py
iQua/plato
76fdac06af8b4d85922cd12749b4a687e3161745
[ "Apache-2.0" ]
null
null
null
plato/clients/scaffold.py
iQua/plato
76fdac06af8b4d85922cd12749b4a687e3161745
[ "Apache-2.0" ]
null
null
null
plato/clients/scaffold.py
iQua/plato
76fdac06af8b4d85922cd12749b4a687e3161745
[ "Apache-2.0" ]
1
2021-05-18T15:03:32.000Z
2021-05-18T15:03:32.000Z
""" A federated learning client using SCAFFOLD. Reference: Karimireddy et al., "SCAFFOLD: Stochastic Controlled Averaging for Federated Learning" (https://arxiv.org/pdf/1910.06378.pdf) """ import os from dataclasses import dataclass import torch from plato.clients import simple
35.304348
93
0.694992
283e3728c74f274d987dc75b56ca98081fe4485b
2,307
py
Python
test/test.py
SK-415/bilireq
ce4dfa2ae05a88291162907b86caf29ab868bedc
[ "MIT" ]
2
2021-10-20T06:32:35.000Z
2022-03-26T11:40:07.000Z
test/test.py
SK-415/bilireq
ce4dfa2ae05a88291162907b86caf29ab868bedc
[ "MIT" ]
1
2021-12-06T01:37:08.000Z
2021-12-06T01:37:08.000Z
test/test.py
SK-415/bilireq
ce4dfa2ae05a88291162907b86caf29ab868bedc
[ "MIT" ]
null
null
null
import asyncio
import sys
from pathlib import Path

sys.path.append(str(Path(__file__).parent.parent))

from bilireq.auth import Auth
from bilireq.dynamic import get_user_dynamics, get_followed_dynamics_update_info, get_followed_new_dynamics, get_followed_history_dynamics
from bilireq.live import get_rooms_info_by_ids
from bilireq.login import Login, get_token_info, refresh_token
from bilireq.user import get_user_info
from test_data import AUTH, PASSWORD, PHONE, UID, USERNAME


async def main():
    # Placeholder body (assumption: the original main() is missing from this
    # dump); exercise one imported endpoint so the script still runs.
    print(await get_user_info(UID))


asyncio.run(main())
30.355263
138
0.704378
283f0444ed2c9cb2e8181317df155e9ffdbf38c6
1,105
py
Python
scripts/pgmviz.py
anindex/auto_localization
e8acc6fb4a4221115e2d4f9ba87fd077ad741b70
[ "MIT" ]
1
2020-09-03T14:29:27.000Z
2020-09-03T14:29:27.000Z
scripts/pgmviz.py
anindex/auto_localization
e8acc6fb4a4221115e2d4f9ba87fd077ad741b70
[ "MIT" ]
null
null
null
scripts/pgmviz.py
anindex/auto_localization
e8acc6fb4a4221115e2d4f9ba87fd077ad741b70
[ "MIT" ]
2
2019-09-26T15:20:37.000Z
2021-07-14T11:00:49.000Z
import re import numpy def read_pgm(filename, byteorder='>'): """Return image data from a raw PGM file as numpy array. Format specification: http://netpbm.sourceforge.net/doc/pgm.html """ with open(filename, 'rb') as f: buffer = f.read() try: header, width, height, maxval = re.search( b"(^P5\s(?:\s*#.*[\r\n])*" b"(\d+)\s(?:\s*#.*[\r\n])*" b"(\d+)\s(?:\s*#.*[\r\n])*" b"(\d+)\s(?:\s*#.*[\r\n]\s)*)", buffer).groups() except AttributeError: raise ValueError("Not a raw PGM file: '%s'" % filename) return numpy.frombuffer(buffer, dtype='u1' if int(maxval) < 256 else byteorder+'u2', count=int(width)*int(height), offset=len(header) ).reshape((int(height), int(width))) if __name__ == "__main__": from matplotlib import pyplot image = read_pgm("/home/anindex/robotics_ws/src/krp_localization/maps/test.pgm", byteorder='<') pyplot.imshow(image, pyplot.cm.gray) pyplot.show()
34.53125
99
0.529412
283fe692e8590b67f92e91991108dc8259bb2861
406
py
Python
sgnlp/models/span_extraction/__init__.py
vincenttzc/sgnlp
44ae12a5ae98c9a1945d346e9373854c7d472a4b
[ "MIT" ]
null
null
null
sgnlp/models/span_extraction/__init__.py
vincenttzc/sgnlp
44ae12a5ae98c9a1945d346e9373854c7d472a4b
[ "MIT" ]
null
null
null
sgnlp/models/span_extraction/__init__.py
vincenttzc/sgnlp
44ae12a5ae98c9a1945d346e9373854c7d472a4b
[ "MIT" ]
null
null
null
from .config import RecconSpanExtractionConfig from .tokenization import RecconSpanExtractionTokenizer from .modeling import RecconSpanExtractionModel from .preprocess import RecconSpanExtractionPreprocessor from .postprocess import RecconSpanExtractionPostprocessor from .train import train from .eval import evaluate from .utils import load_examples from .data_class import RecconSpanExtractionArguments
40.6
58
0.889163
2841fd174e918cba71310ebfefa31472caa5fe2f
331
py
Python
pandoc-starter/MarkTex/marktex/config.py
riciche/SimpleCVReproduction
4075de39f9c61f1359668a413f6a5d98903fcf97
[ "Apache-2.0" ]
923
2020-01-11T06:36:53.000Z
2022-03-31T00:26:57.000Z
pandoc-starter/MarkTex/marktex/config.py
riciche/SimpleCVReproduction
4075de39f9c61f1359668a413f6a5d98903fcf97
[ "Apache-2.0" ]
25
2020-02-27T08:35:46.000Z
2022-01-25T08:54:19.000Z
pandoc-starter/MarkTex/marktex/config.py
riciche/SimpleCVReproduction
4075de39f9c61f1359668a413f6a5d98903fcf97
[ "Apache-2.0" ]
262
2020-01-02T02:19:40.000Z
2022-03-23T04:56:16.000Z
'''Don't change the basic param'''
import os

'''prog path'''
config_path = os.path.split(__file__)[0]
marktemp_path = os.path.join(config_path,"markenv.tex")

'''tools setting'''
image_download_retry_time = 10
# wait_manully_if_all_failed = False
give_rele_path = True
23.642857
55
0.770393
28420d9ddb5dd0a224753623044f62aac5eba76f
669
py
Python
ProgramsToRead/ExercisesLists/Lista05/Exer12Lista05.py
ItanuRomero/PythonStudyPrograms
2b784b2af068b34e65ddf817ca8d99c1ca3a710e
[ "MIT" ]
null
null
null
ProgramsToRead/ExercisesLists/Lista05/Exer12Lista05.py
ItanuRomero/PythonStudyPrograms
2b784b2af068b34e65ddf817ca8d99c1ca3a710e
[ "MIT" ]
null
null
null
ProgramsToRead/ExercisesLists/Lista05/Exer12Lista05.py
ItanuRomero/PythonStudyPrograms
2b784b2af068b34e65ddf817ca8d99c1ca3a710e
[ "MIT" ]
null
null
null
# Question 12. Write a function that receives a string as a parameter
# and returns another string with its characters shuffled. For example:
# if the function receives the word "python", it may return "npthyo",
# "ophtyn" or any other possible combination, at random. Standardize the
# function so that all characters are returned in upper case or lower
# case, regardless of how they were typed.
import random


def embaralha(palavra):
    # Assumed implementation: the original function body is missing from
    # this dump. Shuffle the characters and print the result in upper case.
    print(''.join(random.sample(palavra, len(palavra))).upper())


palavra = input('Type a word: ')
embaralha(palavra)
37.166667
71
0.751868
28426aef923f9eca775a9b7c35cef0f1597b7c28
54,872
py
Python
DataScience/Numpy.py
AlPus108/Python_lessons
0e96117d9a8b76fd651e137fc126ddedaa6accd9
[ "MIT" ]
null
null
null
DataScience/Numpy.py
AlPus108/Python_lessons
0e96117d9a8b76fd651e137fc126ddedaa6accd9
[ "MIT" ]
null
null
null
DataScience/Numpy.py
AlPus108/Python_lessons
0e96117d9a8b76fd651e137fc126ddedaa6accd9
[ "MIT" ]
null
null
null
# Numpy
# NumPy docs: https://numpy.org/doc/stable/
# NumPy ufuncs: https://numpy.org/devdocs/reference/ufuncs.html
# NumPy is the base package for numerical computing in Python. Unlike a
# Python list, a numpy array (ndarray) is homogeneous and stored compactly,
# so vectorized operations on it run much faster than loops over plain lists.

import numpy as np

# ------------------- Creating arrays ------------------------
# ------------------------------- array() ------------------------------
# np.array() builds an ndarray from a Python sequence.

a = [2, 4, 5]  # an ordinary Python list
# convert the list into a numpy array (ndarray)
a_numpy = np.array(a)
# the array can also be created directly:
# a = np.array([2, 4, 5])

print(type(a_numpy))  # <class 'numpy.ndarray'>
# the element type is reported by the dtype attribute
print(a_numpy.dtype)  # int32
# 32 is the bit width of the integer type; int16 and int64 also exist,
# while a plain Python int has no fixed width at all.

print(a_numpy)  # [2 4 5]

# when ints and floats are mixed, everything is upcast to float
b = [2, 3.14]
b_numpy = np.array(b)
print(b_numpy.dtype)  # float64 - 64-bit float
print(b_numpy)  # [2.   3.14]

# when a string is present, everything is upcast to strings
c = [2, 3.14, 'kotiki']
c_numpy = np.array(c)
print(c_numpy)  # ['2' '3.14' 'kotiki'] - every element became a str
# int widens to float, and both widen to str; never the other way round.
print(c_numpy.dtype)  # <U32 - unicode strings

# a nested list of a different length cannot be unified with the scalars
my_list = [2, 3.14, 'kotiki', [2, 3, 4]]
my_list_numpy = np.array(my_list)
print(my_list_numpy)  # [2 3.14 'kotiki' list([2, 3, 4])]
print(my_list_numpy.dtype)  # object
# numpy falls back to the generic object dtype here, which loses most of
# numpy's speed advantages. forcing an int dtype simply fails:
# np.array([2, 3.14, 'kotiki', [2,3,4]], dtype='int64')
# raises: ValueError: invalid literal for int() with base 10: 'kotiki'

# ----------------------------- Scalar types ---------------------------
# NumPy ships its own fixed-width scalar types: np.int8, np.int16, ...
a_python = 123
a_numpy = np.int32(123)  # a 32-bit (4-byte) integer
print(type(a_python))  # <class 'int'> - built-in Python int
print(type(a_numpy))  # <class 'numpy.int32'> - numpy int

# uint - unsigned integers:
# uint8: 0 -- 2**8 - 1 (255)
# uint16: 0 -- 2**16 - 1 (65535)
# uint32: 0 -- 2**32 - 1 (4294967295)
# uint64: 0 -- 2**64 - 1 (18446744073709551615)
# int - signed integers:
# int8: -128 -- +127 (256 values in total)
# int16: -32768 -- +32767
# int32: -2147483648 -- +2147483647
# int64: -9223372036854775808 -- +9223372036854775807

# ----------------------------- dtype inference -----------------------
a = np.array([1, 2, 3, 4, 5])
b = np.array(['cat', 'mouse', 'dog', 'rat'])
print(a.dtype)  # dtype('int64')
print(b.dtype)  # dtype('<U5') - the longest string, 'mouse', has 5 characters
# float types come as float16, float32 and float64 (the default);
# complex types as complex64 (two float32) and complex128 (two float64).
# the dtype can be fixed explicitly at creation time:
x = np.array([[1,2], [3,4], [5,6], [7,8]], dtype=np.float32)
print('x:', x)
# [[1. 2.]
#  [3. 4.]
#  [5. 6.]
#  [7. 8.]]
print(x.dtype)  # float32
print(type(x))  # <class 'numpy.ndarray'>

# --------------------------------- shape ---------------------------
# shape is a tuple with the array's size along each axis
print(a.shape)  # (5,) - 'a' is one-dimensional with 5 elements
print(b.shape)  # (4,)

# -------------------------------- size ----------------------------
# size is the total number of elements
print(a.size)  # 5 - for a 1D array it matches shape[0]
# for a 10x10 matrix, shape would be (10, 10) while size would be 100

a = np.array([1, 2, 3, 4, 5, 6])
print(a)
print(a.shape)  # (6,)
# the shape is changed with reshape()

# ---------------------------- Multidimensional arrays -------------------------------
# a matrix (2D array) is built from a nested list; the inner lists become
# its rows.
my_array = np.array([[1, 2, 3], [4, 5, 6]])
print(my_array.shape)
print('2D array:\n', my_array)
# [[1 2 3]
#  [4 5 6]]

# ---------------------------------- reshape() -----------------------
# reshape() rearranges the same elements into a new shape; the total
# number of elements must stay unchanged.
two_dim_arr = np.arange(12).reshape(3, 4)
print('3x4 array:\n', two_dim_arr)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]

a_new = a.reshape(3, 2)
print(a_new)
# [[1 2]
#  [3 4]
#  [5 6]]
print(a_new.shape)  # (3, 2)
print(a_new.size)  # 6

# another shape with the same six elements
a_new = a.reshape(2, 3)
print(a_new)
# [[1 2 3]
#  [4 5 6]]
print(a_new.shape)  # (2, 3)
print(a_new.size)  # 6

a_new = a.reshape(1, 6)
print(a_new)  # [[1 2 3 4 5 6]] - still 2D (note the double brackets)
print(a_new.shape)  # (1, 6)
print(a_new.size)  # 6
# a 6-element array can be reshaped to 1x6, 2x3, 3x2 or 6x1,
# but not, say, to 2x5 - the sizes would not match.

# nested lists of unequal length do not form a regular 2D array
list = [[1, 2, 3], [4, 5]]
np_list = np.array(list)
print(np_list)  # [list([1, 2, 3]) list([4, 5])] - an object array of lists

# with equal-length rows numpy builds a proper matrix
list_2 = [[1, 2, 3], [4, 5, 6]]
np_list_2 = np.array(list_2)
print(np_list_2)
# [[1 2 3]
#  [4 5 6]]

two_dim_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
two_dim_np = np.array(two_dim_list)
print('3x3 array:\n', two_dim_np)
# [[1 2 3]
#  [4 5 6]
#  [7 8 9]]

# ----------------------- ravel() -------------------------------------
# ravel() flattens a 2D array into 1D. The order argument controls the
# traversal order:
# print(a1_2d)
# > [[ 1  2  3  4]
#    [ 5  6  7  8]
#    [ 9 10 11 12]]
# print(a1_2d.ravel())  # row by row (C order, the default)
# > [ 1  2  3  4  5  6  7  8  9 10 11 12]
# print(a1_2d.ravel(order='F'))  # column by column (Fortran order)
# > [ 1  5  9  2  6 10  3  7 11  4  8 12]
print('ravel:', two_dim_np.ravel())  # [1 2 3 4 5 6 7 8 9]
# by default the array is unrolled row by row;
# with 'F' it is unrolled column by column
print('ravel F:', two_dim_np.ravel('F'))
# [1 4 7 2 5 8 3 6 9]
# reshape() accepts the same order flag
print('reshape F:', two_dim_np.reshape((3,3), order='F'))
# [[1 2 3]
#  [4 5 6]
#  [7 8 9]]

# -------------------------- Indexing --------------------------------
print(a)  # [1 2 3 4 5 6]
print(a[0])  # 1
print(a[1])  # 2
print(a[-1])  # 6 - negative indices count from the end

# ----------------------- Indexing 2D arrays ----------------------------
# either chained single indices or a comma-separated pair (row, column):
print('element [1][1]:', two_dim_arr[1][1])  # 5
# the comma form is preferred
print('element [1, 1]:', two_dim_arr[1, 1])  # 5
print(two_dim_arr[:,1])  # [1 5 9] - the whole second column
print(two_dim_arr[1:,1])  # [5 9] - second column, from row 1 down
# two_dim_arr:
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [ 8  9 10 11]]
# a bare ':' means "take everything along this axis"
print('row 1:', two_dim_arr[1,:])  # [4 5 6 7]
print('column 1:', two_dim_arr[:,1])  # [1 5 9]

# a slice of an array is a view, not a copy:
a = two_dim_arr[2,:]
print('view of row 2:', a)  # [ 8  9 10 11]
a[0] = 25
print(a)  # [25  9 10 11]
print(two_dim_arr)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [25  9 10 11]] - the source array changed too!
# to work with an independent copy, build a new array explicitly:
two_dim_arr_cop = np.array(two_dim_arr)
print('copy:', two_dim_arr_cop)
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [25  9 10 11]]
two_dim_arr_cop[0,0] = 1000
print(two_dim_arr_cop)
# [[1000    1    2    3]
#  [   4    5    6    7]
#  [  25    9   10   11]]
print(two_dim_arr)  # the original is unchanged this time
# [[ 0  1  2  3]
#  [ 4  5  6  7]
#  [25  9 10 11]]

# ------------------------ Slices -----------------------------
print(a[:2])  # [1 2] - from the start up to (not including) index 2
print(a[2:])  # [3 4 5 6] - from index 2 to the end
print(a[:-2])  # [1 2 3 4] - everything but the last two
print(a[1:4])  # [2 3 4] - from index 1 up to (not including) index 4
print(a[::])  # the whole array, same as a[:]

# assignment to a slice broadcasts the scalar over it (broadcasting)
mas = np.arange(0, 10)
print(mas)  # [0 1 2 3 4 5 6 7 8 9]
# assign 10 to everything from index 5 on
mas[5:] = 10
print(mas)  # [ 0  1  2  3  4 10 10 10 10 10]
# a slice is again only a view:
mas_new = mas[3:8]
print(mas_new)  # [ 3  4 10 10 10]
mas_new[2:] = 7
print(mas_new)  # [3 4 7 7 7]
# the change is visible in the source array
print(mas)  # [ 0  1  2  3  4  7  7  7 10 10] - changed through the view!
# how to get an independent copy instead?
# copy()
# ------------------------------------ copy() ---------------------------------
# copy() returns an independent copy of the data
mas_c = mas.copy()
print('copy:', mas_c)  # [ 0  1  2  3  4  7  7  7 10 10]
mas_c[:] = 0
print(mas_c)  # [0 0 0 0 0 0 0 0 0 0]
print(mas)  # [ 0  1  2  3  4  7  7  7 10 10] - the original is intact

# -------------------------- Boolean masks, filtering --------------------------------------
# comparisons are applied elementwise and yield boolean arrays
print(a > 5)  # [False False False False False  True]
print(a % 2 == 0)  # [False  True False  True False  True]

mask_1 = a > 10  # a boolean "mask"
print(mask_1)  # [ True False False  True]
# indexing with a mask keeps the elements where the mask is True
print('masked:', a[mask_1])  # [25 11]
print(a[a < 5])  # the mask can also be written inline
# e.g. keep only the even elements (remainder of division by 2 is 0)
print(a[a % 2 == 0])  # [2 4 6]
# conditions are combined with & (and) and | (or)
print(a[(a > 2) & (a <= 5)])  # [3 4 5]
print(a[a > 4])  # [5 6]

# a list comprehension can feed a numpy array
lst_num = [i for i in range(30)]
print(lst_num)  # [0, 1, 2, ..., 29] - a plain list of 30 numbers
np_lst_num = np.array(lst_num)
print(np_lst_num)  # [ 0  1  2 ... 29]
np_lst_num = np_lst_num.reshape(6, 5)
print(np_lst_num)
# [[ 0  1  2  3  4]
#  [ 5  6  7  8  9]
#  [10 11 12 13 14]
#  [15 16 17 18 19]
#  [20 21 22 23 24]
#  [25 26 27 28 29]]

# slices work per axis: first two rows, all columns
print(np_lst_num[:2, :])
# [[0 1 2 3 4]
#  [5 6 7 8 9]]
# first three rows (0, 1, 2), every second column (start:stop:step)
print(np_lst_num[:3, ::2])
# [[ 0  2  4]
#  [ 5  7  9]
#  [10 12 14]]
# first four rows, columns 0 and 1 picked by an index list
print(np_lst_num[:4, [0, 1]])
# [[ 0  1]
#  [ 5  6]
#  [10 11]
#  [15 16]]
print('array:', np_lst_num)
# paired index lists pick individual elements: (0, 4) and (1, 2)
print(np_lst_num[[0, 1], [4, 2]])  # [4 7]

# sums along an axis (axis=1 sums within each row)
np_lst_sum = np_lst_num.sum(axis=1)
print(np_lst_sum)  # [ 10  35  60  85 110 135] - one sum per row

# select the rows whose sum is between 50 and 100 (inclusive).
# a chained comparison does NOT work on arrays:
# print(np_lst_num[50 <= np_lst_sum <= 100, :])  # raises ValueError
# mask = (50 <= np_lst_sum <= 100)  # fails for the same reason
# build the mask from two comparisons instead:
mask = (np_lst_sum >= 50) & (np_lst_sum <= 100)
print(mask)  # [False False  True  True False False]
print(np_lst_num[mask, :])  # ':' keeps all the columns
# [[10 11 12 13 14]
#  [15 16 17 18 19]]
# exactly the rows with sums 60 and 85

# aggregations over the whole 2D array or along one axis:
print('total sum:', np_lst_num.sum())  # 435
print('column minima:', np_lst_num.min(axis=0))  # [0 1 2 3 4]
print('row minima:', np_lst_num.min(axis=1))  # [ 0  5 10 15 20 25]

# ------------------------ Iterating over numpy arrays -------------------------------
# a 1D array iterates element by element
print(a)  # [25  9 10 11]
for i in a:
    print(i)
# 25
# 9
# 10
# 11

# comprehensions over arrays also work
a_arr_1 = np.array([i for i in a])
print('copy via comprehension:', a_arr_1)  # [25  9 10 11]
a_arr_2 = np.array([i for i in range(15)])
print('range array:', a_arr_2)  # [ 0  1  2 ... 14]

# membership test: which elements of a_arr_1 occur in a_arr_2?
mask = np.array([(i in a_arr_2) for i in a_arr_1])
print(mask)  # [False  True  True  True]
# 25 is not in a_arr_2, the rest are.

# index-based iteration over a 1D array
print(a.shape)  # (6,) - the loop runs over the 6 indices starting at 0
for i in range(a.shape[0]):  # a.shape[0] is the length, 6
    print(a[i])
# prints 1 2 3 4 5 6, one value per line

# index-based iteration over a 2D array
print(np_lst_num.shape)  # (6, 5) - 6 rows, 5 columns
for i in range(np_lst_num.shape[0]):  # shape[0] - the rows
    print('row ', i, sep='')  # numbering starts at 0; use i+1 to start at 1
    for j in range(np_lst_num.shape[1]):  # shape[1] - the columns
        print(np_lst_num[i, j])
    print()
# prints "row 0" followed by 0 1 2 3 4 (one value per line), then "row 1"
# followed by 5 6 7 8 9, and so on up to "row 5" with 25..29.
# to print each row on a single line, accumulate it in a string:
curr_str = ''
for i in range(np_lst_num.shape[0]):  # iterate over the rows
curr_str = '' # , , , # print(' ', i, sep='') for j in range(np_lst_num.shape[1]): # ( - [1]) i curr_str += str(np_lst_num[i, j]) + ' ' # # str print(curr_str) # # 0 1 2 3 4 # 5 6 7 8 9 # 10 11 12 13 14 # 15 16 17 18 19 # 20 21 22 23 24 # 25 26 27 28 29 # numpy # numpy - , . # ------------------------ NumPy -------------------------- # ----------------------------- ------------------------------------ # 'a' print(a) # [1 2 3 4 5 6] # print(np_lst_num) # [[ 0 1 2 3 4] # [ 5 6 7 8 9] # [10 11 12 13 14] # [15 16 17 18 19] # [20 21 22 23 24] # [25 26 27 28 29]] # # -------------------------------- sum() -------------------------------- # print(a.sum()) # 21 print(np_lst_num.sum()) # 435 # , print(np_lst_num.sum(axis=0)) # - . # [75 81 87 93 99] # - 435 # , print(np_lst_num.sum(axis=1)) # - . # [ 10 35 60 85 110 135] # -------------------------------- mean() --------------------------------- # print(a.mean()) # 3.5 # -------------------------------- max() ---------------------------------- # print(a.max()) # 6 # np.max(a) print(' ', np_lst_num.max()) # print(np_lst_num.max(axis=0)) # [25 26 27 28 29] - print(np_lst_num.max(axis=1)) # [ 4 9 14 19 24 29] - # -------------------------------- min() --------------------------------- # print(a.min()) # 1 # np.min(a) # --------------------------------- argmax() argmin() ----------------------- # / print('. ', a.argmax()) # . 5 print('. ', a.argmin()) # . 0 # -------------------------------- prod() --------------------------------- # () print(a.prod()) # 720 # --------------------------------------- -------------------------- # print(b) # ['cat' 'mouse' 'dog' 'rat'] # --------------------------------- sort() --------------------------- b.sort() print(b) # ['cat' 'dog' 'mouse' 'rat'] - ( ) # d = [24, 65, 1, 23, 235, 4578, 12] d_numpy = np.array(d) # numpy d_numpy.sort() print(d_numpy) # [ 1 12 23 24 65 235 4578] # d_numpy = d_numpy[::-1] # -1 - print(d_numpy) # [4578 235 65 24 23 12 1] d_numpy = d_numpy[::-2] # -2 - 2 print(d_numpy) # [ 1 23 65 4578] # ----------------------- -------------------------- # -------------------------- arange() ------------------------- # - # numpy # - arange() , . print(np.arange(5)) # [0 1 2 3 4] # , range() # print(np.arange(3, 6)) # # [3 4 5] # print(np.arange(3, 16, 5)) # [ 3 8 13] # a = np.arange(5) print(a) # [0 1 2 3 4] b = np.arange(3, 8) print(b) # [3 4 5 6 7] # a_m = np.arange(12).reshape(3, 4) print(' ', a_m) # [[ 0 1 2 3] # [ 4 5 6 7] # [ 8 9 10 11]] # ---------------------------------- -------------------------------- print(a * 2) # [0 2 4 6 8] - 2 print(a ** 2) # [ 3 5 7 9 11] - # # - , () . # 'a' 'b' # print(' ', a + b) # [ 3 5 7 9 11] # , # NumPy # List , () # List, for, . # NumPy , , . # print(' ', a + a) # [0 2 4 6 8] # print(a - b) # [-3 -3 -3 -3 -3] print(a / b) # [0. 0.25 0.4 0.5 0.57142857] print(a * b) # [ 0 4 10 18 28] print(a ** b) # [ 0 1 32 729 16384] # print(b // a) # [0 4 2 2 1] # - 0: RuntimeWarning: divide by zero encountered in floor_divide # ( ) print(a + 1) # 1 # [1 2 3 4 5] # print(' ', a / 0) # [nan inf inf inf inf] # 0. 0 0 not a number (nan) # 0 infinity (inf) # print(a / a) # [nan 1. 1. 1. 1.] # - . . a ** 2 # # a_sqrt = np.sqrt(a) # print(' ', a_sqrt) # [0. 1. 1.41421356 1.73205081 2. ] # print(' ', np.exp(a)) # [ 1. 
2.71828183 7.3890561 20.08553692 54.59815003] # print((a * b - a) ** 2) # [ 0 9 64 225 576] # - n = ((a * b - a) ** 2).sum() print(n) # 874 n = ((a * b - a) ** 2).mean() print(n) # 174.8 # v = a * 5 + b[0] * 17 # a * 5 - 'a' ( ) - # 'a' 0 b 17 print(v) # [51 56 61 66 71] # C NumPy . List # List . , List . # NumPy, numpy-, , . # . # , List NumPy # -------------------------- -------------------------------- # . # ------------------------- ----------------------------- # a_list = [[3, 6, 2, 7], [9, 2, 4, 8], [8, 2, 3, 6]] # numpy np_a_list = np.array(a_list) # np_list = np.array([[1,2], [3,4], [5,6], [7,8]]) # print(np_list) # [[1 2] # [3 4] # [5 6] # [7 8]] # print(a_list) # [[3, 6, 2, 7], [9, 2, 4, 8], [8, 2, 3, 6]] # print(np_a_list[0]) # [3 6 2 7] # print(np_a_list[1]) # [9 2 4 8] # print(np_a_list[-1]) # [8 2 3 6] # print(np_a_list[1, 2]) # 4 # # print(np_a_list[1:, 0]) # [9 8] - # - print(np_a_list[:, 1]) # [6 2 2] # . # . - . # -------------- - numpy- --------------------- # ------------------------------------ empty() ------------------------ # - A = np.empty((3,4), dtype=np.float32) # . , , # - . # , , print(' \n ', A) # [[-1.4012985e-45 2.8025969e-44 0.0000000e+00 0.0000000e+00] # [ 0.0000000e+00 1.7950633e-42 6.0185311e-36 2.9427268e-44] # [-4.9230647e-03 -1.0303581e-03 -1.8485262e-27 1.4026998e-42]] # , , , . # , , , - , . # . # -------------------- ones_like() -------------------------------------- # numpy- , . # numpy- # - ones_like() b_ = np.ones_like(np_a_list) # , np_a_list # , print(b_) # [[1 1 1 1] # [1 1 1 1] # [1 1 1 1]] # ------------------------ ones() ------------------------------ # - np.ones() c_ = np.ones((12, 8), dtype=np.complex64) # 128 # ( - float) print(c_) # [[1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j] # [1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j 1.+0.j]] # , . # ---------------------------------- zeros() -------------------------------- # , - np.zeros(). . a1 = np.zeros(10) # 10 print(a1) # [0. 0. 0. 0. 0. 0. 0. 0. 0. 0.] a_ = np.zeros((5, 3), dtype=np.float64) # 53 ( ) print(' \n ', a_) # [[0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.] # [0. 0. 0.]] # ----------------------------------- full() ------------------------------- # , D = np.full((5,3), 46, dtype=np.int64) # , , . print(' \n', D) # [[46 46 46] # [46 46 46] # [46 46 46] # [46 46 46] # [46 46 46]] # . 0,1 X1 = np.arange(0, 100, 0.1, dtype=np.float64) print(X1) # [ 0. 0.1 0.2 0.3 0.4 0.5 0.6 0.7 0.8 0.9 1. 1.1 1.2 1.3 # 1.4 1.5 1.6 1.7 1.8 1.9 2. 2.1 2.2 2.3 2.4 2.5 2.6 2.7 # 2.8 2.9 3. 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9 4. 4.1 # 4.2 4.3 4.4 4.5 4.6 4.7 4.8 4.9 5. 5.1 5.2 5.3 5.4 5.5 # 5.6 5.7 5.8 5.9 6. 6.1 6.2 6.3 6.4 6.5 6.6 6.7 6.8 6.9 # 7. 7.1 7.2 7.3 7.4 7.5 7.6 7.7 7.8 7.9 8. 8.1 8.2 8.3 # ........................................................... # 92.4 92.5 92.6 92.7 92.8 92.9 93. 93.1 93.2 93.3 93.4 93.5 93.6 93.7 # 93.8 93.9 94. 
# 95.2 95.3 95.4 95.5 95.6 95.7 95.8 95.9 96. 96.1 96.2 96.3 96.4 96.5
# 96.6 96.7 96.8 96.9 97. 97.1 97.2 97.3 97.4 97.5 97.6 97.7 97.8 97.9
# 98. 98.1 98.2 98.3 98.4 98.5 98.6 98.7 98.8 98.9 99. 99.1 99.2 99.3
# 99.4 99.5 99.6 99.7 99.8 99.9]
# With a float step, rounding errors accumulate. An element can come out
# as, say, 99.99999999 instead of the exact value, or the range may even reach 100.
# arange() therefore cannot guarantee the exact number of elements
# when the step is fractional.
# When a fixed number of points is needed, use linspace() instead.

# -------------------------------- linspace() ---------------------------
# linspace() takes the start, the stop and the number of points
d_ = np.linspace(0, 5, 5, dtype=np.float64) # start, stop, count
print(d_) # [0. 1.25 2.5 3.75 5. ]
# 5 evenly spaced points from 0 to 5, endpoints included -
# the step of 1.25 is computed automatically
d_1 = np.linspace(0, 10, 5) # same count, wider range
print(d_1) # [ 0. 2.5 5. 7.5 10. ] - now the step is 2.5
# (the step grows with the interval; the number of points stays fixed).
# linspace() is the go-to tool for sampling a function for a plot.
# combined with reshape() it can fill a multi-dimensional array (a tensor)
d_ = np.linspace(15, 37, 24, dtype=np.float64).reshape(2, 3, 4)
print(d_)
# [[[15. 15.95652174 16.91304348 17.86956522]
# [18.82608696 19.7826087 20.73913043 21.69565217]
# [22.65217391 23.60869565 24.56521739 25.52173913]]
#
# [[26.47826087 27.43478261 28.39130435 29.34782609]
# [30.30434783 31.26086957 32.2173913 33.17391304]
# [34.13043478 35.08695652 36.04347826 37. ]]]

# -------------------------------- logspace() -------------------------
# points spread evenly on a logarithmic scale.
# the arguments mirror linspace(), but they are exponents of 10
X3 = np.logspace(0, 100, 101, dtype=np.float64) # exponents from 0 to 100
print('logspace\n', X3)
# [1.e+000 1.e+001 1.e+002 1.e+003 1.e+004 1.e+005 1.e+006 1.e+007 1.e+008
# 1.e+009 1.e+010 1.e+011 1.e+012 1.e+013 1.e+014 1.e+015 1.e+016 1.e+017
# 1.e+018 1.e+019 1.e+020 1.e+021 1.e+022 1.e+023 1.e+024 1.e+025 1.e+026
# 1.e+027 1.e+028 1.e+029 1.e+030 1.e+031 1.e+032 1.e+033 1.e+034 1.e+035
# 1.e+036 1.e+037 1.e+038 1.e+039 1.e+040 1.e+041 1.e+042 1.e+043 1.e+044
# 1.e+045 1.e+046 1.e+047 1.e+048 1.e+049 1.e+050 1.e+051 1.e+052 1.e+053
# 1.e+054 1.e+055 1.e+056 1.e+057 1.e+058 1.e+059 1.e+060 1.e+061 1.e+062
# 1.e+063 1.e+064 1.e+065 1.e+066 1.e+067 1.e+068 1.e+069 1.e+070 1.e+071
# 1.e+072 1.e+073 1.e+074 1.e+075 1.e+076 1.e+077 1.e+078 1.e+079 1.e+080
# 1.e+081 1.e+082 1.e+083 1.e+084 1.e+085 1.e+086 1.e+087 1.e+088 1.e+089
# 1.e+090 1.e+091 1.e+092 1.e+093 1.e+094 1.e+095 1.e+096 1.e+097 1.e+098
# 1.e+099 1.e+100]
# every value is 10 raised to the corresponding exponent, starting from 1.

# ------------------------------ geomspace() -------------------------------
# a geometric progression between two endpoints
X4 = np.geomspace(1, 100, 101, dtype=np.float64) # the start must not be 0.
print('geomspace\n', X4)
# [ 1. 1.04712855 1.0964782 1.14815362 1.20226443
# 1.25892541 1.31825674 1.38038426 1.44543977 1.51356125
# 1.58489319 1.65958691 1.73780083 1.81970086 1.90546072
# 1.99526231 2.08929613 2.18776162 2.29086765 2.39883292
# 2.51188643 2.63026799 2.7542287 2.8840315 3.01995172
# 3.16227766 3.31131121 3.4673685 3.63078055 3.80189396
# 3.98107171 4.16869383 4.36515832 4.5708819 4.78630092
# 5.01187234 5.2480746 5.49540874 5.75439937 6.02559586
# 6.30957344 6.60693448 6.91830971 7.2443596 7.58577575
# 7.94328235 8.31763771 8.7096359 9.12010839 9.54992586
# 10. 10.47128548 10.96478196 11.48153621 12.02264435
# 12.58925412 13.18256739 13.80384265 14.45439771 15.13561248
# 15.84893192 16.59586907 17.37800829 18.19700859 19.05460718
# 19.95262315 20.89296131 21.87761624 22.90867653 23.98832919
# 25.11886432 26.30267992 27.54228703 28.84031503 30.1995172
# 31.6227766 33.11311215 34.67368505 36.30780548 38.01893963
# 39.81071706 41.68693835 43.65158322 45.70881896 47.86300923
# 50.11872336 52.48074602 54.95408739 57.54399373 60.25595861
# 63.09573445 66.0693448 69.18309709 72.44359601 75.8577575
# 79.43282347 83.17637711 87.096359 91.20108394 95.4992586
# 100. ]
# To summarize, the three range generators are:
# 1 - arange() - a fixed step
# 2 - linspace() - a fixed number of points, spaced linearly
# 3 - logspace() - like linspace, but the points are spread
# logarithmically.

# ------------------------------- identity() ------------------------
# the identity matrix.
E = np.identity(10, dtype=np.float64) # the argument is the matrix size
print('identity\n', E)
# [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
# [0. 1. 0. 0. 0. 0. 0. 0. 0. 0.]
# [0. 0. 1. 0. 0. 0. 0. 0. 0. 0.]
# [0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]
# [0. 0. 0. 0. 1. 0. 0. 0. 0. 0.]
# [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
# [0. 0. 0. 0. 0. 0. 1. 0. 0. 0.]
# [0. 0. 0. 0. 0. 0. 0. 1. 0. 0.]
# [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
# [0. 0. 0. 0. 0. 0. 0. 0. 0. 1.]]

# --------------------------------------- eye() -------------------------------
# eye() also builds an Identity matrix,
# with extra options such as offsetting the diagonal.
m_ = np.eye(3) # a 3x3 identity matrix
print(m_)
# [[1. 0. 0.]
# [0. 1. 0.]
# [0. 0. 1.]]

# ------------------------------------- arithmetic with the created arrays
# the arrays built above behave like any other
b_ = b_ * 2
print(b_)
# [[2 2 2 2]
# [2 2 2 2]
# [2 2 2 2]]
# a plain list and an array can even be mixed in one expression.
print(a_list * b_)
# [[ 6 12 4 14]
# [18 4 8 16]
# [16 4 6 12]]
# How does that work?
# numpy silently converts the list to an array first, then multiplies
# element-wise. Sooner or later everything numeric becomes an ndarray.
# There is far more than fits here - see the reference:
# https://www.numpy.org - the official numpy site

# -------------------------- random numbers (np.random) -------------------------
# documentation for np.random:
# https://docs.scipy.org/doc/numpy-1.15.0/reference/routines.random.html

# --------------------------------------- rand() ----------------------------
# uniformly distributed random floats - rand()
r_a = np.random.rand(3) # a vector of three random numbers
print(r_a) # [0.26295925 0.86094219 0.10804199]
# every value is drawn uniformly from [0, 1)
# a shape can be given instead - e.g. a matrix
r_a_2 = np.random.rand(3, 4)
print(r_a_2)
# [[0.35670423 0.33045392 0.69668886 0.87599185]
# [0.45371986 0.52534176 0.20873434 0.0511607 ]
# [0.60906173 0.17519525 0.85137775 0.17951122]]
# (your numbers will differ) a 3x4 matrix of uniform random values

# ---------------------------- randn() ---------------------------------
# standard normally distributed values - randn()
r_n = np.random.randn(5)
print(r_n) # [0.4619619 1.37952577 0.0024386 0.58737799 0.65258035]
# a matrix works the same way
r_n_2 = np.random.randn(3, 5)
print(r_n_2)
# [[ 0.11240541 0.88797712 -0.76090493 0.39046211 0.32887074]
# [ 1.64754416 -0.53392785 2.16685259 0.36912093 1.37752072]
# [-1.71455156 0.02808839 -1.50790139 -1.42062286 -1.62162641]]
# or even a 3-D array
r_n_3 = np.random.randn(2, 3, 5)
print('3-D normal sample\n', r_n_3)

# ------------------------------- normal() -------------------------------
# a normal distribution with explicit parameters:
# normal() takes the mean, the standard deviation and the shape,
# here a 5 x 5 matrix
# centred on 0 (the mean)
# with standard deviation 1
rr = np.random.normal(0, 1, (5, 5))
print('normal(0, 1) sample:')
print(rr)
# [[ 0.44609444 1.46648639 -0.71085575 -1.37248413 -1.50204124]
# [-0.91750705 0.65186486 -0.77443963 -0.41575527 -0.42991253]
# [ 0.57163415 0.307304 0.797994 -0.63930071 -0.91871729]
# [ 0.13777992 1.18317277 0.63241621 -1.70244244 -0.33194237]
# [-0.13911916 0.33009841 0.26635273 -0.20181408 0.69920153]]
# Will it be the same next time?
# No - every run draws a fresh sample, for example:
# [[ 1.24701787 -1.39613534 -0.26663356 0.689353 1.04496652]
# [ 1.38255089 -0.04465846 0.74089134 0.47437058 0.27041353]
# [-0.64641649 -0.4218203 0.75355706 0.57893304 -0.26714739]
# [ 1.11584443 0.75603918 0.3494514 0.45091684 0.1791541 ]
# [ 0.17316534 -1.37216487 0.26336408 0.83848343 -0.94691011]]
# the sample mean is close to the requested 0
print(rr.mean()) # -0.10263729011138162
# and the sample standard deviation is close to 1
print(rr.std()) # 0.8203049258132238

# ------------------------------- randint() -------------------------------
# random integers
r_i = np.random.randint(0, 5, 10) # 10 integers from 0 up to (not including) 5
# arguments: low (inclusive), high (exclusive), size
print(r_i) # [0 0 3 4 0 1 0 2 2 1]
# as before, size may be a shape tuple.
r_i_1 = np.random.randint(0, 10, (5, 5)) # arguments: low 0, high 10, shape 5x5
# remember: random output changes on every run,
# so your matrix will look different.
print(r_i_1)
# [[4 2 8 8 1]
# [3 8 1 7 0]
# [4 1 6 2 5]
# [1 8 0 6 7]
# [1 7 5 1 6]]
# a 5x5 matrix of integers from 0 up to (not including) 10.
# arrays like this turn up constantly in data science examples
# reshape into a 3-D array (a stack of matrices)
my_3d_array = np.random.randint(15, 37, 24).reshape(2, 3, 4)
print(my_3d_array.shape)
print('3-D array:\n', my_3d_array)
# [[[24 22 29 16]
# [22 21 32 29]
# [23 18 15 20]]
# [[35 17 28 29]
# [35 15 26 33]
# [33 17 28 26]]]
# a 3-D array can also be written out by hand as a nested list
my_3d_array_2 = np.array([[[1,2,3],[4,5,6]],[[7,8,9],[10,11,12]]]) # 2 blocks of 2x3.
print(my_3d_array_2.shape) # (2, 2, 3) - blocks, rows, columns
print(my_3d_array_2)
# [[[ 1 2 3]
# [ 4 5 6]]
# [[ 7 8 9]
# [10 11 12]]]
# note: reshape() returns a new array and does not modify in place,
# so calling it without assigning the result changes nothing
my_3d_array_2.reshape(2,2,3)
print('after reshape(2,2,3)\n', my_3d_array_2.shape) # (2, 2, 3) - shape unchanged
print(my_3d_array_2)
# [[[ 1 2 3]
# [ 4 5 6]]
# [[ 7 8 9]
# [10 11 12]]]

# --------------------------- uniform() --------------------------------
# uniform floats over an arbitrary interval
rr_v = np.random.uniform(0, 10, 5)
print(rr_v) # [2.47530261 4.54122979 6.77168887 9.72958147 6.40804463]

# ---------------------------------- tensors ----------------------------------
# a four-dimensional array: 2 x 3 x 4 x 5
my_tensor = np.random.random((2, 3, 4, 5))
# np.random.random is yet another uniform generator; it takes the shape as a tuple.
# read it as 2 groups of 3 blocks, each block a 4x5 matrix
print('tensor:')
print(my_tensor)
# [[[[0.0228543 0.55214868 0.28702753 0.7214254 0.06949648]
# [0.31906822 0.9058709 0.48809729 0.06816758 0.23458314]
# [0.40860497 0.64053844 0.20512333 0.36238068 0.12410515]
# [0.33983708 0.64704089 0.6163341 0.78368802 0.43499334]]
# [[0.46821275 0.81510566 0.24827603 0.16203632 0.78381811]
# [0.53502263 0.37368486 0.61733482 0.9794601 0.42355869]
# [0.21006761 0.96619641 0.36370357 0.47724621 0.30139114]
# [0.7190714 0.3189307 0.24450994 0.73103265 0.15298877]]
# [[0.84871557 0.22551803 0.70160413 0.58642556 0.36792143]
# [0.81993648 0.10407713 0.08112876 0.7922863 0.7666471 ]
# [0.17596218 0.48372704 0.34017527 0.92996698 0.65175539]
# [0.65137716 0.36383069 0.40195585 0.37628607 0.35812605]]]
# [[[0.9237488 0.44324777 0.66224455 0.70508636 0.94383757]
# [0.51201422 0.96100317 0.54144972 0.14302484 0.61772361]
# [0.30654129 0.98086756 0.16150838 0.53675196 0.59581246]
# [0.37647334 0.11722666 0.57120902 0.75427493 0.43846317]]
# [[0.47306444 0.67935 0.1798013 0.82778145 0.33059486]
# [0.61461207 0.8777961 0.38853882 0.36111666 0.45958279]
# [0.64165245 0.87634013 0.40377337 0.22649195 0.64921675]
# [0.73662732 0.91430142 0.77813516 0.75087619 0.44269413]]
# [[0.95071329 0.2349081 0.05702258 0.21231127 0.52474573]
# [0.71167152 0.0771482 0.1518983 0.73532062 0.14485726]
# [0.7643846 0.58094057 0.04403048 0.26821343 0.38422551]
# [0.52925118 0.73857103 0.13040456 0.25313323 0.5225339 ]]]]
print('tensor shape:')
print(my_tensor.shape) # (2, 3, 4, 5)

# ------------------------------- indexing a tensor -------------------------
# one element needs all four indices - here all 0
print(my_tensor[0, 0, 0, 0]) # 0.0228543
# slicing along the last axis
print(my_tensor[:, :, :, 0])
# [[[0.0228543 0.55214868 0.28702753 0.7214254 0.06949648]
# [0.46821275 0.81510566 0.24827603 0.16203632 0.78381811]
# [0.84871557 0.22551803 0.70160413 0.58642556 0.36792143]]
# [[0.9237488 0.44324777 0.66224455 0.70508636 0.94383757]
# [0.47306444 0.67935 0.1798013 0.82778145 0.33059486]
# [0.95071329 0.2349081 0.05702258 0.21231127 0.52474573]]]
# reductions take an axis too
print(my_tensor.sum(axis=3))
# summed over the last axis
# [[[2.91164516 1.84178005 2.69273399 3.40057715]
# [3.18896528 1.46415582 2.40975709 2.97385982]
# [2.02457623 2.35238754 2.42994069 2.23911234]]
# [[3.49961688 2.05497782 1.64820154 2.2829169 ]
# [2.50865381 2.78498322 2.07394522 2.54780752]
# [2.13326077 2.32027226 3.56564612 2.60500416]]]

# ------------------------ np.matrix ------------------------------------
# documentation for matrix:
# https://numpy.org/devdocs/reference/generated/numpy.matrix.html
# start from an ordinary 2-D array
arr1 = np.array([[1,2,3],[4,5,6]])
print(type(arr1)) # <class 'numpy.ndarray'>
# and wrap it in a matrix
my_matrix = np.matrix(arr1)
print(my_matrix)
print(type(my_matrix)) # <class 'numpy.matrix'>
# [[1 2 3]
# [4 5 6]]
# matrices expose linear-algebra operations directly,
# e.g. the transpose via .T
print(np.matrix(my_matrix).T)
# [[1 4]
# [2 5]
# [3 6]]
# rows have become columns, and columns rows
# the key difference is multiplication:
# for np.matrix the * operator is true matrix multiplication,
# not the element-wise product!!!
print(np.matrix(my_matrix) * np.matrix(my_matrix).T)
# [[14 32]
# [32 77]]

# ------------------------------- matrix products for ndarrays -------------------------------
# for plain arrays, * stays element-wise;
# a matrix product is spelled .dot
# two 3x2 arrays:
arr_1 = np.array([1, 2, 3, 4, 5, 6]).reshape(3,2)
print(arr_1)
# [[1 2]
# [3 4]
# [5 6]]
arr_2 = np.array([4, 3, 2, 6, 4, 3]).reshape(3,2)
print(arr_2)
# [[4 3]
# [2 6]
# [4 3]]
# element-wise product
print(arr_1 * arr_2)
# [[ 4 6]
# [ 6 24]
# [20 18]]
# for a matrix product the inner dimensions must agree,
# so transpose the first operand (giving 2x3) and use dot
arrT = arr_1.T
arr_3 = arrT.dot(arr_2)
print('matrix product:')
print(arr_3)
# [[30 36]
# [40 48]]

# ------------------------------------- structured arrays --------------------------------
# -------------------------- sorting by a named field ----------------
# one record combines several fields: a string (str), an integer (int) and a float.
dt = [('name', '<U10'), ('age', 'int32'), ('mark', 'float32')]
std_list = [('Alex', 20, 4.3), ('Kate', 19, 4.8), ('Maks', 21, 4.1),
            ('Marry', 22, 4.6), ('Denis', 18, 3.8), ('Ann', 21, 4.2)]
std_np = np.array(std_list, dtype=dt) # dtype names each field and its type, e.g. age is int32
print((np.sort(std_np, order='name')).reshape(6,1)) # sorted by name (alphabetically).
# reshape(6,1) prints one record per line
print()
print((np.sort(std_np, order='age')).reshape(6,1)) # sorted by age
print()
print((np.sort(std_np, order='mark'))[::-1].reshape(6,1)) # sorted by mark (descending)

# a field can also be assigned to in bulk
# ------------------------------------------------------
# set every student's age to 10
std_np['age'] = 10
print((np.sort(std_np, order='name'))[::-1]) # sorted by 'name', descending

my_3d_array_2 = np.random.uniform(17, 23, (2, 3, 4))
37.842759
129
0.64563
2842d30820f635256f73ae79bc6c16f824dc89f6
704
py
Python
setup.py
jlevy44/Submit-HPC
83dfd60587fab2c75e02f1c14b688b4bc51aff8c
[ "MIT" ]
1
2020-06-11T00:51:24.000Z
2020-06-11T00:51:24.000Z
setup.py
jlevy44/Submit-HPC
83dfd60587fab2c75e02f1c14b688b4bc51aff8c
[ "MIT" ]
null
null
null
setup.py
jlevy44/Submit-HPC
83dfd60587fab2c75e02f1c14b688b4bc51aff8c
[ "MIT" ]
1
2020-06-19T01:05:07.000Z
2020-06-19T01:05:07.000Z
from setuptools import setup with open('README.md','r', encoding='utf-8') as f: long_description = f.read() setup(name='submit_hpc', version='0.1.2', description='Collection of growing job submission scripts, not to replace workflow specifications.', url='https://github.com/jlevy44/Submit-HPC', author='Joshua Levy', author_email='joshualevy44@berkeley.edu', license='MIT', scripts=[], entry_points={ 'console_scripts':['submit-job=submit_hpc.job_runner:job'] }, packages=['submit_hpc'], long_description=long_description, long_description_content_type='text/markdown', install_requires=['click','pandas'])
37.052632
106
0.666193
28461474953cc9c257de317f17581d4ef1a01795
18,209
py
Python
DQN/network.py
Xin-Ye-1/HIEM
6764f579eef6ec92dd85a005af27419f630df7da
[ "Apache-2.0" ]
2
2021-04-12T02:41:00.000Z
2021-05-15T02:18:15.000Z
DQN/network.py
Xin-Ye-1/HIEM
6764f579eef6ec92dd85a005af27419f630df7da
[ "Apache-2.0" ]
null
null
null
DQN/network.py
Xin-Ye-1/HIEM
6764f579eef6ec92dd85a005af27419f630df7da
[ "Apache-2.0" ]
null
null
null
#! /usr/bin/env python import tensorflow as tf import tensorflow.contrib.slim as slim seed = 0
55.012085
123
0.473228
2847498b54c2f788df1761ffd02163a689964021
5,924
py
Python
128/utility.py
Jeffrey-Ede/Adaptive-Partial-STEM
dc13e64ba3fb8266d39a260780af615b170a3c88
[ "MIT" ]
3
2020-04-29T21:45:21.000Z
2021-08-13T16:01:14.000Z
128/utility.py
Jeffrey-Ede/intelligent-partial-STEM
dc13e64ba3fb8266d39a260780af615b170a3c88
[ "MIT" ]
null
null
null
128/utility.py
Jeffrey-Ede/intelligent-partial-STEM
dc13e64ba3fb8266d39a260780af615b170a3c88
[ "MIT" ]
null
null
null
import tensorflow as tf import itertools import numpy as np FLAGS = tf.flags.FLAGS def auto_name(name): """Append number to variable name to make it unique. Inputs: name: Start of variable name. Returns: Full variable name with number afterwards to make it unique. """ scope = tf.contrib.framework.get_name_scope() vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope) names = [v.name for v in vars] #Increment variable number until unused name is found for i in itertools.count(): short_name = name + "_" + str(i) sep = "/" if scope != "" else "" full_name = scope + sep + short_name if not full_name in [n[:len(full_name)] for n in names]: return short_name def alrc( loss, num_stddev=3, decay=0.999, mu1_start=2, mu2_start=3**2, in_place_updates=False ): """Adaptive learning rate clipping (ALRC) of outlier losses. Inputs: loss: Loss function to limit outlier losses of. num_stddev: Number of standard deviation above loss mean to limit it to. decay: Decay rate for exponential moving averages used to track the first two raw moments of the loss. mu1_start: Initial estimate for the first raw moment of the loss. mu2_start: Initial estimate for the second raw moment of the loss. in_place_updates: If False, add control dependencies for moment tracking to tf.GraphKeys.UPDATE_OPS. This allows the control dependencies to be executed in parallel with other dependencies later. Return: Loss function with control dependencies for ALRC. """ #Varables to track first two raw moments of the loss mu = tf.get_variable( auto_name("mu1"), initializer=tf.constant(mu1_start, dtype=tf.float32)) mu2 = tf.get_variable( auto_name("mu2"), initializer=tf.constant(mu2_start, dtype=tf.float32)) #Use capped loss for moment updates to limit the effect of outlier losses on the threshold sigma = tf.sqrt(mu2 - mu**2+1.e-8) loss = tf.where(loss < mu+num_stddev*sigma, loss, loss/tf.stop_gradient(loss/(mu+num_stddev*sigma))) #Update moment moving averages mean_loss = tf.reduce_mean(loss) mean_loss2 = tf.reduce_mean(loss**2) update_ops = [mu.assign(decay*mu+(1-decay)*mean_loss), mu2.assign(decay*mu2+(1-decay)*mean_loss2)] if in_place_updates: with tf.control_dependencies(update_ops): loss = tf.identity(loss) else: #Control dependencies that can be executed in parallel with other update #ops. Often, these dependencies are added to train ops e.g. alongside #batch normalization update ops. for update_op in update_ops: tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_op) return loss if __name__ == "__main__": pass
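# --- Illustrative usage sketch (an assumption, not part of the original file) ---
# Wiring alrc() into a TF1-style graph: clip the per-example loss before
# reducing it, then attach the moment-tracking ops that alrc() registered in
# tf.GraphKeys.UPDATE_OPS to the train op. The toy model and shapes below are
# invented for the example.
def _alrc_usage_example():
    x = tf.placeholder(tf.float32, [None, 8])
    y = tf.placeholder(tf.float32, [None, 1])
    pred = tf.layers.dense(x, 1)                       # placeholder model
    per_example_loss = tf.reduce_mean((pred - y)**2, axis=1)
    clipped = alrc(per_example_loss)                   # limit outlier losses
    total_loss = tf.reduce_mean(clipped)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):          # run the moment updates too
        train_op = tf.train.AdamOptimizer(1e-3).minimize(total_loss)
    return train_op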
33.280899
116
0.613774
2847f21bc2086528a4db0c276260fe7ae1c988d5
1,336
py
Python
tests/test_lexer.py
codyd51/camelback
2dd1269bcbc7ce35fcab1df7dfddce229c51e610
[ "MIT" ]
2
2018-11-22T16:45:24.000Z
2018-11-26T16:13:31.000Z
tests/test_lexer.py
codyd51/camelback
2dd1269bcbc7ce35fcab1df7dfddce229c51e610
[ "MIT" ]
null
null
null
tests/test_lexer.py
codyd51/camelback
2dd1269bcbc7ce35fcab1df7dfddce229c51e610
[ "MIT" ]
null
null
null
import os import unittest from camelback.lexer import Lexer
37.111111
113
0.355539
28484e4880b21cb0cfb818580cf5e99d4d59fc00
568
py
Python
tests/conftest.py
ljnsn/pycwatch
9fb8910b010e7e89357a9c6b99197697ee5a8cf6
[ "MIT" ]
1
2022-02-25T17:23:17.000Z
2022-02-25T17:23:17.000Z
tests/conftest.py
ljnsn/pycwatch
9fb8910b010e7e89357a9c6b99197697ee5a8cf6
[ "MIT" ]
1
2022-02-28T18:37:08.000Z
2022-02-28T18:37:08.000Z
tests/conftest.py
ljnsn/pycwatch
9fb8910b010e7e89357a9c6b99197697ee5a8cf6
[ "MIT" ]
null
null
null
"""Fixtures and configuration for the test suite.""" from pathlib import Path import pytest import vcr from pycwatch import CryptoWatchClient BASE_DIR = Path(__file__).parent.absolute() api_vcr = my_vcr = vcr.VCR( serializer="yaml", cassette_library_dir=BASE_DIR.joinpath("vcr_cassettes").as_posix(), record_mode="new_episodes", match_on=["uri", "method", "query"], decode_compressed_response=True, )
20.285714
71
0.739437
2848866b78d851fe0130aa00ed413f094e4d8df4
6,861
py
Python
cxphasing/CXResolutionEstimate.py
jbgastineau/cxphasing
a9847a0afb9a981d81f027e75c06c9bb2b531d33
[ "MIT" ]
3
2018-05-11T16:05:55.000Z
2021-12-20T08:52:02.000Z
cxphasing/CXResolutionEstimate.py
jbgastineau/cxphasing
a9847a0afb9a981d81f027e75c06c9bb2b531d33
[ "MIT" ]
null
null
null
cxphasing/CXResolutionEstimate.py
jbgastineau/cxphasing
a9847a0afb9a981d81f027e75c06c9bb2b531d33
[ "MIT" ]
2
2018-11-14T08:57:10.000Z
2021-12-20T08:52:06.000Z
""" .. module:: CXResolutionEstimate.py :platform: Unix :synopsis: A class for predicting the resolution of a ptychography measurement. .. moduleauthor:: David Vine <djvine@gmail.com> """ import requests import pdb import scipy as sp import numpy as np import scipy.fftpack as spf from pylab import * def recommend_beamstop(q_dependence=-3.5, energy=10.0, det=Detector('pilatus100k'), z_or_dx={'dx': 10e-9}): """.. func:: recommend_beamstop(q_dependence, detector, z_or_dx) :param float q_dependence: Intensity vs q scaling in far field. :param float energy: Incident X-ray energy. :param Detector det: Detector to be used for calculation. :param dict z_or_dx: Choose the optimal beamstop when (i) the detector is placed at z or, (ii) the desired resolution is dx. """ det_npix = min(det.xpix, det.ypix) det_width = det.pix_size*det_npix/2 l = 1.24e-9/energy if 'z' in z_or_dx.keys(): z = z_or_dx['z'] dx = l*z/det_width else: dx = z_or_dx['dx'] z = dx*det_width/(2*l) det_domain_x = sp.arange(det_npix)*det.pix_size det_domain_q = 4*math.pi*det_domain_x/(l*z) intensity = lambda q: (1+q**2.0)**-2.0 full_dynamic_range = log10(intensity(det_domain_q[-1]/det_domain_q[0])) detector_dynamic_range = log10(det.dr) required_dynamic_range = full_dynamic_range-detector_dynamic_range # Is a beamstop required? if required_dynamic_range>0: # Yes pass if __name__=='__main__': main()
27.011811
126
0.69538
284932efb61177d76bc830e6f9381821ff06ec7e
997
py
Python
classes/migrations/0007_auto_20201206_1223.py
henrylameck/school_management_system
38c270977d001d28f2338eb90fffc3e8c2598d06
[ "MIT" ]
null
null
null
classes/migrations/0007_auto_20201206_1223.py
henrylameck/school_management_system
38c270977d001d28f2338eb90fffc3e8c2598d06
[ "MIT" ]
3
2021-06-05T00:01:48.000Z
2021-09-22T19:39:12.000Z
classes/migrations/0007_auto_20201206_1223.py
henrylameck/school_management_system
38c270977d001d28f2338eb90fffc3e8c2598d06
[ "MIT" ]
null
null
null
# Generated by Django 3.1 on 2020-12-06 09:23 from django.db import migrations, models
26.236842
53
0.563691
2849b9c3dc25b3aa339fd03d7bee0279359be673
1,129
py
Python
Analysis/views_in_dow.py
harrisonxia/Lil-Data
204467aa740bef10d865925508d7cf007cac19b3
[ "MIT" ]
5
2018-11-14T03:31:13.000Z
2022-01-12T04:20:16.000Z
Analysis/views_in_dow.py
harrisonxia/Lil-Data
204467aa740bef10d865925508d7cf007cac19b3
[ "MIT" ]
null
null
null
Analysis/views_in_dow.py
harrisonxia/Lil-Data
204467aa740bef10d865925508d7cf007cac19b3
[ "MIT" ]
null
null
null
import sys
from pyspark.sql import SparkSession, functions, types
from pyspark.sql.functions import date_format
import json

# NOTE: the views_in_dow() function called below is not included in this excerpt.

if __name__ == '__main__':
    spark = SparkSession.builder.appName('views_in_dow').getOrCreate()
    spark.sparkContext.setLogLevel('WARN')
    views_in_dow()
35.28125
154
0.723649
284d7bf5c6289c980a9a21998f63efdfc660f3b8
570
py
Python
videoprocessor/app.py
ashish1595/uPresent
663acc6ad7c958c8d45699918c60e48535aff3b3
[ "MIT" ]
1
2020-09-02T23:51:15.000Z
2020-09-02T23:51:15.000Z
videoprocessor/app.py
ashish1595/uPresent
663acc6ad7c958c8d45699918c60e48535aff3b3
[ "MIT" ]
1,143
2020-01-26T07:18:37.000Z
2022-03-31T21:02:44.000Z
videoprocessor/app.py
ashish1595/uPresent
663acc6ad7c958c8d45699918c60e48535aff3b3
[ "MIT" ]
4
2020-01-27T07:47:29.000Z
2020-07-22T10:54:15.000Z
from flask import Flask from flask_restful import Api from elasticapm.contrib.flask import ElasticAPM from flask_restful_swagger import swagger from resources import custom_logger from resources.routes import initialize_routes import logging app = Flask(__name__) app.config.from_object("config.Config") # Initializing custom logger log = logging.getLogger("root") log.setLevel(logging.INFO) log.addHandler(custom_logger.LogHandler()) apm = ElasticAPM(app) api = Api(app) api = swagger.docs(Api(app), apiVersion="0.1") initialize_routes(api) app.run(host="0.0.0.0")
24.782609
47
0.801754
284e0e3fc7904eb4e425103fc8997d9f8ee44f17
1,593
py
Python
cogs/rng.py
Ana-gram/Amanager
5ceef312125b1c73dea59d37f8f06e22293c8960
[ "Apache-2.0" ]
12
2021-04-23T18:10:24.000Z
2021-05-03T13:08:54.000Z
cogs/rng.py
Ana-gram/Amanager
5ceef312125b1c73dea59d37f8f06e22293c8960
[ "Apache-2.0" ]
3
2021-04-04T17:47:02.000Z
2021-11-20T10:59:46.000Z
cogs/rng.py
Margana314/Amanager
87e241d942ca07f3ed8dfc5e1aebfde6f58bbdac
[ "Apache-2.0" ]
3
2021-04-30T11:07:28.000Z
2021-05-01T11:35:27.000Z
import discord, random from discord.ext import commands from discord_slash import cog_ext from discord_slash.utils.manage_commands import create_option
40.846154
190
0.603264
284ed97a6e7a6cca5b8e4fac818f272a6b8ee59d
1,538
py
Python
app.py
dodoche/essaie
53d22cfec969a8f992f4b5cb473bb41a215975b6
[ "Apache-2.0" ]
null
null
null
app.py
dodoche/essaie
53d22cfec969a8f992f4b5cb473bb41a215975b6
[ "Apache-2.0" ]
null
null
null
app.py
dodoche/essaie
53d22cfec969a8f992f4b5cb473bb41a215975b6
[ "Apache-2.0" ]
null
null
null
from flask import Flask from kubernetes.client.rest import ApiException from pprint import pprint from kubernetes import client, config app = Flask(__name__) if __name__ == "__main__": app.run(host='0.0.0.0',port=8000)
48.0625
171
0.727568
284f1b158ca4db2c6a71a6bb4e065847635e83d7
447
py
Python
Chapter2_Python/Logic.py
LKilian1/UdemyML_Template
4d9cd40c35ff29d796e2b7d327e0032ee7dc2f5a
[ "MIT" ]
null
null
null
Chapter2_Python/Logic.py
LKilian1/UdemyML_Template
4d9cd40c35ff29d796e2b7d327e0032ee7dc2f5a
[ "MIT" ]
null
null
null
Chapter2_Python/Logic.py
LKilian1/UdemyML_Template
4d9cd40c35ff29d796e2b7d327e0032ee7dc2f5a
[ "MIT" ]
null
null
null
i_am_broke = False

if i_am_broke:
    print("I am broke.")
else:
    print("I am not broke.")

my_bank_account = 1000

if my_bank_account <= 0:
    print("I am broke.")
else:
    print("I am not broke.")

# equal ==
# less <
# greater >
# not equal !=
# less or equal <=
# greater or equal >=

my_age = 21

if my_age < 18:
    print("You are a child.")
elif my_age < 66:
    print("You are an adult.")
else:
    print("You are a pensioner.")
13.96875
33
0.604027
284f52f9892227b12c3c28dae697a2873a90c1e7
34
py
Python
my_script.py
jeroenpijpker/easy_CD_tutorial
2827508a7060c74ff937d7820c5b0f5cdfa4d6a4
[ "MIT" ]
null
null
null
my_script.py
jeroenpijpker/easy_CD_tutorial
2827508a7060c74ff937d7820c5b0f5cdfa4d6a4
[ "MIT" ]
null
null
null
my_script.py
jeroenpijpker/easy_CD_tutorial
2827508a7060c74ff937d7820c5b0f5cdfa4d6a4
[ "MIT" ]
null
null
null
print("x") print("x") print("x")
6.8
10
0.529412
2851770a789aa6df103f42e0f34d59c8093d59ff
498
py
Python
neighbour/migrations/0014_auto_20211228_2342.py
mary-wan/Neighbourhood
4150ea60d8ab471fce7173c50c040f36320f3f40
[ "Unlicense" ]
2
2022-01-17T03:52:59.000Z
2022-02-18T15:09:34.000Z
neighbour/migrations/0014_auto_20211228_2342.py
mary-wan/Neighbourhood
4150ea60d8ab471fce7173c50c040f36320f3f40
[ "Unlicense" ]
null
null
null
neighbour/migrations/0014_auto_20211228_2342.py
mary-wan/Neighbourhood
4150ea60d8ab471fce7173c50c040f36320f3f40
[ "Unlicense" ]
null
null
null
# Generated by Django 2.2.24 on 2021-12-28 20:42 from django.db import migrations
21.652174
52
0.566265
2853e0d7d747d6c3288b88732191d861e6eecd97
427
py
Python
scipy/ndimage/tests/__init__.py
Ennosigaeon/scipy
2d872f7cf2098031b9be863ec25e366a550b229c
[ "BSD-3-Clause" ]
9,095
2015-01-02T18:24:23.000Z
2022-03-31T20:35:31.000Z
scipy/ndimage/tests/__init__.py
Ennosigaeon/scipy
2d872f7cf2098031b9be863ec25e366a550b229c
[ "BSD-3-Clause" ]
11,500
2015-01-01T01:15:30.000Z
2022-03-31T23:07:35.000Z
scipy/ndimage/tests/__init__.py
Ennosigaeon/scipy
2d872f7cf2098031b9be863ec25e366a550b229c
[ "BSD-3-Clause" ]
5,838
2015-01-05T11:56:42.000Z
2022-03-31T23:21:19.000Z
from __future__ import annotations from typing import List, Type import numpy # list of numarray data types integer_types: List[Type] = [ numpy.int8, numpy.uint8, numpy.int16, numpy.uint16, numpy.int32, numpy.uint32, numpy.int64, numpy.uint64] float_types: List[Type] = [numpy.float32, numpy.float64] complex_types: List[Type] = [numpy.complex64, numpy.complex128] types: List[Type] = integer_types + float_types
26.6875
63
0.754098
285632d72a4a6ee14ee3a1b9a5965b712d109e62
10,286
py
Python
experiments/utils.py
linshaoxin-maker/taas
34e11fab167a7beb78fbe6991ff8721dc9208793
[ "MIT" ]
4
2021-02-28T11:58:18.000Z
2022-02-03T03:26:45.000Z
experiments/utils.py
linshaoxin-maker/taas
34e11fab167a7beb78fbe6991ff8721dc9208793
[ "MIT" ]
null
null
null
experiments/utils.py
linshaoxin-maker/taas
34e11fab167a7beb78fbe6991ff8721dc9208793
[ "MIT" ]
null
null
null
import torch
import os
import subprocess as sp
from mlutils.pt.training import TrainerBatch
from mlutils.callbacks import Callback
from os import path
import numpy as np
import json


def torch_detach(x):
    return x.detach().cpu().numpy()


def save_topics(save_path, vocab, topic_prob, topk=100, logger=None):
    """topic_prob: n_topic x vocab_size. Assumed that topic_prob[i] is
    probability distribution for all i.
    """
    if logger:
        logger.info('saving topics to {}'.format(save_path))
    values, indices = torch.topk(topic_prob, k=topk, dim=-1)
    indices = torch_detach(indices)
    values = torch_detach(values)

    topics = []
    for t in indices:
        topics.append(' '.join([vocab.itos[i] for i in t]))
    with open(save_path+'.topics', 'w') as f:
        f.write('\n'.join(topics))

    str_values = []
    for t in values:
        str_values.append(' '.join([str(v) for v in t]))
    with open(save_path+'.values', 'w') as f:
        f.write('\n'.join(str_values))

    torch.save(topic_prob, save_path + '.pt')


def evaluate_topic_coherence(topic_path, ref_corpus_dir, res_path, logger):
    """Evaluating topic coherence at topic_path whose lines are topics top 10 words.
    The evaluation uses the script at scripts/topics-20news.sh
    """
    if not os.path.exists(topic_path):
        logger.warning('topic file {} not exists'.format(topic_path))
        return -1
    v = -1
    try:
        p = sp.run(['bash', 'scripts/topic_coherence.sh', topic_path, ref_corpus_dir, res_path],
                   encoding='utf-8', timeout=20, stdout=sp.PIPE, stderr=sp.DEVNULL)
        v = float(p.stdout.split('\n')[-3].split()[-1])
    except (ValueError, IndexError, sp.TimeoutExpired):
        # sp.run signals a timeout with subprocess.TimeoutExpired, not the
        # builtin TimeoutError, so that is the exception to catch here
        logger.warning('error when calculating topic coherence at {}'.format(topic_path))
    return v


def normalize(v2d, eps=1e-12):
    return v2d / (np.linalg.norm(v2d, axis=1, keepdims=True) + eps)


def wetc(e):
    """embedding matrix: N x D where N is the first N words in a topic,
    D is the embedding dimension."""
    e = normalize(e)
    t = normalize(e.mean(axis=0, keepdims=True))
    return float(e.dot(t.T).mean())


def recover_topic_embedding(topic_word_paths, embedding_path, dataset_dir):
    """Evaluate the WETC of topics generated by NPMI metric."""
    from data_utils import read_dataset
    assert isinstance(topic_word_paths, list), 'Multiple paths should be specified.'
    _, _, vocab = read_dataset(dataset_dir)
    embedding = np.load(embedding_path)
    scores = []
    for p in topic_word_paths:
        with open(p) as f:
            r = []
            for line in f:
                idx = [int(vocab.stoi[w]) for w in line.split()]
                r.append(wetc(embedding[idx]))
        scores.append(r)
    return np.array(scores)
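# --- Illustrative usage sketch (an assumption, not part of the original file) ---
# Demonstrates wetc() on toy embedding matrices: each row is the embedding of
# one of a topic's top words, and wetc() returns the mean cosine similarity
# of the rows to the normalized topic centroid. Values near 1 mean a coherent
# topic; unrelated words score noticeably lower.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    tight = rng.randn(1, 50) + 0.05 * rng.randn(10, 50)  # 10 near-identical vectors
    loose = rng.randn(10, 50)                            # 10 unrelated vectors
    print("coherent topic wetc:", wetc(tight))  # close to 1.0
    print("random topic wetc:  ", wetc(loose))  # noticeably smaller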
33.504886
119
0.586914
28567dfbf8e22fcad08ad19553a2399e95399e25
6,612
py
Python
noise/perlin.py
AnthonyBriggs/Python-101
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
[ "MIT" ]
3
2017-08-02T23:40:55.000Z
2018-07-02T14:59:07.000Z
noise/perlin.py
AnthonyBriggs/Python-101
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
[ "MIT" ]
null
null
null
noise/perlin.py
AnthonyBriggs/Python-101
e6c7584fd6791bb5d7d05fd419faa46dc7148f61
[ "MIT" ]
null
null
null
#!/usr/bin/python """ TODO: where'd I get this from? """ import math import random p = ( 151,160,137,91,90,15,131,13,201,95,96,53,194,233,7,225,140,36,103, 30,69,142,8,99,37,240,21,10,23,190,6,148,247,120,234,75,0,26,197, 62,94,252,219,203,117,35,11,32,57,177,33,88,237,149,56,87,174,20, 125,136,171,168,68,175,74,165,71,134,139,48,27,166,77,146,158,231, 83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,102, 143,54,65,25,63,161,1,216,80,73,209,76,132,187,208,89,18,169,200, 196,135,130,116,188,159,86,164,100,109,198,173,186,3,64,52,217,226, 250,124,123,5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16, 58,17,182,189,28,42,223,183,170,213,119,248,152,2,44,154,163,70, 221,153,101,155,167,43,172,9,129,22,39,253,19,98,108,110,79,113, 224,232,178,185,112,104,218,246,97,228,251,34,242,193,238,210,144, 12,191,179,162,241,81,51,145,235,249,14,239,107,49,192,214,31,181, 199,106,157,184,84,204,176,115,121,50,45,127,4,150,254,138,236, 205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180, 151,160,137,91,90,15,131,13,201,95,96,53,194,233,7,225,140,36,103, 30,69,142,8,99,37,240,21,10,23,190,6,148,247,120,234,75,0,26,197, 62,94,252,219,203,117,35,11,32,57,177,33,88,237,149,56,87,174,20, 125,136,171,168,68,175,74,165,71,134,139,48,27,166,77,146,158,231, 83,111,229,122,60,211,133,230,220,105,92,41,55,46,245,40,244,102, 143,54,65,25,63,161,1,216,80,73,209,76,132,187,208,89,18,169,200, 196,135,130,116,188,159,86,164,100,109,198,173,186,3,64,52,217,226, 250,124,123,5,202,38,147,118,126,255,82,85,212,207,206,59,227,47,16, 58,17,182,189,28,42,223,183,170,213,119,248,152,2,44,154,163,70, 221,153,101,155,167,43,172,9,129,22,39,253,19,98,108,110,79,113, 224,232,178,185,112,104,218,246,97,228,251,34,242,193,238,210,144, 12,191,179,162,241,81,51,145,235,249,14,239,107,49,192,214,31,181, 199,106,157,184,84,204,176,115,121,50,45,127,4,150,254,138,236, 205,93,222,114,67,29,24,72,243,141,128,195,78,66,215,61,156,180) def perlin_multifractal(x,y,z, octaves, lambda_, amplitude): """Multi fractal just means that we have more noise, at higher frequencies (aka. octaves), layered on top of the existing noise.""" sum = 0 for oct in range(octaves): #print oct, amplitude, lambda_ amp = amplitude / (2 ** oct); lam = lambda_ / (2 ** oct); # todo - find a decent interpolation function? 
        # add = interpolate(x/lam, y/lam, z/lam) * amp
        add = pnoise(x/lam, y/lam, z/lam) * amp
        if oct > 1:
            add *= sum
        sum += add
    return sum

# NOTE: pnoise(), perlin_multi_common(), perlin_ridged_multi_common() and
# get_random_range() are not included in this excerpt; a reconstruction of
# pnoise() is sketched after this file.

def perlin_ridged(x, y, z):
    """Ridged means that instead of varying from -1..1,
       the value varies from -1..1..-1"""
    value = pnoise(x, y, z)
    if value > 0:
        value = (-value) + 1
    else:
        value = value + 1
    return 2*value - 1

def make_landscape(x_size, y_size, noise_func, startx, stepx, starty, stepy):
    """Display some perlin noise as an image in a window."""
    heights = []
    landscape = Image.new("RGB", (x_size, y_size))
    for x in range(x_size):
        for y in range(y_size):
            xnoise, ynoise = (startx + x*stepx, starty + y*stepy)
            # make noise 0..2.0 instead of -1..1 and mult up to 256 max
            height = 128 * (noise_func(xnoise, ynoise, 0.5) + 1.0)
            heights.append((height, height, height))
    print(len(heights), "heights generated")
    landscape.putdata(heights)
    landscape.show()

if __name__ == '__main__':
    for z in range(10):
        for y in range(10):
            for x in range(10):
                print("%.02f" % pnoise(x/10.0, y/10.0, z/10.0), end=' ')
            print()
        print()

    # pnoise is deterministic
    for i in range(10):
        print(pnoise(0.1, 0.1, 0.1))

    from PIL import Image  # modern form of the legacy "import Image"
    x_size = 200; y_size = 200

    #startx, endx, stepx = get_random_range()
    #starty, endy, stepy = get_random_range()
    startx, endx, stepx = (0.0, 5.0, 5.0/x_size)
    starty, endy, stepy = (0.0, 5.0, 5.0/y_size)
    make_landscape(x_size, y_size, pnoise, startx, stepx, starty, stepy)
    make_landscape(x_size, y_size, perlin_multi_common, startx, stepx, starty, stepy)
    make_landscape(x_size, y_size, perlin_ridged, startx, stepx, starty, stepy)
    make_landscape(x_size, y_size, perlin_ridged_multi_common, startx, stepx, starty, stepy)
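# --- Illustrative sketch (an assumption, not part of the original file) ------
# The pnoise() this module calls is missing from the dump. Below is a minimal
# reconstruction following Ken Perlin's improved-noise reference, which uses
# exactly the kind of doubled permutation table 'p' defined above. In the
# original it would precede the __main__ block; it is an assumption about
# what the author's code looked like, not the author's code itself.
def _fade(t):
    # Perlin's 6t^5 - 15t^4 + 10t^3 smoothing curve
    return t * t * t * (t * (t * 6 - 15) + 10)

def _lerp(t, a, b):
    return a + t * (b - a)

def _grad(h, x, y, z):
    # pick a gradient direction from the low 4 bits of the hash
    h &= 15
    u = x if h < 8 else y
    if h < 4:
        v = y
    elif h in (12, 14):
        v = x
    else:
        v = z
    return (u if (h & 1) == 0 else -u) + (v if (h & 2) == 0 else -v)

def pnoise(x, y, z):
    """Classic 3-D Perlin noise, roughly in [-1, 1]."""
    X = int(math.floor(x)) & 255
    Y = int(math.floor(y)) & 255
    Z = int(math.floor(z)) & 255
    x -= math.floor(x)
    y -= math.floor(y)
    z -= math.floor(z)
    u, v, w = _fade(x), _fade(y), _fade(z)
    # hash the coordinates of the 8 surrounding cube corners
    A = p[X] + Y; AA = p[A] + Z; AB = p[A + 1] + Z
    B = p[X + 1] + Y; BA = p[B] + Z; BB = p[B + 1] + Z
    # trilinearly interpolate the gradient contributions
    return _lerp(w,
        _lerp(v, _lerp(u, _grad(p[AA], x, y, z),
                          _grad(p[BA], x - 1, y, z)),
                 _lerp(u, _grad(p[AB], x, y - 1, z),
                          _grad(p[BB], x - 1, y - 1, z))),
        _lerp(v, _lerp(u, _grad(p[AA + 1], x, y, z - 1),
                          _grad(p[BA + 1], x - 1, y, z - 1)),
                 _lerp(u, _grad(p[AB + 1], x, y - 1, z - 1),
                          _grad(p[BB + 1], x - 1, y - 1, z - 1))))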
31.942029
92
0.58908
2856b66bfd0e689f82a8cb47b06e0b491d804f30
3,555
py
Python
tests/test_random.py
xadrnd/display_sdk_ios
f8a140d1cf3d1e8f63b915caf8c723d771dac13f
[ "BSD-3-Clause" ]
null
null
null
tests/test_random.py
xadrnd/display_sdk_ios
f8a140d1cf3d1e8f63b915caf8c723d771dac13f
[ "BSD-3-Clause" ]
null
null
null
tests/test_random.py
xadrnd/display_sdk_ios
f8a140d1cf3d1e8f63b915caf8c723d771dac13f
[ "BSD-3-Clause" ]
null
null
null
from test_base import DisplaySDKTest from utils import *
25.76087
106
0.501547
28573be47f861d148cdbe92b563ea159f2c46f16
1,964
py
Python
libretto/plugin/__init__.py
johnwcchau/libretto
8b8cde81a978536f1c797e070818188bd2c006e5
[ "MIT" ]
null
null
null
libretto/plugin/__init__.py
johnwcchau/libretto
8b8cde81a978536f1c797e070818188bd2c006e5
[ "MIT" ]
null
null
null
libretto/plugin/__init__.py
johnwcchau/libretto
8b8cde81a978536f1c797e070818188bd2c006e5
[ "MIT" ]
null
null
null
import logging from configparser import ConfigParser from typing import Callable __plugins = {}
32.733333
82
0.588595
2858823061844e57eb3d0e1bf225fe8863fd7485
30,325
py
Python
cax/tasks/tsm_mover.py
XENON1T/cax
06de9290851904695275fd34d7c74e2c9eb7fe59
[ "0BSD" ]
2
2016-05-19T05:51:15.000Z
2017-10-13T13:43:00.000Z
cax/tasks/tsm_mover.py
XENON1T/cax
06de9290851904695275fd34d7c74e2c9eb7fe59
[ "0BSD" ]
93
2016-03-26T20:34:01.000Z
2021-03-25T21:41:57.000Z
cax/tasks/tsm_mover.py
XENON1T/cax
06de9290851904695275fd34d7c74e2c9eb7fe59
[ "0BSD" ]
2
2017-05-19T03:47:09.000Z
2018-12-19T18:10:45.000Z
"""Handle copying data between sites. tsm_mover.py contains the necessary classes to upload and download from tape backup and syncronize it with the runDB. Author: Boris Bauermeister Email: Boris.Bauermeister@fysik.su.se """ import datetime import logging import os import time import hashlib import json import random import requests import signal import socket import subprocess import sys import time import traceback import datetime import time import tarfile import copy import shutil import checksumdir import tempfile import scp from paramiko import SSHClient, util from cax import config from cax.task import Task # Class: Add checksums for missing tsm-server entries in the runDB: # Class: Log-file analyser:
38.679847
142
0.54526
285a8bab289bfb8c666439b93d30129bb0e1ff4e
2,076
py
Python
src/network/topology.py
joelwanner/smtax
7d46f02cb3f15f2057022c574e0f3a8e5236d647
[ "MIT" ]
null
null
null
src/network/topology.py
joelwanner/smtax
7d46f02cb3f15f2057022c574e0f3a8e5236d647
[ "MIT" ]
null
null
null
src/network/topology.py
joelwanner/smtax
7d46f02cb3f15f2057022c574e0f3a8e5236d647
[ "MIT" ]
null
null
null
from network.route import * # TODO: remove workaround for circular dependencies import interface.parse as parser
23.590909
102
0.560694
285ae74b0cd6ba7b852c770105e6b5d4523be1ae
5,310
py
Python
leetcode_python/Sort/wiggle-sort-ii.py
yennanliu/Python_basics
6a597442d39468295946cefbfb11d08f61424dc3
[ "Unlicense" ]
18
2019-08-01T07:45:02.000Z
2022-03-31T18:05:44.000Z
leetcode_python/Sort/wiggle-sort-ii.py
yennanliu/Python_basics
6a597442d39468295946cefbfb11d08f61424dc3
[ "Unlicense" ]
null
null
null
leetcode_python/Sort/wiggle-sort-ii.py
yennanliu/Python_basics
6a597442d39468295946cefbfb11d08f61424dc3
[ "Unlicense" ]
15
2019-12-29T08:46:20.000Z
2022-03-08T14:14:05.000Z
# V0 # V1 # https://www.hrwhisper.me/leetcode-wiggle-sort-ii/ # V1' # http://bookshadow.com/weblog/2015/12/31/leetcode-wiggle-sort-ii/ # V1'' # https://www.jiuzhang.com/solution/wiggle-sort-ii/#tag-highlight-lang-python # V2 # Time: O(n) ~ O(n^2) # Space: O(1) # Tri Partition (aka Dutch National Flag Problem) with virtual index solution. (TLE) from random import randint
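# --- Illustrative sketch (an assumption, not part of the original file) ------
# The solution bodies referenced above (V0-V2) are not included in this dump.
# A simple O(n log n) Wiggle Sort II for reference: sort, split into a small
# half and a large half, and interleave the two halves in reverse order so
# that duplicates land as far apart as possible.
class Solution(object):
    def wiggleSort(self, nums):
        """Rearrange nums in place so nums[0] < nums[1] > nums[2] < ..."""
        nums.sort()
        half = len(nums[::2])  # size of the "small" half (the even slots)
        nums[::2], nums[1::2] = nums[:half][::-1], nums[half:][::-1]

# Example: Solution().wiggleSort(nums) with nums = [1, 5, 1, 1, 6, 4]
# leaves nums as [1, 6, 1, 5, 1, 4].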
34.038462
103
0.468927
285af3b4261da560e2b691c6b8e6dc0c7a368402
517
py
Python
model/Rules/ZigZag.py
GigliOneiric/ElliotWave
5d0fc166530a57132dce4e4c00ecb33cb8101aaa
[ "Apache-2.0" ]
null
null
null
model/Rules/ZigZag.py
GigliOneiric/ElliotWave
5d0fc166530a57132dce4e4c00ecb33cb8101aaa
[ "Apache-2.0" ]
null
null
null
model/Rules/ZigZag.py
GigliOneiric/ElliotWave
5d0fc166530a57132dce4e4c00ecb33cb8101aaa
[ "Apache-2.0" ]
null
null
null
import config.Text
21.541667
97
0.54352
285ef9691cdce93a606e382f9fdd9a1cebb1c5b6
232
py
Python
views.py
Rexypoo/shortnsweet
e773f01f2fdd6630b8d649232b48a753aa387c4f
[ "Apache-2.0" ]
null
null
null
views.py
Rexypoo/shortnsweet
e773f01f2fdd6630b8d649232b48a753aa387c4f
[ "Apache-2.0" ]
null
null
null
views.py
Rexypoo/shortnsweet
e773f01f2fdd6630b8d649232b48a753aa387c4f
[ "Apache-2.0" ]
null
null
null
from django.shortcuts import get_object_or_404, redirect, render from .models import ShortURL
29
64
0.806034
285f254edbac663963266cf598f4a69429de61bf
4,153
py
Python
pyautomailer/command_line.py
matteocappello94/pyautomailer
933c23426d00d32543da3af03fe67a7e8fa38247
[ "MIT" ]
null
null
null
pyautomailer/command_line.py
matteocappello94/pyautomailer
933c23426d00d32543da3af03fe67a7e8fa38247
[ "MIT" ]
2
2018-08-08T07:51:14.000Z
2018-08-10T14:35:32.000Z
pyautomailer/command_line.py
matteocappello94/pyautomailer
933c23426d00d32543da3af03fe67a7e8fa38247
[ "MIT" ]
null
null
null
import argparse import sys import logging as log from pyautomailer import PyAutoMailer, PyAutoMailerMode # Bulk-send mode function # One-send mode function # From log_level string get log_level object of logging module
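# --- Illustrative sketch (an assumption, not part of the original file) ------
# The helpers named in the comments above are elided in this dump. The last
# one ("from log_level string get log_level object of logging module") is a
# common pattern; a minimal guess at its shape, using the `log` alias
# imported above:
def get_log_level(level_name, default=log.INFO):
    """Map a string such as 'debug' or 'INFO' to a logging-module constant."""
    return getattr(log, level_name.upper(), default)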
36.752212
79
0.575969
285fac9d4a82de6931604146cf170d6983d5310f
4,225
py
Python
test-suite/handwritten-src/python/test_foo_interface.py
trafi/trafi-djinni
47cd2c849782e2ab4b38e5dc6a5a3104cc87f673
[ "Apache-2.0" ]
16
2020-10-18T20:09:29.000Z
2022-02-21T07:11:13.000Z
test-suite/handwritten-src/python/test_foo_interface.py
trafi/trafi-djinni
47cd2c849782e2ab4b38e5dc6a5a3104cc87f673
[ "Apache-2.0" ]
53
2020-10-13T20:08:29.000Z
2022-03-10T14:59:50.000Z
test-suite/handwritten-src/python/test_foo_interface.py
trafi/trafi-djinni
47cd2c849782e2ab4b38e5dc6a5a3104cc87f673
[ "Apache-2.0" ]
13
2020-10-16T20:31:36.000Z
2022-01-28T15:37:05.000Z
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function, unicode_literals # Example code written by a python developer to access cpp implementation of Foo # This file should be hand-written by a python developer from foo_interface import FooInterface from djinni.support import decoded_utf_8 import sys PYTHON3 = sys.version_info[0] >= 3 # Can set: unicode strings (python 2 and 3), bytes utf-8 encoded (python 3) # Will get: utf-8 encoded strings, and utf-8 encoded bytes respectively DECODEUtf8 = 1
31.296296
108
0.626509
286006999e3a7c33bbad8b78611b5c41448309f0
4,324
py
Python
examples/dvrl_asr/dvrl_asr_finetuning.py
SeunghyunSEO/seosh_fairseq
443b2a8effb6b8fba5758989076cf992470ccb62
[ "MIT" ]
null
null
null
examples/dvrl_asr/dvrl_asr_finetuning.py
SeunghyunSEO/seosh_fairseq
443b2a8effb6b8fba5758989076cf992470ccb62
[ "MIT" ]
2
2022-02-22T08:28:06.000Z
2022-02-22T09:26:26.000Z
examples/dvrl_asr/dvrl_asr_finetuning.py
SeunghyunSEO/seosh_fairseq
443b2a8effb6b8fba5758989076cf992470ccb62
[ "MIT" ]
null
null
null
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import logging import os import torch import json from argparse import Namespace from dataclasses import dataclass, field from typing import Optional, Any from fairseq.data import AddTargetDataset, Dictionary, encoders from fairseq.tasks.audio_pretraining import AudioPretrainingTask, AudioPretrainingConfig from fairseq.tasks.audio_finetuning import AudioFinetuningTask, AudioFinetuningConfig from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.configs import GenerationConfig from fairseq.data.text_compressor import TextCompressor, TextCompressionLevel from fairseq.tasks import FairseqTask, register_task from fairseq import utils from fairseq.logging import metrics from fairseq.optim.amp_optimizer import AMPOptimizer from omegaconf import MISSING, II, OmegaConf logger = logging.getLogger(__name__)
36.336134
132
0.698196
286032d47c652ffe5a80685b84caa53cf8ed1a03
2,727
py
Python
opensource/opencv/write_to_video.py
marciojv/hacks-cognitives-plataforms
5b43f52d6afde4ad2768ad5b85e376578e2c9b2f
[ "Apache-2.0" ]
1
2021-05-14T18:43:51.000Z
2021-05-14T18:43:51.000Z
opensource/opencv/write_to_video.py
marciojv/hacks-cognitives-plataforms
5b43f52d6afde4ad2768ad5b85e376578e2c9b2f
[ "Apache-2.0" ]
null
null
null
opensource/opencv/write_to_video.py
marciojv/hacks-cognitives-plataforms
5b43f52d6afde4ad2768ad5b85e376578e2c9b2f
[ "Apache-2.0" ]
9
2019-02-04T22:08:08.000Z
2021-07-17T12:12:12.000Z
# to run:
# python write_to_video.py --output example.avi
# or
# python write_to_video.py --output example.avi --picamera 1

# dependency: python -m pip install imutils
from __future__ import print_function
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2

# https://www.pyimagesearch.com/2016/02/22/writing-to-video-with-opencv/

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True,
    help="path to output video file")
ap.add_argument("-p", "--picamera", type=int, default=-1,
    help="whether or not the Raspberry Pi camera should be used")
ap.add_argument("-f", "--fps", type=int, default=20,
    help="FPS of output video")
ap.add_argument("-c", "--codec", type=str, default="MJPG",
    help="codec of output video")
args = vars(ap.parse_args())

# initialize the video stream and allow the camera
# sensor to warmup
print("[INFO] warming up camera...")
vs = VideoStream(usePiCamera=args["picamera"] > 0).start()
time.sleep(2.0)

# initialize the FourCC, video writer, dimensions of the frame, and
# zeros array
fourcc = cv2.VideoWriter_fourcc(*args["codec"])
writer = None
(h, w) = (None, None)
zeros = None

# loop over frames from the video stream
while True:
    # grab the frame from the video stream and resize it to have a
    # maximum width of 300 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=300)

    # check if the writer is None
    if writer is None:
        # store the image dimensions, initialize the video writer,
        # and construct the zeros array
        (h, w) = frame.shape[:2]
        writer = cv2.VideoWriter(args["output"], fourcc, args["fps"],
            (w * 2, h * 2), True)
        zeros = np.zeros((h, w), dtype="uint8")

    # break the image into its RGB components, then construct the
    # RGB representation of each frame individually
    (B, G, R) = cv2.split(frame)
    R = cv2.merge([zeros, zeros, R])
    G = cv2.merge([zeros, G, zeros])
    B = cv2.merge([B, zeros, zeros])

    # construct the final output frame, storing the original frame
    # at the top-left, the red channel in the top-right, the green
    # channel in the bottom-right, and the blue channel in the
    # bottom-left
    output = np.zeros((h * 2, w * 2, 3), dtype="uint8")
    output[0:h, 0:w] = frame
    output[0:h, w:w * 2] = R
    output[h:h * 2, w:w * 2] = G
    output[h:h * 2, 0:w] = B

    # write the output frame to file
    writer.write(output)

    # show the frames
    cv2.imshow("Frame", frame)
    cv2.imshow("Output", output)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break

# do a bit of cleanup
print("[INFO] cleaning up...")
cv2.destroyAllWindows()
vs.stop()
writer.release()
27.826531
71
0.69527