Dataset schema (⌀ marks a nullable column):

- hexsha: stringlengths 40–40
- size: int64, 5 – 2.06M
- ext: stringclasses, 11 values
- lang: stringclasses, 1 value
- max_stars_repo_path, max_issues_repo_path, max_forks_repo_path: stringlengths 3–251
- max_stars_repo_name, max_issues_repo_name, max_forks_repo_name: stringlengths 4–130
- max_stars_repo_head_hexsha, max_issues_repo_head_hexsha, max_forks_repo_head_hexsha: stringlengths 40–78
- max_stars_repo_licenses, max_issues_repo_licenses, max_forks_repo_licenses: listlengths 1–10
- max_stars_count: int64, 1 – 191k ⌀
- max_issues_count: int64, 1 – 116k ⌀
- max_forks_count: int64, 1 – 105k ⌀
- max_stars_repo_stars_event_min_datetime, max_stars_repo_stars_event_max_datetime: stringlengths 24–24 ⌀
- max_issues_repo_issues_event_min_datetime, max_issues_repo_issues_event_max_datetime: stringlengths 24–24 ⌀
- max_forks_repo_forks_event_min_datetime, max_forks_repo_forks_event_max_datetime: stringlengths 24–24 ⌀
- content: stringlengths 1 – 1.05M
- avg_line_length: float64, 1 – 1.02M
- max_line_length: int64, 3 – 1.04M
- alphanum_fraction: float64, 0 – 1

Each record below is laid out as: a hexsha/size/ext/lang line; one line each for the max_stars, max_issues, and max_forks snapshots (path | repo name | head hexsha | licenses | count | event min datetime | event max datetime); the file content; and a closing line-statistics line.
hexsha: 8d2b9627ee560b695980d399a9b852afb9663aac | size: 1,593 | ext: py | lang: Python
max_stars: tests/test_clamp.py | josemolinagarcia/maya-math-nodes | 1f83eef1d1efe0b0c3dbb1477ca31ed9f8911ee4 | ["MIT"] | 148 | 2018-01-12T20:30:45.000Z | 2022-02-28T05:20:46.000Z
max_issues: tests/test_clamp.py | josemolinagarcia/maya-math-nodes | 1f83eef1d1efe0b0c3dbb1477ca31ed9f8911ee4 | ["MIT"] | 13 | 2018-01-17T18:02:13.000Z | 2021-11-23T06:06:24.000Z
max_forks: tests/test_clamp.py | josemolinagarcia/maya-math-nodes | 1f83eef1d1efe0b0c3dbb1477ca31ed9f8911ee4 | ["MIT"] | 41 | 2018-01-16T01:41:29.000Z | 2021-08-24T01:27:56.000Z
content:
# Copyright (c) 2018 Serguei Kalentchouk et al. All rights reserved.
# Use of this source code is governed by an MIT license that can be found in the LICENSE file.
from node_test_case import NodeTestCase, cmds
avg_line_length: 44.25 | max_line_length: 114 | alphanum_fraction: 0.595731

hexsha: 8d2bec83c642f547afb331d447ae8ff19041fd5a | size: 1,111 | ext: py | lang: Python
max_stars: src/tests/tests_get_formatted_items.py | kazqvaizer/checklistbot | f715280fbe7035bc2ce4f69cbf95595d9fe3a225 | ["MIT"] | 5 | 2020-10-06T13:42:45.000Z | 2021-12-21T07:35:08.000Z
max_issues: src/tests/tests_get_formatted_items.py | kazqvaizer/checklistbot | f715280fbe7035bc2ce4f69cbf95595d9fe3a225 | ["MIT"] | null | null | null
max_forks: src/tests/tests_get_formatted_items.py | kazqvaizer/checklistbot | f715280fbe7035bc2ce4f69cbf95595d9fe3a225 | ["MIT"] | null | null | null
content:
import pytest
from models import TodoItem
pytestmark = [
pytest.mark.usefixtures("use_db"),
]
def test_format_without_strike(items, chat):
lines = chat.get_formatted_items().split("\n")
assert len(lines) == 2
assert "1. Hello" == lines[0]
assert "2. Nice!" == lines[1]
def test_format_with_strike(items, chat):
items[0].is_checked = True
items[0].save()
lines = chat.get_formatted_items().split("\n")
assert len(lines) == 2
assert "<s>1. Hello</s>" == lines[0]
assert "2. Nice!" == lines[1]
def test_respect_order_by_id(items, chat):
TodoItem.update(id=100500).where(TodoItem.id == items[0].id).execute()
lines = chat.get_formatted_items().split("\n")
assert len(lines) == 2
assert "1. Nice!" == lines[0]
assert "2. Hello" == lines[1]
def test_no_items_is_okay(chat):
assert chat.get_formatted_items() == ""
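The assertions above fully pin down the expected formatting; for reference, a minimal implementation consistent with these tests might look like the sketch below. It is hypothetical: the real method lives on the Chat model, and the `item.text` attribute and the items-by-chat query are assumptions.

```python
# Hypothetical sketch inferred from the tests above; `item.text` and the
# chat filter are assumptions, not this repo's actual code.
def get_formatted_items(self):
    lines = []
    items = TodoItem.select().where(TodoItem.chat == self).order_by(TodoItem.id)
    for number, item in enumerate(items, start=1):
        line = f"{number}. {item.text}"
        if item.is_checked:
            line = f"<s>{line}</s>"  # strike through checked items
        lines.append(line)
    return "\n".join(lines)  # "" when there are no items
```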
avg_line_length: 20.574074 | max_line_length: 74 | alphanum_fraction: 0.640864

hexsha: 8d2cd1060b91fea7d66c9afe4a0c6e646802593b | size: 3,945 | ext: py | lang: Python
max_stars: web/multilingual/database.py | mahoyen/web | 1d190a86e3277315804bfcc0b8f9abd4f9c1d780 | ["MIT"] | null | null | null
max_issues: web/multilingual/database.py | mahoyen/web | 1d190a86e3277315804bfcc0b8f9abd4f9c1d780 | ["MIT"] | null | null | null
max_forks: web/multilingual/database.py | mahoyen/web | 1d190a86e3277315804bfcc0b8f9abd4f9c1d780 | ["MIT"] | null | null | null
content:
import copy
import json
from django.contrib import admin
from django.db import models
from web.multilingual.data_structures import MultiLingualTextStructure
from web.multilingual.form import MultiLingualFormField, MultiLingualRichTextFormField, \
MultiLingualRichTextUploadingFormField
from web.multilingual.widgets import MultiLingualTextInput, MultiLingualRichText, MultiLingualRichTextUploading
avg_line_length: 39.848485 | max_line_length: 119 | alphanum_fraction: 0.694043

hexsha: 8d2fec927240532eb03988da6b6277edf3bec73d | size: 2,859 | ext: py | lang: Python
max_stars: cart/tests/test_views.py | mohsenamoon1160417237/ECommerce-app | 4cca492214b04b56f625aef2a2979956a8256710 | ["MIT"] | null | null | null
max_issues: cart/tests/test_views.py | mohsenamoon1160417237/ECommerce-app | 4cca492214b04b56f625aef2a2979956a8256710 | ["MIT"] | null | null | null
max_forks: cart/tests/test_views.py | mohsenamoon1160417237/ECommerce-app | 4cca492214b04b56f625aef2a2979956a8256710 | ["MIT"] | null | null | null
content:
from django.test import TestCase
from shop.models import Product
from django.contrib.auth.models import User
from coupons.forms import CouponForm
avg_line_length: 28.878788 | max_line_length: 77 | alphanum_fraction: 0.550892

hexsha: 8d341997147380f82b39848b173c8f836285f331 | size: 2,134 | ext: py | lang: Python
max_stars: tests/conftest.py | gpontesss/botus_receptus | bf29f5f70a2e7ae3548a44287c636515f78e7e77 | ["BSD-3-Clause"] | 3 | 2019-04-15T01:45:46.000Z | 2020-04-07T13:31:19.000Z
max_issues: tests/conftest.py | gpontesss/botus_receptus | bf29f5f70a2e7ae3548a44287c636515f78e7e77 | ["BSD-3-Clause"] | 244 | 2020-04-20T22:10:23.000Z | 2022-03-31T23:03:48.000Z
max_forks: tests/conftest.py | gpontesss/botus_receptus | bf29f5f70a2e7ae3548a44287c636515f78e7e77 | ["BSD-3-Clause"] | 1 | 2021-11-08T08:52:32.000Z | 2021-11-08T08:52:32.000Z
content:
from __future__ import annotations
import asyncio
from typing import Any
import asynctest.mock # type: ignore
import pytest # type: ignore
import pytest_mock._util # type: ignore
pytest_mock._util._mock_module = asynctest.mock
avg_line_length: 28.837838 | max_line_length: 79 | alphanum_fraction: 0.680412

hexsha: 8d352ba96be56207cce46e2dc458765a09de6f97 | size: 1,247 | ext: py | lang: Python
max_stars: Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py | crpurcell/MQ_DPI_Release | 97444513e8b8d48ec91ff8a43b9dfaed0da029f9 | ["MIT"] | null | null | null
max_issues: Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py | crpurcell/MQ_DPI_Release | 97444513e8b8d48ec91ff8a43b9dfaed0da029f9 | ["MIT"] | null | null | null
max_forks: Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py | crpurcell/MQ_DPI_Release | 97444513e8b8d48ec91ff8a43b9dfaed0da029f9 | ["MIT"] | null | null | null
content:
#=============================================================================#
#                                                                             #
#  MODIFIED: 15-Jan-2019 by C. Purcell                                        #
#                                                                             #
#=============================================================================#
import cv2
#-----------------------------------------------------------------------------#
avg_line_length: 35.628571 | max_line_length: 79 | alphanum_fraction: 0.36648

hexsha: 8d36012ec39c8b5de0335c08778adaf22f20af3c | size: 985 | ext: py | lang: Python
max_stars: aiida_quantumespresso/parsers/constants.py | unkcpz/aiida-quantumespresso | fbac0993bb8b6cdeba85717453debcf0ab062b5a | ["MIT"] | null | null | null
max_issues: aiida_quantumespresso/parsers/constants.py | unkcpz/aiida-quantumespresso | fbac0993bb8b6cdeba85717453debcf0ab062b5a | ["MIT"] | null | null | null
max_forks: aiida_quantumespresso/parsers/constants.py | unkcpz/aiida-quantumespresso | fbac0993bb8b6cdeba85717453debcf0ab062b5a | ["MIT"] | null | null | null
content:
# -*- coding: utf-8 -*-
"""
Physical or mathematical constants.
Since every code has its own conversion units, this module defines what
QE understands as an eV or other quantities.
Whenever possible, we try to use the constants defined in
:py:mod:aiida.common.constants:, but if some constants are slightly different
among different codes (e.g., different standard definition), we define
the constants in this file.
"""
from aiida.common.constants import (
ang_to_m,
bohr_si,
bohr_to_ang,
hartree_to_ev,
invcm_to_THz,
ry_si,
ry_to_ev,
timeau_to_sec,
)
# From the definition of Quantum ESPRESSO, conversion from atomic mass
# units to Rydberg units:
# REAL(DP), PARAMETER :: AMU_SI = 1.660538782E-27_DP ! Kg
# REAL(DP), PARAMETER :: ELECTRONMASS_SI = 9.10938215E-31_DP ! Kg
# REAL(DP), PARAMETER :: AMU_AU = AMU_SI / ELECTRONMASS_SI
# REAL(DP), PARAMETER :: AMU_RY = AMU_AU / 2.0_DP
amu_Ry = 911.4442421323
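The conversion spelled out in the comment is easy to verify numerically; a minimal check using the QE values quoted above:

```python
# Numeric check of the comment above, using the quoted QE constants verbatim.
AMU_SI = 1.660538782e-27           # kg
ELECTRONMASS_SI = 9.10938215e-31   # kg
AMU_AU = AMU_SI / ELECTRONMASS_SI  # ~1822.888 electron masses per amu
assert abs(AMU_AU / 2.0 - 911.4442421323) < 1e-4
```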
avg_line_length: 31.774194 | max_line_length: 77 | alphanum_fraction: 0.700508

hexsha: 8d3e794674c7c132a4877a4a375649bf2399c45b | size: 2,639 | ext: py | lang: Python
max_stars: venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | ["MIT"] | 1 | 2021-05-24T10:08:51.000Z | 2021-05-24T10:08:51.000Z
max_issues: venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | ["MIT"] | null | null | null
max_forks: venv/lib/python3.8/site-packages/keras/api/_v2/keras/applications/__init__.py | JIANG-CX/data_labeling | 8d2470bbb537dfc09ed2f7027ed8ee7de6447248 | ["MIT"] | null | null | null
content:
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.keras.applications namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from keras.api._v2.keras.applications import densenet
from keras.api._v2.keras.applications import efficientnet
from keras.api._v2.keras.applications import imagenet_utils
from keras.api._v2.keras.applications import inception_resnet_v2
from keras.api._v2.keras.applications import inception_v3
from keras.api._v2.keras.applications import mobilenet
from keras.api._v2.keras.applications import mobilenet_v2
from keras.api._v2.keras.applications import mobilenet_v3
from keras.api._v2.keras.applications import nasnet
from keras.api._v2.keras.applications import resnet
from keras.api._v2.keras.applications import resnet50
from keras.api._v2.keras.applications import resnet_v2
from keras.api._v2.keras.applications import vgg16
from keras.api._v2.keras.applications import vgg19
from keras.api._v2.keras.applications import xception
from keras.applications.densenet import DenseNet121
from keras.applications.densenet import DenseNet169
from keras.applications.densenet import DenseNet201
from keras.applications.efficientnet import EfficientNetB0
from keras.applications.efficientnet import EfficientNetB1
from keras.applications.efficientnet import EfficientNetB2
from keras.applications.efficientnet import EfficientNetB3
from keras.applications.efficientnet import EfficientNetB4
from keras.applications.efficientnet import EfficientNetB5
from keras.applications.efficientnet import EfficientNetB6
from keras.applications.efficientnet import EfficientNetB7
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.applications.inception_v3 import InceptionV3
from keras.applications.mobilenet import MobileNet
from keras.applications.mobilenet_v2 import MobileNetV2
from keras.applications.mobilenet_v3 import MobileNetV3Large
from keras.applications.mobilenet_v3 import MobileNetV3Small
from keras.applications.nasnet import NASNetLarge
from keras.applications.nasnet import NASNetMobile
from keras.applications.resnet import ResNet101
from keras.applications.resnet import ResNet152
from keras.applications.resnet import ResNet50
from keras.applications.resnet_v2 import ResNet101V2
from keras.applications.resnet_v2 import ResNet152V2
from keras.applications.resnet_v2 import ResNet50V2
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras.applications.xception import Xception
del _print_function
avg_line_length: 47.981818 | max_line_length: 82 | alphanum_fraction: 0.869269

hexsha: 8d3ebf8c27b4787edb5db6336b9fad286f003b92 | size: 97 | ext: py | lang: Python
max_stars: flash/vision/embedding/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | 2 | 2021-06-25T08:42:36.000Z | 2021-06-25T08:49:29.000Z
max_issues: flash/vision/embedding/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | null | null | null
max_forks: flash/vision/embedding/__init__.py | alvin-chang/lightning-flash | 481d4d369ff0a5d8c2b2d9e4970c5608a92b3ff5 | ["Apache-2.0"] | null | null | null
content:
from flash.vision.embedding.image_embedder_model import ImageEmbedder, ImageEmbedderDataPipeline
avg_line_length: 48.5 | max_line_length: 96 | alphanum_fraction: 0.907216

hexsha: 8d3f8941dd6434ce1537415533cd51f289916f52 | size: 5,554 | ext: py | lang: Python
max_stars: configstruct/config_struct.py | bradrf/configstruct | aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8 | ["MIT"] | null | null | null
max_issues: configstruct/config_struct.py | bradrf/configstruct | aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8 | ["MIT"] | 16 | 2016-10-13T09:53:46.000Z | 2022-03-24T15:04:51.000Z
max_forks: configstruct/config_struct.py | bradrf/configstruct | aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8 | ["MIT"] | null | null | null
content:
import os
import sys
import logging
from configparser import ConfigParser
from .open_struct import OpenStruct
from .section_struct import SectionStruct
# TODO: use file lock when read/write
def choose_theirs(section, option, mine, theirs):
'''Always prefer values for keys from file.'''
return theirs
def choose_mine(section, option, mine, theirs):
'''Always prefer values for keys in memory.'''
return mine
LOG_LEVELS = ['debug-all', 'debug', 'info', 'warning', 'error', 'critical']
LOG_OPTIONS = {'log_level': 'info', 'log_file': 'STDERR'}
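The two resolvers share one signature, so a merge routine can take either as a callback. A hypothetical illustration of how such a merge might apply them (the real merge logic lives elsewhere in ConfigStruct):

```python
# Hypothetical illustration only; ConfigStruct's actual merge logic differs.
def merge_options(section, mine, theirs, choose=choose_theirs):
    merged = dict(mine)
    for option, their_value in theirs.items():
        if option in merged and merged[option] != their_value:
            merged[option] = choose(section, option, merged[option], their_value)
        else:
            merged[option] = their_value
    return merged

# With the default choose_theirs, the on-disk value wins on conflict:
# merge_options('main', {'log_level': 'debug'}, {'log_level': 'info'})
# -> {'log_level': 'info'}
```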
avg_line_length: 40.540146 | max_line_length: 100 | alphanum_fraction: 0.659705

hexsha: 8d4042ed9b0586457ce903d2cc6db6a880c03485 | size: 10,327 | ext: py | lang: Python
max_stars: test_apps/python_app/tests/compiler_test.py | Origen-SDK/o2 | 5b0f9a6d113ddebc73c7ee224931e8b2d0301794 | ["MIT"] | null | null | null
max_issues: test_apps/python_app/tests/compiler_test.py | Origen-SDK/o2 | 5b0f9a6d113ddebc73c7ee224931e8b2d0301794 | ["MIT"] | 127 | 2019-11-23T17:09:35.000Z | 2021-09-02T11:06:20.000Z
max_forks: test_apps/python_app/tests/compiler_test.py | Origen-SDK/o2 | 5b0f9a6d113ddebc73c7ee224931e8b2d0301794 | ["MIT"] | null | null | null
content:
import origen # pylint: disable=import-error
import pytest, pathlib, os, stat, abc
from os import access, W_OK, X_OK, R_OK
from tests.shared import clean_falcon, clean_compiler, tmp_dir
def user_compiler():
''' End users should access the compiler via ``origen.app.compiler``. '''
return origen.app.compiler
MakoRenderer = origen.compiler.MakoRenderer
# JinjaRenderer = origen.compiler.JinjaRenderer
def test_render_file(self):
''' Test that the renderer can render a given file '''
rendered = user_compiler().render(self.input_filename,
syntax=self.syntax,
direct_src=False,
output_dir=tmp_dir(),
context=self.additional_context)
assert isinstance(rendered, pathlib.Path)
assert rendered == self.output_filename
assert rendered.exists
assert open(rendered, 'r').read() == self.expected_dut_info_output
def test_render_str(self):
''' Test that the renderer can render a given string '''
rendered = user_compiler().render(self.str_render,
syntax=self.syntax,
direct_src=True)
assert rendered == self.expected_str_render
def test_render_with_standard_context(self):
''' Renders output using the standard context '''
rendered = user_compiler().render(
self.str_render_with_standard_context,
syntax=self.syntax,
direct_src=True)
assert rendered == self.expected_str_render_with_standard_context
def test_render_with_additional_context(self):
''' Renders output using additional context given as an option
-> Test that the renderer supports the 'additional_context' option
'''
rendered = user_compiler().render(
self.str_render_with_additional_context,
syntax=self.syntax,
direct_src=True,
context={'test_renderer_name': self.syntax})
assert rendered == self.expected_str_render_with_additional_context
# class TestJinjaCompiler:
# pass
avg_line_length: 39.117424 | max_line_length: 97 | alphanum_fraction: 0.637165

hexsha: 8d42c2702dd5a391e27f8a389f8a934778ba0c95 | size: 999 | ext: py | lang: Python
max_stars: api/api.py | devSessions/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | ["MIT"] | 25 | 2017-12-31T06:51:54.000Z | 2021-11-17T11:29:30.000Z
max_issues: api/api.py | amittomar-1/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | ["MIT"] | 23 | 2020-01-28T21:34:12.000Z | 2022-03-11T23:11:54.000Z
max_forks: api/api.py | amittomar-1/crvi | 1ecc68d6c968294bcc5ceea747604ee237f6080c | ["MIT"] | 11 | 2018-01-04T12:30:33.000Z | 2020-12-01T18:08:59.000Z
content:
from flask import Flask, jsonify, request
import predict
import socket
app = Flask(__name__)
# to specify route after url
if __name__ == '__main__':
#for remote host
ip = socket.gethostbyname(socket.gethostname())
app.run(port=5000,host=ip)
#for local host
#app.run(debug=True, port=5000)
avg_line_length: 19.211538 | max_line_length: 51 | alphanum_fraction: 0.58959

hexsha: 8d4484e9d066b90a85e8763af3ea488f55a3ae34 | size: 68 | ext: py | lang: Python
max_stars: exe/__init__.py | whisperaven/0ops.exed | ab9f14868fec664fe78edab6fb7eb572b3048c58 | ["MIT"] | 10 | 2017-03-17T02:15:18.000Z | 2019-10-26T23:54:21.000Z
max_issues: exe/__init__.py | whisperaven/0ops | ab9f14868fec664fe78edab6fb7eb572b3048c58 | ["MIT"] | 1 | 2017-03-20T03:17:17.000Z | 2017-03-20T04:04:26.000Z
max_forks: exe/__init__.py | whisperaven/0ops | ab9f14868fec664fe78edab6fb7eb572b3048c58 | ["MIT"] | 3 | 2017-03-17T02:46:23.000Z | 2018-04-14T15:49:56.000Z
content:
# (c) 2016, Hao Feng <whisperaven@gmail.com>
__version__ = '0.1.0'
avg_line_length: 17 | max_line_length: 44 | alphanum_fraction: 0.661765

hexsha: 8d4492744de35276bcea0bf1ccb409c9aa59295e | size: 418 | ext: py | lang: Python
max_stars: Special_Viewer.py | Akivamelka/unsupervised_mid_semester | 5393185d7b0327bbb7cd4b3700d4d00704a5623f | ["MIT"] | null | null | null
max_issues: Special_Viewer.py | Akivamelka/unsupervised_mid_semester | 5393185d7b0327bbb7cd4b3700d4d00704a5623f | ["MIT"] | null | null | null
max_forks: Special_Viewer.py | Akivamelka/unsupervised_mid_semester | 5393185d7b0327bbb7cd4b3700d4d00704a5623f | ["MIT"] | null | null | null
content:
from Dimension_Reduction import Viewer
import pandas as pd
view_tool = Viewer()
reduc = 'pca'
suffix = '5'
data_plot = pd.read_csv(f"{reduc}_dim2_{suffix}.csv", delimiter=",")
models = ['km', 'fuzz', 'gmm', 'dbsc', 'hier', 'spec' ]
for model in models:
print(model)
labels = pd.read_csv(f"labels_{model}_{suffix}.csv", delimiter=",")
view_tool.view_vs_target(data_plot, labels, suffix, model)
avg_line_length: 32.153846 | max_line_length: 72 | alphanum_fraction: 0.669856

hexsha: 8d481fde3510821315275850b3a25299bc9b350d | size: 6,621 | ext: py | lang: Python
max_stars: pytumblr/types.py | 9999years/pytumblr | fe9b2fb60866785141fc0deb5a357a773c0f4229 | ["Apache-2.0"] | null | null | null
max_issues: pytumblr/types.py | 9999years/pytumblr | fe9b2fb60866785141fc0deb5a357a773c0f4229 | ["Apache-2.0"] | null | null | null
max_forks: pytumblr/types.py | 9999years/pytumblr | fe9b2fb60866785141fc0deb5a357a773c0f4229 | ["Apache-2.0"] | null | null | null
content:
from collections import UserList
from dataclasses import dataclass, field
from datetime import datetime
from typing import List, Dict, Any, Optional, Type
DATE_FORMAT = '%Y-%m-%d %H:%M:%S %Z'
_link_classes = {'navigation': NavigationLink,
'action': ActionLink}
# a type -> class dict
POST_CLASSES: Dict[str, Type] = {
'photo': LegacyPhotoPost,
'quote': LegacyQuotePost,
'link': LegacyLinkPost,
'chat': LegacyChatPost,
'audio': LegacyAudioPost,
'video': LegacyVideoPost,
'answer': LegacyAnswerPost,
}
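A dispatch dict like POST_CLASSES is typically consulted with the payload's type field. A hypothetical usage sketch (the constructor call assumes the dataclasses accept the payload's keys; `post_from_payload` is not part of the file above):

```python
# Hypothetical dispatch sketch, reusing Dict/Any from the imports above.
def post_from_payload(payload: Dict[str, Any]):
    post_cls = POST_CLASSES.get(payload.get('type'))
    if post_cls is None:
        raise ValueError(f"unknown post type: {payload.get('type')!r}")
    return post_cls(**payload)
```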
avg_line_length: 19.247093 | max_line_length: 71 | alphanum_fraction: 0.661683

hexsha: 8d4876f42fc49dd8332e5b4739b6a7de0c8b9bb2 | size: 311 | ext: py | lang: Python
max_stars: simple_jobs_scraper.py | Engnation/Jobs-Scraper | 6f8b1207731da9f187db406a5be6916774ba3bc5 | ["MIT"] | null | null | null
max_issues: simple_jobs_scraper.py | Engnation/Jobs-Scraper | 6f8b1207731da9f187db406a5be6916774ba3bc5 | ["MIT"] | null | null | null
max_forks: simple_jobs_scraper.py | Engnation/Jobs-Scraper | 6f8b1207731da9f187db406a5be6916774ba3bc5 | ["MIT"] | null | null | null
content:
from jobs_scraper import JobsScraper
# Let's create a new JobsScraper object and perform the scraping for a given query.
position_var = "Python"
scraper = JobsScraper(country="ca", position=position_var, location="Toronto", pages=3)
df = scraper.scrape()
df.to_csv(rf'{position_var} jobs.csv', index = False)
avg_line_length: 34.555556 | max_line_length: 87 | alphanum_fraction: 0.768489

hexsha: 8d4a0164b56629bd4e65dd24b9c1a1fba70a5ea1 | size: 810 | ext: py | lang: Python
max_stars: mac/redRMacUpdater.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | ["Apache-2.0"] | 1 | 2019-04-15T13:50:30.000Z | 2019-04-15T13:50:30.000Z
max_issues: mac/redRMacUpdater.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | ["Apache-2.0"] | null | null | null
max_forks: mac/redRMacUpdater.py | PiRSquared17/r-orange | 6bc383f1db3c10c59e16b39daffc44df904ce031 | ["Apache-2.0"] | 1 | 2016-01-21T23:00:21.000Z | 2016-01-21T23:00:21.000Z
content:
import tarfile, sys,os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
app = QApplication(sys.argv)
try:
zfile = tarfile.open(sys.argv[1], "r:gz" )
zfile.extractall(sys.argv[2])
zfile.close()
    mb = QMessageBox('Red-R Updated', "Red-R has been updated",
QMessageBox.Information, QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton, QMessageBox.NoButton)
except:
mb = QMessageBox('Red-R Updated', "There was an Error in updating Red-R.\n\n%s" % sys.exc_info()[0],
QMessageBox.Information, QMessageBox.Ok | QMessageBox.Default,
QMessageBox.NoButton, QMessageBox.NoButton)
app.setActiveWindow(mb)
mb.setFocus()
mb.show()
app.exit(0)
#mb.exec_()
sys.exit(app.exec_())
os.remove(sys.argv[1])
avg_line_length: 30 | max_line_length: 105 | alphanum_fraction: 0.646914

hexsha: 8d4be9a3c0385e4ebdfd3712a699e128c38acafc | size: 9,346 | ext: py | lang: Python
max_stars: darknet_websocket_demo.py | wutianze/darknet-superb-service | fdee5a932c8a3898701c1e302e4642fbff853630 | ["MIT"] | null | null | null
max_issues: darknet_websocket_demo.py | wutianze/darknet-superb-service | fdee5a932c8a3898701c1e302e4642fbff853630 | ["MIT"] | null | null | null
max_forks: darknet_websocket_demo.py | wutianze/darknet-superb-service | fdee5a932c8a3898701c1e302e4642fbff853630 | ["MIT"] | null | null | null
content:
from ctypes import *
#from multiprocessing import Process, Queue
import queue
import time
from threading import Lock,Thread
from fastapi import FastAPI
from fastapi import Request
from fastapi import WebSocket, WebSocketDisconnect
import uvicorn
#from yolo_service import *
import socket
import random
from typing import List
import darknet
import cv2
import time
import io
import struct
import os
import numpy as np
import base64
import json
from jtracer.tracing import init_tracer
import pynng
from PIL import Image
from opentracing.propagation import Format
def convert2relative(bbox,darknet_height,darknet_width):
"""
    YOLO format uses relative coordinates for annotation
"""
x, y, w, h = bbox
_height = darknet_height
_width = darknet_width
return x/_width, y/_height, w/_width, h/_height
app = FastAPI()
manager = ConnectionManager()
if __name__ == "__main__":
uvicorn.run("darknet_websocket_demo:app",host="0.0.0.0",port=int(os.getenv("SUPB_SERVICE_PORT")),log_level="info")
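A worked example of the convert2relative helper defined above: with a 416x416 network, an absolute box (208, 104, 52, 26) maps to relative coordinates (0.5, 0.25, 0.125, 0.0625). This snippet is illustrative and not part of the original file:

```python
# Worked example for convert2relative (editorial illustration).
x, y, w, h = convert2relative((208, 104, 52, 26),
                              darknet_height=416, darknet_width=416)
assert (x, y, w, h) == (0.5, 0.25, 0.125, 0.0625)
```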
avg_line_length: 35.003745 | max_line_length: 180 | alphanum_fraction: 0.652044

hexsha: 8d4d42f7498f1a4af52daeaede069016fb2ef667 | size: 2,389 | ext: py | lang: Python
max_stars: tests/unit/test_sherman_morrison.py | willwheelera/pyqmc | 0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb | ["MIT"] | 44 | 2019-06-04T13:53:26.000Z | 2022-03-31T08:36:30.000Z
max_issues: tests/unit/test_sherman_morrison.py | willwheelera/pyqmc | 0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb | ["MIT"] | 121 | 2019-05-13T14:05:20.000Z | 2022-02-16T19:24:37.000Z
max_forks: tests/unit/test_sherman_morrison.py | willwheelera/pyqmc | 0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb | ["MIT"] | 35 | 2019-04-26T21:57:50.000Z | 2022-02-14T07:56:34.000Z
content:
import numpy as np
from pyqmc.slater import sherman_morrison_row
from pyqmc.slater import sherman_morrison_ms
if __name__ == "__main__":
r_err, inv_err = list(zip(*[run_sherman_morrison() for i in range(2000)]))
print(np.amax(r_err))
print(np.amax(inv_err))
counts, bins = np.histogram(np.log10(inv_err), bins=np.arange(-16, 0))
print(np.stack([counts, bins[1:]]))
avg_line_length: 30.628205 | max_line_length: 78 | alphanum_fraction: 0.631226

hexsha: 8d4df1f93edc3b8bb4e583e03cb8610d1cc0439f | size: 1,543 | ext: py | lang: Python
max_stars: script/licel-plotter.py | FedeVerstraeten/smn-lidar-controller | 7850fd48702d5f2e00d07b499812b3b2fb2b7676 | ["MIT"] | null | null | null
max_issues: script/licel-plotter.py | FedeVerstraeten/smn-lidar-controller | 7850fd48702d5f2e00d07b499812b3b2fb2b7676 | ["MIT"] | 1 | 2021-10-05T03:53:55.000Z | 2021-10-05T03:53:55.000Z
max_forks: script/licel-plotter.py | FedeVerstraeten/smnar-lidar-controller | 7850fd48702d5f2e00d07b499812b3b2fb2b7676 | ["MIT"] | null | null | null
content:
#!/usr/bin/env python3
import sys
import socket
import time
import numpy as np
import matplotlib.pyplot as plt
HOST = '10.49.234.234'
PORT = 2055
if __name__ == '__main__':
# Select TR
command_select='SELECT 0'
rsp=repr(command_to_licel(command_select))
print('Received',rsp)
# Clear memory
command_clear='MCLEAR'
rsp=repr(command_to_licel(command_clear))
print('Received',rsp)
# Start TR
command_start='MSTART'
rsp=repr(command_to_licel(command_start))
print('Received',rsp)
time.sleep(5)
# Stop TR
command_stop='MSTOP'
rsp=repr(command_to_licel(command_stop))
print('Received',rsp)
# Get data
command_data='DATA? 0 4001 LSW A'
rsp=command_to_licel(command_data)
#print('Received',rsp)
# with open('outputlicel', 'w') as f:
# f.write(rsp)
data_output=rsp
# Plot
t = np.arange(0, len(data_output), 1)
data_arr=[]
for data_byte in data_output:
data_arr.append(int(data_byte))
fig, ax = plt.subplots()
ax.plot(t, data_arr)
ax.set(xlabel='time (s)', ylabel='voltage (mV)',title='SMN LICEL')
ax.grid()
fig.savefig("test.png")
plt.show()
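The preview elides the command_to_licel helper that the script calls throughout. One plausible shape, assuming a plain TCP request/response exchange with the transient recorder at HOST:PORT; this reconstruction is entirely hypothetical and the real helper is cut off in the preview:

```python
# Hypothetical reconstruction of the elided helper (assumption, not source).
def command_to_licel(command, bufsize=65536):
    with socket.create_connection((HOST, PORT), timeout=10) as sock:
        sock.sendall((command + '\r\n').encode('ascii'))
        return sock.recv(bufsize)
```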
avg_line_length: 24.109375 | max_line_length: 70 | alphanum_fraction: 0.644848

hexsha: 8d500786de7e53c7c13f50132e8ecbc760d095db | size: 13,860 | ext: py | lang: Python
max_stars: horizon/openstack_dashboard/dashboards/identity/account/tables.py | yianjiajia/openstack_horizon | 9e36a4c3648ef29d0df6912d990465f51d6124a6 | ["Apache-2.0"] | null | null | null
max_issues: horizon/openstack_dashboard/dashboards/identity/account/tables.py | yianjiajia/openstack_horizon | 9e36a4c3648ef29d0df6912d990465f51d6124a6 | ["Apache-2.0"] | null | null | null
max_forks: horizon/openstack_dashboard/dashboards/identity/account/tables.py | yianjiajia/openstack_horizon | 9e36a4c3648ef29d0df6912d990465f51d6124a6 | ["Apache-2.0"] | null | null | null
content:
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import json
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django.conf import settings
from horizon import forms
from horizon import tables
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION", lambda p, r: True)
def action(self, request, obj_id):
try:
user = api.keystone.user_get(request, obj_id)
default_user_role = api.keystone.get_default_role(request)
default_project_admin_role = api.keystone.get_default_project_admin_role(request)
api.keystone.remove_tenant_user_role(request, project=user.default_project_id,
user=user.id, role=default_user_role.id)
api.keystone.user_update(request, obj_id, **{'default_role_id': default_project_admin_role.id})
api.keystone.add_tenant_user_role(request, project=user.default_project_id,
user=user.id, role=default_project_admin_role.id)
# operation log
config = _('Old role %s, new role %s') % (default_user_role.name, default_project_admin_role.name)
api.logger.Logger(request).create(resource_type='account', action_name='Role_Change',
resource_name='Account', config=config,
status='Success')
except Exception:
# operation log
config = _('Old role %s, new role %s') % (default_user_role.name, default_project_admin_role.name)
api.logger.Logger(request).create(resource_type='account', action_name='Role_Change',
resource_name='Account', config=config,
status='Error')
class ChangePasswordLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "change_password"
verbose_name = _("Change Password")
url = "horizon:identity:account:change_password"
classes = ("ajax-modal",)
icon = "key"
policy_rules = (("identity", "identity:change_password"),)
policy_target_attrs = (("user_id", "id"),)
class UpdateRegionsLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "regions"
verbose_name = _("Update Regions")
url = "horizon:identity:account:regions"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:update_user_regions"),)
class UpdateMembersLink(tables.LinkAction):
name = "users"
verbose_name = _("Manage Members")
url = "horizon:identity:account:update_member"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("identity", "identity:list_users"),
("identity", "identity:list_grants"))
STATUS_DISPLAY_CHOICES = (
(False, _("Delete")),
(True, _("Normal")),
)
avg_line_length: 37.258065 | max_line_length: 165 | alphanum_fraction: 0.581818

hexsha: 8d5291b6a1ce7e03aab2c5b10e8c178dc0212bb3 | size: 2,278 | ext: py | lang: Python
max_stars: 3Sum.py | Muthu2093/Algorithms-practice | 999434103a9098a4361104fd39cba5913860fa9d | ["MIT"] | null | null | null
max_issues: 3Sum.py | Muthu2093/Algorithms-practice | 999434103a9098a4361104fd39cba5913860fa9d | ["MIT"] | null | null | null
max_forks: 3Sum.py | Muthu2093/Algorithms-practice | 999434103a9098a4361104fd39cba5913860fa9d | ["MIT"] | null | null | null
content:
## Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
## Note:
## The solution set must not contain duplicate triplets.
## Example:
## Given array nums = [-1, 0, 1, 2, -1, -4],
## A solution set is:
## [
## [-1, 0, 1],
## [-1, -1, 2]
## ]
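The preview stops at the problem statement. A minimal sketch of the standard sort-plus-two-pointers solution, O(n^2) time, which reproduces the sample answer (this sketch is editorial, not the repo's solution):

```python
# Sort, fix the first element, then scan the remainder from both ends,
# skipping duplicates so each triplet appears once.
def three_sum(nums):
    nums = sorted(nums)
    result = []
    for i in range(len(nums) - 2):
        if i > 0 and nums[i] == nums[i - 1]:  # skip duplicate anchors
            continue
        lo, hi = i + 1, len(nums) - 1
        while lo < hi:
            total = nums[i] + nums[lo] + nums[hi]
            if total < 0:
                lo += 1
            elif total > 0:
                hi -= 1
            else:
                result.append([nums[i], nums[lo], nums[hi]])
                while lo < hi and nums[lo] == nums[lo + 1]:
                    lo += 1
                while lo < hi and nums[hi] == nums[hi - 1]:
                    hi -= 1
                lo += 1
                hi -= 1
    return result

assert three_sum([-1, 0, 1, 2, -1, -4]) == [[-1, -1, 2], [-1, 0, 1]]
```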
avg_line_length: 27.780488 | max_line_length: 164 | alphanum_fraction: 0.368306

hexsha: 8d52b06f889e9040ed2102aec6867ed5ea6a3b70 | size: 684 | ext: py | lang: Python
max_stars: moim/models.py | gyukebox/django-tutorial-moim | ea9bea85dadf22bff58ae26ee1ac59171bbe0240 | ["MIT"] | null | null | null
max_issues: moim/models.py | gyukebox/django-tutorial-moim | ea9bea85dadf22bff58ae26ee1ac59171bbe0240 | ["MIT"] | 4 | 2018-01-01T09:26:30.000Z | 2018-01-06T07:13:01.000Z
max_forks: moim/models.py | gyukebox/django-tutorial-moim | ea9bea85dadf22bff58ae26ee1ac59171bbe0240 | ["MIT"] | null | null | null
content:
from django.db import models
from user.models import UserModel
avg_line_length: 36 | max_line_length: 68 | alphanum_fraction: 0.730994

hexsha: 8d5338ad6760bdfbd08440494b1ea9d0eab1dc53 | size: 1,809 | ext: py | lang: Python
max_stars: developers_chamber/scripts/gitlab.py | dstlmrk/developers-chamber | 93f928048f57c049f1c85446d18078b73376462a | ["MIT"] | 8 | 2019-08-23T15:46:30.000Z | 2021-03-23T20:12:21.000Z
max_issues: developers_chamber/scripts/gitlab.py | dstlmrk/developers-chamber | 93f928048f57c049f1c85446d18078b73376462a | ["MIT"] | 14 | 2019-09-17T20:24:18.000Z | 2021-05-18T21:10:12.000Z
max_forks: developers_chamber/scripts/gitlab.py | dstlmrk/developers-chamber | 93f928048f57c049f1c85446d18078b73376462a | ["MIT"] | 6 | 2019-08-23T15:46:21.000Z | 2022-02-18T11:01:18.000Z
content:
import os
import click
from developers_chamber.git_utils import get_current_branch_name
from developers_chamber.gitlab_utils import \
create_merge_request as create_merge_request_func
from developers_chamber.scripts import cli
DEFAULT_API_URL = os.environ.get('GITLAB_API_URL', 'https://gitlab.com/api/v4')
DEFAULT_PROJECT = os.environ.get('GITLAB_PROJECT')
DEFAULT_TARGET_BRANCH = os.environ.get('GITLAB_TARGET_BRANCH', 'next')
DEFAULT_TOKEN = os.environ.get('GITLAB_TOKEN')
avg_line_length: 38.489362 | max_line_length: 116 | alphanum_fraction: 0.726368

hexsha: 8d5577a30127caeb2ef24f4e9b841abc050103d0 | size: 15,790 | ext: py | lang: Python
max_stars: tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | ["Apache-2.0"] | 5 | 2020-06-04T10:20:33.000Z | 2020-10-26T15:09:19.000Z
max_issues: tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | ["Apache-2.0"] | null | null | null
max_forks: tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py | tessia-project/tessia | b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540 | ["Apache-2.0"] | null | null | null
content:
# Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test base autoinstall machine
A minimal implementation of SmBase is used to test common features
"""
# pylint: disable=invalid-name # we have really long test names
# pylint: disable=redefined-outer-name # use of fixtures
# pylint: disable=unused-argument # use of fixtures for their side effects
#
# IMPORTS
#
from pathlib import Path
from tessia.baselib.hypervisors.hmc.volume_descriptor import FcpVolumeDescriptor
from tessia.server.config import Config
from tessia.server.state_machines.autoinstall import plat_lpar, plat_zvm, plat_kvm
from tessia.server.state_machines.autoinstall import plat_base, sm_base
from tessia.server.state_machines.autoinstall.model import AutoinstallMachineModel
from tessia.server.state_machines.autoinstall.sm_base import SmBase
from tests_pytest.decorators import tracked
from tests_pytest.state_machines.ssh_stub import SshClient
from tests_pytest.state_machines.null_hypervisor import NullHypervisor
import pytest
import yaml
#
# CONSTANTS AND DEFINITIONS
#
CREDS = {'user': 'unit', 'password': 'test'}
#
# CODE
#
# wait_install()
class NullPostInstallChecker:
"""
    PostInstallChecker that checks that it has been called
"""
def test_boot_and_postinstall_check_on_lpar_dasd(
lpar_dasd_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on an LPAR on DASD disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
lpar_dasd_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == lpar_dasd_system.hypervisor.boot_options['partition-name']
assert cpus == lpar_dasd_system.cpus
assert mem == lpar_dasd_system.memory
# installation device does not show up in HmcHypervisor boot,
# it is only used later during installation
assert attrs['boot_params']['boot_method'] == 'dasd'
assert attrs['boot_params']['devicenr'] == \
lpar_dasd_system.hypervisor.boot_options['boot-device']
def test_boot_and_postinstall_check_on_lpar_scsi(
lpar_scsi_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on an LPAR on SCSI disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
lpar_scsi_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == lpar_scsi_system.hypervisor.boot_options['partition-name']
assert cpus == lpar_scsi_system.cpus
assert mem == lpar_scsi_system.memory
# installation device does not show up in HmcHypervisor boot,
# it is only used later during installation
assert attrs['boot_params']['boot_method'] == 'dasd'
assert attrs['boot_params']['devicenr'] == \
lpar_scsi_system.hypervisor.boot_options['boot-device']
def test_boot_and_postinstall_check_on_vm_dasd(
vm_dasd_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on a VM on DASD disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
vm_dasd_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_zvm.PlatZvm.create_hypervisor(model)
platform = plat_zvm.PlatZvm(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == vm_dasd_system.system_name
assert cpus == vm_dasd_system.cpus
assert mem == vm_dasd_system.memory
assert vm_dasd_system.volumes[0].device_id == \
attrs['storage_volumes'][0]['devno']
def test_boot_and_postinstall_check_on_vm_scsi(
vm_scsi_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on a VM on SCSI disk
Verify that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
vm_scsi_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_zvm.PlatZvm.create_hypervisor(model)
platform = plat_zvm.PlatZvm(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == vm_scsi_system.system_name
assert cpus == vm_scsi_system.cpus
assert mem == vm_scsi_system.memory
assert vm_scsi_system.volumes[0].lun == \
attrs['storage_volumes'][0]['lun']
def test_boot_and_postinstall_check_on_kvm_scsi(
kvm_scsi_system, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on a KVM on SCSI disk
Verify correct device paths
and that hypervisor is called with correct parameters
and post-install checker is run
"""
model = AutoinstallMachineModel(*default_os_tuple,
kvm_scsi_system, CREDS)
checker = NullPostInstallChecker()
hyp = plat_kvm.PlatKvm.create_hypervisor(model)
platform = plat_kvm.PlatKvm(model, hyp)
# autoinstall machines use their own working directory
# and have to be initialized in a temporary environment
with tmpdir.as_cwd():
smbase = NullMachine(model, platform, checker)
smbase.start()
assert checker.verify.called_once
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == kvm_scsi_system.system_name
assert cpus == kvm_scsi_system.cpus
assert mem == kvm_scsi_system.memory
assert kvm_scsi_system.volumes[0].lun == \
attrs['storage_volumes'][0]['volume_id']
for volume in model.system_profile.volumes:
assert '/dev/disk/by-path/ccw' in volume.device_path
def test_network_boot_on_lpar_scsi(
scsi_volume, osa_iface, default_os_tuple, tmpdir):
"""
Attempt to install "nothing" on an LPAR on SCSI disk
using network boot
Verify that hypervisor is called with correct parameters
"""
ins_file = 'user@password:inst.local/some-os/boot.ins'
hmc_hypervisor = AutoinstallMachineModel.HmcHypervisor(
'hmc', 'hmc.local',
{'user': '', 'password': ''},
{
'partition-name': 'LP10',
'boot-method': 'network',
'boot-uri': 'ftp://' + ins_file,
})
system = AutoinstallMachineModel.SystemProfile(
'lp10', 'default',
hypervisor=hmc_hypervisor,
hostname='lp10.local',
cpus=2, memory=8192,
volumes=[scsi_volume],
interfaces=[(osa_iface, True)]
)
model = AutoinstallMachineModel(*default_os_tuple, system, CREDS)
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
with tmpdir.as_cwd():
smbase = NullMachine(model, platform)
smbase.start()
sys, cpus, mem, attrs, *_ = hyp.start.calls[0]
assert sys == hmc_hypervisor.boot_options['partition-name']
assert cpus == system.cpus
assert mem == system.memory
assert attrs['boot_params']['boot_method'] == 'ftp'
assert attrs['boot_params']['insfile'] == ins_file
def test_template_lpar_dasd(lpar_dasd_system, default_os_tuple, tmpdir):
"""
Test major template parameters
"""
*os_tuple, _, _ = default_os_tuple
package_repo = AutoinstallMachineModel.PackageRepository(
'aux', 'http://example.com/repo', 'package repo')
model = AutoinstallMachineModel(
*os_tuple, [], [package_repo], lpar_dasd_system, CREDS)
hyp = plat_lpar.PlatLpar.create_hypervisor(model)
platform = plat_lpar.PlatLpar(model, hyp)
with tmpdir.as_cwd():
smbase = NullMachine(model, platform)
autofile_path = (Path.cwd() / 'lp10-default')
smbase.start()
autofile = yaml.safe_load(autofile_path.read_text())
assert autofile['system']['type'] == 'LPAR'
assert autofile['system']['hostname'] == 'lp10.local'
assert autofile['gw_iface']['type'] == 'OSA'
assert autofile['gw_iface']['osname'] == 'enccw0b01'
assert autofile['gw_iface']['search_list'] == ['example.com', 'local']
assert autofile['ifaces'][0]['osname'] == 'enccw0b01'
assert autofile['volumes'][0]['type'] == 'DASD'
assert autofile['volumes'][0]['partitions'] == [
{'fs': 'ext4', 'mp': '/', 'size': '18000M'}
]
assert autofile['repos'][0]['name'] == 'os-repo'
assert autofile['repos'][1]['name'] == 'aux'
def test_template_kvm_scsi(kvm_scsi_system, default_os_tuple, tmpdir):
"""
Test major template parameters
"""
model = AutoinstallMachineModel(*default_os_tuple,
kvm_scsi_system, CREDS)
hyp = plat_kvm.PlatKvm.create_hypervisor(model)
platform = plat_kvm.PlatKvm(model, hyp)
with tmpdir.as_cwd():
smbase = NullMachine(model, platform)
autofile_path = (Path.cwd() / 'kvm54-default')
smbase.start()
autofile = yaml.safe_load(autofile_path.read_text())
assert autofile['system']['type'] == 'KVM'
assert autofile['system']['hostname'] == 'kvm54.local'
assert autofile['gw_iface']['type'] == 'MACVTAP'
assert autofile['gw_iface']['osname'] == 'eth0'
assert autofile['ifaces'][0]['is_gateway']
avg_line_length: 34.326087 | max_line_length: 82 | alphanum_fraction: 0.664155

hexsha: 8d5578255a37005da9d4bcc07955742be9a91579 | size: 2,261 | ext: py | lang: Python
max_stars: tests/test_command/test_cat_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | ["BSD-3-Clause"] | null | null | null
max_issues: tests/test_command/test_cat_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | ["BSD-3-Clause"] | null | null | null
max_forks: tests/test_command/test_cat_command.py | bbglab/openvariant | ea1e1b6edf0486b0dea34f43227ba333df1071cc | ["BSD-3-Clause"] | null | null | null
content:
import unittest
from os import getcwd
from click.testing import CliRunner
from openvariant.commands.openvar import cat
avg_line_length: 37.065574 | max_line_length: 121 | alphanum_fraction: 0.640867

hexsha: 8d559eab2b8075257716e7bc85f5c9d82b0d3221 | size: 4,766 | ext: py | lang: Python
max_stars: resnet.py | rVSaxena/VAE | 26aa3452a0c8f663153d8cfc8bf1686e242d2fac | ["Unlicense"] | null | null | null
max_issues: resnet.py | rVSaxena/VAE | 26aa3452a0c8f663153d8cfc8bf1686e242d2fac | ["Unlicense"] | null | null | null
max_forks: resnet.py | rVSaxena/VAE | 26aa3452a0c8f663153d8cfc8bf1686e242d2fac | ["Unlicense"] | null | null | null
content:
import torch
import torch.nn as nn
avg_line_length: 34.042857 | max_line_length: 170 | alphanum_fraction: 0.640579

hexsha: 8d56bf9a638e31e26421d0d5ccd052c3c7de5f95 | size: 246 | ext: py | lang: Python
max_stars: camknows/camknows.py | dreoporto/camknows | 769aeb91ff16ff654aa1b182f3564dd26a0f7ad6 | ["MIT"] | 2 | 2021-09-20T12:29:57.000Z | 2021-09-28T11:09:06.000Z
max_issues: camknows/camknows.py | dreoporto/camknows | 769aeb91ff16ff654aa1b182f3564dd26a0f7ad6 | ["MIT"] | null | null | null
max_forks: camknows/camknows.py | dreoporto/camknows | 769aeb91ff16ff654aa1b182f3564dd26a0f7ad6 | ["MIT"] | null | null | null
content:
from camera import Camera
if __name__ == '__main__':
main()
avg_line_length: 15.375 | max_line_length: 55 | alphanum_fraction: 0.630081

hexsha: 8d58501cd2a4cf7d4be038ee750ddd345cd594fc | size: 196 | ext: py | lang: Python
max_stars: src/main.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | ["MIT"] | null | null | null
max_issues: src/main.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | ["MIT"] | null | null | null
max_forks: src/main.py | C4theBomb/python-calendar-app | 6776403f7f2440c6497d9a53be5e8d617a2ee817 | ["MIT"] | null | null | null
content:
from calendarApp import shell, models
import os
if __name__ == "__main__":
main()
avg_line_length: 15.076923 | max_line_length: 38 | alphanum_fraction: 0.673469

hexsha: 8d5852ea5b1463bc9be5da885619fc756c5bd1fc | size: 4,329 | ext: py | lang: Python
max_stars: personal/Ervin/Word2Vec_recommender.py | edervishaj/spotify-recsys-challenge | 4077201ac7e4ed9da433bd10a92c183614182437 | ["Apache-2.0"] | 3 | 2018-10-12T20:19:57.000Z | 2019-12-11T01:11:38.000Z
max_issues: personal/Ervin/Word2Vec_recommender.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | ["Apache-2.0"] | null | null | null
max_forks: personal/Ervin/Word2Vec_recommender.py | kiminh/spotify-recsys-challenge | 5e7844a77ce3c26658400f161d2d74d682f30e69 | ["Apache-2.0"] | 4 | 2018-10-27T20:30:18.000Z | 2020-10-14T07:43:27.000Z
content:
import time
import numpy as np
import scipy.sparse as sps
from gensim.models import Word2Vec
from tqdm import tqdm
from recommenders.recommender import Recommender
from utils.datareader import Datareader
from utils.evaluator import Evaluator
from utils.post_processing import eurm_to_recommendation_list
from recommenders.similarity.s_plus import dot_product
if __name__ == '__main__':
dr = Datareader(only_load=True, mode='offline', test_num='1', verbose=False)
pid = dr.get_test_playlists().transpose()[0]
urm = dr.get_urm()
urm.data = np.ones(urm.data.shape[0])
ev = Evaluator(datareader=dr)
model = W2VRecommender()
model.fit(urm, pid)
model.compute_model(verbose=True, size=50)
model.compute_rating(verbose=True, small=True, top_k=750)
ev.evaluate(recommendation_list=eurm_to_recommendation_list(model.eurm, remove_seed=True, datareader=dr),
name="W2V", old_mode=False)
avg_line_length: 37.973684 | max_line_length: 116 | alphanum_fraction: 0.613075

hexsha: 8d58f2b0959a8386b4c708d7cc38bd2e9f103bb6 | size: 1,321 | ext: py | lang: Python
max_stars: pyesasky/__init__.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | ["BSD-3-Clause"] | 13 | 2019-05-30T19:57:37.000Z | 2021-09-10T09:43:49.000Z
max_issues: pyesasky/__init__.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | ["BSD-3-Clause"] | 21 | 2019-06-21T18:55:25.000Z | 2022-02-27T14:48:13.000Z
max_forks: pyesasky/__init__.py | pierfra-ro/pyesasky | a9342efcaa5cca088ed9a5afa2c98d3e9aa4bd0f | ["BSD-3-Clause"] | 8 | 2019-05-30T12:20:48.000Z | 2022-03-04T04:01:20.000Z
content:
from ._version import __version__ # noqa
from .pyesasky import ESASkyWidget # noqa
from .catalogue import Catalogue # noqa
from .catalogueDescriptor import CatalogueDescriptor # noqa
from .cooFrame import CooFrame # noqa
from .footprintSet import FootprintSet # noqa
from .footprintSetDescriptor import FootprintSetDescriptor # noqa
from .HiPS import HiPS # noqa
from .imgFormat import ImgFormat # noqa
from .jupyter_server import load_jupyter_server_extension # noqa
from .metadataDescriptor import MetadataDescriptor # noqa
from .metadataType import MetadataType # noqa
import json
from pathlib import Path
HERE = Path(__file__).parent.resolve()
with (HERE / "labextension" / "package.json").open() as fid:
data = json.load(fid)
# Jupyter Extension points
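The preview cuts off right after this comment. A labextension package typically continues with the standard hook below; this is an assumption based on the package.json just loaded, not the repo's verbatim code:

```python
# Assumed continuation (standard Jupyter labextension hook).
def _jupyter_labextension_paths():
    return [{
        "src": "labextension",
        "dest": data["name"],
    }]
```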
avg_line_length: 33.025 | max_line_length: 65 | alphanum_fraction: 0.711582

hexsha: 8d5933b202fa0260d94c68bc7edbd14a32abb844 | size: 2,930 | ext: py | lang: Python
max_stars: visualize.py | jcamstan3370/MachineLearningPerovskites | d7bc433bac349bf53473dc6d636954cae996b8d2 | ["MIT"] | 6 | 2020-05-09T17:18:00.000Z | 2021-09-22T09:37:40.000Z
max_issues: visualize.py | jstanai/ml_perovskites | d7bc433bac349bf53473dc6d636954cae996b8d2 | ["MIT"] | null | null | null
max_forks: visualize.py | jstanai/ml_perovskites | d7bc433bac349bf53473dc6d636954cae996b8d2 | ["MIT"] | 1 | 2021-03-24T04:21:31.000Z | 2021-03-24T04:21:31.000Z
content:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Jared
"""
import numpy as np
import pandas as pd
import myConfig
import matplotlib.pyplot as plt
from ast import literal_eval
from plotter import getTrendPlot1
from matplotlib.pyplot import figure
df = pd.read_csv(myConfig.extOutput)
dffExt = pd.read_csv(myConfig.featurePathExt)
dffExt = dffExt.copy().dropna(axis=0, how='any').reset_index()
y_predict_ext = df['yhat_ext']
print('Num dummy crystals: {}'.format(len(y_predict_ext)))
print([n for n in dffExt.columns if 'p_' not in n])
s = 'fracCl'
dffExt['yhat_ext'] = df['yhat_ext']
ylabel = '$E_{g}$ (eV)'
getTrendPlot1(dffExt, y_predict_ext, s,
ylabel = ylabel,
xlabel = s,
title = 'Trend')
plt.show()
'''
s = 'volume'
g = dffExt.groupby('fracCl')
for i, group in g:
getTrendPlot1(group, y_predict_ext, s,
ylabel = ylabel,
xlabel = s,
title = 'Trend',
scatter = False)
plt.show()
'''
s = 'fracCs'
g = dffExt.groupby('fracSn')
for i, group in g:
getTrendPlot1(group, y_predict_ext, s,
ylabel = ylabel,
xlabel = s,
title = 'Trend',
scatter = False)
plt.show()
'''
print(dffExt[['fracCs', 'fracRb', 'fracK', 'fracNa',
'fracSn' , 'fracGe',
'fracCl', 'fracI', 'fracBr', 'yhat_ext']].head(10))
'''
g = dffExt.groupby([
'fracCs', 'fracRb', 'fracK', 'fracNa',
'fracSn' , 'fracGe',
'fracCl', 'fracI', 'fracBr'])
x = []
y = []
x_all = []
y_all = []
for (gr, gi) in g:
labels = ['Cs', 'Rb', 'K', 'Na', 'Sn', 'Ge',
'Cl', 'I', 'Br']
#print(gr)
sarr = []
for i, n in enumerate(gr):
if i < 6:
m = 1
else:
m = 3
if n != 0:
#if n == 1.0:
sarr.append(labels[i] + '$_{' + str(int(4*m*n)) + '}$')
#else:
#sarr.append(labels[i] + '$_{' + str(4*m*n) + '}$')
#print(sarr, gr)
x += [''.join(sarr)]
y.append(gi['yhat_ext'].mean())
x_all += [''.join(sarr)]*len(gi)
y_all += gi['yhat_ext'].tolist()
print(len(x_all), len(x))
fig = plt.figure(figsize=(13, 4), dpi=200)
#(Atomic 3%, Lattice 10%)
#plt.title('Stability Trends')
plt.title('Direct Bandgap Trends')
#plt.ylabel('$\Delta E_{hull}$ (meV/atom)')
plt.ylabel('$E_{g}$ (eV)')
plt.xticks(rotation=90)
plt.scatter(x, y)
#figure(num=None, figsize=(8, 6), dpi=200, facecolor='w', edgecolor='k')
plt.savefig('/Users/Jared/Documents/test.png', bbox_inches='tight')
plt.show()
'''
plt.title('Bandgap Trends (Atomic 5%, Lattice 5%)')
plt.ylabel('E$_{g}$ (eV)')
plt.xticks(rotation=90)
plt.scatter(x_all, y_all)
figure(num=None, figsize=(8, 6), dpi=200, facecolor='w', edgecolor='k')
'''
avg_line_length: 23.821138 | max_line_length: 72 | alphanum_fraction: 0.531058

hexsha: 8d5938563047da10af2e319b379482b6a7545552 | size: 237 | ext: py | lang: Python
max_stars: 11-if-elif-else-condition.py | GunarakulanGunaretnam/python-basic-fundamentals | c62bf939fbaef8895d28f85af9ef6ced70801f96 | ["Apache-2.0"] | null | null | null
max_issues: 11-if-elif-else-condition.py | GunarakulanGunaretnam/python-basic-fundamentals | c62bf939fbaef8895d28f85af9ef6ced70801f96 | ["Apache-2.0"] | null | null | null
max_forks: 11-if-elif-else-condition.py | GunarakulanGunaretnam/python-basic-fundamentals | c62bf939fbaef8895d28f85af9ef6ced70801f96 | ["Apache-2.0"] | null | null | null
content:
name = input("Enter your name? ")
if name == "guna":
print("1234567890")
elif name == "david":
print("0987654321")
elif name == "rakulan":
print("12345")
elif name == "raj":
print("1234455667")
else:
print("No contacts found")
avg_line_length: 13.941176 | max_line_length: 33 | alphanum_fraction: 0.632911

hexsha: 8d595677f62dbebf986ab917f4b41f5f89af2fea | size: 13,409 | ext: py | lang: Python
max_stars: InstagramCrawler.py | Bagas8015/Instagram-Posts-Crawler-Users-v1 | 82d5da12f7f6caf8c085085135134f58affb1ec7 | ["MIT"] | null | null | null
max_issues: InstagramCrawler.py | Bagas8015/Instagram-Posts-Crawler-Users-v1 | 82d5da12f7f6caf8c085085135134f58affb1ec7 | ["MIT"] | null | null | null
max_forks: InstagramCrawler.py | Bagas8015/Instagram-Posts-Crawler-Users-v1 | 82d5da12f7f6caf8c085085135134f58affb1ec7 | ["MIT"] | null | null | null
content:
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import emoji
import string
import csv
import os
browser = webdriver.Chrome()
user = input('Enter your account username: ')
passwo = input('Enter your account password: ')
url = 'https://www.instagram.com'
username = user
password = passwo
mulaiProgram(url, username, password)
browser.quit()
avg_line_length: 50.220974 | max_line_length: 205 | alphanum_fraction: 0.548736

hexsha: 8d596a354fbcf53937f22d7c7dc7a505553f0379 | size: 5,310 | ext: py | lang: Python
max_stars: pages/process.py | nchibana/dash-app-template | a51ad0ac92e719b2ef60739b6c1126aebb920d47 | ["MIT"] | null | null | null
max_issues: pages/process.py | nchibana/dash-app-template | a51ad0ac92e719b2ef60739b6c1126aebb920d47 | ["MIT"] | 4 | 2020-03-24T17:36:39.000Z | 2021-08-23T20:13:16.000Z
max_forks: pages/process.py | nchibana/dash-app-template | a51ad0ac92e719b2ef60739b6c1126aebb920d47 | ["MIT"] | null | null | null
content:
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.graph_objects as go
import plotly.express as px
import plotly.figure_factory as ff
from sklearn.metrics import roc_curve
import pandas as pd
from joblib import load
from app import app
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Process
********
To build this model, two datasets with similar labels were combined to form a dataset with 102,840 observations.
I would like to thank the research team behind [this study](https://arxiv.org/pdf/1802.00393.pdf), as they promptly gave me access to their data, which was labeled through Crowdflower.
This model builds largely on their work, as well as that of [this previous study](https://aaai.org/ocs/index.php/ICWSM/ICWSM17/paper/view/15665).
After gaining access to both datasets, I proceeded to retrieve the corresponding tweet text for all IDs in the second set (as it was not provided) via Twitter's API.
This was [the code](https://stackoverflow.com/questions/44581647/retrieving-a-list-of-tweets-using-tweet-id-in-tweepy) I used to retrieve the text, without exceeding the rate limit.
"""
),
html.Iframe(src='data:text/html;charset=utf-8,%3Cbody%3E%3Cscript%20src%3D%22https%3A%2F%2Fgist.github.com%2Fnchibana%2F20d6d9f8ae62a6cc36b773d37dd7dc70.js%22%3E%3C%2Fscript%3E%3C%2Fbody%3E', style=dict(border=0, padding=40), height=780, width=1000),
dcc.Markdown(
"""
After that, I proceeded to combine the datasets and eliminate all duplicate tweets. I also defined a baseline accuracy score of 56%, which is the percent accuracy the model would achieve
if it predicted the majority class for all tweets.
Using some of the processes followed by the studies mentioned above, I also continued to preprocess the data by eliminating excess spaces, removing punctuation and retrieving the stem words of terms
used in tweets.
            Next, I used Scikit-learn's [TfidfVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) to convert tweet text into a matrix of
TF-IDF features, which is a statistic that calculates how important a word is to a document or collection of words.
"""
),
html.Iframe(src='data:text/html;charset=utf-8,%3Cbody%3E%3Cscript%20src%3D%22https://gist.github.com/nchibana/c15cbc4a1d97af02fa62fff5868bc36e.js%22%3E%3C%2Fscript%3E%3C%2Fbody%3E', style=dict(border=0, padding=40), height=460, width=1000),
dcc.Markdown(
"""
To increase the accuracy of the model, additional features were engineered, such as the number of syllables per word, the total number of characters, the number of words, the number of unique
terms, as well as readability and sentiment scores for each tweet.
Additionally, the number of mentions, hashtags and links in each tweet were also counted. For this study, images or any other type of media content were not analyzed.
"""
),
html.Iframe(src='data:text/html;charset=utf-8,%3Cbody%3E%3Cscript%20src%3D%22https%3A%2F%2Fgist.github.com%2Fnchibana%2F5cebfbfa700974edcd9f5fa6e43cc513.js%22%3E%3C%2Fscript%3E%3C%2Fbody%3E', style=dict(border=0, padding=40), height=600, width=1000),
dcc.Markdown(
"""
After testing several models such as Linear SVC, I finally settled on a logistic regression model which I trained on the data and used for the final model and app.
I also used grid search to find the optimal parameters for this logistic regression model.
Finally, I computed all accuracy scores and proceeded to plot visualizations to help me get a deeper understanding of the model, such as a confusion matrix to visualize misclassified tweets.
"""
),
html.Iframe(src='data:text/html;charset=utf-8,%3Cbody%3E%3Cscript%20src%3D%22https%3A%2F%2Fgist.github.com%2Fnchibana%2F0cc0c44c9b5a991adbc2690c97023d0c.js%22%3E%3C%2Fscript%3E%3C%2Fbody%3E', style=dict(border=0, padding=40), height=300, width=1000),
dcc.Markdown(
"""
## Sources
********
1. Automated Hate Speech Detection and the Problem of Offensive Language
Davidson, Thomas and Warmsley, Dana and Macy, Michael and Weber, Ingmar
Proceedings of the 11th International AAAI Conference on Web and Social Media p. 512-515. 2017
2. Large Scale Crowdsourcing and Characterization of Twitter Abusive Behavior
Founta, Antigoni-Maria and Djouvas, Constantinos and Chatzakou, Despoina and Leontiadis, Ilias and Blackburn, Jeremy and Stringhini, Gianluca and Vakali, Athena and Sirivianos, Michael and Kourtellis, Nicolas
11th International Conference on Web and Social Media, ICWSM 2018 2018
"""
),
],
md=12,
)
layout = dbc.Row([column1])
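The TF-IDF step described in the page text follows the usual scikit-learn pattern. A minimal, self-contained sketch; the parameters and sample tweets here are illustrative, not the ones used in the study:

```python
from sklearn.feature_extraction.text import TfidfVectorizer

tweets = ["an example tweet", "another example tweet with more words"]
# Illustrative settings; the study's actual n-gram range and vocabulary
# size are assumptions here.
vectorizer = TfidfVectorizer(ngram_range=(1, 3), max_features=10000)
features = vectorizer.fit_transform(tweets)  # sparse matrix: tweets x terms
print(features.shape)
```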
avg_line_length: 50.09434 | max_line_length: 259 | alphanum_fraction: 0.688512

hexsha: 8d597279dcdef01055e59ebc350f3cf1d766f1a3 | size: 599 | ext: py | lang: Python
max_stars: tests/sdk/test_service.py | kusanagi/katana-sdk-python3 | cd089409ec0d822f4d7bd6b4bebd527e003089ee | ["MIT"] | 2 | 2017-03-21T20:02:47.000Z | 2017-05-02T19:32:01.000Z
max_issues: tests/sdk/test_service.py | kusanagi/katana-sdk-python3 | cd089409ec0d822f4d7bd6b4bebd527e003089ee | ["MIT"] | 19 | 2017-03-10T12:09:34.000Z | 2018-06-01T18:10:06.000Z
max_forks: tests/sdk/test_service.py | kusanagi/katana-sdk-python3 | cd089409ec0d822f4d7bd6b4bebd527e003089ee | ["MIT"] | 5 | 2017-03-10T11:40:50.000Z | 2019-03-26T06:28:33.000Z
content:
from katana.sdk.service import get_component
from katana.sdk.service import Service
| 27.227273
| 61
| 0.736227
|
8d5b40af3f077c2c14c5035c4efe391b9a38cc70
| 527
|
py
|
Python
|
DesignPatterns/MVC/server/controllers/index.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | 1
|
2020-08-13T19:09:27.000Z
|
2020-08-13T19:09:27.000Z
|
DesignPatterns/MVC/server/controllers/index.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | null | null | null |
DesignPatterns/MVC/server/controllers/index.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | null | null | null |
from .base_controller import BaseController
from ..helper.utils import render_template
from ..helper.constants import STATUS_OK
| 27.736842
| 47
| 0.654649
|
8d5bd4af92a66ece14d4931534ffa3416cb4b661
| 3,919
|
py
|
Python
|
plugins/tff_backend/bizz/payment.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | null | null | null |
plugins/tff_backend/bizz/payment.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 178
|
2017-08-02T12:58:06.000Z
|
2017-12-20T15:01:12.000Z
|
plugins/tff_backend/bizz/payment.py
|
threefoldfoundation/app_backend
|
b3cea2a3ff9e10efcc90d3d6e5e8e46b9e84312a
|
[
"Apache-2.0"
] | 2
|
2018-01-10T10:43:12.000Z
|
2018-03-18T10:42:23.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2017 GIG Technology NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.3@@
import time
from google.appengine.api import users
from google.appengine.ext import ndb
from framework.utils import now
from mcfw.rpc import returns, arguments
from plugins.rogerthat_api.exceptions import BusinessException
from plugins.tff_backend.models.payment import ThreeFoldTransaction, ThreeFoldPendingTransaction
from plugins.tff_backend.to.payment import WalletBalanceTO
| 42.597826
| 120
| 0.720082
|
8d5f94f57caf92571a35ef22a1aa7566e2df0d65
| 1,582
|
py
|
Python
|
tasks/tests/ui/conftest.py
|
MisterLenivec/django_simple_todo_app
|
8e694a67df43de7feaae785c0b3205534c701923
|
[
"MIT"
] | null | null | null |
tasks/tests/ui/conftest.py
|
MisterLenivec/django_simple_todo_app
|
8e694a67df43de7feaae785c0b3205534c701923
|
[
"MIT"
] | 4
|
2020-06-07T01:25:14.000Z
|
2021-06-10T18:34:10.000Z
|
tasks/tests/ui/conftest.py
|
MisterLenivec/django_simple_todo_app
|
8e694a67df43de7feaae785c0b3205534c701923
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import pytest
import os
| 27.275862
| 77
| 0.653603
|
8d606a3efd5feb490b057183d05dc39513b2525a
| 3,519
|
py
|
Python
|
erp/migrations/0026_auto_20200205_0950.py
|
Foohx/acceslibre
|
55135e096f2ec4e413ff991f01c17f5e0d5925c0
|
[
"MIT"
] | 8
|
2020-07-23T08:17:28.000Z
|
2022-03-09T22:31:36.000Z
|
erp/migrations/0026_auto_20200205_0950.py
|
Foohx/acceslibre
|
55135e096f2ec4e413ff991f01c17f5e0d5925c0
|
[
"MIT"
] | 37
|
2020-07-01T08:47:33.000Z
|
2022-02-03T19:50:58.000Z
|
erp/migrations/0026_auto_20200205_0950.py
|
Foohx/acceslibre
|
55135e096f2ec4e413ff991f01c17f5e0d5925c0
|
[
"MIT"
] | 4
|
2021-04-08T10:57:18.000Z
|
2022-01-31T13:16:31.000Z
|
# Generated by Django 3.0.3 on 2020-02-05 08:50
from django.db import migrations, models
| 59.644068
| 627
| 0.651321
|
8d60c377538ddae6447654f6c37f24bae517225c
| 3,629
|
py
|
Python
|
convert.py
|
Ellen7ions/bin2mem
|
51e3216cbf5e78547751968ef1619a925f2f55ef
|
[
"MIT"
] | 3
|
2021-05-18T13:07:39.000Z
|
2021-05-24T12:46:43.000Z
|
convert.py
|
Ellen7ions/bin2mem
|
51e3216cbf5e78547751968ef1619a925f2f55ef
|
[
"MIT"
] | null | null | null |
convert.py
|
Ellen7ions/bin2mem
|
51e3216cbf5e78547751968ef1619a925f2f55ef
|
[
"MIT"
] | null | null | null |
import os, sys
import json
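class Convert:
    # Hypothetical minimal sketch (the original class body is not shown):
    # turn a flat binary into a '.mem' text file with one 32-bit hex word
    # per line, as consumed by Verilog's $readmemh. The file names are
    # assumptions.
    def __init__(self, src='main.bin', dst='main.mem'):
        self.src = src
        self.dst = dst
    def apply(self):
        with open(self.src, 'rb') as f:
            data = f.read()
        with open(self.dst, 'w') as f:
            for i in range(0, len(data), 4):
                word = int.from_bytes(data[i:i + 4].ljust(4, b'\x00'), 'little')
                f.write('%08x\n' % word)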
if __name__ == '__main__':
c = Convert()
c.apply()
# c.mips_gcc_c()
# c.mips_objcopy()
# c.mips_bin2mem()
# config = Config()
| 28.801587
| 92
| 0.590245
|
8d61a4b35ddf035024fe7d951c745cb83a2a9d4d
| 3,161
|
py
|
Python
|
stats.py
|
DisinfoResearch/TwitterCollector
|
183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb
|
[
"MIT"
] | null | null | null |
stats.py
|
DisinfoResearch/TwitterCollector
|
183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb
|
[
"MIT"
] | null | null | null |
stats.py
|
DisinfoResearch/TwitterCollector
|
183b6761cca54b5db5b98a2f9f86bd8bcc98a7cb
|
[
"MIT"
] | null | null | null |
#!/bin/python3
# Copyright (C) 2021, Michigan State University.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import csv
import json
import argparse
import sys
import datetime
from dateutil.parser import parse
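# Hypothetical converters (the original definitions are not shown): each one
# reads tweets in the named input format and writes the other format out.
def process_csv(inf, outf):
    # CSV in -> JSON out.
    json.dump(list(csv.DictReader(inf)), outf, indent=2)
def process_json(inf, outf):
    # JSON in -> CSV out: one row per object, columns taken from the first object.
    tweets = json.load(inf)
    if not tweets:
        return
    writer = csv.DictWriter(outf, fieldnames=sorted(tweets[0].keys()))
    writer.writeheader()
    for tweet in tweets:
        writer.writerow(tweet)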
parser = argparse.ArgumentParser(description='Convert JSON to CSV', epilog='P.S. Trust The Plan')
parser.add_argument('--format', help='either JSON or CSV', required=True)
parser.add_argument('input', help='JSON File, or stdin if not specified', type=argparse.FileType('r', encoding='utf-8'), default=sys.stdin)
parser.add_argument('output', help='output to File, or stdout if not specified', type=argparse.FileType('w', encoding='utf-8'), default=sys.stdout)
args = parser.parse_args()
today = datetime.date.today()
if args.format.upper() == 'CSV':
process_csv(args.input, args.output)
elif args.format.upper() == 'JSON':
process_json(args.input, args.output)
else:
print(f"Error: '{args.format}' is an invalid format, must be CSV or JSON.", end="\n\n")
parser.print_help()
exit(-1)
| 45.157143
| 326
| 0.726669
|
8d61d1b5d6b0de975b9d576cfadcd886cc44204a
| 10,970
|
py
|
Python
|
Scratch/lstm.py
|
imadtoubal/MultimodalDeepfakeDetection
|
46539e16c988ee9fdfb714893788bbbf72836595
|
[
"MIT"
] | 2
|
2022-03-12T09:18:13.000Z
|
2022-03-23T08:29:10.000Z
|
Scratch/lstm.py
|
imadtoubal/MultimodalDeepfakeDetection
|
46539e16c988ee9fdfb714893788bbbf72836595
|
[
"MIT"
] | null | null | null |
Scratch/lstm.py
|
imadtoubal/MultimodalDeepfakeDetection
|
46539e16c988ee9fdfb714893788bbbf72836595
|
[
"MIT"
] | null | null | null |
import torch
from torch import nn
import torch.nn.functional as F
import torch.optim as optim
from preprocess import *
from torch.utils.data import Dataset, DataLoader
from blazeface import BlazeFace
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
import random
import pickle
DATA_FOLDER = '../input/deepfake-detection-challenge'
TRAIN_SAMPLE_FOLDER = 'train_sample_videos'
TEST_FOLDER = 'test_videos'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
NET = BlazeFace().to(device)
NET.load_weights("../input/blazeface.pth")
NET.load_anchors("../input/anchors.npy")
sequence = 24 # 1 sec of video
feature_size = 167 # length of spatial frequency
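# Hypothetical helpers (not in the original file): minimal sketches so that
# main() below has something to call. The data format and file name are
# assumptions: a pickled list of (sequence, label) pairs of spectra.
class SpectLSTM(nn.Module):
    def __init__(self, input_size=feature_size, hidden_size=128, num_classes=2):
        super(SpectLSTM, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)
    def forward(self, x):
        out, _ = self.lstm(x)          # (batch, seq, hidden)
        return self.fc(out[:, -1, :])  # classify from the last time step
def read_data(path='training_data.pkl'):
    with open(path, 'rb') as f:
        return pickle.load(f)
def train(training_data, epochs=1):
    model = SpectLSTM().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    criterion = nn.CrossEntropyLoss()
    for _ in range(epochs):
        for seq, label in training_data:
            x = torch.tensor(seq, dtype=torch.float32, device=device).unsqueeze(0)
            y = torch.tensor([label], device=device)
            optimizer.zero_grad()
            loss = criterion(model(x), y)
            loss.backward()
            optimizer.step()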
def main():
# prepare_data()
'''
stack = read_video(os.path.join(DATA_FOLDER, TRAIN_SAMPLE_FOLDER, 'aagfhgtpmv.mp4'))
print(stack.shape)
stack = stack.mean(axis=-1) / 255
spects = get_spects(stack)
# print(spects.shape)
print(spects[0])
plt.plot(spects[0])
plt.xlabel('Spatial Frequency')
plt.ylabel('Power Spectrum')
plt.show()
'''
training_data = read_data()
train(training_data)
if __name__ == '__main__':
main()
| 34.388715
| 116
| 0.591978
|
8d63217e5fdc8f7f711034a43dd2b7d398591281
| 18,373
|
py
|
Python
|
analysis/plot/python/plot_groups/estimator.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 1,178
|
2020-09-10T17:15:42.000Z
|
2022-03-31T14:59:35.000Z
|
analysis/plot/python/plot_groups/estimator.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 1
|
2020-05-22T05:22:35.000Z
|
2020-05-22T05:22:35.000Z
|
analysis/plot/python/plot_groups/estimator.py
|
leozz37/makani
|
c94d5c2b600b98002f932e80a313a06b9285cc1b
|
[
"Apache-2.0"
] | 107
|
2020-09-10T17:29:30.000Z
|
2022-03-18T09:00:14.000Z
|
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Plots relating to the estimator."""
from makani.analysis.plot.python import mplot
from makani.avionics.common import plc_messages
from makani.control import control_types
from makani.lib.python import c_helpers
from makani.lib.python.h5_utils import numpy_utils
from matplotlib.pyplot import plot
from matplotlib.pyplot import yticks
import numpy as np
from scipy import interpolate
MFig = mplot.PlotGroup.MFig # pylint: disable=invalid-name
_WING_GPS_RECEIVER_HELPER = c_helpers.EnumHelper(
'WingGpsReceiver', control_types)
_GROUND_STATION_MODE_HELPER = c_helpers.EnumHelper(
'GroundStationMode', plc_messages)
# TODO: Create separate 'simulator' plot group.
| 46.047619
| 80
| 0.56828
|
8d633804dd70bc9958af00b42a11e0de38e402fd
| 4,122
|
py
|
Python
|
scripts/old/modbus_server.py
|
SamKaiYang/ros_modbus_nex
|
b698cc73df65853866112f7501432a8509a2545c
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/old/modbus_server.py
|
SamKaiYang/ros_modbus_nex
|
b698cc73df65853866112f7501432a8509a2545c
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/old/modbus_server.py
|
SamKaiYang/ros_modbus_nex
|
b698cc73df65853866112f7501432a8509a2545c
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
###########################################################################
# This software is graciously provided by HumaRobotics
# under the Simplified BSD License on
# github: git@www.humarobotics.com:baxter_tasker
# HumaRobotics is a trademark of Generation Robots.
# www.humarobotics.com
# Copyright (c) 2013, Generation Robots.
# All rights reserved.
# www.generationrobots.com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
import rospy
from modbus.modbus_wrapper_server import ModbusWrapperServer
from std_msgs.msg import Int32MultiArray as HoldingRegister
if __name__=="__main__":
rospy.init_node("modbus_server")
port = 1234 # custom modbus port without requirement of sudo rights
# port = 502 # default modbus port
if rospy.has_param("~port"):
port = rospy.get_param("~port")
else:
rospy.loginfo("For not using the default port %d, add an arg e.g.: '_port:=1234'",port)
# Init modbus server with specific port
mws = ModbusWrapperServer(port)
# Stop the server if ros is shutdown. This should show that the server is stoppable
rospy.on_shutdown(mws.stopServer)
# Starts the server in a non blocking call
mws.startServer()
print "Server started"
###############
# Example 1
# write to the Discrete Input
mws.setDigitalInput(0,1) # args: address , value. sets address to value
# Example 2
# read from clients coil output
print "waiting for line 0 to be set to True"
result = mws.waitForCoilOutput(0,5) # args: address,timeout in sec. timeout of 0 is infinite. waits until address is true
if result:
print "got line 0 is True from baxter"
else:
print "timeout waiting for signal on line 0"
###############
# Example 3
# Listen for the writeable modbus registers in any node
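def callback(msg):
    # Hypothetical handler (the original definition is not shown): log
    # whatever the client wrote into the holding registers.
    rospy.loginfo("Modbus registers updated: %s", str(msg.data))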
sub = rospy.Subscriber("modbus_server/read_from_registers",HoldingRegister,callback,queue_size=500)
###############
###############
# Example 4
# Publisher to write first 20 modbus registers from any node
pub = rospy.Publisher("modbus_server/write_to_registers",HoldingRegister,queue_size=500)
rospy.sleep(1)
msg = HoldingRegister()
msg.data = range(20)
msg2 = HoldingRegister()
msg2.data = range(20,0,-1)
while not rospy.is_shutdown():
pub.publish(msg)
rospy.sleep(1)
pub.publish(msg2)
rospy.sleep(1)
################
rospy.spin()
mws.stopServer()
| 40.411765
| 125
| 0.694081
|
8d638991d71730377e930b6afff8fce13cde7b4a
| 4,453
|
py
|
Python
|
siptrackdlib/objectregistry.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | null | null | null |
siptrackdlib/objectregistry.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | 14
|
2016-03-18T13:28:16.000Z
|
2019-06-02T21:11:29.000Z
|
siptrackdlib/objectregistry.py
|
sii/siptrackd
|
f124f750c5c826156c31ae8699e90ff95a964a02
|
[
"Apache-2.0"
] | 7
|
2016-03-18T13:04:54.000Z
|
2021-06-22T10:39:04.000Z
|
from siptrackdlib import errors
from siptrackdlib import log
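class ObjectRegistry(object):
    # Hypothetical minimal sketch (the original class body is not shown):
    # map class identifiers to node classes so that objects can later be
    # instantiated by name. The registerClass method name is an assumption.
    def __init__(self):
        self.object_classes = {}
    def registerClass(self, object_class):
        self.object_classes[object_class.class_id] = object_class
        return object_class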
object_registry = ObjectRegistry()
| 37.108333
| 88
| 0.650348
|
8d66576529e5704ad9e6b2d90cc87687907b8c91
| 1,139
|
py
|
Python
|
src/kol/request/CombatRequest.py
|
ZJ/pykol
|
c0523a4a4d09bcdf16f8c86c78da96914e961076
|
[
"BSD-3-Clause"
] | 1
|
2016-05-08T13:26:56.000Z
|
2016-05-08T13:26:56.000Z
|
src/kol/request/CombatRequest.py
|
ZJ/pykol
|
c0523a4a4d09bcdf16f8c86c78da96914e961076
|
[
"BSD-3-Clause"
] | null | null | null |
src/kol/request/CombatRequest.py
|
ZJ/pykol
|
c0523a4a4d09bcdf16f8c86c78da96914e961076
|
[
"BSD-3-Clause"
] | null | null | null |
from GenericAdventuringRequest import GenericAdventuringRequest
| 32.542857
| 92
| 0.72432
|
8d683b8c02d8d22cc3724afc4a6f8b486b4fd023
| 325
|
py
|
Python
|
OLD.dir/myclient1.py
|
romchegue/Python
|
444476088e64d5da66cb00174f3d1d30ebbe38f6
|
[
"bzip2-1.0.6"
] | null | null | null |
OLD.dir/myclient1.py
|
romchegue/Python
|
444476088e64d5da66cb00174f3d1d30ebbe38f6
|
[
"bzip2-1.0.6"
] | null | null | null |
OLD.dir/myclient1.py
|
romchegue/Python
|
444476088e64d5da66cb00174f3d1d30ebbe38f6
|
[
"bzip2-1.0.6"
] | null | null | null |
'''
myclient1.py - imports mymod.py and checks its operation.
'''
from mymod import test, countChars, countChars1, countLines, countLines1
text = 'test.txt'
file = open(text)
print(test(text), test(file))
print(countChars(text), countChars1(file))
print(countLines(text), countLines1(file))
print('\nedited again version')
| 23.214286
| 72
| 0.744615
|
8d6a85cb3cf62644daa8bec049af6d5de6f147e2
| 632
|
py
|
Python
|
src/modules/dates/searchDates.py
|
leonardoleyva/api-agenda-uas
|
697740a0a3feebb2ada01133db020fcf5127e1de
|
[
"MIT"
] | 1
|
2022-03-13T02:28:29.000Z
|
2022-03-13T02:28:29.000Z
|
src/modules/dates/searchDates.py
|
leonardoleyva/api-agenda-uas
|
697740a0a3feebb2ada01133db020fcf5127e1de
|
[
"MIT"
] | null | null | null |
src/modules/dates/searchDates.py
|
leonardoleyva/api-agenda-uas
|
697740a0a3feebb2ada01133db020fcf5127e1de
|
[
"MIT"
] | null | null | null |
from .date import Date
from ..response import handleResponse
from datetime import datetime
| 27.478261
| 126
| 0.643987
|
8d6c38dd172b4fa935c4b931081b7a40d9bc40a8
| 6,045
|
py
|
Python
|
Spark/spark_media_localidad.py
|
Dielam/Dielam.github.io
|
19f01d693ef2c590f3ac35a3a143ae3dedf8594e
|
[
"MIT"
] | null | null | null |
Spark/spark_media_localidad.py
|
Dielam/Dielam.github.io
|
19f01d693ef2c590f3ac35a3a143ae3dedf8594e
|
[
"MIT"
] | null | null | null |
Spark/spark_media_localidad.py
|
Dielam/Dielam.github.io
|
19f01d693ef2c590f3ac35a3a143ae3dedf8594e
|
[
"MIT"
] | 1
|
2020-12-23T16:45:20.000Z
|
2020-12-23T16:45:20.000Z
|
#!/usr/bin/python
import sys
from pyspark import SparkContext
from shutil import rmtree
import os.path as path
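def generar(rows):
    # Hypothetical stand-in for the original helper (its body is not shown):
    # pass the selected columns through, substituting "0" for empty price
    # fields so that float() never fails downstream.
    return tuple(field if field else "0" for field in rows)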
if len(sys.argv) > 1:
if path.exists("output"):
rmtree("output")
sc = SparkContext()
localidad = sys.argv[1]
localidadRDD = sc.textFile("Gasolineras.csv")
localidadRDD = localidadRDD.map(lambda line: line.encode("ascii", "ignore"))
localidadRDD = localidadRDD.map(lambda rows: rows.split(","))
localidadRDD = localidadRDD.filter(lambda rows: localidad == rows[5])
localidadRDD = localidadRDD.map(lambda rows: (rows[5], rows[7], rows[8], rows[9],rows[10], rows[11], rows[12], rows[13], rows[14], rows[15], rows[16], rows[17], rows[18], rows[19], rows[20], rows[21], rows[22], rows[23], rows[24]))
datosRDD = localidadRDD.map(generar)
if datosRDD.isEmpty():
result = sc.parallelize("0")
result.saveAsTextFile("output")
else:
# The twelve per-fuel averages follow the identical pattern, so compute
# them in one loop over (column index, output name) pairs; the behaviour
# is unchanged.
tamRDD = datosRDD.count()
fuels = [
    (5, "gasolina_95"),
    (6, "gasoleo_a"),
    (7, "gasoleo_b"),
    (8, "bioetanol"),
    (9, "nuevo_gasoleo_a"),
    (10, "biodiesel"),
    (11, "ester_metilico"),
    (12, "bioalcohol"),
    (13, "gasolina_98"),
    (14, "gas_natural_comprimido"),
    (15, "gas_natural_licuado"),
    (16, "gas_licuados_del_petr"),
]
for idx, name in fuels:
    precioRDD = datosRDD.map(lambda rows, i=idx: ([rows[0], float(rows[i])]))
    precioRDD = precioRDD.reduceByKey(lambda x, y: x + y)
    mediaTotal = precioRDD.map(lambda rows: ([rows[1], int(tamRDD)]))
    mediaTotal = mediaTotal.map(lambda calc: (calc[0] / calc[1]))
    mediaTotal.saveAsTextFile("output/media_localidad_%s.txt" % name)
else:
print "Error no ha introducido localidad."
| 45.451128
| 235
| 0.643176
|
8d6cc5852312640c236532b7026c1ac08efbc30f
| 13,148
|
py
|
Python
|
core/views/misc.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 3
|
2018-02-27T13:48:28.000Z
|
2018-03-03T21:57:50.000Z
|
core/views/misc.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 6
|
2020-02-12T00:07:46.000Z
|
2022-03-11T23:25:59.000Z
|
core/views/misc.py
|
ditttu/gymkhana-Nominations
|
2a0e993c1b8362c456a9369b0b549d1c809a21df
|
[
"MIT"
] | 1
|
2019-03-26T20:19:57.000Z
|
2019-03-26T20:19:57.000Z
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse, reverse_lazy
from django.http import HttpResponseRedirect
from django.shortcuts import render,HttpResponse
from django.views.generic.edit import CreateView, UpdateView, DeleteView
import csv, json
from datetime import date,datetime
from itertools import chain
from operator import attrgetter
from forms.models import Questionnaire
from forms.views import replicate
from core.models import *
from core.forms import *
from .nomi_cr import get_access_and_post_for_result, get_access_and_post
'''
mark_as_interviewed, reject_nomination, accept_nomination: Changes the interview status/ nomination_instance status
of the applicant
'''
'''
append_user, replace_user: Adds and Removes the current post-holders according to their selection status
'''
## ------------------------------------------------------------------------------------------------------------------ ##
############################################ PROFILE VIEWS ##################################################
## ------------------------------------------------------------------------------------------------------------------ ##
def UserProfileUpdate(request,pk):
profile = UserProfile.objects.get(pk = pk)
if profile.user == request.user:
form = ProfileForm(request.POST or None, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('profile'))
return render(request, 'nomi/userprofile_form.html', context={'form': form})
else:
return render(request, 'no_access.html')
class CommentUpdate(UpdateView):
model = Commment
fields = ['comments']
class CommentDelete(DeleteView):
model = Commment
def all_nominations(request):
all_nomi = Nomination.objects.all().exclude(status='Nomination created')
return render(request, 'all_nominations.html', context={'all_nomi': all_nomi})
| 34.783069
| 179
| 0.6601
|
8d6deeb2db5e44e12af11dde00260d1e8aae607e
| 29,706
|
py
|
Python
|
make_paper_plots.py
|
mjbasso/asymptotic_formulae_examples
|
a1ba177426bf82e2a58e7b54e1874b088a86595f
|
[
"MIT"
] | 1
|
2021-08-06T14:58:51.000Z
|
2021-08-06T14:58:51.000Z
|
make_paper_plots.py
|
mjbasso/asymptotic_formulae_examples
|
a1ba177426bf82e2a58e7b54e1874b088a86595f
|
[
"MIT"
] | null | null | null |
make_paper_plots.py
|
mjbasso/asymptotic_formulae_examples
|
a1ba177426bf82e2a58e7b54e1874b088a86595f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import logging
import os
import pickle
import time
from os.path import join as pjoin
import matplotlib.pyplot as plt
import numpy as np
import scipy
from matplotlib import rc
from scipy.optimize import least_squares
import asymptotic_formulae
from asymptotic_formulae import GaussZ0
from asymptotic_formulae import GaussZ0_MC
from asymptotic_formulae import nCRZ0
from asymptotic_formulae import nCRZ0_MC
from asymptotic_formulae import nSRZ0
from asymptotic_formulae import nSRZ0_MC
rc('font', **{'family': 'sans-serif','sans-serif': ['Helvetica']})
rc('text', usetex = True)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s : %(name)s : %(levelname)s : %(message)s'))
logger.addHandler(sh)
# For creating a set of uniformly-spaced points on a log scale
# As described in Section 2.1.4
def nCRZ0_DiagTau(s, b, tau):
''' Calculate the asymptotic significance for a 1 SR + N CRs, diagonal tau measurement
s := expected signal yield in SR (float)
b := expected background yields in SR (vector of floats, size N)
tau := transfer coefficients, tau[i] carries background i yield in SR to CR i (vector of floats, size N)
Returns Z0 (float) '''
# Argument checking
b, tau = np.array(b), np.array(tau)
s, b, tau = float(s), b.astype(float), tau.astype(float)
assert b.ndim == 1 # b should be a vector
assert tau.ndim == 1 # tau should be a vector
assert len(b) == len(tau)
assert (tau >= 0.).all() # Assert tau contains transfer factors (i.e., all positive)
n = s + np.sum(b)
# System of equations
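def func(x):
    # Reconstructed stationarity conditions of the profiled log-likelihood,
    # chosen to match the Z0 expression below: for each background k,
    # n / sum(x) - 1 + tau[k] * b[k] / x[k] - tau[k] = 0
    return [n / np.sum(x) - 1. + tau[k] * b[k] / x[k] - tau[k] for k in range(len(b))]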
# Perform our minimization
res = least_squares(func, x0 = b, bounds = [tuple(len(b) * [0.]), tuple(len(b) * [np.inf])])
if not res.success:
raise RuntimeError('Minimization failed: status = %s, message = \'%s\'' % (res.status, res.message))
bhh = np.array(res.x)
# Calculate our significance
Z0 = np.sqrt(-2. * np.log((np.sum(bhh) / n) ** n * np.prod([(bhh[k] / b[k]) ** (tau[k] * b[k]) for k in range(len(b))])))
return Z0
# As described in Section 2.4.2
def GaussZ0_Decorr(s, b, sigma):
''' Calculate the asymptotic significance for a 1 SR measurement with N Gaussian-constrained (decorrelated) backgrounds
s := expected signal yield in SR (float)
b := expected background yields in SR (vector of floats, size N)
sigma := width of Gaussian constraint ("absolute uncertainty") for each background yield (vector of floats, size N)
Returns Z0 (float) '''
# Argument checking
b, sigma = np.array(b), np.array(sigma)
s, b, sigma = float(s), b.astype(float), sigma.astype(float)
assert b.ndim == 1 # b should be a vector
assert sigma.ndim == 1 # sigma should be a vector
assert len(b) == len(sigma)
assert (sigma >= 0.).all() # Assert sigma contains widths (i.e., all positive)
n = s + np.sum(b)
# System of equations
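def func(x):
    # Reconstructed stationarity conditions of the profiled log-likelihood
    # with Gaussian-constrained backgrounds, matching the Z0 expression below:
    # n / sum(x) - 1 + (b[k] - x[k]) / sigma[k]**2 = 0
    return [n / np.sum(x) - 1. + (b[k] - x[k]) / sigma[k] ** 2 for k in range(len(b))]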
# Perform our minimization
res = least_squares(func, x0 = b, bounds = [tuple(len(b) * [0.]), tuple(len(b) * [np.inf])])
if not res.success:
raise RuntimeError('Minimization failed: status = %s, message = \'%s\'' % (res.status, res.message))
bhh = np.array(res.x)
# Calculate our significance
Z0 = np.sqrt(-2. * (n * np.log(np.sum(bhh) / n) + n - np.sum(bhh + 0.5 * ((b - bhh) / sigma) ** 2)))
return Z0
if __name__ == '__main__':
main()
| 46.85489
| 272
| 0.523564
|
8d6e5ae82deb7b5311529c66cb9a669824faeec2
| 2,645
|
py
|
Python
|
tests/test_compliance.py
|
simongarisch/pytrade
|
6245c0a47017a880299fa7704a49580f394fa87b
|
[
"MIT"
] | 2
|
2020-10-19T02:44:57.000Z
|
2021-11-08T10:45:25.000Z
|
tests/test_compliance.py
|
simongarisch/pytrade
|
6245c0a47017a880299fa7704a49580f394fa87b
|
[
"MIT"
] | 1
|
2020-12-24T02:59:58.000Z
|
2020-12-24T02:59:58.000Z
|
tests/test_compliance.py
|
simongarisch/pytrade
|
6245c0a47017a880299fa7704a49580f394fa87b
|
[
"MIT"
] | null | null | null |
import pytest
from pxtrade.assets import reset, Stock, Portfolio
from pxtrade.compliance import (
Compliance,
UnitLimit,
WeightLimit,
)
| 32.654321
| 73
| 0.625331
|
8d7113f4a3fa2caf2cf878a899bd18ce82a24a1b
| 103
|
py
|
Python
|
article/serializers/__init__.py
|
mentix02/medialist-backend
|
397b1a382b12bab273360dadb0b3c32de43747cd
|
[
"MIT"
] | 1
|
2019-11-22T19:29:39.000Z
|
2019-11-22T19:29:39.000Z
|
article/serializers/__init__.py
|
mentix02/medialist-backend
|
397b1a382b12bab273360dadb0b3c32de43747cd
|
[
"MIT"
] | 1
|
2019-11-25T09:50:07.000Z
|
2021-07-15T07:05:28.000Z
|
article/serializers/__init__.py
|
mentix02/medialist-backend
|
397b1a382b12bab273360dadb0b3c32de43747cd
|
[
"MIT"
] | null | null | null |
from article.serializers.serializers import (
ArticleListSerializer,
ArticleDetailSerializer
)
| 20.6
| 45
| 0.805825
|
8d73a34deeb4110e24d2f659a64dcdc60d79219a
| 1,447
|
py
|
Python
|
delong_functions/initialization.py
|
braddelong/22-jupyter-ps01
|
95e8714e1723fb8328380a5d14aafabe2ee0795a
|
[
"MIT"
] | null | null | null |
delong_functions/initialization.py
|
braddelong/22-jupyter-ps01
|
95e8714e1723fb8328380a5d14aafabe2ee0795a
|
[
"MIT"
] | null | null | null |
delong_functions/initialization.py
|
braddelong/22-jupyter-ps01
|
95e8714e1723fb8328380a5d14aafabe2ee0795a
|
[
"MIT"
] | null | null | null |
# set up the environment by reading in libraries:
# os... graphics... data manipulation... time... math... statistics...
import sys
import os
from urllib.request import urlretrieve
import matplotlib as mpl
import matplotlib.pyplot as plt
import PIL as pil
from IPython.display import Image
import pandas as pd
from pandas import DataFrame, Series
import pandas_datareader
from datetime import datetime
import scipy as sp
import numpy as np
import math
import random
import seaborn as sns
import statsmodels
import statsmodels.api as sm
import statsmodels.formula.api as smf
# graphics setup: seaborn-darkgrid and figure size...
plt.style.use('seaborn-darkgrid')
figure_size = plt.rcParams["figure.figsize"]
figure_size[0] = 7
figure_size[1] = 7
plt.rcParams["figure.figsize"] = figure_size
# import delong functions
from delong_functions.data_functions import getdata_read_or_download # get or download data file
from delong_functions.stat_functions import initialize_basic_figure # initialize graphics
from delong_functions.data_functions import data_FREDseries # construct a useful dict with source
# and notes info from a previously
# downloaded FRED csv file
# check to see if functions successfully created...
# NOW COMMENTED OUT: getdata_read_or_download? initialize_basic_figure?
| 32.155556
| 106
| 0.724948
|
8d74d9562cd8858adb9b65b43c92263f531590a9
| 608
|
py
|
Python
|
sdk/bento/graph/value.py
|
bentobox-dev/bento-box
|
3e10c62f586c1251529e059b6af515d4d03c60e9
|
[
"MIT"
] | 1
|
2021-01-02T02:50:15.000Z
|
2021-01-02T02:50:15.000Z
|
sdk/bento/graph/value.py
|
joeltio/bento-box
|
3e10c62f586c1251529e059b6af515d4d03c60e9
|
[
"MIT"
] | 48
|
2020-10-21T07:42:30.000Z
|
2021-02-15T19:34:55.000Z
|
sdk/bento/graph/value.py
|
joeltio/bento-box
|
3e10c62f586c1251529e059b6af515d4d03c60e9
|
[
"MIT"
] | null | null | null |
#
# Bentobox
# SDK - Graph
# Graph Value
#
from typing import Any
from bento.value import wrap
from bento.protos.graph_pb2 import Node
def wrap_const(val: Any):
"""Wrap the given native value as a Constant graph node.
If val is a Constant node, returns value as is.
Args:
val: Native value to wrap.
Returns:
The given value wrapped as a constant graph node.
"""
# check if already constant node, return as is if true.
if isinstance(val, Node) and val.WhichOneof("op") == "const_op":
return val
return Node(const_op=Node.Const(held_value=wrap(val)))
| 25.333333
| 68
| 0.677632
|
8d76f8f9957c274ab98fcb861cac123b90567879
| 771
|
py
|
Python
|
app/validators/user_validator.py
|
allanzi/truck-challenge
|
7734a011de899184b673e99fd1c2ff92a6af65b9
|
[
"CECILL-B"
] | null | null | null |
app/validators/user_validator.py
|
allanzi/truck-challenge
|
7734a011de899184b673e99fd1c2ff92a6af65b9
|
[
"CECILL-B"
] | null | null | null |
app/validators/user_validator.py
|
allanzi/truck-challenge
|
7734a011de899184b673e99fd1c2ff92a6af65b9
|
[
"CECILL-B"
] | null | null | null |
from marshmallow import Schema, fields
from marshmallow.validate import Length, Range
| 42.833333
| 75
| 0.743191
|
8d783ab1b46b55a24509d554110a68bdbb340935
| 11,660
|
py
|
Python
|
montecarlo/mcpy/monte_carlo.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
montecarlo/mcpy/monte_carlo.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
montecarlo/mcpy/monte_carlo.py
|
v-asatha/EconML
|
eb9ac829e93abbc8a163ab09d905b40370b21b1a
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import sys
import numpy as np
from joblib import Parallel, delayed
import joblib
import argparse
import importlib
from itertools import product
import collections
from copy import deepcopy
from mcpy.utils import filesafe
from mcpy import plotting
def check_valid_config(config):
"""
Performs a basic check of the config file, checking if the necessary
subsections are present.
If multiple config files are being made that use the same dgps and/or methods,
it may be helpful to tailor the config check to those dgps and methods. That way,
one can check that the correct parameters are being provided for those dgps and methods.
This is specific to one's implementation, however.
"""
assert 'type' in config, "config dict must specify config type"
assert 'dgps' in config, "config dict must contain dgps"
assert 'dgp_opts' in config, "config dict must contain dgp_opts"
assert 'method_opts' in config, "config dict must contain method_opts"
assert 'mc_opts' in config, "config dict must contain mc_opts"
assert 'metrics' in config, "config dict must contain metrics"
assert 'methods' in config, "config dict must contain methods"
assert 'plots' in config, "config dict must contain plots"
assert 'single_summary_metrics' in config, "config dict must specify which metrics are plotted in a y-x plot vs. as a single value per dgp and method"
assert 'target_dir' in config, "config must contain target_dir"
assert 'reload_results' in config, "config must contain reload_results"
assert 'n_experiments' in config['mc_opts'], "config[mc_opts] must contain n_experiments"
assert 'seed' in config['mc_opts'], "config[mc_opts] must contain seed"
| 48.786611
| 154
| 0.660806
|
8d7ad5ef06de97e8b617443c00cdb60123831b97
| 5,845
|
py
|
Python
|
MusicGame.py
|
kfparri/MusicGame
|
f2914cae7a68585ca1a569c78ac13f68c1adb827
|
[
"MIT"
] | null | null | null |
MusicGame.py
|
kfparri/MusicGame
|
f2914cae7a68585ca1a569c78ac13f68c1adb827
|
[
"MIT"
] | null | null | null |
MusicGame.py
|
kfparri/MusicGame
|
f2914cae7a68585ca1a569c78ac13f68c1adb827
|
[
"MIT"
] | null | null | null |
#------------------------------------------------------------------------------------------------------
# File Name: MusicGame.py
# Author: Kyle Parrish
# Date: 7/4/2014
# Description: This is a simple program that I wrote for the raspberry pi for my daughter to
#    play with. It plays a different sound with every keystroke. It also
# displays a simple shape pattern on the screen with each keypress. The pi can also be setup to
# allow users to change the sounds by uploading them to a web form on the pi itself. This code
# will be included when it is finished.
# Change log:
# 4.30.15 - Updated the header to test out Visual Studio Code git integration
# 9.18.15 - Started making some changes to the application. Natalie is starting to enjoy
# the application so I'm starting to make it do more:
# - Updated the code to put circles as well as squares on the screen.
#------------------------------------------------------------------------------------------------------
# Basic imports for the game
import os,sys,datetime, sqlite3
import pygame
# I don't believe that I need the time references anymore, to be removed with next commit
#from time import strftime, localtime
from random import randint
from pygame.locals import *
# Setup basic constants
test = 640
# Screen height and width
SCREEN_WIDTH = test
SCREEN_HEIGHT = 480
#CENTER_POINT = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 2)
#LOWER_CENTER = (SCREEN_WIDTH / 2, SCREEN_HEIGHT / 4)
#CENTER_RECT_HEIGHT = 40
#CLOCK_TEXT_FONT = 48
# Colors, any of these can be used in the program
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
MATRIX_GREEN = (0, 255, 21)
# Code taken from: http://code.activestate.com/recipes/521884-play-sound-files-with-pygame-in-a-cross-platform-m/
# global constants
FREQ = 44100 # same as audio CD
BITSIZE = -16 # unsigned 16 bit
CHANNELS = 2 # 1 == mono, 2 == stereo
BUFFER = 1024 # audio buffer size in no. of samples
FRAMERATE = 30 # how often to check if playback has finished
sounds = ["Typewrit-Intermed-538_hifi.ogg",
"Typewrit-Bell-Patrick-8344_hifi.ogg",
"Arcade_S-wwwbeat-8528_hifi.ogg",
"Arcade_S-wwwbeat-8529_hifi.ogg",
"Arcade_S-wwwbeat-8530_hifi.ogg",
"Arcade_S-wwwbeat-8531_hifi.ogg",
"PowerUp-Mark_E_B-8070_hifi.ogg",
"PulseGun-Mark_E_B-7843_hifi.ogg",
"PulseSho-Mark_E_B-8071_hifi.ogg",
"SineySpa-Mark_E_B-7844_hifi.ogg",
"ToySpace-Mark_E_B-7846_hifi.ogg",
"ZipUp-Mark_E_B-8079_hifi.ogg"]
soundFiles = []
def playsound(soundfile):
"""Play sound through default mixer channel in blocking manner.
This will load the whole sound into memory before playback
"""
soundfile.play()
#sound = pygame.mixer.Sound(soundfile)
#clock = pygame.time.Clock()
#sound.play()
#while pygame.mixer.get_busy():
#clock.tick(FRAMERATE)
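def main():
    # Hypothetical main loop (the original definition is not shown): open a
    # window, load the sound list (a 'sounds' directory is assumed), then
    # play a random sound and draw a random shape on every key press.
    pygame.mixer.pre_init(FREQ, BITSIZE, CHANNELS, BUFFER)
    pygame.init()
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))
    for name in sounds:
        soundFiles.append(pygame.mixer.Sound(os.path.join("sounds", name)))
    clock = pygame.time.Clock()
    while True:
        for event in pygame.event.get():
            if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                playsound(soundFiles[randint(0, len(soundFiles) - 1)])
                color = (randint(0, 255), randint(0, 255), randint(0, 255))
                pos = (randint(20, SCREEN_WIDTH - 20), randint(20, SCREEN_HEIGHT - 20))
                if randint(0, 1):
                    pygame.draw.rect(screen, color, Rect(pos[0] - 20, pos[1] - 20, 40, 40))
                else:
                    pygame.draw.circle(screen, color, pos, 20)
                pygame.display.flip()
        clock.tick(FRAMERATE)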
if __name__ == '__main__': main()
| 34.791667
| 134
| 0.609239
|
8d7d0cccfbda47460eb1aeba6470425e3ed12174
| 243
|
py
|
Python
|
tests/utils/img_processing_utils_test.py
|
venkatakolagotla/robin
|
4497bf8ffcd03182f68f9a6d7c806bfdaa4791cb
|
[
"MIT"
] | 4
|
2019-12-20T05:37:51.000Z
|
2020-03-18T16:32:59.000Z
|
tests/utils/img_processing_utils_test.py
|
venkatakolagotla/robin
|
4497bf8ffcd03182f68f9a6d7c806bfdaa4791cb
|
[
"MIT"
] | null | null | null |
tests/utils/img_processing_utils_test.py
|
venkatakolagotla/robin
|
4497bf8ffcd03182f68f9a6d7c806bfdaa4791cb
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from robin.utils import img_processing_utils
import numpy as np
| 24.3
| 61
| 0.814815
|
8d7e6b625734d32d6eb3ec106401a004caa7962c
| 5,763
|
py
|
Python
|
DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py
|
ghost9023/DeepLearningPythonStudy
|
4d319c8729472cc5f490935854441a2d4b4e8818
|
[
"MIT"
] | 1
|
2019-06-27T04:05:59.000Z
|
2019-06-27T04:05:59.000Z
|
DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py
|
ghost9023/DeepLearningPythonStudy
|
4d319c8729472cc5f490935854441a2d4b4e8818
|
[
"MIT"
] | null | null | null |
DeepLearning/DeepLearning/07_Deep_LeeYS/Week_1/4. Single-Layer NN/4) Neural Network.py
|
ghost9023/DeepLearningPythonStudy
|
4d319c8729472cc5f490935854441a2d4b4e8818
|
[
"MIT"
] | null | null | null |
#4)
##########KEYWORD###############
################################
# A neural network propagates signals from the input layer through the
# hidden layers to the output layer; the networks here are feed-forward
# networks, where signals move in one direction only.
# The running example is a 3-layer network: 2 input nodes, a first hidden
# layer of 3 nodes, a second hidden layer of 2 nodes, and 2 output nodes.
# The number of output nodes matches the number of classes; classifying
# digits 0 to 9, for instance, would use 10 output nodes.
#
# Layer          Nodes    Weight shape    Bias shape
# input -> (1)   2 -> 3   2 x 3 matrix    3-vector    A(1) = X  W(1) + B(1)
# (1) -> (2)     3 -> 2   3 x 2 matrix    2-vector    A(2) = Z(1) W(2) + B(2)
# (2) -> output  2 -> 2   2 x 2 matrix    2-vector    Y    = Z(2) W(3) + B(3)
#
# Notation: w12^(1) is the weight from input node 2 to node 1 of layer (1),
# and a1^(1) is the first weighted sum in layer (1):
#   a1^(1) = w11^(1) x1 + w12^(1) x2 + b1^(1)
# With numpy, a whole layer is computed as one matrix product, which is much
# more compact than writing out each weighted sum element by element.
#
# The output-layer activation depends on the task: an identity function
# f(x) = x is used for regression, and softmax for classification.
# Softmax squashes the outputs into values between 0 and 1 that sum to 1,
# so they can be read as probabilities. For example, y[0] = 0.018,
# y[1] = 0.245, y[2] = 0.737 means a 1.8% chance of class 0, a 24.5% chance
# of class 1 and a 73.7% chance of class 2.
#
# Because softmax exponentiates its inputs, a naive implementation overflows
# easily: exp(100) is already on the order of 10^43. Subtracting a constant
# C from every input does not change the result (using x = a^log_a(x), the
# factor exp(-C) appears in both the numerator and the denominator), so in
# practice C = max(a) is subtracted first.
import numpy as np
a = np.array([1010,1000,990])
np.exp(a) / np.sum(np.exp(a))  # overflow: evaluates to an array of nan
# softmax with the overflow countermeasure
c = np.max(a)
np.exp(a - c) / np.sum(np.exp(a - c))  # array([9.99954600e-01, 4.53978686e-05, 2.06106005e-09])
# Because exp() is monotonically increasing, softmax never changes the
# ranking of the outputs: the largest input is still the largest output.
# For inference it is therefore common to skip the softmax step and simply
# take the index of the largest raw output; softmax matters mainly when
# training the network.
# Putting it together: a forward pass through the 3-layer network, using
# sigmoid activations in the hidden layers and the identity function at
# the output layer.
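def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def identity_function(x):
    return x
def init_network():
    # Reconstructed from the 2-3-2-2 shapes described above (the original
    # definition is not shown); the weight values are illustrative numbers.
    network = {}
    network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
    network['b1'] = np.array([0.1, 0.2, 0.3])
    network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
    network['b2'] = np.array([0.1, 0.2])
    network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])
    network['b3'] = np.array([0.1, 0.2])
    return network
def forward(network, x):
    W1, W2, W3 = network['W1'], network['W2'], network['W3']
    b1, b2, b3 = network['b1'], network['b2'], network['b3']
    z1 = sigmoid(np.dot(x, W1) + b1)
    z2 = sigmoid(np.dot(z1, W2) + b2)
    y = identity_function(np.dot(z2, W3) + b3)
    return y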
network = init_network() #
x = np.array([1.0,0.5])
y = forward(network ,x)
print(y)
# With the example weights above, this prints [0.31682708 0.69627909].
| 36.707006
| 120
| 0.640986
|
8d7eb5aaefc17250eb9787e23ab1f5200d2d65f8
| 466
|
py
|
Python
|
label_gen.py
|
avasid/gaze_detection
|
dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5
|
[
"MIT"
] | 1
|
2020-02-07T21:34:10.000Z
|
2020-02-07T21:34:10.000Z
|
label_gen.py
|
avasid/gaze_detection
|
dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5
|
[
"MIT"
] | 8
|
2020-11-13T18:37:12.000Z
|
2022-03-12T00:14:04.000Z
|
label_gen.py
|
avasid/gaze_detection
|
dbb76a2b3f3eedff5801b53bc95b3a95bc715bc5
|
[
"MIT"
] | null | null | null |
import os
import pandas as pd
dictt = {}
i = 0
for label in ['down', 'up', 'left', 'right']:
img_lst = os.listdir("./data/img_data/" + label + "/")
temp_label = [0] * 4
temp_label[i] = 1
for img in img_lst:
print(label + " " + img)
dictt[img] = temp_label
i += 1
label_df = pd.DataFrame(data=dictt, index=['down', 'up', 'left', 'right']).transpose()
label_df = label_df.sample(frac=1)
label_df.to_csv("./data/label_data.csv")
| 23.3
| 86
| 0.592275
|
8d7fb31d8d0c397a081d7685e96fa1bf8414f9a6
| 2,398
|
py
|
Python
|
rubik_race/rubiks_race/solver_test.py
|
ZengLawrence/rubiks_race
|
3d78484f0a68c7e483953cea68130f1edde2739a
|
[
"MIT"
] | null | null | null |
rubik_race/rubiks_race/solver_test.py
|
ZengLawrence/rubiks_race
|
3d78484f0a68c7e483953cea68130f1edde2739a
|
[
"MIT"
] | null | null | null |
rubik_race/rubiks_race/solver_test.py
|
ZengLawrence/rubiks_race
|
3d78484f0a68c7e483953cea68130f1edde2739a
|
[
"MIT"
] | null | null | null |
'''
Created on Jun 27, 2017
@author: lawrencezeng
'''
import unittest
from rubiks_race import solver
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.test_solve']
unittest.main()
| 31.142857
| 96
| 0.27648
|
8d80488b5bce65f6332a7212b2c16986023812ef
| 1,625
|
py
|
Python
|
wagtail_translation/migrations/0001_initial.py
|
patroqueeet/wagtail2-translation
|
6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74
|
[
"MIT"
] | null | null | null |
wagtail_translation/migrations/0001_initial.py
|
patroqueeet/wagtail2-translation
|
6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74
|
[
"MIT"
] | null | null | null |
wagtail_translation/migrations/0001_initial.py
|
patroqueeet/wagtail2-translation
|
6a7ad4eea5d900c8640f965ebf7a442dd7bc7e74
|
[
"MIT"
] | 1
|
2021-01-08T19:25:46.000Z
|
2021-01-08T19:25:46.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from modeltranslation import settings as mt_settings
from modeltranslation.utils import build_localized_fieldname, get_translation_fields
from django.db import migrations, models
| 37.790698
| 84
| 0.715692
|
8d8293dd05c195d7acdf3af64d74eb27c71ed3fc
| 99,195
|
py
|
Python
|
WORC/WORC.py
|
MStarmans91/WORC
|
b6b8fc2ccb7d443a69b5ca20b1d6efb65b3f0fc7
|
[
"ECL-2.0",
"Apache-2.0"
] | 47
|
2018-01-28T14:08:15.000Z
|
2022-03-24T16:10:07.000Z
|
WORC/WORC.py
|
JZK00/WORC
|
14e8099835eccb35d49b52b97c0be64ecca3809c
|
[
"ECL-2.0",
"Apache-2.0"
] | 13
|
2018-08-28T13:32:57.000Z
|
2020-10-26T16:35:59.000Z
|
WORC/WORC.py
|
JZK00/WORC
|
14e8099835eccb35d49b52b97c0be64ecca3809c
|
[
"ECL-2.0",
"Apache-2.0"
] | 16
|
2017-11-13T10:53:36.000Z
|
2022-03-18T17:02:04.000Z
|
#!/usr/bin/env python
# Copyright 2016-2021 Biomedical Imaging Group Rotterdam, Departments of
# Medical Informatics and Radiology, Erasmus MC, Rotterdam, The Netherlands
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
import fastr
import graphviz
import configparser
from pathlib import Path
from random import randint
import WORC.IOparser.file_io as io
from fastr.api import ResourceLimit
from WORC.tools.Slicer import Slicer
from WORC.tools.Elastix import Elastix
from WORC.tools.Evaluate import Evaluate
import WORC.addexceptions as WORCexceptions
import WORC.IOparser.config_WORC as config_io
from WORC.detectors.detectors import DebugDetector
from WORC.export.hyper_params_exporter import export_hyper_params_to_latex
from urllib.parse import urlparse
from urllib.request import url2pathname
| 53.822572
| 228
| 0.559897
|
8d832e77f438b0dd65c0dff2da0ca039538bc5cd
| 2,019
|
py
|
Python
|
utils/tweets_to_txt.py
|
magsol/datascibun
|
bb118eac59dc238c42f659871e25619d994f8575
|
[
"Apache-2.0"
] | null | null | null |
utils/tweets_to_txt.py
|
magsol/datascibun
|
bb118eac59dc238c42f659871e25619d994f8575
|
[
"Apache-2.0"
] | null | null | null |
utils/tweets_to_txt.py
|
magsol/datascibun
|
bb118eac59dc238c42f659871e25619d994f8575
|
[
"Apache-2.0"
] | 1
|
2022-03-01T01:45:47.000Z
|
2022-03-01T01:45:47.000Z
|
import argparse
import json
import re
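# Hypothetical helpers (the original definitions are not shown): simple
# regex-based cleaners matching the numbered steps referenced in the loop below.
def remove_rt(tweet):
    return re.sub(r'^RT\s+', '', tweet)
def remove_urls(tweet):
    return re.sub(r'https?://\S+', '', tweet)
def remove_mentions(tweet):
    return re.sub(r'@\w+', '', tweet)
def remove_hashtags(tweet):
    return re.sub(r'#\w+', '', tweet)
def remove_emojis(tweet):
    # Crude approximation: drop characters outside the Basic Multilingual Plane.
    return ''.join(ch for ch in tweet if ord(ch) <= 0xFFFF)
def fix_amp(tweet):
    return tweet.replace('&amp;', '&')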
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'JSON tweet converter',
epilog = 'lol tw33tz', add_help = 'How to use',
prog = 'python json_to_txt.py <options>')
# Required arguments.
parser.add_argument("-i", "--input", required = True,
help = "JSON file to convert.")
# Optional arguments.
parser.add_argument("-o", "--output", default = "output.txt",
help = "Output file containing tweet content, one per line. [DEFAULT: output.txt]")
# Parse out the arguments.
args = vars(parser.parse_args())
content = json.load(open(args['input'], "r"))
fp = open(args['output'], "w")
item = 0
for obj in content:
tweet = obj['tweet']['full_text']
# STEP 1: Strip out RT.
tweet = remove_rt(tweet)
# STEP 2: Remove URLs, mentions, hashtags, emojis.
tweet = remove_urls(tweet)
tweet = remove_mentions(tweet)
tweet = remove_hashtags(tweet)
tweet = remove_emojis(tweet)
# STEP 3: Other random fixes.
tweet = tweet.strip()
tweet = fix_amp(tweet)
if len(tweet) == 0 or len(tweet) == 1: continue
tweet = tweet.replace("\"\"", "")
if tweet[0] == ":":
tweet = tweet[1:]
tweet = tweet.replace("\n", " ")
tweet = tweet.strip()
# Write out!
fp.write(f"{tweet}\n")
item += 1
if item % 1000 == 0:
print(f"{item} of {len(content)} done.")
fp.close()
print(f"{item} tweets processed!")
| 27.657534
| 91
| 0.574542
|
8d85dffad6d22403418ce3ef5e06280cc317b3e4
| 528
|
py
|
Python
|
truechat/chat/migrations/0007_auto_20191026_2020.py
|
TrueChat/Backend
|
7d2bc73d5b7f157d7499a65af4157e1ddeb7a0ac
|
[
"MIT"
] | 1
|
2019-12-19T19:04:33.000Z
|
2019-12-19T19:04:33.000Z
|
truechat/chat/migrations/0007_auto_20191026_2020.py
|
TrueChat/Backend
|
7d2bc73d5b7f157d7499a65af4157e1ddeb7a0ac
|
[
"MIT"
] | 6
|
2020-06-05T23:42:41.000Z
|
2022-02-10T13:32:59.000Z
|
truechat/chat/migrations/0007_auto_20191026_2020.py
|
TrueChat/Backend
|
7d2bc73d5b7f157d7499a65af4157e1ddeb7a0ac
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.5 on 2019-10-26 20:20
from django.db import migrations, models
| 22.956522
| 81
| 0.587121
|
8d86a97241dd9580e12d59014523e0d42f09b38e
| 354
|
py
|
Python
|
libs/baseclass/about_screen.py
|
wildscsi/ecopos
|
9922bb5160227777401eb33fa9a01cfba5730781
|
[
"MIT"
] | null | null | null |
libs/baseclass/about_screen.py
|
wildscsi/ecopos
|
9922bb5160227777401eb33fa9a01cfba5730781
|
[
"MIT"
] | 1
|
2021-11-04T20:43:03.000Z
|
2021-11-04T20:43:03.000Z
|
libs/baseclass/about_screen.py
|
wildscsi/ecopos
|
9922bb5160227777401eb33fa9a01cfba5730781
|
[
"MIT"
] | 1
|
2021-11-04T19:43:53.000Z
|
2021-11-04T19:43:53.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 CPV.BY
#
# For suggestions and questions:
# <7664330@gmail.com>
#
# LICENSE: Commercial
import webbrowser
from kivymd.theming import ThemableBehavior
from kivymd.uix.screen import MDScreen
| 19.666667
| 46
| 0.728814
|
8d86c9a6526d8d524710fa780972b087a3f46ac3
| 7,715
|
py
|
Python
|
causal_rl/environments/multi_typed.py
|
vluzko/causal_rl
|
92ee221bdf1932fa83955441baabb5e28b78ab9d
|
[
"MIT"
] | 2
|
2021-04-02T12:06:13.000Z
|
2022-02-09T06:57:26.000Z
|
causal_rl/environments/multi_typed.py
|
vluzko/causal_rl
|
92ee221bdf1932fa83955441baabb5e28b78ab9d
|
[
"MIT"
] | 11
|
2020-12-28T14:51:31.000Z
|
2021-03-29T19:53:24.000Z
|
causal_rl/environments/multi_typed.py
|
vluzko/causal_rl
|
92ee221bdf1932fa83955441baabb5e28b78ab9d
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from gym import Env
from scipy.spatial import distance
from typing import Optional, Tuple, Any
from causal_rl.environments import CausalEnv
| 36.738095
| 106
| 0.568762
|
8d86e19a0f7bf48d0eb61da351363ace81caa8fc
| 353
|
py
|
Python
|
greetings.py
|
ucsd-cse-spis-2016/spis16-lecture-0815
|
24e0a8ea9726f969eb357db33eb2925aabd25e43
|
[
"MIT"
] | null | null | null |
greetings.py
|
ucsd-cse-spis-2016/spis16-lecture-0815
|
24e0a8ea9726f969eb357db33eb2925aabd25e43
|
[
"MIT"
] | null | null | null |
greetings.py
|
ucsd-cse-spis-2016/spis16-lecture-0815
|
24e0a8ea9726f969eb357db33eb2925aabd25e43
|
[
"MIT"
] | null | null | null |
from flask import Flask
app = Flask(__name__)
if __name__ == "__main__":
app.run(port=5000)
| 16.045455
| 35
| 0.628895
|
8d88e96d4a71ca08ce8d66eee14e65dd7c02396c
| 3,189
|
py
|
Python
|
bin/makeReport.py
|
oxfordmmm/SARS-CoV2_workflows
|
a84cb0a7142684414b2f285dd27cc2ea287eecb9
|
[
"MIT"
] | null | null | null |
bin/makeReport.py
|
oxfordmmm/SARS-CoV2_workflows
|
a84cb0a7142684414b2f285dd27cc2ea287eecb9
|
[
"MIT"
] | null | null | null |
bin/makeReport.py
|
oxfordmmm/SARS-CoV2_workflows
|
a84cb0a7142684414b2f285dd27cc2ea287eecb9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pandas as pd
import sys
import json
from Bio import SeqIO
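def completeness(nextclade_json):
    # Hypothetical helper (the original definition is not shown): estimate
    # genome completeness from the nextclade output, assuming the standard
    # 'results' list with a 'totalMissing' count and the 29,903 bp
    # SARS-CoV-2 reference length.
    try:
        result = nextclade_json['results'][0]
        return 1.0 - (result['totalMissing'] / 29903.0)
    except (KeyError, IndexError, TypeError):
        return None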
sample_name=sys.argv[1]
pango=pd.read_csv('pango.csv')
nextclade=pd.read_csv('nextclade.tsv', sep='\t')
aln2type=pd.read_csv('aln2type.csv')
pango['sampleName']=sample_name
nextclade['sampleName']=sample_name
aln2type['sampleName']=sample_name
df=pango.merge(nextclade, on='sampleName', how='left', suffixes=("_pango","_nextclade"))
df=df.merge(aln2type, on='sampleName', how='left', suffixes=(None,"_aln2type"))
# versions
wf=open('workflow_commit.txt').read()
df['workflowCommit']=str(wf).strip()
df['manifestVersion']=sys.argv[2]
nextclade_version=open('nextclade_files/version.txt').read()
df['nextcladeVersion']=str(nextclade_version).strip()
aln2type_variant_commit=open('variant_definitions/aln2type_variant_git_commit.txt').read()
aln2type_variant_version=open('variant_definitions/aln2type_variant_version.txt').read()
aln2type_source_commit=open('variant_definitions/aln2type_commit.txt').read()
df['aln2typeVariantCommit']=str(aln2type_variant_commit).strip()
df['aln2typeVariantVersion']=str(aln2type_variant_version).strip()
df['aln2typeSourceVommit']=str(aln2type_source_commit).strip()
df.to_csv('{0}_report.tsv'.format(sys.argv[1]), sep='\t', index=False)
### convert to json
pango['program']='pango'
pango.set_index('program',inplace=True)
p=pango.to_dict(orient='index')
nextclade['program']='nextclade'
nextclade['nextcladeVersion']=str(nextclade_version).strip()
nextclade.set_index('program',inplace=True)
n=nextclade.to_dict(orient='index')
with open('nextclade.json','rt', encoding= 'utf-8') as inf:
nj=json.load(inf)
n['nextcladeOutputJson']=nj
aln2type['program']='aln2type'
aln2type['label']=aln2type['phe-label']
aln2type['aln2typeVariantCommit']=str(aln2type_variant_commit).strip()
aln2type['aln2typeSourceCommit']=str(aln2type_source_commit).strip()
aln2type.set_index(['program','phe-label'],inplace=True)
a={level: aln2type.xs(level).to_dict('index') for level in aln2type.index.levels[0]}
w={'WorkflowInformation':{}}
w['WorkflowInformation']['workflowCommit']=str(wf).strip()
w['WorkflowInformation']['manifestVersion']=sys.argv[2]
w['WorkflowInformation']['sampleIdentifier']=sample_name
# add fasta to json
record = SeqIO.read('ref.fasta', "fasta")
w['WorkflowInformation']['referenceIdentifier']=record.id
#f={'FastaRecord':{'SeqId':record.id,
# 'SeqDescription': record.description,
# 'Sequence':str(record.seq),
# 'sampleName':sample_name}}
s={'summary':{}}
s['summary']['completeness']=completeness(n['nextcladeOutputJson'])
d={sample_name:{}}
d[sample_name].update(p)
d[sample_name].update(n)
d[sample_name].update(a)
d[sample_name].update(w)
#d[sample_name].update(f)
d[sample_name].update(s)
with open('{0}_report.json'.format(sample_name), 'w', encoding='utf-8') as f:
json.dump(d, f, indent=4, sort_keys=True, ensure_ascii=False)
| 35.831461
| 90
| 0.756977
|
8d8a5d72d65e690dc4c82341ed975187662e4c48
| 1,484
|
py
|
Python
|
webhooks/statuscake/alerta_statuscake.py
|
frekel/alerta-contrib
|
d8f5c93a4ea735085b3689c2c852ecae94924d08
|
[
"MIT"
] | 114
|
2015-02-05T00:22:16.000Z
|
2021-11-25T13:02:44.000Z
|
webhooks/statuscake/alerta_statuscake.py
|
NeilOrley/alerta-contrib
|
69d271ef9fe6542727ec4aa39fc8e0f797f1e8b1
|
[
"MIT"
] | 245
|
2016-01-09T22:29:09.000Z
|
2022-03-16T10:37:02.000Z
|
webhooks/statuscake/alerta_statuscake.py
|
NeilOrley/alerta-contrib
|
69d271ef9fe6542727ec4aa39fc8e0f797f1e8b1
|
[
"MIT"
] | 193
|
2015-01-30T21:22:49.000Z
|
2022-03-28T05:37:14.000Z
|
from alerta.models.alert import Alert
from alerta.webhooks import WebhookBase
from alerta.exceptions import RejectException
import os
import hashlib
| 35.333333
| 85
| 0.624663
|
8d8b51eaca246cacfde939fcbc4a16b39dba720e
| 3,738
|
py
|
Python
|
ironic_discoverd/main.py
|
enovance/ironic-discoverd
|
d3df6178ca5c95943c93ff80723c86b7080bca0b
|
[
"Apache-2.0"
] | null | null | null |
ironic_discoverd/main.py
|
enovance/ironic-discoverd
|
d3df6178ca5c95943c93ff80723c86b7080bca0b
|
[
"Apache-2.0"
] | null | null | null |
ironic_discoverd/main.py
|
enovance/ironic-discoverd
|
d3df6178ca5c95943c93ff80723c86b7080bca0b
|
[
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
eventlet.monkey_patch(thread=False)
import json
import logging
import sys
from flask import Flask, request # noqa
from keystoneclient import exceptions
from ironic_discoverd import conf
from ironic_discoverd import discoverd
from ironic_discoverd import firewall
from ironic_discoverd import node_cache
from ironic_discoverd import utils
app = Flask(__name__)
LOG = discoverd.LOG
def periodic_update(period):
while True:
LOG.debug('Running periodic update of filters')
try:
firewall.update_filters()
except Exception:
LOG.exception('Periodic update failed')
eventlet.greenthread.sleep(period)
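# A sketch of how the updater is typically started (the real wiring lives in
# the omitted part of this module; the default period here is an assumption):
def spawn_periodic_update(period=15):
    return eventlet.greenthread.spawn_n(periodic_update, period)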
| 31.411765
| 75
| 0.688604
|
8d8c7b2102958e3a921b5b5a1f32ed6750cd5ff4
| 964
|
py
|
Python
|
config_translator.py
|
Charahiro-tan/Jurubot_Translator
|
d0d0db137f3ddfe06d7cd9457d22c418bdeff94c
|
[
"MIT"
] | 1
|
2021-07-26T11:14:05.000Z
|
2021-07-26T11:14:05.000Z
|
config_translator.py
|
Charahiro-tan/Jurubot_Translator
|
d0d0db137f3ddfe06d7cd9457d22c418bdeff94c
|
[
"MIT"
] | null | null | null |
config_translator.py
|
Charahiro-tan/Jurubot_Translator
|
d0d0db137f3ddfe06d7cd9457d22c418bdeff94c
|
[
"MIT"
] | null | null | null |
##################################################
# Settings for the translator bot
##################################################
# List values go inside [] as "quoted" strings separated by commas.
# Users whose messages should not be translated:
ignore_user = ["Nightbot","Streamelements","Moobot"]
# Words stripped from messages before translation
# (URLs are also removed):
del_word = ["88+","+"]
# Languages that should not be translated; codes are listed at
# https://cloud.google.com/translate/docs/languages
ignore_lang = ["",""]
# Your own language:
home_lang = "ja"
# Target language used when a message is already in home_lang:
default_to_lang = "en"
# Domain suffix of the translate.google URL:
url_suffix = "co.jp"
# Whether to show the sender's name (True/False):
sender = True
# Used when the above is True:
# "displayname" shows the display name,
# "loginid" shows the login ID.
sender_name = "displayname"
# Whether to show the language codes (e.g. en, ja) (True/False):
language = True
# Whether to use the Google Apps Script API (True/False).
# See the Readme for how to set up Google Apps Script.
gas = False
# URL of the Google Apps Script endpoint:
gas_url = ""
| 22.418605
| 61
| 0.692946
|
8d8cd77924dc533eeabb54595050045f0fb725d3
| 1,489
|
py
|
Python
|
wxcloudrun/dao.py
|
lubupang/resume_flask1
|
1ea18e88c0b667e92710096f57973a77d19e8fc6
|
[
"MIT"
] | null | null | null |
wxcloudrun/dao.py
|
lubupang/resume_flask1
|
1ea18e88c0b667e92710096f57973a77d19e8fc6
|
[
"MIT"
] | null | null | null |
wxcloudrun/dao.py
|
lubupang/resume_flask1
|
1ea18e88c0b667e92710096f57973a77d19e8fc6
|
[
"MIT"
] | null | null | null |
import logging
from sqlalchemy.exc import OperationalError
from wxcloudrun import db
from wxcloudrun.model import Counters
# Initialize logger
logger = logging.getLogger('log')
logger.info("aaaaaaa")
def query_counterbyid(id):
"""
IDCounter
:param id: CounterID
:return: Counter
"""
logger.info("bbbbbbbbb")
try:
return Counters.query.filter(Counters.id == id).first()
except OperationalError as e:
logger.info("query_counterbyid errorMsg= {} ".format(e))
return None
def delete_counterbyid(id):
"""
IDCounter
:param id: CounterID
"""
try:
counter = Counters.query.get(id)
if counter is None:
return
db.session.delete(counter)
db.session.commit()
except OperationalError as e:
logger.info("delete_counterbyid errorMsg= {} ".format(e))
def insert_counter(counter):
"""
Counter
:param counter: Counters
"""
try:
db.session.add(counter)
db.session.commit()
except OperationalError as e:
logger.info("insert_counter errorMsg= {} ".format(e))
def update_counterbyid(counter):
"""
IDcounter
:param counter
"""
try:
counter = query_counterbyid(counter.id)
if counter is None:
return
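        # NOTE: as written, no fields on the fetched row are changed before the
        # flush/commit below, so nothing is actually persisted; the new values
        # would need to be copied onto the fetched instance here, e.g.
        # counter.count = ... (assuming a `count` column).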
db.session.flush()
db.session.commit()
except OperationalError as e:
logger.info("update_counterbyid errorMsg= {} ".format(e))
| 22.560606
| 65
| 0.633983
|
8d8db8eca4cacfeb8ce07aa8011f8a4b558400b4
| 7,411
|
py
|
Python
|
src/bpp/tests/tests_legacy/test_views/test_raporty.py
|
iplweb/django-bpp
|
85f183a99d8d5027ae4772efac1e4a9f21675849
|
[
"BSD-3-Clause"
] | 1
|
2017-04-27T19:50:02.000Z
|
2017-04-27T19:50:02.000Z
|
src/bpp/tests/tests_legacy/test_views/test_raporty.py
|
mpasternak/django-bpp
|
434338821d5ad1aaee598f6327151aba0af66f5e
|
[
"BSD-3-Clause"
] | 41
|
2019-11-07T00:07:02.000Z
|
2022-02-27T22:09:39.000Z
|
src/bpp/tests/tests_legacy/test_views/test_raporty.py
|
iplweb/bpp
|
f027415cc3faf1ca79082bf7bacd4be35b1a6fdf
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
import os
import sys
import uuid
import pytest
from django.apps import apps
from django.contrib.auth.models import Group
from django.core.files.base import ContentFile
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.db import transaction
from django.http import Http404
from django.test.utils import override_settings
from django.utils import timezone
from model_mommy import mommy
from bpp.models import Typ_KBN, Jezyk, Charakter_Formalny, Typ_Odpowiedzialnosci
from bpp.tests.tests_legacy.testutil import UserTestCase, UserTransactionTestCase
from bpp.tests.util import any_jednostka, any_autor, any_ciagle
from bpp.util import rebuild_contenttypes
from bpp.views.raporty import RaportSelector, PodgladRaportu, KasowanieRaportu
from celeryui.models import Report
from django.conf import settings
| 32.221739
| 95
| 0.643233
|
8d8dfcd12be52225c59666f19fa694cef189e9ea
| 1,373
|
py
|
Python
|
bot/utilities/api/helpers/score.py
|
AiratK/kaishnik-bot
|
c42351611a40a04d78c8ae481b97339adbd321e5
|
[
"MIT"
] | null | null | null |
bot/utilities/api/helpers/score.py
|
AiratK/kaishnik-bot
|
c42351611a40a04d78c8ae481b97339adbd321e5
|
[
"MIT"
] | null | null | null |
bot/utilities/api/helpers/score.py
|
AiratK/kaishnik-bot
|
c42351611a40a04d78c8ae481b97339adbd321e5
|
[
"MIT"
] | null | null | null |
from typing import List
from typing import Tuple
from bot.utilities.api.constants import SCORE_TEMPLATE
| 39.228571
| 116
| 0.780772
|
8d8ebb77655b687ce95045239bb38a91c19a2901
| 1,192
|
py
|
Python
|
manager_app/serializers/carousel_serializers.py
|
syz247179876/e_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 7
|
2021-04-10T13:20:56.000Z
|
2022-03-29T15:00:29.000Z
|
manager_app/serializers/carousel_serializers.py
|
syz247179876/E_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 9
|
2021-05-11T03:53:31.000Z
|
2022-03-12T00:58:03.000Z
|
manager_app/serializers/carousel_serializers.py
|
syz247179876/E_mall
|
f94e39e091e098242342f532ae371b8ff127542f
|
[
"Apache-2.0"
] | 2
|
2020-11-24T08:59:22.000Z
|
2020-11-24T14:10:59.000Z
|
# -*- coding: utf-8 -*-
# @Time : 2021/4/6 9:21
# @Author :
# @File : carousel_serializers.py
# @Software: Pycharm
from rest_framework import serializers
from Emall.exceptions import DataFormatError
from shop_app.models.commodity_models import Carousel
| 27.090909
| 97
| 0.654362
|
8d9135e1864bf2b1336ddc05e72617edb4057d7b
| 7,312
|
py
|
Python
|
xfbin/structure/nud.py
|
SutandoTsukai181/xfbin_lib
|
8e2c56f354bfd868f9162f816cc528e6f830cdbc
|
[
"MIT"
] | 3
|
2021-07-20T09:13:13.000Z
|
2021-09-06T18:08:15.000Z
|
xfbin/structure/nud.py
|
SutandoTsukai181/xfbin_lib
|
8e2c56f354bfd868f9162f816cc528e6f830cdbc
|
[
"MIT"
] | 1
|
2021-09-06T18:07:48.000Z
|
2021-09-06T18:07:48.000Z
|
xfbin/structure/nud.py
|
SutandoTsukai181/xfbin_lib
|
8e2c56f354bfd868f9162f816cc528e6f830cdbc
|
[
"MIT"
] | null | null | null |
from itertools import chain
from typing import List, Tuple
from .br.br_nud import *
| 32.642857
| 117
| 0.607358
|
8d92051bcbbae105ab8b259c257c80d404e8f4eb
| 2,389
|
py
|
Python
|
src/attack_surface_pypy/__main__.py
|
ccrvs/attack_surface_pypy
|
f2bc9998cf42f4764f1c495e6243d970e01bd176
|
[
"CC0-1.0"
] | null | null | null |
src/attack_surface_pypy/__main__.py
|
ccrvs/attack_surface_pypy
|
f2bc9998cf42f4764f1c495e6243d970e01bd176
|
[
"CC0-1.0"
] | null | null | null |
src/attack_surface_pypy/__main__.py
|
ccrvs/attack_surface_pypy
|
f2bc9998cf42f4764f1c495e6243d970e01bd176
|
[
"CC0-1.0"
] | null | null | null |
import argparse
import gc
import pathlib
import sys
import typing
import uvicorn # type: ignore
from attack_surface_pypy import __service_name__, __version__, asgi
from attack_surface_pypy import logging as app_logging
from attack_surface_pypy import settings
# logger = structlog.get_logger()
gc.disable()
parser = argparse.ArgumentParser(description="App initial arguments.", prog=__service_name__)
parser.add_argument(
"-f",
"--file-path",
help="provide path to a file with initial data.",
type=pathlib.Path,
metavar=".fixtures/xxx.json",
required=True,
choices=[
pathlib.Path(".fixtures/input-1.json"),
pathlib.Path(".fixtures/input-2.json"),
pathlib.Path(".fixtures/input-3.json"),
pathlib.Path(".fixtures/input-4.json"),
pathlib.Path(".fixtures/input-5.json"),
],
)
parser.add_argument(
"-n",
"--host",
help="set host for the service.",
type=str,
metavar="localhost",
)
parser.add_argument(
"-p",
"--port",
type=int,
help="set port for the service.",
)
parser.add_argument(
"-v",
"--version",
action="version",
version=f"%(prog)s {__version__}",
)
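# `run_uvicorn` is called at the bottom of this module but its definition is
# not part of this excerpt; a minimal sketch, assuming it wraps uvicorn.run
# around the ASGI app with the configured host and port (the attribute names
# are assumptions):
def run_uvicorn(app_settings: settings.Settings, log_config: dict) -> int:
    uvicorn.run(
        asgi.app,
        host=app_settings.service.host,
        port=app_settings.service.port,
        log_config=log_config,
    )
    return 0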
if __name__ == "__main__":
ns = parser.parse_args()
domain_settings = settings.Domain(file_path=ns.file_path)
service_settings = settings.Service()
if ns.host or ns.port:
service_settings = settings.Service(host=ns.host, port=ns.port)
app_settings = settings.Settings(domain=domain_settings, service=service_settings)
log_config = app_logging.LoggingConfig(
log_level=app_settings.log_level, traceback_depth=app_settings.traceback_depth
).prepare_logger()
# context = types.Context(file_path=ns.file_path, host=ns.host, port=ns.port) # TODO: update settings from args?
sys.exit(run_uvicorn(app_settings, log_config)) # TODO: hardcoded name, awry fabric
| 29.493827
| 117
| 0.686061
|
8d92eb64df1700c877aeea998c716029d6df8ce0
| 391
|
py
|
Python
|
subscriptions/migrations/0004_auto_20200630_1157.py
|
Naveendata-ux/tor_redesign
|
e4b5135f8b4134527ad04a097bdffd9d956d9858
|
[
"BSD-2-Clause"
] | null | null | null |
subscriptions/migrations/0004_auto_20200630_1157.py
|
Naveendata-ux/tor_redesign
|
e4b5135f8b4134527ad04a097bdffd9d956d9858
|
[
"BSD-2-Clause"
] | null | null | null |
subscriptions/migrations/0004_auto_20200630_1157.py
|
Naveendata-ux/tor_redesign
|
e4b5135f8b4134527ad04a097bdffd9d956d9858
|
[
"BSD-2-Clause"
] | null | null | null |
# Generated by Django 2.2.5 on 2020-06-30 11:57
from django.db import migrations
| 20.578947
| 53
| 0.613811
|
8d9365bf3bc3b96e70ffdbc229d46a96e3d6b3fd
| 545
|
py
|
Python
|
Random_Colored/main.py
|
usamaahsan93/mischief-managed
|
824022ecaeda46450ca1029bceb39f194c363138
|
[
"MIT"
] | null | null | null |
Random_Colored/main.py
|
usamaahsan93/mischief-managed
|
824022ecaeda46450ca1029bceb39f194c363138
|
[
"MIT"
] | null | null | null |
Random_Colored/main.py
|
usamaahsan93/mischief-managed
|
824022ecaeda46450ca1029bceb39f194c363138
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 2 16:54:10 2021
@author: sdn1
"""
import numpy as np
import os
i=0
while(i<1):
1/0
print(bcolors.OKGREEN + chr(np.random.randint(250,400)) + bcolors.ENDC, end='')
os.system('python $(pwd)/main.py')
i=i+1
print(i)
| 17.03125
| 83
| 0.552294
|
8d93c9fb2121a519402ceb1deef23ae520c7fdfe
| 1,717
|
py
|
Python
|
utils/event_store_rebuilder_for_segments.py
|
initialed85/eds-cctv-system
|
fcdb7e7e23327bf3a901d23d506b3915833027d1
|
[
"MIT"
] | null | null | null |
utils/event_store_rebuilder_for_segments.py
|
initialed85/eds-cctv-system
|
fcdb7e7e23327bf3a901d23d506b3915833027d1
|
[
"MIT"
] | null | null | null |
utils/event_store_rebuilder_for_segments.py
|
initialed85/eds-cctv-system
|
fcdb7e7e23327bf3a901d23d506b3915833027d1
|
[
"MIT"
] | null | null | null |
import datetime
from pathlib import Path
from typing import Optional, Tuple
from .common import _IMAGE_SUFFIXES, _PERMITTED_EXTENSIONS, PathDetails, rebuild_event_store
if __name__ == "__main__":
import argparse
from dateutil.tz import tzoffset
parser = argparse.ArgumentParser()
parser.add_argument("-r", "--root-path", type=str, required=True)
parser.add_argument("-j", "--json-path", type=str, required=True)
args = parser.parse_args()
rebuild_event_store(
root_path=args.root_path,
tzinfo=tzoffset(name="WST-8", offset=8 * 60 * 60),
json_path=args.json_path,
parse_method=parse_path,
get_key_methods=[_get_key]
)
| 28.616667
| 92
| 0.663366
|
8d9464e17bd59a5730ae1d8d76d451408c780a27
| 4,049
|
py
|
Python
|
python/src/main/python/common/threadctl.py
|
esnet/netshell
|
4cb010b63e72610cf81112b29587d3e980612333
|
[
"BSD-3-Clause-LBNL"
] | 6
|
2016-02-17T16:31:55.000Z
|
2021-03-16T20:17:41.000Z
|
python/src/main/python/common/threadctl.py
|
esnet/netshell
|
4cb010b63e72610cf81112b29587d3e980612333
|
[
"BSD-3-Clause-LBNL"
] | 27
|
2016-04-11T19:49:04.000Z
|
2016-07-14T06:05:52.000Z
|
python/src/main/python/common/threadctl.py
|
esnet/netshell
|
4cb010b63e72610cf81112b29587d3e980612333
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2017-07-31T19:30:50.000Z
|
2017-07-31T19:30:50.000Z
|
# ESnet Network Operating System (ENOS) Copyright (c) 2015, The Regents
# of the University of California, through Lawrence Berkeley National
# Laboratory (subject to receipt of any required approvals from the
# U.S. Dept. of Energy). All rights reserved.
#
# If you have questions about your rights to use or distribute this
# software, please contact Berkeley Lab's Innovation & Partnerships
# Office at IPO@lbl.gov.
#
# NOTICE. This Software was developed under funding from the
# U.S. Department of Energy and the U.S. Government consequently retains
# certain rights. As such, the U.S. Government has been granted for
# itself and others acting on its behalf a paid-up, nonexclusive,
# irrevocable, worldwide license in the Software to reproduce,
# distribute copies to the public, prepare derivative works, and perform
# publicly and display publicly, and to permit other to do so.
from java.lang import Thread, ThreadGroup
import jarray
import sys
rootThreadGroup = None
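# The helpers used in the __main__ block below (print_syntax, getAllThreads,
# getThread, displayThread) live in the omitted part of this module. A sketch
# of the thread enumeration, walking up to the root ThreadGroup (names and
# exact behaviour are assumptions):
def getAllThreads(match=None):
    global rootThreadGroup
    if rootThreadGroup is None:
        rootThreadGroup = Thread.currentThread().getThreadGroup()
        while rootThreadGroup.getParent() is not None:
            rootThreadGroup = rootThreadGroup.getParent()
    threads = jarray.zeros(rootThreadGroup.activeCount(), Thread)
    rootThreadGroup.enumerate(threads)
    return [t for t in threads
            if t is not None and (match is None or match in t.getName())]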
if __name__ == '__main__':
argv = sys.argv
if len(argv) == 1:
print_syntax()
sys.exit()
cmd = argv[1]
if cmd == "help":
print_syntax()
elif cmd == "show-thread":
gri = argv[2]
if gri == 'all':
match = None
if 'grep' in argv:
match = argv[4]
threads = getAllThreads(match=match)
if threads != None:
for thread in threads:
displayThread(thread=thread)
print
else:
thread = getThread(long(argv[2]))
if (thread == None):
print "unknown",argv[2]
sys.exit()
displayThread(thread)
| 29.772059
| 96
| 0.613732
|
8d94db8d2bb9acc8dbec349c6766ca408545196a
| 599
|
py
|
Python
|
python/distance/HaversineDistanceInMiles.py
|
jigneshoo7/AlgoBook
|
8aecc9698447c0ee561a1c90d5c5ab87c4a07b79
|
[
"MIT"
] | 191
|
2020-09-28T10:00:20.000Z
|
2022-03-06T14:36:55.000Z
|
python/distance/HaversineDistanceInMiles.py
|
jigneshoo7/AlgoBook
|
8aecc9698447c0ee561a1c90d5c5ab87c4a07b79
|
[
"MIT"
] | 210
|
2020-09-28T10:06:36.000Z
|
2022-03-05T03:44:24.000Z
|
python/distance/HaversineDistanceInMiles.py
|
jigneshoo7/AlgoBook
|
8aecc9698447c0ee561a1c90d5c5ab87c4a07b79
|
[
"MIT"
] | 320
|
2020-09-28T09:56:14.000Z
|
2022-02-12T16:45:57.000Z
|
import math
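# The function body was truncated from this excerpt; a minimal sketch of the
# classic haversine formula, using a mean Earth radius of 3958.8 miles:
def haversine_distance_in_miles(lat1, lon1, lat2, lon2):
    # convert decimal degrees to radians
    lat1, lon1, lat2, lon2 = map(math.radians, (lat1, lon1, lat2, lon2))
    dlat = lat2 - lat1
    dlon = lon2 - lon1
    a = math.sin(dlat / 2) ** 2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2) ** 2
    return 2 * 3958.8 * math.asin(math.sqrt(a))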
| 33.277778
| 153
| 0.651085
|
8d95a5da0117840ab07b75457380a92375c5347d
| 8,837
|
py
|
Python
|
i2i/util.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | 1
|
2019-08-29T01:35:12.000Z
|
2019-08-29T01:35:12.000Z
|
i2i/util.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | null | null | null |
i2i/util.py
|
thorwhalen/i2i
|
f967aaba28793029e3fe643c5e17ae9bc7a77732
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import division
import inspect
import os
import types
from functools import wraps
function_type = type(lambda x: x) # using this instead of callable() because classes are callable, for instance
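# `NoDefault` is not defined in this excerpt; a minimal sentinel class is
# assumed here so that `no_default` below is well-defined:
class NoDefault(object):
    def __repr__(self):
        return 'no_default'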
no_default = NoDefault()
def inject_method(self, method_function, method_name=None):
"""
method_function could be:
* a function
* a {method_name: function, ...} dict (for multiple injections)
* a list of functions or (function, method_name) pairs
"""
if isinstance(method_function, function_type):
if method_name is None:
method_name = method_function.__name__
setattr(self,
method_name,
types.MethodType(method_function, self))
else:
if isinstance(method_function, dict):
method_function = [(func, func_name) for func_name, func in method_function.items()]
for method in method_function:
if isinstance(method, tuple) and len(method) == 2:
self = inject_method(self, method[0], method[1])
else:
self = inject_method(self, method)
return self
def transform_args(**trans_func_for_arg):
"""
Make a decorator that transforms function arguments before calling the function.
For example:
* original argument: a relative path --> used argument: a full path
* original argument: a pickle filepath --> used argument: the loaded object
    :param trans_func_for_arg: a {arg_name: transform_function, ...} mapping of
        argument names to the transforms applied to them before the call
    :return: a decorator
>>> def f(a, b, c):
... return "a={a}, b={b}, c={c}".format(a=a, b=b, c=c)
>>>
>>> print(f('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = transform_args()(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = transform_args(a=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', 'bar', 3))
a=ROOT/foo, b=bar, c=3
>>> ff = transform_args(b=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=ROOT/bar, c=3
>>> ff = transform_args(a=lambda x: 'ROOT/' + x, b=lambda x: 'ROOT/' + x)(f)
>>> print(ff('foo', b='bar', c=3))
a=ROOT/foo, b=ROOT/bar, c=3
"""
return transform_args_decorator
def resolve_filepath_of_name(name_arg=None, rootdir=''):
"""
Make a decorator that applies a function to an argument before using it.
For example:
* original argument: a relative path --> used argument: a full path
* original argument: a pickle filepath --> used argument: the loaded object
:param rootdir: rootdir to be used for all name arguments of target function
:param name_arg: the position (int) or argument name of the argument containing the name
:return: a decorator
>>> def f(a, b, c):
... return "a={a}, b={b}, c={c}".format(a=a, b=b, c=c)
>>>
>>> print(f('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = resolve_filepath_of_name()(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=bar, c=3
>>> ff = resolve_filepath_of_name('a', 'ROOT')(f)
>>> print(ff('foo', 'bar', 3))
a=ROOT/foo, b=bar, c=3
>>> ff = resolve_filepath_of_name('b', 'ROOT')(f)
>>> print(ff('foo', 'bar', 3))
a=foo, b=ROOT/bar, c=3
"""
if name_arg is not None:
return transform_args(**{name_arg: lambda x: os.path.join(rootdir, x)})
else:
return lambda x: x
def arg_dflt_dict_of_callable(f):
"""
Get a {arg_name: default_val, ...} dict from a callable.
See also :py:mint_of_callable:
:param f: A callable (function, method, ...)
:return:
"""
argspec = inspect.getfullargspec(f)
args = argspec.args or []
defaults = argspec.defaults or []
return {arg: dflt for arg, dflt in zip(args, [no_default] * (len(args) - len(defaults)) + list(defaults))}
def infer_if_function_might_be_intended_as_a_classmethod_or_staticmethod(func):
"""
Tries to infer if the input function is a 'classmethod' or 'staticmethod' (or just 'normal')
When is that? When:
* the function's first argument is called 'cls' and has no default: 'classmethod'
* the function's first argument is called 'self' and has no default: 'staticmethod'
* otherwise: 'normal'
>>> def a_normal_func(x, y=None):
... pass
>>> def a_func_that_is_probably_a_classmethod(cls, y=None):
... pass
>>> def a_func_that_is_probably_a_staticmethod(self, y=None):
... pass
>>> def a_func_that_is_probably_a_classmethod_but_is_not(cls=3, y=None):
... pass
>>> def a_func_that_is_probably_a_staticmethod_but_is_not(self=None, y=None):
... pass
>>> list_of_functions = [
... a_normal_func,
... a_func_that_is_probably_a_classmethod,
... a_func_that_is_probably_a_staticmethod,
... a_func_that_is_probably_a_classmethod_but_is_not,
... a_func_that_is_probably_a_staticmethod_but_is_not,
... ]
>>>
>>> for func in list_of_functions:
... print("{}: {}".format(func.__name__,
... infer_if_function_might_be_intended_as_a_classmethod_or_staticmethod(func)))
...
a_normal_func: normal
a_func_that_is_probably_a_classmethod: classmethod
a_func_that_is_probably_a_staticmethod: staticmethod
a_func_that_is_probably_a_classmethod_but_is_not: normal_with_cls
a_func_that_is_probably_a_staticmethod_but_is_not: normal_with_self
"""
argsspec = inspect.getfullargspec(func)
if len(argsspec.args) > 0:
        first_element_has_no_defaults = len(argsspec.args) > len(argsspec.defaults or ())
if argsspec.args[0] == 'cls':
if first_element_has_no_defaults:
return 'classmethod'
else:
return 'normal_with_cls'
elif argsspec.args[0] == 'self':
if first_element_has_no_defaults:
return 'staticmethod'
else:
return 'normal_with_self'
return 'normal'
if __name__ == '__main__':
import os
import re
key_file_re = re.compile('setup.py')
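    # `dir_is_a_pip_installable_dir` is omitted from this excerpt; a sketch
    # consistent with `key_file_re` above (a directory counts as
    # pip-installable if it contains a setup.py):
    def dir_is_a_pip_installable_dir(dirpath):
        return any(key_file_re.match(f) for f in os.listdir(dirpath))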
rootdir = '/D/Dropbox/dev/py/proj'
cumul = list()
for f in filter(lambda x: not x.startswith('.'), os.listdir(rootdir)):
filepath = os.path.join(rootdir, f)
if os.path.isdir(filepath):
if dir_is_a_pip_installable_dir(filepath):
cumul.append(filepath)
for f in cumul:
print(f)
| 34.928854
| 119
| 0.629965
|
8d97b86230f6560f3cd37b723cba275b3f968cb2
| 1,635
|
py
|
Python
|
setup.py
|
robflintham/mippy
|
e642c697202acc5b96b42f62204786bf5e705c9a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
robflintham/mippy
|
e642c697202acc5b96b42f62204786bf5e705c9a
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
robflintham/mippy
|
e642c697202acc5b96b42f62204786bf5e705c9a
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
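# `test_version` and `get_version` are defined in the part of this file not
# shown here; minimal sketches, assuming the version string is kept in a
# VERSION file next to setup.py (the file name is an assumption):
def get_version():
    with open(path.join(here, 'VERSION'), encoding='utf-8') as f:
        return f.read().strip()

def test_version():
    # fail fast if the version string is not a dotted number
    import re
    if not re.match(r'^\d+(\.\d+)*$', get_version()):
        raise ValueError('Invalid version string: %r' % get_version())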
# Test version numbering before running setup
test_version()
setup( name='MIPPY',
version=get_version(),
description='Modular Image Processing in Python',
author='Robert Flintham',
author_email='robert.flintham@uhb.nhs.uk',
install_requires=['numpy','scipy','dicom','pillow','nibabel','matplotlib'],
license='BSD-3-Clause',
classifiers=[
'Programming Language :: Python :: 2.7',
],
packages=['mippy','mippy.mdicom','mippy.mviewer'],
package_data={'':['resources/*','mviewer/config']}
)
| 29.727273
| 92
| 0.666055
|
8d98eec2f752514e211b3f9e607274f2de78ffd9
| 3,543
|
py
|
Python
|
physprog/tests/test_sample_problem.py
|
partofthething/physprog
|
8bbeb8d84697469417577c76c924dcb3a855cd2d
|
[
"Apache-2.0"
] | 3
|
2018-03-25T16:13:53.000Z
|
2021-06-29T14:30:20.000Z
|
physprog/tests/test_sample_problem.py
|
partofthething/physprog
|
8bbeb8d84697469417577c76c924dcb3a855cd2d
|
[
"Apache-2.0"
] | null | null | null |
physprog/tests/test_sample_problem.py
|
partofthething/physprog
|
8bbeb8d84697469417577c76c924dcb3a855cd2d
|
[
"Apache-2.0"
] | 2
|
2021-09-18T08:38:32.000Z
|
2022-03-01T07:43:52.000Z
|
"""Run a sample problem to test full system."""
# pylint: disable=invalid-name,missing-docstring
import unittest
from collections import namedtuple
import math
import os
from physprog import classfunctions
from physprog import optimize
THIS_DIR = os.path.dirname(__file__)
SAMPLE_INPUT = os.path.join(THIS_DIR, 'sample-input.yaml')
SampleDesign = namedtuple('SampleDesign', ['d1', 'd2', 'd3', 'b', 'L'])
if __name__ == '__main__':
unittest.main()
| 28.804878
| 80
| 0.582275
|
8d99f51b98aee394d6e4b4f62dcc6cdca1b6db1f
| 10,131
|
py
|
Python
|
tutorials/seq2seq_sated/seq2seq_sated_meminf.py
|
rizwandel/ml_privacy_meter
|
5dc4c300eadccceadd0e664a7e46099f65728628
|
[
"MIT"
] | 294
|
2020-04-13T18:32:45.000Z
|
2022-03-31T10:32:34.000Z
|
tutorials/seq2seq_sated/seq2seq_sated_meminf.py
|
kypomon/ml_privacy_meter
|
c0324e8f74cbd0cde0643a7854fa66eab47bbe53
|
[
"MIT"
] | 26
|
2020-04-29T19:56:21.000Z
|
2022-03-31T10:42:24.000Z
|
tutorials/seq2seq_sated/seq2seq_sated_meminf.py
|
kypomon/ml_privacy_meter
|
c0324e8f74cbd0cde0643a7854fa66eab47bbe53
|
[
"MIT"
] | 50
|
2020-04-16T02:16:24.000Z
|
2022-03-16T00:37:40.000Z
|
import os
import sys
from collections import defaultdict
import tensorflow as tf
import tensorflow.keras.backend as K
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve
from sklearn.linear_model import LogisticRegression
from utils import process_texts, load_texts, load_users, load_sated_data_by_user, \
build_nmt_model, words_to_indices, \
SATED_TRAIN_USER, SATED_TRAIN_FR, SATED_TRAIN_ENG
MODEL_PATH = 'checkpoints/model/'
OUTPUT_PATH = 'checkpoints/output/'
tf.compat.v1.disable_eager_execution()
# ================================ GENERATE RANKS ================================ #
# Code adapted from https://github.com/csong27/auditing-text-generation
def save_users_rank_results(users, user_src_texts, user_trg_texts, src_vocabs, trg_vocabs, prob_fn, save_dir,
member_label=1, cross_domain=False, save_probs=False, mask=False, rerun=False):
"""
Save user ranks in the appropriate format for attacks.
"""
for i, u in enumerate(users):
save_path = save_dir + 'rank_u{}_y{}{}.npz'.format(i, member_label, '_cd' if cross_domain else '')
prob_path = save_dir + 'prob_u{}_y{}{}.npz'.format(i, member_label, '_cd' if cross_domain else '')
if os.path.exists(save_path) and not save_probs and not rerun:
continue
user_src_data = words_to_indices(user_src_texts[u], src_vocabs, mask=mask)
user_trg_data = words_to_indices(user_trg_texts[u], trg_vocabs, mask=mask)
rtn = get_ranks(user_src_data, user_trg_data, prob_fn, save_probs=save_probs)
if save_probs:
probs = rtn
np.savez(prob_path, probs)
else:
ranks, labels = rtn[0], rtn[1]
np.savez(save_path, ranks, labels)
if (i + 1) % 500 == 0:
sys.stderr.write('Finishing saving ranks for {} users'.format(i + 1))
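# `get_ranks` comes from the adapted source (csong27/auditing-text-generation)
# and is omitted from this excerpt. A sketch of the idea: feed each sentence
# pair through `prob_fn` and record, for every true target word, its rank in
# the model's output distribution (array shapes and names are assumptions):
def get_ranks(user_src_data, user_trg_data, prob_fn, save_probs=False):
    ranks, labels, probs = [], [], []
    for src, trg in zip(user_src_data, user_trg_data):
        src_in = np.asarray([src])
        trg_in = np.asarray([trg[:-1]])
        trg_out = np.asarray(trg[1:])
        pred = prob_fn([src_in, trg_in, trg_out[None, :].astype('float32'), 0])[0][0]
        if save_probs:
            probs.append(pred[np.arange(len(trg_out)), trg_out])
            continue
        # rank 0 means the true word was the model's top prediction
        ranks.append(np.array([(pred[t] > pred[t, w]).sum()
                               for t, w in enumerate(trg_out)]))
        labels.append(trg_out)
    return probs if save_probs else (ranks, labels)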
def get_target_ranks(num_users=200, num_words=5000, mask=False, h=128, emb_h=128, user_data_ratio=0.,
tied=False, save_probs=False):
"""
Get ranks of target machine translation model.
"""
user_src_texts, user_trg_texts, test_user_src_texts, test_user_trg_texts, src_vocabs, trg_vocabs \
= load_sated_data_by_user(num_users, num_words, test_on_user=True, user_data_ratio=user_data_ratio)
train_users = sorted(user_src_texts.keys())
test_users = sorted(test_user_src_texts.keys())
# Get model
save_dir = OUTPUT_PATH + 'target_{}{}/'.format(num_users, '_dr' if 0. < user_data_ratio < 1. else '')
if not os.path.exists(save_dir):
os.mkdir(save_dir)
    model_path = 'sated_nmt'
if 0. < user_data_ratio < 1.:
model_path += '_dr{}'.format(user_data_ratio)
heldout_src_texts, heldout_trg_texts = load_train_users_heldout_data(train_users, src_vocabs, trg_vocabs)
for u in train_users:
user_src_texts[u] += heldout_src_texts[u]
user_trg_texts[u] += heldout_trg_texts[u]
model = build_nmt_model(Vs=num_words, Vt=num_words, mask=mask, drop_p=0., h=h, demb=emb_h, tied=tied)
model.load_weights(MODEL_PATH + '{}_{}.h5'.format(model_path, num_users))
src_input_var, trg_input_var = model.inputs
prediction = model.output
trg_label_var = K.placeholder((None, None), dtype='float32')
# Get predictions
prediction = K.softmax(prediction)
prob_fn = K.function([src_input_var, trg_input_var, trg_label_var, K.learning_phase()], [prediction])
# Save user ranks for train and test dataset
save_users_rank_results(users=train_users, save_probs=save_probs,
user_src_texts=user_src_texts, user_trg_texts=user_trg_texts,
src_vocabs=src_vocabs, trg_vocabs=trg_vocabs, cross_domain=False,
prob_fn=prob_fn, save_dir=save_dir, member_label=1)
save_users_rank_results(users=test_users, save_probs=save_probs,
user_src_texts=test_user_src_texts, user_trg_texts=test_user_trg_texts,
src_vocabs=src_vocabs, trg_vocabs=trg_vocabs, cross_domain=False,
prob_fn=prob_fn, save_dir=save_dir, member_label=0)
# ================================ ATTACK ================================ #
def avg_rank_feats(ranks):
"""
Averages ranks to get features for deciding the threshold for membership inference.
"""
avg_ranks = []
for r in ranks:
avg = np.mean(np.concatenate(r))
avg_ranks.append(avg)
return avg_ranks
def load_ranks_by_label(save_dir, num_users=300, cross_domain=False, label=1):
"""
Helper method to load ranks by train/test dataset.
If label = 1, train set ranks are loaded. If label = 0, test set ranks are loaded.
Ranks are generated by running sated_nmt_ranks.py.
"""
ranks = []
labels = []
y = []
for i in range(num_users):
save_path = save_dir + 'rank_u{}_y{}{}.npz'.format(i, label, '_cd' if cross_domain else '')
if os.path.exists(save_path):
f = np.load(save_path, allow_pickle=True)
train_rs, train_ls = f['arr_0'], f['arr_1']
ranks.append(train_rs)
labels.append(train_ls)
y.append(label)
return ranks, labels, y
def load_all_ranks(save_dir, num_users=5000, cross_domain=False):
"""
Loads all ranks generated by the target model.
Ranks are generated by running sated_nmt_ranks.py.
"""
ranks = []
labels = []
y = []
# Load train ranks
train_label = 1
train_ranks, train_labels, train_y = load_ranks_by_label(save_dir, num_users, cross_domain, train_label)
ranks = ranks + train_ranks
labels = labels + train_labels
y = y + train_y
# Load test ranks
test_label = 0
test_ranks, test_labels, test_y = load_ranks_by_label(save_dir, num_users, cross_domain, test_label)
ranks = ranks + test_ranks
labels = labels + test_labels
y = y + test_y
return ranks, labels, np.asarray(y)
def run_average_rank_thresholding(num_users=300, dim=100, prop=1.0, user_data_ratio=0.,
top_words=5000, cross_domain=False, rerun=False):
"""
Runs average rank thresholding attack on the target model.
"""
result_path = OUTPUT_PATH
if dim > top_words:
dim = top_words
attack1_results_save_path = result_path + 'mi_data_dim{}_prop{}_{}{}_attack1.npz'.format(
dim, prop, num_users, '_cd' if cross_domain else '')
if not rerun and os.path.exists(attack1_results_save_path):
f = np.load(attack1_results_save_path)
        X, y = [f['arr_{}'.format(i)] for i in range(2)]
else:
save_dir = result_path + 'target_{}{}/'.format(num_users, '_dr' if 0. < user_data_ratio < 1. else '')
# Load ranks
train_ranks, _, train_y = load_ranks_by_label(save_dir, num_users, label=1)
test_ranks, _, test_y = load_ranks_by_label(save_dir, num_users, label=0)
# Convert to average rank features
train_feat = avg_rank_feats(train_ranks)
test_feat = avg_rank_feats(test_ranks)
# Create dataset
X, y = np.concatenate([train_feat, test_feat]), np.concatenate([train_y, test_y])
np.savez(attack1_results_save_path, X, y)
# print(X.shape, y.shape)
# Find threshold using ROC
clf = LogisticRegression()
clf.fit(X.reshape(-1, 1), y)
probs = clf.predict_proba(X.reshape(-1, 1))
fpr, tpr, thresholds = roc_curve(y, probs[:, 1])
plt.figure(1)
plt.plot(fpr, tpr, label='Attack 1')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.title('ROC curve')
plt.savefig('sateduser_attack1_roc_curve.png')
if __name__ == '__main__':
num_users = 300
save_probs = False
rerun = True
print("Getting target ranks...")
get_target_ranks(num_users=num_users, save_probs=save_probs)
print("Running average rank thresholding attack...")
run_average_rank_thresholding(num_users=num_users, rerun=True)
| 35.672535
| 113
| 0.660251
|
8d9d264830cab7159205ed06b41898abec3b84f4
| 2,685
|
py
|
Python
|
app/recipe/tests/test_tags_api.py
|
MohamedAbdelmagid/django-recipe-api
|
229d3a7cff483b3cad76c70aefde6a51250b9bc8
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
MohamedAbdelmagid/django-recipe-api
|
229d3a7cff483b3cad76c70aefde6a51250b9bc8
|
[
"MIT"
] | null | null | null |
app/recipe/tests/test_tags_api.py
|
MohamedAbdelmagid/django-recipe-api
|
229d3a7cff483b3cad76c70aefde6a51250b9bc8
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth import get_user_model
from rest_framework.test import APIClient
from rest_framework import status
from core.models import Tag
from recipe.serializers import TagSerializer
TAGS_URL = reverse("recipe:tag-list")
| 32.349398
| 85
| 0.672998
|
8d9d7d5c7ee0f28e0c8877291fb904e2d8ace2db
| 5,736
|
py
|
Python
|
dtlpy/entities/annotation_definitions/cube_3d.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 10
|
2020-05-21T06:25:35.000Z
|
2022-01-07T20:34:03.000Z
|
dtlpy/entities/annotation_definitions/cube_3d.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 22
|
2019-11-17T17:25:16.000Z
|
2022-03-10T15:14:28.000Z
|
dtlpy/entities/annotation_definitions/cube_3d.py
|
dataloop-ai/dtlpy
|
2c73831da54686e047ab6aefd8f12a8e53ea97c2
|
[
"Apache-2.0"
] | 8
|
2020-03-05T16:23:55.000Z
|
2021-12-27T11:10:42.000Z
|
import numpy as np
# import open3d as o3d
from . import BaseAnnotationDefinition
# from scipy.spatial.transform import Rotation as R
import logging
logger = logging.getLogger(name=__name__)
| 38.496644
| 135
| 0.544107
|
8d9e1079bef17b6514de9131ede3ab7099ea53a4
| 3,702
|
py
|
Python
|
my_module/tools.py
|
roki18d/sphinx_autogen-apidoc
|
67ad9c716c909d89bcd813a5fa871df8850e4fd5
|
[
"Apache-2.0"
] | null | null | null |
my_module/tools.py
|
roki18d/sphinx_autogen-apidoc
|
67ad9c716c909d89bcd813a5fa871df8850e4fd5
|
[
"Apache-2.0"
] | null | null | null |
my_module/tools.py
|
roki18d/sphinx_autogen-apidoc
|
67ad9c716c909d89bcd813a5fa871df8850e4fd5
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
from my_module.exceptions import InvalidArgumentsError
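# `SimpleCalculator` is defined in the omitted part of this module; a minimal
# sketch consistent with the cases below (returning an error string for the
# failing cases, and the InvalidArgumentsError message, are assumptions):
class SimpleCalculator(object):
    _OPERATIONS = {
        "add": lambda a, b: a + b,
        "sub": lambda a, b: a - b,
        "mul": lambda a, b: a * b,
        "div": lambda a, b: a / b,
    }

    def __init__(self, operator):
        if operator not in self._OPERATIONS:
            raise InvalidArgumentsError("unknown operator: %s" % operator)
        self.operator = operator

    def execute(self, a, b):
        try:
            return self._OPERATIONS[self.operator](a, b)
        except (TypeError, ZeroDivisionError) as err:
            return "error: %s" % err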
if __name__ == "__main__":
my_adder = SimpleCalculator(operator="add")
print('Case01:', my_adder.execute(4, 2))
print('Case02:', my_adder.execute(5, "a"))
my_subtractor = SimpleCalculator(operator="sub")
print('Case03:', my_subtractor.execute(3, 5))
my_multiplier = SimpleCalculator(operator="mul")
print('Case04:', my_multiplier.execute(2, 7))
my_divider = SimpleCalculator(operator="div")
print('Case05:', my_divider.execute(17, 5))
print('Case06:', my_divider.execute(6, 0))
print('Case07:')
my_unknown = SimpleCalculator(operator="unknown")
import sys; sys.exit(0)
| 30.85
| 92
| 0.562939
|
8da38969800ff2540723920b2ba94670badb3561
| 12,114
|
py
|
Python
|
PCA_ResNet50.py
|
liuyingbin19222/HSI_svm_pca_resNet50
|
cd95d21c81e93f8b873183f10f52416f71a93d07
|
[
"Apache-2.0"
] | 12
|
2020-03-13T02:39:53.000Z
|
2022-02-21T03:28:33.000Z
|
PCA_ResNet50.py
|
liuyingbin19222/HSI_svm_pca_resNet50
|
cd95d21c81e93f8b873183f10f52416f71a93d07
|
[
"Apache-2.0"
] | 14
|
2020-02-17T12:31:08.000Z
|
2022-02-10T01:07:05.000Z
|
PCA_ResNet50.py
|
liuyingbin19222/HSI_svm_pca_resNet50
|
cd95d21c81e93f8b873183f10f52416f71a93d07
|
[
"Apache-2.0"
] | 3
|
2020-09-06T08:19:15.000Z
|
2021-03-08T10:15:40.000Z
|
import keras
from keras.layers import Conv2D, Conv3D, Flatten, Dense, Reshape, BatchNormalization
from keras.layers import Dropout, Input
from keras.models import Model
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint
from keras.utils import np_utils
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score
from operator import truediv
from plotly.offline import init_notebook_mode
import numpy as np
import tensorflow as tf
from keras import layers
from keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from keras.initializers import glorot_uniform
import pydot
from IPython.display import SVG
import scipy.misc
from matplotlib.pyplot import imshow
import keras.backend as K
K.set_image_data_format('channels_last')
K.set_learning_phase(1)
from keras.utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import os
import spectral
## GLOBAL VARIABLES
dataset = 'IP'
test_ratio = 0.8
windowSize = 25
# Load the dataset, apply PCA, and build image cubes
X, y = loadData(dataset)
K = 30 if dataset == 'IP' else 15
X,pca = applyPCA(X,numComponents=K)
X, y = createImageCubes(X, y, windowSize=windowSize)
## Split into train and test sets
Xtrain, Xtest, ytrain, ytest = splitTrainTestSet(X, y, test_ratio)
# print("Xtrain.shape:",Xtrain.shape)
# print("ytrain.shape:",ytrain.shape)
# print("ytrain:",ytrain)
ytrain = convert_one_hot(ytrain,16)
ytest = convert_one_hot(ytest,16)
# print("ytrain.shape:",ytrain.shape)
# ResNet50 building blocks
def identity_block(X, f, filters, stage, block):
"""
3
X - tensor( m, n_H_prev, n_W_prev, n_H_prev )
f - CONV
filters -
stage - block
block - stage
X - tensor(n_H, n_W, n_C)
"""
#
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
#
F1, F2, F3 = filters
#
X_shortcut = X
#
##
X = Conv2D(filters=F1, kernel_size=(1,1), strides=(1,1) ,padding="valid",
name=conv_name_base+"2a", kernel_initializer=glorot_uniform(seed=0))(X)
##
X = BatchNormalization(axis=3,name=bn_name_base+"2a")(X)
##ReLU
X = Activation("relu")(X)
#
##
X = Conv2D(filters=F2, kernel_size=(f,f),strides=(1,1), padding="same",
name=conv_name_base+"2b", kernel_initializer=glorot_uniform(seed=0))(X)
##
X = BatchNormalization(axis=3,name=bn_name_base+"2b")(X)
##ReLU
X = Activation("relu")(X)
#
##
X = Conv2D(filters=F3, kernel_size=(1,1), strides=(1,1), padding="valid",
name=conv_name_base+"2c", kernel_initializer=glorot_uniform(seed=0))(X)
##
X = BatchNormalization(axis=3,name=bn_name_base+"2c")(X)
##ReLU
#
##
X = Add()([X,X_shortcut])
##ReLU
X = Activation("relu")(X)
return X
def convolutional_block(X, f, filters, stage, block, s=2):
"""
5
X - tensor( m, n_H_prev, n_W_prev, n_C_prev)
f - CONV
filters -
stage - block
block - stage
s -
X - tensor(n_H, n_W, n_C)
"""
#
conv_name_base = "res" + str(stage) + block + "_branch"
bn_name_base = "bn" + str(stage) + block + "_branch"
#
F1, F2, F3 = filters
#
X_shortcut = X
#
##
X = Conv2D(filters=F1, kernel_size=(1,1), strides=(s,s), padding="valid",
name=conv_name_base+"2a", kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3,name=bn_name_base+"2a")(X)
X = Activation("relu")(X)
##
X = Conv2D(filters=F2, kernel_size=(f,f), strides=(1,1), padding="same",
name=conv_name_base+"2b", kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3,name=bn_name_base+"2b")(X)
X = Activation("relu")(X)
##
X = Conv2D(filters=F3, kernel_size=(1,1), strides=(1,1), padding="valid",
name=conv_name_base+"2c", kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3,name=bn_name_base+"2c")(X)
#
X_shortcut = Conv2D(filters=F3, kernel_size=(1,1), strides=(s,s), padding="valid",
name=conv_name_base+"1", kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
X_shortcut = BatchNormalization(axis=3,name=bn_name_base+"1")(X_shortcut)
#
X = Add()([X,X_shortcut])
X = Activation("relu")(X)
return X
def ResNet50(input_shape=(25,25,30),classes=16):
"""
ResNet50
CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
-> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> TOPLAYER
input_shape -
classes -
model - Keras
"""
#tensor
X_input = Input(input_shape)
#0
X = ZeroPadding2D((3,3))(X_input)
#stage1
X = Conv2D(filters=64, kernel_size=(7,7), strides=(2,2), name="conv1",
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(axis=3, name="bn_conv1")(X)
X = Activation("relu")(X)
X = MaxPooling2D(pool_size=(3,3), strides=(2,2))(X)
#stage2
X = convolutional_block(X, f=3, filters=[64,64,256], stage=2, block="a", s=1)
X = identity_block(X, f=3, filters=[64,64,256], stage=2, block="b")
X = identity_block(X, f=3, filters=[64,64,256], stage=2, block="c")
#stage3
X = convolutional_block(X, f=3, filters=[128,128,512], stage=3, block="a", s=2)
X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="b")
X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="c")
X = identity_block(X, f=3, filters=[128,128,512], stage=3, block="d")
#stage4
X = convolutional_block(X, f=3, filters=[256,256,1024], stage=4, block="a", s=2)
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="b")
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="c")
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="d")
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="e")
X = identity_block(X, f=3, filters=[256,256,1024], stage=4, block="f")
#stage5
X = convolutional_block(X, f=3, filters=[512,512,2048], stage=5, block="a", s=2)
X = identity_block(X, f=3, filters=[512,512,2048], stage=5, block="b")
X = identity_block(X, f=3, filters=[512,512,2048], stage=5, block="c")
#
X = AveragePooling2D(pool_size=(2,2),padding="same")(X)
#
X = Flatten()(X)
X = Dense(classes, activation="softmax", name="fc"+str(classes),
kernel_initializer=glorot_uniform(seed=0))(X)
#
model = Model(inputs=X_input, outputs=X, name="ResNet50")
return model
# # x_train : (3074,25,25,30) y_train: (3074)
# model = ResNet50(input_shape=(25,25,30),classes=16)
# model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
#
#
# model.fit(Xtrain,ytrain,epochs=2,batch_size=25)
# preds = model.evaluate(Xtest,ytest)
#
# print(":",str(preds[0]))
# print(":",str(preds[1]))
if __name__ == "__main__":
main()
| 34.123944
| 159
| 0.630097
|
8da4e24daba79cfc5a237fbfd0bd61228b6bdc1d
| 754
|
py
|
Python
|
tests/test_data/utest/setup.py
|
gordonmessmer/pyp2rpm
|
60145ba6fa49ad5bb29eeffa5765e10ba8417f03
|
[
"MIT"
] | 114
|
2015-07-13T12:38:27.000Z
|
2022-03-23T15:05:11.000Z
|
tests/test_data/utest/setup.py
|
gordonmessmer/pyp2rpm
|
60145ba6fa49ad5bb29eeffa5765e10ba8417f03
|
[
"MIT"
] | 426
|
2015-07-13T12:09:38.000Z
|
2022-01-07T16:41:32.000Z
|
tests/test_data/utest/setup.py
|
Mattlk13/pyp2rpm
|
f9ced95877d88c96b77b2b8c510dc4ceaa10504a
|
[
"MIT"
] | 51
|
2015-07-14T13:11:29.000Z
|
2022-03-31T07:27:32.000Z
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
requirements = ["pyp2rpm~=3.3.1"]
setup(
name="utest",
version="0.1.0",
description="Micro test module",
license="GPLv2+",
author="pyp2rpm Developers",
author_email='bkabrda@redhat.com, rkuska@redhat.com, mcyprian@redhat.com, ishcherb@redhat.com',
url='https://github.com/fedora-python/pyp2rpm',
install_requires=requirements,
include_package_data=True,
packages=find_packages(exclude=["test"]),
classifiers=(
"License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
),
)
| 30.16
| 99
| 0.66313
|
8da621c7d046b3bbba97fe0075833d24a4276a49
| 4,235
|
py
|
Python
|
abstract_nas/train/preprocess.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
abstract_nas/train/preprocess.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
abstract_nas/train/preprocess.py
|
dumpmemory/google-research
|
bc87d010ab9086b6e92c3f075410fa6e1f27251b
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data preprocessing for ImageNet2012 and CIFAR-10."""
from typing import Any, Callable
# pylint: disable=unused-import
from big_vision.pp import ops_general
from big_vision.pp import ops_image
# pylint: enable=unused-import
from big_vision.pp import utils
from big_vision.pp.builder import get_preprocess_fn as _get_preprocess_fn
from big_vision.pp.registry import Registry
import tensorflow as tf
CIFAR_MEAN = [0.4914, 0.4822, 0.4465]
CIFAR_STD = [0.247, 0.243, 0.261]
def preprocess_cifar(split, **_):
"""Preprocessing functions for CIFAR-10 training."""
mean_str = ",".join([str(m) for m in CIFAR_MEAN])
std_str = ",".join([str(m) for m in CIFAR_STD])
if split == "train":
pp = ("decode|"
"value_range(0,1)|"
"random_crop_with_pad(32,4)|"
"flip_lr|"
f"vgg_value_range(({mean_str}),({std_str}))|"
"onehot(10, key='label', key_result='labels')|"
"keep('image', 'labels')")
else:
pp = ("decode|"
"value_range(0,1)|"
"central_crop(32)|"
f"vgg_value_range(({mean_str}),({std_str}))|"
"onehot(10, key='label', key_result='labels')|"
"keep('image', 'labels')")
return _get_preprocess_fn(pp)
def preprocess_imagenet(split,
autoaugment = False,
label_smoothing = 0.0,
**_):
"""Preprocessing functions for ImageNet training."""
if split == "train":
pp = ("decode_jpeg_and_inception_crop(224)|"
"flip_lr|")
if autoaugment:
pp += "randaug(2,10)|"
pp += "value_range(-1,1)|"
if label_smoothing:
confidence = 1.0 - label_smoothing
low_confidence = (1.0 - confidence) / (1000 - 1)
pp += ("onehot(1000, key='label', key_result='labels', "
f"on_value={confidence}, off_value={low_confidence})|")
else:
pp += "onehot(1000, key='label', key_result='labels')|"
pp += "keep('image', 'labels')"
else:
pp = ("decode|"
"resize_small(256)|"
"central_crop(224)|"
"value_range(-1,1)|"
"onehot(1000, key='label', key_result='labels')|"
"keep('image', 'labels')")
return _get_preprocess_fn(pp)
PREPROCESS = {
"cifar10": preprocess_cifar,
"imagenet2012": preprocess_imagenet,
}
def get_preprocess_fn(dataset, split,
**preprocess_kwargs):
"""Makes a preprocessing function."""
  preprocess_fn_by_split = PREPROCESS.get(dataset, lambda _, **__: (lambda x: x))
split = "train" if "train" in split else "val"
preprocess_fn = preprocess_fn_by_split(split, **preprocess_kwargs)
return preprocess_fn
| 32.576923
| 79
| 0.633058
|
8da6f40241c238cd5d1aecce8bbe81273d1e484a
| 5,570
|
py
|
Python
|
Decission_Tree/mytree.py
|
luoshao23/ML_algorithm
|
6e94fdd0718cd892118fd036c7c5851cf3e6d796
|
[
"MIT"
] | 4
|
2017-06-19T06:33:38.000Z
|
2019-01-31T12:07:12.000Z
|
Decission_Tree/mytree.py
|
luoshao23/ML_algorithm
|
6e94fdd0718cd892118fd036c7c5851cf3e6d796
|
[
"MIT"
] | null | null | null |
Decission_Tree/mytree.py
|
luoshao23/ML_algorithm
|
6e94fdd0718cd892118fd036c7c5851cf3e6d796
|
[
"MIT"
] | 1
|
2017-12-06T08:41:06.000Z
|
2017-12-06T08:41:06.000Z
|
from math import log
from PIL import Image, ImageDraw
from collections import Counter
import numpy as np
from pandas import DataFrame
# my_data = [['slashdot', 'USA', 'yes', 18, 213.2, 'None'],
# ['google', 'France', 'yes', 23, 121.2, 'Premium'],
# ['digg', 'USA', 'yes', 24, 21.32, 'Basic'],
# ['kiwitobes', 'France', 'yes', 23, 1.2, 'Basic'],
# ['google', 'UK', 'no', 21, .2, 'Premium'],
# ['(direct)', 'New Zealand', 'no', 12, 71.2, 'None'],
# ['(direct)', 'UK', 'no', 21, -21.2, 'Basic'],
# ['google', 'USA', 'no', 24, 241.2, 'Premium'],
# ['slashdot', 'France', 'yes', 19, 20, 'None'],
# ['digg', 'USA', 'no', 18, 1.0, 'None'],
# ['google', 'UK', 'no', 18, 2, 'None'],
# ['kiwitobes', 'UK', 'no', 19, 44, 'None'],
# ['digg', 'New Zealand', 'yes', 12, 27, 'Basic'],
# ['slashdot', 'UK', 'no', 21, 86, 'None'],
# ['google', 'UK', 'yes', 18, 2, 'Basic'],
# ['kiwitobes', 'France', 'yes', 19, 0.0, 'Basic']]
my_data = [[213.2, 'None'],
[121.2, 'Premium'],
[21.32, 'Basic'],
[1.2, 'Basic'],
[.2, 'Premium'],
[71.2, 'None'],
[-21.2, 'Basic'],
[241.2, 'Premium'],
[20, 'None'],
[1.0, 'None'],
[2, 'None'],
[44, 'None'],
[27, 'Basic'],
[86, 'None'],
[2, 'Basic'],
[0.0, 'Basic']]
data = np.array(DataFrame(my_data))
# my_data = [['slashdot', 'USA', 'yes', 18, 'None'],
# ['google', 'France', 'yes', 23, 'None'],
# ['digg', 'USA', 'yes', 24, 'None'],
# ['kiwitobes', 'France', 'yes', 23, 'None'],
# ['google', 'UK', 'no', 21, 'None'],
# ['(direct)', 'New Zealand', 'no', 12, 'None'],
# ['(direct)', 'UK', 'no', 21, 'None'],
# ['google', 'USA', 'no', 24, 'None'],
# ['slashdot', 'France', 'yes', 19, 'None'],
# ['digg', 'USA', 'no', 18, 'None'],
# ['google', 'UK', 'no', 18, 'None'],
# ['kiwitobes', 'UK', 'no', 19, 'None'],
# ['digg', 'New Zealand', 'yes', 12, 'None'],
# ['slashdot', 'UK', 'no', 21, 'None'],
# ['google', 'UK', 'yes', 18, 'None'],
# ['kiwitobes', 'France', 'yes', 19, 'None']]
| 31.828571
| 89
| 0.498025
|
8da70610f3402c8b44d3fbdf21a05f4f563b016b
| 488
|
py
|
Python
|
hidb/wrapper.py
|
sk-ip/hidb
|
1394000992c016607e7af15095f058cd9cce007b
|
[
"MIT"
] | null | null | null |
hidb/wrapper.py
|
sk-ip/hidb
|
1394000992c016607e7af15095f058cd9cce007b
|
[
"MIT"
] | null | null | null |
hidb/wrapper.py
|
sk-ip/hidb
|
1394000992c016607e7af15095f058cd9cce007b
|
[
"MIT"
] | null | null | null |
from datetime import datetime
| 24.4
| 53
| 0.622951
|
8da8f86888f2ee041a3f2312c9709ef180e420d0
| 4,504
|
py
|
Python
|
ion-channel-models/compare.py
|
sanmitraghosh/fickleheart-method-tutorials
|
d5ee910258a2656951201d4ada2a412804013bd5
|
[
"BSD-3-Clause"
] | null | null | null |
ion-channel-models/compare.py
|
sanmitraghosh/fickleheart-method-tutorials
|
d5ee910258a2656951201d4ada2a412804013bd5
|
[
"BSD-3-Clause"
] | null | null | null |
ion-channel-models/compare.py
|
sanmitraghosh/fickleheart-method-tutorials
|
d5ee910258a2656951201d4ada2a412804013bd5
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
from __future__ import print_function
import sys
sys.path.append('./method')
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import model as m
"""
Run fit.
"""
predict_list = ['sinewave', 'staircase', 'activation', 'ap']
try:
which_predict = sys.argv[1]
except IndexError:
print('Usage: python %s [str:which_predict]' % os.path.basename(__file__))
sys.exit()
if which_predict not in predict_list:
raise ValueError('Input data %s is not available in the predict list' \
% which_predict)
# Get all input variables
import importlib
sys.path.append('./mmt-model-files')
info_id_a = 'model_A'
info_a = importlib.import_module(info_id_a)
info_id_b = 'model_B'
info_b = importlib.import_module(info_id_b)
data_dir = './data'
savedir = './fig/compare'
if not os.path.isdir(savedir):
os.makedirs(savedir)
data_file_name = 'data-%s.csv' % which_predict
print('Predicting ', data_file_name)
saveas = 'compare-sinewave-' + which_predict
# Protocol
protocol = np.loadtxt('./protocol-time-series/%s.csv' % which_predict,
skiprows=1, delimiter=',')
protocol_times = protocol[:, 0]
protocol = protocol[:, 1]
# Load data
data = np.loadtxt(data_dir + '/' + data_file_name,
delimiter=',', skiprows=1) # headers
times = data[:, 0]
data = data[:, 1]
# Model
model_a = m.Model(info_a.model_file,
variables=info_a.parameters,
current_readout=info_a.current_list,
set_ion=info_a.ions_conc,
transform=None,
temperature=273.15 + info_a.temperature, # K
)
model_b = m.Model(info_b.model_file,
variables=info_b.parameters,
current_readout=info_b.current_list,
set_ion=info_b.ions_conc,
transform=None,
temperature=273.15 + info_b.temperature, # K
)
# Update protocol
model_a.set_fixed_form_voltage_protocol(protocol, protocol_times)
model_b.set_fixed_form_voltage_protocol(protocol, protocol_times)
# Load calibrated parameters
load_seed = 542811797
fix_idx = [1]
calloaddir_a = './out/' + info_id_a
calloaddir_b = './out/' + info_id_b
cal_params_a = []
cal_params_b = []
for i in fix_idx:
cal_params_a.append(np.loadtxt('%s/%s-solution-%s-%s.txt' % \
(calloaddir_a, 'sinewave', load_seed, i)))
cal_params_b.append(np.loadtxt('%s/%s-solution-%s-%s.txt' % \
(calloaddir_b, 'sinewave', load_seed, i)))
# Predict
predictions_a = []
for p in cal_params_a:
predictions_a.append(model_a.simulate(p, times))
predictions_b = []
for p in cal_params_b:
predictions_b.append(model_b.simulate(p, times))
# Plot
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(10, 4),
gridspec_kw={'height_ratios': [1, 3]})
is_predict = ' prediction' if which_predict != 'sinewave' else ''
sim_protocol = model_a.voltage(times) # model_b should give the same thing
axes[0].plot(times, sim_protocol, c='#7f7f7f')
axes[0].set_ylabel('Voltage\n(mV)', fontsize=16)
axes[1].plot(times, data, alpha=0.5, label='Data')
for i, p in zip(fix_idx, predictions_a):
axes[1].plot(times, p, label='Model A' + is_predict)
for i, p in zip(fix_idx, predictions_b):
axes[1].plot(times, p, label='Model B' + is_predict)
# Zooms
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset
sys.path.append('./protocol-time-series')
zoom = importlib.import_module(which_predict + '_to_zoom')
axes[1].set_ylim(zoom.set_ylim)
for i_zoom, (w, h, loc) in enumerate(zoom.inset_setup):
axins = inset_axes(axes[1], width=w, height=h, loc=loc,
axes_kwargs={"facecolor" : "#f0f0f0"})
axins.plot(times, data, alpha=0.5)
for i, p in zip(fix_idx, predictions_a):
axins.plot(times, p)
for i, p in zip(fix_idx, predictions_b):
axins.plot(times, p)
axins.set_xlim(zoom.set_xlim_ins[i_zoom])
axins.set_ylim(zoom.set_ylim_ins[i_zoom])
#axins.yaxis.get_major_locator().set_params(nbins=3)
#axins.xaxis.get_major_locator().set_params(nbins=3)
axins.set_xticklabels([])
axins.set_yticklabels([])
pp, p1, p2 = mark_inset(axes[1], axins, loc1=zoom.mark_setup[i_zoom][0],
loc2=zoom.mark_setup[i_zoom][1], fc="none", lw=0.75, ec='k')
pp.set_fill(True); pp.set_facecolor("#f0f0f0")
axes[1].legend()
axes[1].set_ylabel('Current (pA)', fontsize=16)
axes[1].set_xlabel('Time (ms)', fontsize=16)
plt.subplots_adjust(hspace=0)
plt.savefig('%s/%s' % (savedir, saveas), bbox_inches='tight', dpi=200)
plt.close()
| 31.277778
| 78
| 0.690941
|
8da906c8ad76ecde7a1bd94e5017709b02a7ce8e
| 7,752
|
py
|
Python
|
examples/services/classifier_service.py
|
bbbdragon/python-pype
|
f0618150cb4d2fae1f959127453fb6eca8db84e5
|
[
"MIT"
] | 8
|
2019-07-12T03:28:10.000Z
|
2019-07-19T20:34:45.000Z
|
examples/services/classifier_service.py
|
bbbdragon/python-pype
|
f0618150cb4d2fae1f959127453fb6eca8db84e5
|
[
"MIT"
] | null | null | null |
examples/services/classifier_service.py
|
bbbdragon/python-pype
|
f0618150cb4d2fae1f959127453fb6eca8db84e5
|
[
"MIT"
] | null | null | null |
'''
python3 classifier_service.py data.csv
This service runs a scikit-learn classifier on data provided by the csv file data.csv.
The idea of this is a simple spam detector. In the file, you will see a number, 1 or
-1, followed by a pipe, followed by a piece of text. The text is designed to be a
subject email, and the number its label: 1 for spam and -1 for not spam.
The service loads the csv file, trains the classifier, and then waits for you to
send it a list of texts via the 'classify' route. This service can be tested using:
./test_classifier_service.sh
'''
from flask import Flask,request,jsonify
from pype import pype as p
from pype import _,_0,_1,_p
from pype import _assoc as _a
from pype import _dissoc as _d
from pype import _do
from statistics import mean,stdev
from pype.vals import lenf
from sklearn.ensemble import RandomForestClassifier as Classifier
from sklearn.feature_extraction.text import TfidfVectorizer as Vectorizer
import sys
import csv
'''
We have to use lambda to define the read function because pype functions can't yet
deal with keyword args.
'''
read=lambda f: csv.reader(f,delimiter='|')
def train_classifier(texts,y):
'''
Here is a perfect example of the "feel it ... func it" philosophy:
The pype call uses the function arguments and function body to specify
three variables, texts, a list of strings, y, a list of floats, and vectorizer,
a scikit-learn object that vectorizes text. This reiterates the adivce that you
should use the function body and function arguments to declare your scope,
whenever you can.
Line-by-line, here we go:
{'vectorizer':vectorizer.fit,
'X':vectorizer.transform},
We build a dict, the first element of which is the fit vectorizer. Luckily, the
'fit' function returns an instance of the trained vectorizer, so we do not need to
use _do. This vectorizer is then assigned to 'vectorizer'. Because iterating
through dictionaries in Python3.6 preserves the order of the keys in which they
were declared, we can apply the fit function to the vectorizer on the texts,
assign that to the 'vectorizer' key. We need this instance of the vectorizer to
run the classifier for unknown texts.
After this, we apply the 'transform' to convert the texts into a training matrix
keyed by 'X', whose rows are texts and whose columns are words.
_a('classifier',(Classifier().fit,_['X'],y)),
Finally, we can build a classifier. _a, or _assoc, means we are adding a
key-value pair to the previous dictionary. This will be a new instance of our
Classifier, which is trained through the fit function on the text-word matrix 'X'
and the labels vector y.
_d('X'),
Since we don't need the X matrix anymore, we delete it from the returned JSON,
which now only contains 'vectorizer' and 'classifier', the two things we will
need to classify unknown texts.
'''
vectorizer=Vectorizer()
return p( texts,
{'vectorizer':vectorizer.fit,
'X':vectorizer.transform},
_a('classifier',(Classifier().fit,_['X'],y)),
_d('X'),
)
'''
We train the model in a global variable containing our vectorizer and classifier.
This use of global variables is only used for microservices, by the way.
Here is a line-by-line description:
sys.argv[1],
open,
Open the file.
read,
We build a csv reader with the above-defined 'read' function, which builds a csv reader
with a '|' delimiter. I chose this delimeter because the texts often have commas.
list,
Because csv.reader is a generator, it cannot be accessed twice, so I cast it to a list. This list is a list of 2-element lists, of the form [label,text], where label is a
string for the label ('1' or '-1'), and text is a string for the training text. So an
example of this would be ['1','free herbal viagra buy now'].
(train_classifier,[_1],[(float,_0)])
This is a lambda which calls the 'train_classifier' function on two arguments, the first being
a list of texts, the second being a list of numerical labels.
We know that the incoming argument is a list of 2-element lists, so [_1] is a map,
which goes through this list - [] - and builds a new list containing only the second
element of each 2-element list, referenced by _1.
With the first elements of the 2-element lists, we must extract the first element and
cast it to a float. In [(float,_0)], the [] specifies a map over the list of
2-element lists. (float,_0) specifies we are accessing the first element of the
2-element list ('1' or '-1'), and calls the float function on it, to cast it to a
float. If we do not cast it to a float, sklearn will not be able to process it as
a label.
'''
MODEL=p( sys.argv[1],
open,
read,
list,
(train_classifier,[_1],[(float,_0)]),
)
app = Flask(__name__)
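# The 'classify' route described in the module docstring was truncated from
# this excerpt; a minimal sketch (the request/response field names are
# assumptions):
@app.route('/classify', methods=['POST'])
def classify():
    texts = request.json['texts']
    X = MODEL['vectorizer'].transform(texts)
    return jsonify(MODEL['classifier'].predict(X).tolist())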
if __name__=='__main__':
app.run(host='0.0.0.0',port=10004,debug=True)
| 36.739336
| 172
| 0.68434
|
8da9192128d87d058ba7b763d377c653bfe2eb10
| 2,657
|
py
|
Python
|
ida_plugin/uefi_analyser.py
|
fengjixuchui/UEFI_RETool
|
72c5d54c1dab9f58a48294196bca5ce957f6fb24
|
[
"MIT"
] | 240
|
2019-03-12T21:28:06.000Z
|
2021-02-09T16:20:09.000Z
|
ida_plugin/uefi_analyser.py
|
fengjixuchui/UEFI_RETool
|
72c5d54c1dab9f58a48294196bca5ce957f6fb24
|
[
"MIT"
] | 10
|
2019-09-09T08:38:35.000Z
|
2020-11-30T15:19:30.000Z
|
ida_plugin/uefi_analyser.py
|
fengjixuchui/UEFI_RETool
|
72c5d54c1dab9f58a48294196bca5ce957f6fb24
|
[
"MIT"
] | 53
|
2019-03-16T06:54:18.000Z
|
2020-12-23T06:16:38.000Z
|
# SPDX-License-Identifier: MIT
import os
import idaapi
import idautils
from PyQt5 import QtWidgets
from uefi_analyser import dep_browser, dep_graph, prot_explorer, ui
AUTHOR = "yeggor"
VERSION = "1.2.0"
NAME = "UEFI_RETool"
WANTED_HOTKEY = "Ctrl+Alt+U"
HELP = "This plugin performs automatic analysis of the input UEFI module"
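# `UefiAnalyserPlugin` is defined in the part of this module omitted from this
# excerpt; a minimal idaapi plugin wrapper consistent with the metadata above
# (the method bodies are assumptions):
class UefiAnalyserPlugin(idaapi.plugin_t):
    flags = idaapi.PLUGIN_KEEP
    comment = HELP
    help = HELP
    wanted_name = NAME
    wanted_hotkey = WANTED_HOTKEY

    def init(self):
        return idaapi.PLUGIN_KEEP

    def run(self, arg):
        # the full plugin launches the analyser/explorer UI from here
        pass

    def term(self):
        pass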
def PLUGIN_ENTRY():
try:
return UefiAnalyserPlugin()
except Exception as err:
import traceback
print(f"[{NAME} error] {str(err)}\n{traceback.format_exc()}")
| 26.04902
| 73
| 0.616861
|