blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8a10050f70298d563c999770fddf230090176a46
|
7fd1d9e5ab08a2623d8b57379ad3bcac99723c32
|
/pretty_print.py
|
365f3d512f07f0ca9724a8d85d9b49b3dd3e35d8
|
[] |
no_license
|
siderealsasquatch/udacity-bikeshare
|
4bf78089d5b87f164aacd59bbea4e4765829d20d
|
0d360b5ab363eaebff0e34583d842d052df5a051
|
refs/heads/master
| 2021-04-03T08:24:02.301492 | 2018-03-12T08:22:16 | 2018-03-12T08:22:16 | 124,857,263 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,684 |
py
|
#
# pretty_print.py - Contains the PrettyPrint class that takes the stats from
# DataStats objects and displays them in a pleasing manner.
#
import datetime as dt
class PrettyPrint:
'''
A class that handles displaying the stats from a DataStats object.
'''
def __init__(self):
'''
Initialize internal variables that keep track of filter options.
'''
self._city_name = None
self._filter_mode = None
self._filter_by = None
def _fancy_header_main(self, header_strings):
'''
Helper method to create fancy borders for the main header.
'''
longest_string = max([len(string) for string in header_strings])
header_strings_new = header_strings
if len(header_strings) == 1:
header_strings_new.append('Unfiltered')
header_strings_new.append(' ')
num_strings = len(header_strings_new)
for i, string in enumerate(header_strings_new):
if i == 0:
print('#' * (longest_string + 4))
print('#{s:^{fill}}#'.format(s=string,
fill=longest_string+2))
elif i == (num_strings-1):
print(('#' * (longest_string + 4)) + '\n')
else:
if i == 1:
print('#{s:-^{fill}}#'.format(s='-',
fill=longest_string+2))
print('#{s:^{fill}}#'.format(s=string,
fill=longest_string+2))
def _fancy_header_stat_group(self, stat_group):
'''
Helper method to create nice headers for each group of statistics.
'''
header_len = len(stat_group)
border_edge = '=' * header_len
print(stat_group)
print(border_edge)
def _print_stats_from_dict(self, stat_dict):
'''
Helper method to print out the statistics for every method except the
trip duration stats.
'''
non_none_dict = {k: v for k, v in stat_dict.items() if v is not None}
non_none_dict_len = len(non_none_dict)
for i, stats in enumerate(non_none_dict.items()):
time, stat = stats
if i == 0:
if non_none_dict_len == 1:
print("\n{}: {}\n".format(time, stat))
else:
print("\n{}: {}".format(time, stat), end=' | ')
elif i == (non_none_dict_len - 1):
print("{}: {}\n".format(time, stat))
else:
print("{}: {}".format(time, stat), end=' | ')
def main_header(self):
'''
Print a header displaying the current filter options: city, filter mode,
and the month or day (or both) depending on the filter mode.
'''
str_title = 'Statistics for {}'.format(self._city_name)
all_filter_str = [str_title]
if self._filter_mode:
str_filter_header = 'Filtered by'
str_filter_comp = []
if self._filter_mode == 'd':
month, day = self._filter_by
str_filter_comp.append('Month: {}'.format(month))
str_filter_comp.append('Day: {}'.format(day))
else:
str_filter_comp.append('Month: {}'.format(self._filter_by))
all_filter_str = [str_title, str_filter_header, *str_filter_comp]
self._fancy_header_main(all_filter_str)
def get_filter_options(self, city_name, filter_mode=None, filter_by=None):
'''
Get the current filter options and assign them to the proper internal
variables.
Parameters
city_name: Name of city as a string. Should match the names of one
of the cities stored in a DataStats object.
filter_mode: 'm' for month, 'd' for day, and None to forgo
filtering.
filter_by: Name of the month as a string for filter mode 'm', a list
containing the name of the month and day of week for
filter mode 'd'. None if data was not filtered.
'''
self._city_name = city_name
self._filter_mode = filter_mode
self._filter_by = filter_by
def show_start_time_stats(self, start_time_stats=None):
'''
Display the start time statistics using the current filter options.
Parameters
start_time_stats: Dictionary containing the statistics pertaining to
start times.
'''
header = 'Popular Month, Day, and Hour for Start Time'
if start_time_stats:
# Convert 'Hour' int to string
start_time_stats_time_format = start_time_stats
start_time_stats_time_format['Hour'] = '{}:00'.format(
start_time_stats_time_format['Hour'])
# Remove parts of the header string depending on how the data was
# filtered
if not start_time_stats_time_format['Month']:
header = header.replace(',', '').replace(' Month', '')
if not start_time_stats_time_format['Weekday']:
header = header.replace(' Day and', '')
# Print new header string
self._fancy_header_stat_group(header)
self._print_stats_from_dict(start_time_stats_time_format)
else:
self._fancy_header_stat_group(header)
print("\nThere was no data for these particular statistics.\n")
def show_stations_stats(self, stations_stats=None):
'''
Display the popular start and end stations for the current filter
options.
Parameters
stations_stats: Dictionary containing statistics pertaining to
start and end stations.
'''
header = 'Popular Start and End Stations'
self._fancy_header_stat_group(header)
if stations_stats:
self._print_stats_from_dict(stations_stats)
else:
print("\nThere was no data for these particular statistics.\n")
def show_trip_stats(self, trip_stats=None):
'''
Display the most popular trip for the current filter options.
Parameters
trip_stats: Dictionary containing statistics pertaining to full
trips.
'''
header = 'Most Popular Trip'
self._fancy_header_stat_group(header)
if trip_stats:
self._print_stats_from_dict(trip_stats)
else:
print("\nThere was no data for these particular statistics.\n")
def show_trip_duration_stats(self, trip_duration_stats=None):
'''
Display the total and average trip duration for the current filter
options.
Parameters:
trip_duration_stats: Dictionary containing statistics pertaining to
trip duration.
'''
header = 'Total and Average Trip Duration'
self._fancy_header_stat_group(header)
if trip_duration_stats:
trip_dur_len = len(trip_duration_stats)
for i, trip_dat in enumerate(trip_duration_stats.items()):
dur_type, dur_dat_dict = trip_dat
dur_string = "{}\t:: ".format(dur_type)
if i == 0:
dur_string = "\n" + dur_string
dur_dat_dict_len = len(dur_dat_dict)
for j, dur_dat in enumerate(dur_dat_dict.items()):
time_category, time = dur_dat
if j == (dur_dat_dict_len - 1):
dur_string += "{}: {}".format(time_category, time)
else:
dur_string += "{}: {}, ".format(time_category, time)
if i == (trip_dur_len - 1):
dur_string = dur_string + '\n'
print(dur_string)
else:
print("\nThere was no data for these particular statistics.\n")
def show_user_count_stats(self, user_count_stats=None):
'''
Display totals for each user type for the current filter options.
Parameters
user_count_stats: Dictionary containing totals for each user type.
'''
header = 'Counts of each User Type'
self._fancy_header_stat_group(header)
if user_count_stats:
self._print_stats_from_dict(user_count_stats)
else:
print("\nThere was no data for these particular statistics.\n")
def show_gender_count_stats(self, gender_count_stats=None):
'''
Display totals for each gender for the current filter options.
Parameters
gender_count_stats: Dictionary containing totals for each gender.
'''
header = 'Counts of each Gender'
self._fancy_header_stat_group(header)
if gender_count_stats:
self._print_stats_from_dict(gender_count_stats)
else:
print("\nThere was no data for these particular statistics.\n")
def show_birth_year_stats(self, birth_year_stats=None):
'''
Display latest, earliest, and most popular birth years for the current
filter options.
Parameters
birth_year_stats: Dictionary containing statistics related to birth
years.
'''
header = 'Latest, Earliest, and most Popular Birth Years'
self._fancy_header_stat_group(header)
if birth_year_stats:
self._print_stats_from_dict(birth_year_stats)
else:
print("\nThere was no data for these particular statistics.\n")
|
[
"fnugraha@gmail.com"
] |
fnugraha@gmail.com
|
954e1a81cae9daf62bf9cb9cf0f83299c3e8a038
|
8b942cbd6a0da0a61f68c468956ba318c7f1603d
|
/dynamic_programming/0053_maximum_subarray.py
|
4ed3786dd1ebedf430bdbe2dfaceed01c1a79c9e
|
[
"MIT"
] |
permissive
|
MartinMa28/Algorithms_review
|
080bd608b0e0c6f39c45f28402e5181791af4766
|
3f2297038c00f5a560941360ca702e6868530f34
|
refs/heads/master
| 2022-04-13T03:56:56.932788 | 2020-04-06T03:41:33 | 2020-04-06T03:41:33 | 203,349,402 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 498 |
py
|
class Solution:
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# Each slot means the sum of the max-subarray that ends at this index
dp = [float('-inf')] * len(nums)
"""
dp[i] = max((nums[i], dp[i - 1] + nums[i]))
"""
dp[0] = nums[0]
for i in range(1, len(nums)):
dp[i] = max((nums[i], dp[i - 1] + nums[i]))
return max(dp)
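# Editor's note: a small usage sketch (not in the original file). For this classic
# LeetCode 53 example the expected answer is 6, from the subarray [4, -1, 2, 1].
if __name__ == '__main__':
    print(Solution().maxSubArray([-2, 1, -3, 4, -1, 2, 1, -5, 4]))  # -> 6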
|
[
"1135485226@qq.com"
] |
1135485226@qq.com
|
591a8001dd6fb6074150b61ebb6e5a9e83f44891
|
aee769f44a00d08a885ae7487191e6ad825a5b82
|
/lstm_py/mtop1.py
|
9e834fa9607d436671d68258169bb48c923bee0c
|
[] |
no_license
|
mspr666/Human-Action-Recognition-from-Skeleton-Data
|
efaccb58e0856c2de71486ae78db2db385bb5b6d
|
0a2594accd9b3d4d47ea99222a41d7cc5ace9841
|
refs/heads/master
| 2022-01-17T00:54:24.224505 | 2019-05-14T13:43:31 | 2019-05-14T13:43:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,425 |
py
|
import numpy as np
import scipy.io
from PIL import Image
import cv2
import os,os.path,shutil
import re
from scipy.interpolate import interp1d
##save file size(3,60,25)
timestep_size=60
def find_martrix_min_value(data_matrix):
'''
Purpose: find the minimum value of the matrix
'''
new_data=[]
for i in range(len(data_matrix)):
new_data.append(min(data_matrix[i]))
print('data_matrix minimum value:', min(new_data))
return min(new_data)
def find_martrix_max_value(data_matrix):
'''
Purpose: find the maximum value of the matrix
'''
new_data=[]
for i in range(len(data_matrix)):
new_data.append(max(data_matrix[i]))
print('data_matrix maximum value:', max(new_data))
return max(new_data)
# get positions of all joints
# transform .mat (all joints) into wanted points and reference (.npz)
def mtop( filename,savepath):
point = scipy.io.loadmat(filename)  # read the .mat file
#point=np.load("whole1.npz")
wx = point['x']  ## x coordinates of all joints
wy = point['y']  ## y coordinates
wz = point['z']  ## z coordinates
w=np.vstack((wx,wy,wz)).reshape(3,-1,25) #left arm, right arm,torso, left leg, right leg
center=w[:,:,0]
center=center.repeat(25)
center=center.reshape(3,-1,25)
#print(center)
w=w-center
if w.shape[1]>60 :
file_new=filename[filename.find('S'):filename.find('.mat')]
#print(file_new)
np.save(savepath+file_new,w)
def eachFile(folder):
allFile = os.listdir(folder)
fileNames = []
for file in allFile:
fullPath = os.path.join(folder, file)
fileNames.append(fullPath)
return fileNames
# main part
for i in range(60,61):
srcFolder='./mat_f/'+str(i)
savepath='./CV_40/'
fileNames =eachFile(srcFolder)
for fileName in fileNames:
print(fileName)
#print(int(fileName.find('C')))
if(int(fileName[fileName.find('C')+1:fileName.find('C')+4])==1):
savepath='./CV_40/test/'
else:
savepath='./CV_40/train/'
mtop(fileName,savepath)
srcFolder='./CV/train'
fileNames =eachFile(srcFolder)
trainpath='./CS/train/'
testpath='./CS/test/'
for fileName in fileNames:
subject=int(fileName[fileName.find('S')+1:fileName.find('S')+4])
a=[1, 2, 4, 5, 8, 9, 13, 14, 15,
16, 17, 18, 19, 25, 27, 28, 31, 34, 35, 38]
if subject in a:
newname=trainpath+fileName[fileName.find('S'):]
else:
newname=testpath+fileName[fileName.find('S'):]
shutil.copyfile(fileName,newname)
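# --- Editor's sketch (not part of the original script; assumes Python 3) ---
# Illustrates the .mat layout mtop() expects: 'x', 'y', 'z' arrays of shape
# (num_frames, 25 joints). All file and directory names here are hypothetical;
# call _demo_mtop() manually to try it.
def _demo_mtop():
    demo_dir = 'mtop_demo'
    if not os.path.isdir(demo_dir):
        os.makedirs(demo_dir)
    frames = 70  # must exceed 60 or mtop() saves nothing
    data = {axis: np.random.rand(frames, 25) for axis in ('x', 'y', 'z')}
    mat_path = os.path.join(demo_dir, 'S001C002P001R001A001.mat')
    scipy.io.savemat(mat_path, data)
    mtop(mat_path, demo_dir + os.sep)
    print(os.listdir(demo_dir))  # the centred (3, frames, 25) array is saved as .npy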
|
[
"noreply@github.com"
] |
mspr666.noreply@github.com
|
14d21c43b94c2a3d1c3ef51fa6289ed09bb9e429
|
d59435e4cbf38a2f3ea1bd09cba5e945a347ce05
|
/quizserv.py
|
d99ae3844bd4df427602e9e65d9997c36a39ed1c
|
[] |
no_license
|
akshatj427/Python_codes
|
c943a9d42bae7e43707dd67b7c93943fa3385c3f
|
5abbd193532e90f64ddbdcff1191b35879aa1b82
|
refs/heads/master
| 2021-01-09T20:37:52.406567 | 2016-07-20T18:50:54 | 2016-07-20T18:50:54 | 63,805,253 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 623 |
py
|
import socket, pickle
HOST = 'localhost'
PORT = 5006
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(5)
conn, addr = s.accept()
print('Connected by', addr)
work = []
while 1:
data1 = conn.recv(2048)
print("receiving")
data2 = conn.recv(2048)
print("receiving")
data11 = pickle.loads(data1) # decode
data21 = pickle.loads(data2)
print(data11)
print(data21)
for x in range(4):
work.append(data11[x])
work.append(data21[x])
print("sending:", work)
datase = pickle.dumps(work) # encode
conn.send(datase)
conn.close()
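# --- Editor's sketch: a matching client (hypothetical; not in the original repo) ---
# The server above issues two recv() calls and replies with one pickled list that
# interleaves the two 4-item sequences it received. Run the server first, then call
# demo_client() from another process (reusing HOST and PORT from above). The sleep
# keeps the two pickles in separate TCP segments, which this two-recv protocol
# fragilely depends on.
def demo_client():
    import time
    c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    c.connect((HOST, PORT))
    c.send(pickle.dumps(['q1', 'q2', 'q3', 'q4']))
    time.sleep(0.2)
    c.send(pickle.dumps(['a1', 'a2', 'a3', 'a4']))
    print(pickle.loads(c.recv(4096)))  # -> ['q1', 'a1', 'q2', 'a2', ...]
    c.close()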
|
[
"akshatjain427@gmail.com"
] |
akshatjain427@gmail.com
|
a399301c523887d5bcc02002c2d2c1ac09e638a1
|
07cf86733b110a13224ef91e94ea5862a8f5d0d5
|
/permutations/permutations.py
|
9adc376b32f889d512681c06e31fc88b05902f97
|
[] |
no_license
|
karsevar/Code_Challenge_Practice
|
2d96964ed2601b3beb324d08dd3692c3d566b223
|
88d4587041a76cfd539c0698771420974ffaf60b
|
refs/heads/master
| 2023-01-23T17:20:33.967020 | 2020-12-14T18:29:49 | 2020-12-14T18:29:49 | 261,813,079 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,179 |
py
|
# First attempt: most test cases didn't pass. Perhaps I misread the permutation
# requirements for this problem.
class Solution:
def permute(self, nums):
# create a permutations array that will hold all the possible
# permutations
# create a recursive function that will have a start argument, nums argument,
# and a permutation argument
# if permutation is equal to the length len(nums) and not none
# add permutation to the permutations array
# if the permutation is shorter than len(nums)
# have a for loop that will start at range(start, len(nums) + 1)
# recursively call the recursive function
permutations = []
nums_length = len(nums)
def permutation_helper(nums, nums_length, permutation=None, variable_exclude=None):
if permutation != None and len(permutation) == nums_length:
permutations.append(permutation)
elif permutation == None or len(permutation) < nums_length:
for number in nums:
if permutation == None:
new_permutation = []
variable_exclude = number
new_permutation.append(number)
permutation_helper(nums, nums_length, new_permutation, variable_exclude)
elif permutation != None and variable_exclude != number and number != permutation[-1]:
new_permutation = permutation[:]
new_permutation.append(number)
permutation_helper(nums, nums_length, new_permutation, variable_exclude)
permutation_helper(nums, nums_length)
return permutations
class OfficialSolution:
def permute(self, nums):
# create a permutations array that will hold all the possible
# permutations
# create a recursive function that will have a start argument, nums argument,
# and a permutation argument
# if permutation is equal to the length len(nums) and not none
# add permutation to the permutations array
# if the permutation is shorter than len(nums)
# have a for loop that will start at range(start, len(nums) + 1)
# recursively call the recursive function
permutations = []
nums_length = len(nums)
def permutation_helper(index, perm, nums_length):
if index == len(perm):
permutations.append(list(perm))
for i in range(index, len(perm)):
print('permutation', perm)
print('index', index)
perm[index], perm[i] = perm[i], perm[index]
permutation_helper(index+1, perm, nums_length)
perm[index], perm[i] = perm[i], perm[index]
permutation_helper(0, nums, nums_length)
return permutations
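# Editor's note: quick check of the swap-based OfficialSolution (not in the original
# file); the first Solution is the author's noted-as-buggy attempt, so it is skipped.
if __name__ == '__main__':
    print(OfficialSolution().permute([1, 2, 3]))
    # -> [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 2, 1], [3, 1, 2]]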
|
[
"masonkarsevar@gmail.com"
] |
masonkarsevar@gmail.com
|
257aa8d1f68e6c7580b34aa3188372ce47c07185
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/nos/v7_2_0/rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/__init__.py
|
d3f779d9b38079c6a063f8e88fe16bb0211a6131
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 17,029 |
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class maximum_paths(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-rbridge - based on the path /rbridge-id/router/router-bgp/address-family/ipv4/ipv4-unicast/default-vrf/af-common-cmds-holder/maximum-paths. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__load_sharing_value','__ebgp','__ibgp','__use_load_sharing',)
_yang_name = 'maximum-paths'
_rest_name = 'maximum-paths'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__ibgp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="ibgp", rest_name="ibgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Number of IBGP paths for load sharing', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='ibgp-paths', is_config=True)
self.__load_sharing_value = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="load-sharing-value", rest_name="load-sharing-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='uint32', is_config=True)
self.__ebgp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="ebgp", rest_name="ebgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Number of EBGP paths for load sharing', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='ebgp-paths', is_config=True)
self.__use_load_sharing = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="use-load-sharing", rest_name="use-load-sharing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Number of load-sharing paths: using load-sharing value'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'rbridge-id', u'router', u'router-bgp', u'address-family', u'ipv4', u'ipv4-unicast', u'default-vrf', u'af-common-cmds-holder', u'maximum-paths']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'rbridge-id', u'router', u'bgp', u'address-family', u'ipv4', u'unicast', u'maximum-paths']
def _get_load_sharing_value(self):
"""
Getter method for load_sharing_value, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/load_sharing_value (uint32)
"""
return self.__load_sharing_value
def _set_load_sharing_value(self, v, load=False):
"""
Setter method for load_sharing_value, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/load_sharing_value (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_load_sharing_value is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_load_sharing_value() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="load-sharing-value", rest_name="load-sharing-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='uint32', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """load_sharing_value must be of a type compatible with uint32""",
'defined-type': "uint32",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="load-sharing-value", rest_name="load-sharing-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='uint32', is_config=True)""",
})
self.__load_sharing_value = t
if hasattr(self, '_set'):
self._set()
def _unset_load_sharing_value(self):
self.__load_sharing_value = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="load-sharing-value", rest_name="load-sharing-value", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='uint32', is_config=True)
def _get_ebgp(self):
"""
Getter method for ebgp, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/ebgp (ebgp-paths)
"""
return self.__ebgp
def _set_ebgp(self, v, load=False):
"""
Setter method for ebgp, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/ebgp (ebgp-paths)
If this variable is read-only (config: false) in the
source YANG file, then _set_ebgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ebgp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="ebgp", rest_name="ebgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Number of EBGP paths for load sharing', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='ebgp-paths', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ebgp must be of a type compatible with ebgp-paths""",
'defined-type': "brocade-bgp:ebgp-paths",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="ebgp", rest_name="ebgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Number of EBGP paths for load sharing', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='ebgp-paths', is_config=True)""",
})
self.__ebgp = t
if hasattr(self, '_set'):
self._set()
def _unset_ebgp(self):
self.__ebgp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="ebgp", rest_name="ebgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Number of EBGP paths for load sharing', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='ebgp-paths', is_config=True)
def _get_ibgp(self):
"""
Getter method for ibgp, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/ibgp (ibgp-paths)
"""
return self.__ibgp
def _set_ibgp(self, v, load=False):
"""
Setter method for ibgp, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/ibgp (ibgp-paths)
If this variable is read-only (config: false) in the
source YANG file, then _set_ibgp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ibgp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="ibgp", rest_name="ibgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Number of IBGP paths for load sharing', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='ibgp-paths', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ibgp must be of a type compatible with ibgp-paths""",
'defined-type': "brocade-bgp:ibgp-paths",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="ibgp", rest_name="ibgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Number of IBGP paths for load sharing', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='ibgp-paths', is_config=True)""",
})
self.__ibgp = t
if hasattr(self, '_set'):
self._set()
def _unset_ibgp(self):
self.__ibgp = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..32']}), is_leaf=True, yang_name="ibgp", rest_name="ibgp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Number of IBGP paths for load sharing', u'cli-full-command': None}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='ibgp-paths', is_config=True)
def _get_use_load_sharing(self):
"""
Getter method for use_load_sharing, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/use_load_sharing (empty)
"""
return self.__use_load_sharing
def _set_use_load_sharing(self, v, load=False):
"""
Setter method for use_load_sharing, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/default_vrf/af_common_cmds_holder/maximum_paths/use_load_sharing (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_use_load_sharing is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_use_load_sharing() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="use-load-sharing", rest_name="use-load-sharing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Number of load-sharing paths: using load-sharing value'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """use_load_sharing must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="use-load-sharing", rest_name="use-load-sharing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Number of load-sharing paths: using load-sharing value'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)""",
})
self.__use_load_sharing = t
if hasattr(self, '_set'):
self._set()
def _unset_use_load_sharing(self):
self.__use_load_sharing = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="use-load-sharing", rest_name="use-load-sharing", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Number of load-sharing paths: using load-sharing value'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='empty', is_config=True)
load_sharing_value = __builtin__.property(_get_load_sharing_value, _set_load_sharing_value)
ebgp = __builtin__.property(_get_ebgp, _set_ebgp)
ibgp = __builtin__.property(_get_ibgp, _set_ibgp)
use_load_sharing = __builtin__.property(_get_use_load_sharing, _set_use_load_sharing)
_pyangbind_elements = {'load_sharing_value': load_sharing_value, 'ebgp': ebgp, 'ibgp': ibgp, 'use_load_sharing': use_load_sharing, }
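# Editor's sketch (not part of the autogenerated module): pyangbind containers such
# as this one are normally used by instantiating the class and assigning to the
# generated properties, roughly like the (hypothetical) snippet below:
#
#   mp = maximum_paths()
#   mp.ebgp = 8                # validated against the 1..32 range above
#   mp.use_load_sharing = True
#   print(mp._path())          # the YANG path this container maps to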
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
588ff9f9d1fd2b83d89b92f998ad98b57b5b6142
|
ec513ac551fc0bbb6c8af5b30330445bf52c6c7f
|
/location_monitor/src/location_monitor_node.py
|
e907ab747f1eb280bbd66076673f3279e2518249
|
[] |
no_license
|
ChuChuIgbokwe/me495_tutorials
|
b88c4833f35e50b51a4ccaa1a4bae5a1916e12bf
|
b03e74605cf469d818c4533f3d563622e7d14552
|
refs/heads/master
| 2020-04-06T07:06:08.360123 | 2016-09-18T08:46:01 | 2016-09-18T08:46:01 | 64,951,342 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,896 |
py
|
#!/usr/bin/env python
# # -*- coding: utf-8 -*-
# #Created by Chukwunyere Igbokwe on July 27, 2016 by 2:23 PM
# import rospy
# import math
# from nav_msgs.msg import Odometry
# from location_monitor.msg import LandmarkDistance
# def distance(x1, y1, x2, y2):
# xd = x1 - x2
# yd = y1 - y2
# return math.sqrt(xd*xd + yd*yd)
# class LandmarkMonitor(object):
# def __init__(self,landmark_pub, landmarks):
# self._landmark_pub = landmark_pub
# self._landmarks = landmarks
# def callback(self,msg):
# x = msg.pose.pose.position.x
# y = msg.pose.pose.position.y
# # rospy.loginfo("x: {}, y: {}".format(x,y))
# closest_name = None
# closest_distance = None
# for l_name,l_x, l_y in self._landmarks:
# dist = distance(x, y, l_x, l_y)
# if closest_distance is None or dist < closest_distance:
# closest_name = l_name
# closest_distance = dist
# ld = LandmarkDistance()
# ld.name = closest_name
# ld.distance = closest_distance
# self._landmark_pub.publish(ld)
# if closest_distance < 0.5:
# rospy.loginfo("I'm near the {}".format(closest_name))
# # rospy.loginfo("closest : {}".format(closest_name))
# def main():
# rospy.init_node('location_monitor_node')
# landmarks = []
# landmarks.append(("Cube", 0.31, -0.99));
# landmarks.append(("Dumpster", 0.11, -2.42));
# landmarks.append(("Cylinder", -1.14, -2.88));
# landmarks.append(("Barrier", -2.59, -0.83));
# landmarks.append(("Bookshelf", -0.09, 0.53));
# landmark_pub = rospy.Publisher("closest_landmark", LandmarkDistance, queue_size=10)
# monitor = LandmarkMonitor(landmark_pub,landmarks)
# rospy.Subscriber("/odom", Odometry, monitor.callback)
# try:
# rospy.spin()
# except KeyboardInterrupt:
# print("Shutting down")
# if __name__ == '__main__':
# main()
#your python node and package/message should always have different names
import rospy
from nav_msgs.msg import Odometry
import math
landmarks = []
landmarks.append(("Cube",0.31,-0.99));
landmarks.append(("Dumpster", 0.11,-2.42));
landmarks.append(("Cylinder", -1.14,-2.88));
landmarks.append(("Barrier", -2.59,-0.83));
landmarks.append(("Bookshelf", -0.09, 0.53));
def distance(x1, y1, x2, y2):
xd = x1 - x2
yd = y1 - y2
return math.sqrt(xd*xd + yd*yd)
def callback(msg):
x = msg.pose.pose.position.x
y = msg.pose.pose.position.y
# rospy.loginfo("x: {}, y: {}".format(x,y))
closest_name = None
closest_distance = None
for l_name,l_x, l_y in landmarks:
dist = distance(x, y, l_x, l_y)
if closest_distance is None or dist < closest_distance:
closest_name = l_name
closest_distance = dist
rospy.loginfo("Landmark: {} || Distance: {}".format(closest_name,closest_distance))
def main():
rospy.init_node('location_monitor')
rospy.Subscriber("/odom", Odometry, callback)
rospy.spin()
if __name__ == '__main__':
main()
|
[
"chukwunyereigbokwe2015@u.northwestern.edu"
] |
chukwunyereigbokwe2015@u.northwestern.edu
|
eff583088563f012c5c2fc7c5e24f3d09b7a51aa
|
8b962051e578f2690445db71984898dfe53c72d0
|
/lambda/lambda_handler.py
|
7abd251f3c47e3ff079fe921269dd89396e3a7e3
|
[] |
no_license
|
kgisl/alexa-airplane-spotter
|
7dd3c9f323e674ce9e234daeb1bb8397ee2d5e3e
|
39bcdad829495797598a89c87d5463dad3d60aaf
|
refs/heads/master
| 2020-12-02T06:37:21.629670 | 2017-06-21T00:47:24 | 2017-06-21T00:47:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,173 |
py
|
from __future__ import print_function
from lambda_settings import app_id, speech_endpoint
import requests
import json
def is_valid_app(event):
return event['session']['application']['applicationId'] == app_id
def get_output_speech():
r = requests.get(speech_endpoint)
output = json.loads(r.text)['response'].encode('ascii')
return output
def get_response():
return {
"version": "1.0",
"response": {
"outputSpeech": {
"type": "PlainText",
"text": get_output_speech()
},
"card": {
"content": "Planes rule!",
"title": "Plane Info",
"type": "Simple"
},
"reprompt": {
"outputSpeech": {
"type": "PlainText",
"text": ""
}
},
"shouldEndSession": 'false'
},
"sessionAttributes": {}
}
def lambda_handler(event, context):
if not is_valid_app(event):
print(event['session']['application']['applicationId'])
raise ValueError('Invalid Application ID')
return get_response()
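# --- Editor's sketch: invoking the handler locally (hypothetical event) ---
# Assumes lambda_settings provides a reachable speech_endpoint plus the matching
# app_id; the context argument is unused by this handler.
if __name__ == '__main__':
    sample_event = {'session': {'application': {'applicationId': app_id}}}
    print(lambda_handler(sample_event, None))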
|
[
"nsypteras@gmail.com"
] |
nsypteras@gmail.com
|
676a328a27e04da5cef3ca29ec3a68efa764656f
|
53fc3f163a02b0f06df05ad385ad175cc057e10a
|
/tests/storage/backends/__init__.py
|
070bca911a8e5202ee6f64804bd2daed29ed170c
|
[
"MIT"
] |
permissive
|
Kotaimen/stonemason
|
15284d7ca800186b9972d176ff1232ef7f0372e8
|
ebbfab294a9e412cc7d04ea1dcb163e45c0de5d2
|
refs/heads/develop
| 2021-12-10T09:57:46.453283 | 2018-02-15T10:21:35 | 2018-02-15T10:21:35 | 28,327,740 | 5 | 1 | null | 2015-11-10T02:25:45 | 2014-12-22T06:44:58 |
Python
|
UTF-8
|
Python
| false | false | 69 |
py
|
# -*- encoding: utf-8 -*-
__author__ = 'ray'
__date__ = '10/27/15'
|
[
"gliese.q@gmail.com"
] |
gliese.q@gmail.com
|
e558ab87f49ba00d98d7e8b8b17a2aa5cf3b37b4
|
d4420fd262ec96662e0ca4de22b8ca21e160ab7e
|
/app/blogengen/manage.py
|
35db533ba2aa964ffbce3f02eae4cb300c1966c3
|
[] |
no_license
|
Burnashev-d/Django
|
c6e5a7111adb3b1bf77d49617b3f1d3847916710
|
858fdc83e82404bbea5f7a09388b54e4b0dbcb8e
|
refs/heads/master
| 2020-03-31T08:09:56.968074 | 2018-10-08T08:54:22 | 2018-10-08T08:54:22 | 152,048,466 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 807 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "blogengen.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[
"burnashev_d@iuca.kg"
] |
burnashev_d@iuca.kg
|
ce3939b31af53ec8878573c1cb3b1fed53b2672e
|
fb081aa5746bf65511aa8d7f6cca9bf1bbd959bb
|
/day5/day5.py
|
3ca9dc3876d6d862a8bee28e825de4e245e3d392
|
[] |
no_license
|
bronemos/aoc-2020
|
4d11faea956b3809499a3780b10f97d21ad01808
|
11d6ae058f8e4c79b10a543d770663ca2dbea1e1
|
refs/heads/master
| 2023-01-24T16:21:26.223648 | 2020-12-16T14:57:14 | 2020-12-16T14:57:14 | 318,328,196 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 266 |
py
|
with open('input5.txt', 'r') as f:
# pt1
print(max(seats := [int(x.strip().replace('F', '0').replace('B', '1').replace('L', '0').replace('R', '1'), 2) for x in f.readlines()]))
# pt 2
print((set(range(min(seats), max(seats))) - set(seats)).pop())
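# Editor's note (not in the original file): each boarding pass is read as a 10-bit
# binary seat ID. For the puzzle's documented example pass:
code = 'FBFBBFFRLR'.replace('F', '0').replace('B', '1').replace('L', '0').replace('R', '1')
assert int(code, 2) == 357  # row 44, column 5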
|
[
"spieglb@gmail.com"
] |
spieglb@gmail.com
|
6ae54d9ccc133969c04088f132d6ef7883c2e260
|
3d9825900eb1546de8ad5d13cae893eb0d6a9b14
|
/AutoWorkup/SEMTools/utilities/brains.py
|
a9f06b8bfcc070f02a886a1a7dbbda143a65d219
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
rtkarcher/BRAINSTools
|
20d69f96e6d5ca92adaeb06aa4fe6556b5e7b268
|
961135366450400409cece431423ed480855d34c
|
refs/heads/master
| 2021-01-15T08:53:48.961607 | 2013-06-26T19:09:34 | 2013-06-26T19:09:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 39,809 |
py
|
# -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class BRAINSConstellationModelerInputSpec(CommandLineInputSpec):
verbose = traits.Bool(desc=", Show more verbose output, ", argstr="--verbose ")
inputTrainingList = File(desc=", Setup file, giving all parameters for training up a template model for each landmark., ", exists=True, argstr="--inputTrainingList %s")
outputModel = traits.Either(traits.Bool, File(), hash_files=False, desc=", The full filename of the output model file., ", argstr="--outputModel %s")
saveOptimizedLandmarks = traits.Bool(desc=", Flag to make a new subject-specific landmark definition file in the same format produced by Slicer3 with the optimized landmark (the detected RP, AC, and PC) in it. Useful to tighten the variances in the ConstellationModeler., ", argstr="--saveOptimizedLandmarks ")
optimizedLandmarksFilenameExtender = traits.Str(desc=", If the trainingList is (indexFullPathName) and contains landmark data filenames [path]/[filename].fcsv , make the optimized landmarks filenames out of [path]/[filename](thisExtender) and the optimized version of the input trainingList out of (indexFullPathName)(thisExtender) , when you rewrite all the landmarks according to the saveOptimizedLandmarks flag., ", argstr="--optimizedLandmarksFilenameExtender %s")
resultsDir = traits.Either(traits.Bool, Directory(), hash_files=False, desc=", The directory for the results to be written., ", argstr="--resultsDir %s")
mspQualityLevel = traits.Int(desc=", Flag controls how aggressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", argstr="--mspQualityLevel %d")
rescaleIntensities = traits.Bool(desc=", Flag to turn on rescaling image intensities on input., ", argstr="--rescaleIntensities ")
trimRescaledIntensities = traits.Float(desc=", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", argstr="--trimRescaledIntensities %f")
rescaleIntensitiesOutputRange = InputMultiPath(
traits.Int, desc=", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", sep=",", argstr="--rescaleIntensitiesOutputRange %s")
BackgroundFillValue = traits.Str(desc="Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", argstr="--BackgroundFillValue %s")
writedebuggingImagesLevel = traits.Int(desc=", This flag controls if debugging images are produced. By default value of 0 is no images. Anything greater than zero will be increasing level of debugging images., ", argstr="--writedebuggingImagesLevel %d")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSConstellationModelerOutputSpec(TraitedSpec):
outputModel = File(desc=", The full filename of the output model file., ", exists=True)
resultsDir = Directory(desc=", The directory for the results to be written., ", exists=True)
class BRAINSConstellationModeler(SEMLikeCommandLine):
"""title: Generate Landmarks Model (BRAINS)
category: Utilities.BRAINS
description: Train up a model for BRAINSConstellationDetector
"""
input_spec = BRAINSConstellationModelerInputSpec
output_spec = BRAINSConstellationModelerOutputSpec
_cmd = " BRAINSConstellationModeler "
_outputs_filenames = {'outputModel': 'outputModel.mdl', 'resultsDir': 'resultsDir'}
class landmarksConstellationWeightsInputSpec(CommandLineInputSpec):
inputTrainingList = File(desc=", Setup file, giving all parameters for training up a Weight list for landmark., ", exists=True, argstr="--inputTrainingList %s")
inputTemplateModel = File(desc="User-specified template model., ", exists=True, argstr="--inputTemplateModel %s")
LLSModel = File(desc="Linear least squares model filename in HD5 format", exists=True, argstr="--LLSModel %s")
outputWeightsList = traits.Either(traits.Bool, File(), hash_files=False, desc=", The filename of a csv file which is a list of landmarks and their corresponding weights., ", argstr="--outputWeightsList %s")
class landmarksConstellationWeightsOutputSpec(TraitedSpec):
outputWeightsList = File(desc=", The filename of a csv file which is a list of landmarks and their corresponding weights., ", exists=True)
class landmarksConstellationWeights(SEMLikeCommandLine):
"""title: Generate Landmarks Weights (BRAINS)
category: Utilities.BRAINS
description: Train up a list of Weights for the Landmarks in BRAINSConstellationDetector
"""
input_spec = landmarksConstellationWeightsInputSpec
output_spec = landmarksConstellationWeightsOutputSpec
_cmd = " landmarksConstellationWeights "
_outputs_filenames = {'outputWeightsList': 'outputWeightsList.wts'}
class BRAINSTrimForegroundInDirectionInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Input image to trim off the neck (and also air-filling noise.)", exists=True, argstr="--inputVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output image with neck and air-filling noise trimmed isotropic image with AC at center of image.", argstr="--outputVolume %s")
directionCode = traits.Int(desc=", This flag chooses which dimension to compare. The sign lets you flip direction., ", argstr="--directionCode %d")
otsuPercentileThreshold = traits.Float(desc=", This is a parameter to FindLargestForegroundFilledMask, which is employed to trim off air-filling noise., ", argstr="--otsuPercentileThreshold %f")
closingSize = traits.Int(desc=", This is a parameter to FindLargestForegroundFilledMask, ", argstr="--closingSize %d")
headSizeLimit = traits.Float(desc=", Use this to vary from the command line our search for how much upper tissue is head for the center-of-mass calculation. Units are CCs, not cubic millimeters., ", argstr="--headSizeLimit %f")
BackgroundFillValue = traits.Str(desc="Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", argstr="--BackgroundFillValue %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSTrimForegroundInDirectionOutputSpec(TraitedSpec):
outputVolume = File(desc="Output image with neck and air-filling noise trimmed isotropic image with AC at center of image.", exists=True)
class BRAINSTrimForegroundInDirection(SEMLikeCommandLine):
"""title: Trim Foreground In Direction (BRAINS)
category: Utilities.BRAINS
description: This program will trim off the neck and also air-filling noise from the inputImage.
version: 0.1
documentation-url: http://www.nitrc.org/projects/art/
"""
input_spec = BRAINSTrimForegroundInDirectionInputSpec
output_spec = BRAINSTrimForegroundInDirectionOutputSpec
_cmd = " BRAINSTrimForegroundInDirection "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
class BRAINSLmkTransformInputSpec(CommandLineInputSpec):
inputMovingLandmarks = File(desc="Input Moving Landmark list file in fcsv, ", exists=True, argstr="--inputMovingLandmarks %s")
inputFixedLandmarks = File(desc="Input Fixed Landmark list file in fcsv, ", exists=True, argstr="--inputFixedLandmarks %s")
outputAffineTransform = traits.Either(traits.Bool, File(), hash_files=False, desc="The filename for the estimated affine transform, ", argstr="--outputAffineTransform %s")
inputMovingVolume = File(desc="The filename of input moving volume", exists=True, argstr="--inputMovingVolume %s")
inputReferenceVolume = File(desc="The filename of the reference volume", exists=True, argstr="--inputReferenceVolume %s")
outputResampledVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="The filename of the output resampled volume", argstr="--outputResampledVolume %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSLmkTransformOutputSpec(TraitedSpec):
outputAffineTransform = File(desc="The filename for the estimated affine transform, ", exists=True)
outputResampledVolume = File(desc="The filename of the output resampled volume", exists=True)
class BRAINSLmkTransform(SEMLikeCommandLine):
"""title: Landmark Transform (BRAINS)
category: Utilities.BRAINS
description:
This utility program estimates the affine transform to align the fixed landmarks to the moving landmarks, and then generate the resampled moving image to the same physical space as that of the reference image.
version: 1.0
documentation-url: http://www.nitrc.org/projects/brainscdetector/
"""
input_spec = BRAINSLmkTransformInputSpec
output_spec = BRAINSLmkTransformOutputSpec
_cmd = " BRAINSLmkTransform "
_outputs_filenames = {'outputResampledVolume': 'outputResampledVolume.nii', 'outputAffineTransform': 'outputAffineTransform.h5'}
class BRAINSMushInputSpec(CommandLineInputSpec):
inputFirstVolume = File(desc="Input image (1) for mixture optimization", exists=True, argstr="--inputFirstVolume %s")
inputSecondVolume = File(desc="Input image (2) for mixture optimization", exists=True, argstr="--inputSecondVolume %s")
inputMaskVolume = File(desc="Input label image for mixture optimization", exists=True, argstr="--inputMaskVolume %s")
outputWeightsFile = traits.Either(traits.Bool, File(), hash_files=False, desc="Output Weights File", argstr="--outputWeightsFile %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="The MUSH image produced from the T1 and T2 weighted images", argstr="--outputVolume %s")
outputMask = traits.Either(traits.Bool, File(), hash_files=False, desc="The brain volume mask generated from the MUSH image", argstr="--outputMask %s")
seed = InputMultiPath(traits.Int, desc="Seed Point for Brain Region Filling", sep=",", argstr="--seed %s")
desiredMean = traits.Float(desc="Desired mean within the mask for weighted sum of both images.", argstr="--desiredMean %f")
desiredVariance = traits.Float(desc="Desired variance within the mask for weighted sum of both images.", argstr="--desiredVariance %f")
lowerThresholdFactorPre = traits.Float(desc="Lower threshold factor for finding an initial brain mask", argstr="--lowerThresholdFactorPre %f")
upperThresholdFactorPre = traits.Float(desc="Upper threshold factor for finding an initial brain mask", argstr="--upperThresholdFactorPre %f")
lowerThresholdFactor = traits.Float(desc="Lower threshold factor for defining the brain mask", argstr="--lowerThresholdFactor %f")
upperThresholdFactor = traits.Float(desc="Upper threshold factor for defining the brain mask", argstr="--upperThresholdFactor %f")
boundingBoxSize = InputMultiPath(traits.Int, desc="Size of the cubic bounding box mask used when no brain mask is present", sep=",", argstr="--boundingBoxSize %s")
boundingBoxStart = InputMultiPath(traits.Int, desc="XYZ point-coordinate for the start of the cubic bounding box mask used when no brain mask is present", sep=",", argstr="--boundingBoxStart %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSMushOutputSpec(TraitedSpec):
outputWeightsFile = File(desc="Output Weights File", exists=True)
outputVolume = File(desc="The MUSH image produced from the T1 and T2 weighted images", exists=True)
outputMask = File(desc="The brain volume mask generated from the MUSH image", exists=True)
class BRAINSMush(SEMLikeCommandLine):
"""title: Brain Extraction from T1/T2 image (BRAINS)
category: Utilities.BRAINS
description:
This program: 1) generates a weighted mixture image optimizing the mean and variance and 2) produces a mask of the brain volume
version: 0.1.0.$Revision: 1.4 $(alpha)
documentation-url: http:://mri.radiology.uiowa.edu
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor:
This tool is a modification by Steven Dunn of a program developed by Greg Harris and Ron Pierson.
acknowledgements:
This work was developed by the University of Iowa Departments of Radiology and Psychiatry. This software was supported in part of NIH/NINDS award NS050568.
"""
input_spec = BRAINSMushInputSpec
output_spec = BRAINSMushOutputSpec
_cmd = " BRAINSMush "
_outputs_filenames = {'outputMask': 'outputMask.nii.gz', 'outputWeightsFile': 'outputWeightsFile.txt', 'outputVolume': 'outputVolume.nii.gz'}
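# Editor's sketch (not part of the autogenerated file): typical nipype-style usage,
# assuming the BRAINSMush executable is installed on PATH and the (hypothetical)
# input files exist:
#
#   mush = BRAINSMush()
#   mush.inputs.inputFirstVolume = 't1.nii.gz'
#   mush.inputs.inputSecondVolume = 't2.nii.gz'
#   result = mush.run()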
class BRAINSAlignMSPInputSpec(CommandLineInputSpec):
inputVolume = File(desc=", The Image to be resampled, ", exists=True, argstr="--inputVolume %s")
OutputresampleMSP = traits.Either(traits.Bool, File(), hash_files=False, desc=", The image to be output., ", argstr="--OutputresampleMSP %s")
verbose = traits.Bool(desc=", Show more verbose output, ", argstr="--verbose ")
resultsDir = traits.Either(traits.Bool, Directory(), hash_files=False, desc=", The directory for the results to be written., ", argstr="--resultsDir %s")
writedebuggingImagesLevel = traits.Int(desc=", This flag controls if debugging images are produced. By default value of 0 is no images. Anything greater than zero will be increasing level of debugging images., ", argstr="--writedebuggingImagesLevel %d")
mspQualityLevel = traits.Int(desc=", Flag controls how aggressive the MSP is estimated. 0=quick estimate (9 seconds), 1=normal estimate (11 seconds), 2=great estimate (22 seconds), 3=best estimate (58 seconds)., ", argstr="--mspQualityLevel %d")
rescaleIntensities = traits.Bool(desc=", Flag to turn on rescaling image intensities on input., ", argstr="--rescaleIntensities ")
trimRescaledIntensities = traits.Float(desc=", Turn on clipping the rescaled image one-tailed on input. Units of standard deviations above the mean. Very large values are very permissive. Non-positive value turns clipping off. Defaults to removing 0.00001 of a normal tail above the mean., ", argstr="--trimRescaledIntensities %f")
rescaleIntensitiesOutputRange = InputMultiPath(traits.Int, desc=", This pair of integers gives the lower and upper bounds on the signal portion of the output image. Out-of-field voxels are taken from BackgroundFillValue., ", sep=",", argstr="--rescaleIntensitiesOutputRange %s")
BackgroundFillValue = traits.Str(desc="Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", argstr="--BackgroundFillValue %s")
interpolationMode = traits.Enum("NearestNeighbor", "Linear", "ResampleInPlace", "BSpline", "WindowedSinc", "Hamming", "Cosine", "Welch", "Lanczos", "Blackman",
desc="Type of interpolation to be used when applying transform to moving volume. Options are Linear, ResampleInPlace, NearestNeighbor, BSpline, or WindowedSinc", argstr="--interpolationMode %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSAlignMSPOutputSpec(TraitedSpec):
OutputresampleMSP = File(desc=", The image to be output., ", exists=True)
resultsDir = Directory(desc=", The directory for the results to be written., ", exists=True)
class BRAINSAlignMSP(SEMLikeCommandLine):
"""title: Align Mid Saggital Brain (BRAINS)
category: Utilities.BRAINS
description: Resample an image into ACPC alignment using ACPCDetect
"""
input_spec = BRAINSAlignMSPInputSpec
output_spec = BRAINSAlignMSPOutputSpec
_cmd = " BRAINSAlignMSP "
_outputs_filenames = {'OutputresampleMSP': 'OutputresampleMSP.nii', 'resultsDir': 'resultsDir'}
class BRAINSTransformConvertInputSpec(CommandLineInputSpec):
inputTransform = File(exists=True, argstr="--inputTransform %s")
referenceVolume = File(exists=True, argstr="--referenceVolume %s")
outputTransformType = traits.Enum("Affine", "VersorRigid", "ScaleVersor", "ScaleSkewVersor", "DisplacementField", "Same", desc="The target transformation type. Must be conversion-compatible with the input transform type", argstr="--outputTransformType %s")
displacementVolume = traits.Either(traits.Bool, File(), hash_files=False, argstr="--displacementVolume %s")
outputTransform = traits.Either(traits.Bool, File(), hash_files=False, argstr="--outputTransform %s")
class BRAINSTransformConvertOutputSpec(TraitedSpec):
displacementVolume = File(exists=True)
outputTransform = File(exists=True)
class BRAINSTransformConvert(SEMLikeCommandLine):
"""title: BRAINS Transform Convert
category: Utilities.BRAINS
description: Convert ITK transforms to higher order transforms
version: 1.0
documentation-url: A utility to convert between transform file formats.
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Hans J. Johnson,Kent Williams
acknowledgements:
"""
input_spec = BRAINSTransformConvertInputSpec
output_spec = BRAINSTransformConvertOutputSpec
_cmd = " BRAINSTransformConvert "
_outputs_filenames = {'displacementVolume': 'displacementVolume.nii', 'outputTransform': 'outputTransform.mat'}
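# Usage sketch for the interface above. Assumptions: nipype is installed, the
# BRAINSTransformConvert executable is on PATH, and the file names below are
# placeholders that would have to point at real data before running.
def _example_transform_convert():
    convert = BRAINSTransformConvert()
    convert.inputs.inputTransform = "rigid.h5"
    convert.inputs.referenceVolume = "reference.nii.gz"
    convert.inputs.outputTransformType = "Affine"
    convert.inputs.outputTransform = True  # True lets nipype pick the name from _outputs_filenames
    return convert.run()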
class landmarksConstellationAlignerInputSpec(CommandLineInputSpec):
inputLandmarksPaired = File(desc="Input landmark file (.fcsv)", exists=True, argstr="--inputLandmarksPaired %s")
outputLandmarksPaired = traits.Either(traits.Bool, File(), hash_files=False, desc="Output landmark file (.fcsv)", argstr="--outputLandmarksPaired %s")
class landmarksConstellationAlignerOutputSpec(TraitedSpec):
outputLandmarksPaired = File(desc="Output landmark file (.fcsv)", exists=True)
class landmarksConstellationAligner(SEMLikeCommandLine):
"""title: MidACPC Landmark Insertion
category: Utilities.BRAINS
description:
This program converts the original landmark files to the acpc-aligned landmark files
version:
documentation-url:
license:
contributor: Ali Ghayoor
acknowledgements:
"""
input_spec = landmarksConstellationAlignerInputSpec
output_spec = landmarksConstellationAlignerOutputSpec
_cmd = " landmarksConstellationAligner "
_outputs_filenames = {'outputLandmarksPaired': 'outputLandmarksPaired'}
class BRAINSEyeDetectorInputSpec(CommandLineInputSpec):
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
inputVolume = File(desc="The input volume", exists=True, argstr="--inputVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="The output volume", argstr="--outputVolume %s")
debugDir = traits.Str(desc="A place for debug information", argstr="--debugDir %s")
class BRAINSEyeDetectorOutputSpec(TraitedSpec):
outputVolume = File(desc="The output volume", exists=True)
class BRAINSEyeDetector(SEMLikeCommandLine):
"""title: Eye Detector (BRAINS)
category: Utilities.BRAINS
description:
version: 1.0
documentation-url: http://www.nitrc.org/projects/brainscdetector/
"""
input_spec = BRAINSEyeDetectorInputSpec
output_spec = BRAINSEyeDetectorOutputSpec
_cmd = " BRAINSEyeDetector "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
class BRAINSLinearModelerEPCAInputSpec(CommandLineInputSpec):
inputTrainingList = File(desc="Input Training Landmark List Filename, ", exists=True, argstr="--inputTrainingList %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSLinearModelerEPCAOutputSpec(TraitedSpec):
pass
class BRAINSLinearModelerEPCA(SEMLikeCommandLine):
"""title: Landmark Linear Modeler (BRAINS)
category: Utilities.BRAINS
description:
Training linear model using EPCA. Implementation based on my MS thesis, "A METHOD FOR AUTOMATED LANDMARK CONSTELLATION DETECTION USING EVOLUTIONARY PRINCIPAL COMPONENTS AND STATISTICAL SHAPE MODELS"
version: 1.0
documentation-url: http://www.nitrc.org/projects/brainscdetector/
"""
input_spec = BRAINSLinearModelerEPCAInputSpec
output_spec = BRAINSLinearModelerEPCAOutputSpec
_cmd = " BRAINSLinearModelerEPCA "
_outputs_filenames = {}
class BRAINSInitializedControlPointsInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Input Volume", exists=True, argstr="--inputVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output Volume", argstr="--outputVolume %s")
splineGridSize = InputMultiPath(traits.Int, desc="The number of subdivisions of the BSpline Grid to be centered on the image space. Each dimension must have at least 3 subdivisions for the BSpline to be correctly computed. ", sep=",", argstr="--splineGridSize %s")
permuteOrder = InputMultiPath(traits.Int, desc="The permutation order for the images. The default is 0,1,2 (i.e. no permutation)", sep=",", argstr="--permuteOrder %s")
outputLandmarksFile = traits.Str(desc="Output filename", argstr="--outputLandmarksFile %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSInitializedControlPointsOutputSpec(TraitedSpec):
outputVolume = File(desc="Output Volume", exists=True)
class BRAINSInitializedControlPoints(SEMLikeCommandLine):
"""title: Initialized Control Points (BRAINS)
category: Utilities.BRAINS
description:
Outputs bspline control points as landmarks
version: 0.1.0.$Revision: 916 $(alpha)
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Mark Scully
acknowledgements:
This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149. Additional support for Mark Scully and Hans Johnson at the University of Iowa.
"""
input_spec = BRAINSInitializedControlPointsInputSpec
output_spec = BRAINSInitializedControlPointsOutputSpec
_cmd = " BRAINSInitializedControlPoints "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
class CleanUpOverlapLabelsInputSpec(CommandLineInputSpec):
inputBinaryVolumes = InputMultiPath(File(exists=True), desc="The list of binary images to be checked and cleaned up. Order is important. Binary volume given first always wins out. ", argstr="--inputBinaryVolumes %s...")
outputBinaryVolumes = traits.Either(traits.Bool, InputMultiPath(File(), ), hash_files=False, desc="The output label map images, with integer values in it. Each label value specified in the inputLabels is combined into this output label map volume", argstr="--outputBinaryVolumes %s...")
class CleanUpOverlapLabelsOutputSpec(TraitedSpec):
outputBinaryVolumes = OutputMultiPath(File(exists=True), desc="The output label map images, with integer values in it. Each label value specified in the inputLabels is combined into this output label map volume", exists=True)
class CleanUpOverlapLabels(SEMLikeCommandLine):
"""title: Clean Up Overla Labels
category: Utilities.BRAINS
description: Take a series of input binary images and clean up for those overlapped area. Binary volumes given first always wins out
version: 0.1.0
contributor: Eun Young Kim
"""
input_spec = CleanUpOverlapLabelsInputSpec
output_spec = CleanUpOverlapLabelsOutputSpec
_cmd = " CleanUpOverlapLabels "
_outputs_filenames = {'outputBinaryVolumes': 'outputBinaryVolumes.nii'}
class BRAINSClipInferiorInputSpec(CommandLineInputSpec):
inputVolume = File(desc="Input image to make a clipped short int copy from.", exists=True, argstr="--inputVolume %s")
outputVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="Output image, a short int copy of the upper portion of the input image, filled with BackgroundFillValue.", argstr="--outputVolume %s")
    acLowerBound = traits.Float(desc=", When copying the input image to the output image, replace the image with the BackgroundFillValue everywhere below the plane this far, in physical units (millimeters), below (inferior to) the AC point (assumed to be the voxel field middle). The oversize default was chosen to have no effect. Based on visualizing a thousand masks in the IPIG study, we recommend a limit no smaller than 80.0 mm., ", argstr="--acLowerBound %f")
BackgroundFillValue = traits.Str(desc="Fill the background of image with specified short int value. Enter number or use BIGNEG for a large negative number.", argstr="--BackgroundFillValue %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSClipInferiorOutputSpec(TraitedSpec):
outputVolume = File(desc="Output image, a short int copy of the upper portion of the input image, filled with BackgroundFillValue.", exists=True)
class BRAINSClipInferior(SEMLikeCommandLine):
"""title: Clip Inferior of Center of Brain (BRAINS)
category: Utilities.BRAINS
description: This program will read the inputVolume as a short int image, write the BackgroundFillValue everywhere inferior to the lower bound, and write the resulting clipped short int image in the outputVolume.
version: 1.0
"""
input_spec = BRAINSClipInferiorInputSpec
output_spec = BRAINSClipInferiorOutputSpec
_cmd = " BRAINSClipInferior "
_outputs_filenames = {'outputVolume': 'outputVolume.nii'}
class GenerateLabelMapFromProbabilityMapInputSpec(CommandLineInputSpec):
    inputVolumes = InputMultiPath(File(exists=True), desc="The input probability images from which the label map is computed", argstr="--inputVolumes %s...")
    outputLabelVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="The output label map volume, where each voxel takes the label with the highest probability", argstr="--outputLabelVolume %s")
    numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class GenerateLabelMapFromProbabilityMapOutputSpec(TraitedSpec):
    outputLabelVolume = File(desc="The output label map volume, where each voxel takes the label with the highest probability", exists=True)
class GenerateLabelMapFromProbabilityMap(SEMLikeCommandLine):
"""title: Label Map from Probability Images
category: Utilities.BRAINS
description:
Given a list of probability maps for labels, create a discrete label map where only the highest probability region is used for the labeling.
version: 0.1
contributor: University of Iowa Department of Psychiatry, http://www.psychiatry.uiowa.edu
"""
input_spec = GenerateLabelMapFromProbabilityMapInputSpec
output_spec = GenerateLabelMapFromProbabilityMapOutputSpec
_cmd = " GenerateLabelMapFromProbabilityMap "
_outputs_filenames = {'outputLabelVolume': 'outputLabelVolume.nii.gz'}
class BRAINSLandmarkInitializerInputSpec(CommandLineInputSpec):
inputFixedLandmarkFilename = File(desc="input fixed landmark. *.fcsv", exists=True, argstr="--inputFixedLandmarkFilename %s")
inputMovingLandmarkFilename = File(desc="input moving landmark. *.fcsv", exists=True, argstr="--inputMovingLandmarkFilename %s")
    inputWeightFilename = File(desc="Input weight file name for landmarks. Higher weighted landmarks will be considered more heavily. Weights are proportional, that is, the magnitude of the weights will be normalized by their minimum and maximum values. ", exists=True, argstr="--inputWeightFilename %s")
outputTransformFilename = traits.Either(traits.Bool, File(), hash_files=False, desc="output transform file name (ex: ./outputTransform.mat) ", argstr="--outputTransformFilename %s")
class BRAINSLandmarkInitializerOutputSpec(TraitedSpec):
outputTransformFilename = File(desc="output transform file name (ex: ./outputTransform.mat) ", exists=True)
class BRAINSLandmarkInitializer(SEMLikeCommandLine):
"""title: BRAINSLandmarkInitializer
category: Utilities.BRAINS
description: Create transformation file (*mat) from a pair of landmarks (*fcsv) files.
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Eunyoung Regina Kim
"""
input_spec = BRAINSLandmarkInitializerInputSpec
output_spec = BRAINSLandmarkInitializerOutputSpec
_cmd = " BRAINSLandmarkInitializer "
_outputs_filenames = {'outputTransformFilename': 'outputTransformFilename'}
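# Usage sketch for BRAINSLandmarkInitializer. Assumptions: the executable is on PATH and
# the .fcsv paths point at existing landmark files; `cmdline` only builds the command string.
def _example_landmark_initializer():
    init = BRAINSLandmarkInitializer()
    init.inputs.inputFixedLandmarkFilename = "fixed_landmarks.fcsv"
    init.inputs.inputMovingLandmarkFilename = "moving_landmarks.fcsv"
    init.inputs.outputTransformFilename = "landmark_init.mat"
    print(init.cmdline)  # inspect the BRAINSLandmarkInitializer call before running it
    return init.run()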
class BRAINSMultiModeSegmentInputSpec(CommandLineInputSpec):
inputVolumes = InputMultiPath(File(exists=True), desc="The input image volumes for finding the largest region filled mask.", argstr="--inputVolumes %s...")
inputMaskVolume = File(desc="The ROI for region to compute histogram levels.", exists=True, argstr="--inputMaskVolume %s")
outputROIMaskVolume = traits.Either(traits.Bool, File(), hash_files=False, desc="The ROI automatically found from the input image.", argstr="--outputROIMaskVolume %s")
outputClippedVolumeROI = traits.Either(traits.Bool, File(), hash_files=False, desc="The inputVolume clipped to the region of the brain mask.", argstr="--outputClippedVolumeROI %s")
lowerThreshold = InputMultiPath(traits.Float, desc="Lower thresholds on the valid histogram regions for each modality", sep=",", argstr="--lowerThreshold %s")
upperThreshold = InputMultiPath(traits.Float, desc="Upper thresholds on the valid histogram regions for each modality", sep=",", argstr="--upperThreshold %s")
numberOfThreads = traits.Int(desc="Explicitly specify the maximum number of threads to use.", argstr="--numberOfThreads %d")
class BRAINSMultiModeSegmentOutputSpec(TraitedSpec):
outputROIMaskVolume = File(desc="The ROI automatically found from the input image.", exists=True)
outputClippedVolumeROI = File(desc="The inputVolume clipped to the region of the brain mask.", exists=True)
class BRAINSMultiModeSegment(SEMLikeCommandLine):
"""title: Segment based on rectangular region of joint histogram (BRAINS)
category: Utilities.BRAINS
description: This tool creates binary regions based on segmenting multiple image modalities at once.
version: 2.4.1
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Hans J. Johnson, hans-johnson -at- uiowa.edu, http://www.psychiatry.uiowa.edu
acknowledgements: Hans Johnson(1,3,4); Gregory Harris(1), Vincent Magnotta(1,2,3); (1=University of Iowa Department of Psychiatry, 2=University of Iowa Department of Radiology, 3=University of Iowa Department of Biomedical Engineering, 4=University of Iowa Department of Electrical and Computer Engineering)
"""
input_spec = BRAINSMultiModeSegmentInputSpec
output_spec = BRAINSMultiModeSegmentOutputSpec
_cmd = " BRAINSMultiModeSegment "
_outputs_filenames = {'outputROIMaskVolume': 'outputROIMaskVolume.nii', 'outputClippedVolumeROI': 'outputClippedVolumeROI.nii'}
class insertMidACPCpointInputSpec(CommandLineInputSpec):
inputLandmarkFile = File(desc="Input landmark file (.fcsv)", exists=True, argstr="--inputLandmarkFile %s")
outputLandmarkFile = traits.Either(traits.Bool, File(), hash_files=False, desc="Output landmark file (.fcsv)", argstr="--outputLandmarkFile %s")
class insertMidACPCpointOutputSpec(TraitedSpec):
outputLandmarkFile = File(desc="Output landmark file (.fcsv)", exists=True)
class insertMidACPCpoint(SEMLikeCommandLine):
"""title: MidACPC Landmark Insertion
category: Utilities.BRAINS
description:
This program gets a landmark fcsv file and adds a new landmark as the midpoint between AC and PC points to the output landmark fcsv file
version:
documentation-url:
license:
contributor: Ali Ghayoor
acknowledgements:
"""
input_spec = insertMidACPCpointInputSpec
output_spec = insertMidACPCpointOutputSpec
_cmd = " insertMidACPCpoint "
_outputs_filenames = {'outputLandmarkFile': 'outputLandmarkFile'}
class BRAINSSnapShotWriterInputSpec(CommandLineInputSpec):
inputVolumes = InputMultiPath(File(exists=True), desc="Input image volume list to be extracted as 2D image. Multiple input is possible. At least one input is required.", argstr="--inputVolumes %s...")
inputBinaryVolumes = InputMultiPath(File(exists=True), desc="Input mask (binary) volume list to be extracted as 2D image. Multiple input is possible.", argstr="--inputBinaryVolumes %s...")
inputSliceToExtractInPhysicalPoint = InputMultiPath(traits.Float, desc="2D slice number of input images. For autoWorkUp output, which AC-PC aligned, 0,0,0 will be the center.", sep=",", argstr="--inputSliceToExtractInPhysicalPoint %s")
inputSliceToExtractInIndex = InputMultiPath(traits.Int, desc="2D slice number of input images. For size of 256*256*256 image, 128 is usually used.", sep=",", argstr="--inputSliceToExtractInIndex %s")
inputSliceToExtractInPercent = InputMultiPath(traits.Int, desc="2D slice number of input images. Percentage input from 0%-100%. (ex. --inputSliceToExtractInPercent 50,50,50", sep=",", argstr="--inputSliceToExtractInPercent %s")
    inputPlaneDirection = InputMultiPath(traits.Int, desc="Plane to display. In general, 0=sagittal, 1=coronal, and 2=axial plane.", sep=",", argstr="--inputPlaneDirection %s")
outputFilename = traits.Either(traits.Bool, File(), hash_files=False, desc="2D file name of input images. Required.", argstr="--outputFilename %s")
class BRAINSSnapShotWriterOutputSpec(TraitedSpec):
outputFilename = File(desc="2D file name of input images. Required.", exists=True)
class BRAINSSnapShotWriter(SEMLikeCommandLine):
"""title: BRAINSSnapShotWriter
category: Utilities.BRAINS
description: Create 2D snapshot of input images. Mask images are color-coded
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Eunyoung Regina Kim
"""
input_spec = BRAINSSnapShotWriterInputSpec
output_spec = BRAINSSnapShotWriterOutputSpec
_cmd = " BRAINSSnapShotWriter "
_outputs_filenames = {'outputFilename': 'outputFilename'}
class JointHistogramInputSpec(CommandLineInputSpec):
inputVolumeInXAxis = File(desc="The Input image to be computed for statistics", exists=True, argstr="--inputVolumeInXAxis %s")
inputVolumeInYAxis = File(desc="The Input image to be computed for statistics", exists=True, argstr="--inputVolumeInYAxis %s")
inputMaskVolumeInXAxis = File(desc="Input mask volume for inputVolumeInXAxis. Histogram will be computed just for the masked region", exists=True, argstr="--inputMaskVolumeInXAxis %s")
inputMaskVolumeInYAxis = File(desc="Input mask volume for inputVolumeInYAxis. Histogram will be computed just for the masked region", exists=True, argstr="--inputMaskVolumeInYAxis %s")
outputJointHistogramImage = traits.Str(desc=" output joint histogram image file name. Histogram is usually 2D image. ", argstr="--outputJointHistogramImage %s")
verbose = traits.Bool(desc=" print debugging information, ", argstr="--verbose ")
class JointHistogramOutputSpec(TraitedSpec):
pass
class JointHistogram(SEMLikeCommandLine):
"""title: Write Out Image Intensities
category: Utilities.BRAINS
description:
For Analysis
version: 0.1
contributor: University of Iowa Department of Psychiatry, http://www.psychiatry.uiowa.edu
"""
input_spec = JointHistogramInputSpec
output_spec = JointHistogramOutputSpec
_cmd = " JointHistogram "
_outputs_filenames = {}
class ShuffleVectorsModuleInputSpec(CommandLineInputSpec):
    inputVectorFileBaseName = File(desc="input vector file name prefix. Usually ends with .txt, and the header file has a .txt.hdr suffix", exists=True, argstr="--inputVectorFileBaseName %s")
    outputVectorFileBaseName = traits.Either(traits.Bool, File(), hash_files=False, desc="output vector file name prefix. Usually ends with .txt, and the header file has a .txt.hdr suffix", argstr="--outputVectorFileBaseName %s")
resampleProportion = traits.Float(desc="downsample size of 1 will be the same size as the input images, downsample size of 3 will throw 2/3 the vectors away.", argstr="--resampleProportion %f")
class ShuffleVectorsModuleOutputSpec(TraitedSpec):
    outputVectorFileBaseName = File(desc="output vector file name prefix. Usually ends with .txt, and the header file has a .txt.hdr suffix", exists=True)
class ShuffleVectorsModule(SEMLikeCommandLine):
"""title: ShuffleVectors
category: Utilities.BRAINS
description: Automatic Segmentation using neural networks
version: 1.0
license: https://www.nitrc.org/svn/brains/BuildScripts/trunk/License.txt
contributor: Hans Johnson
"""
input_spec = ShuffleVectorsModuleInputSpec
output_spec = ShuffleVectorsModuleOutputSpec
_cmd = " ShuffleVectorsModule "
_outputs_filenames = {'outputVectorFileBaseName': 'outputVectorFileBaseName'}
class ImageRegionPlotterInputSpec(CommandLineInputSpec):
inputVolume1 = File(desc="The Input image to be computed for statistics", exists=True, argstr="--inputVolume1 %s")
inputVolume2 = File(desc="The Input image to be computed for statistics", exists=True, argstr="--inputVolume2 %s")
inputBinaryROIVolume = File(desc="The Input binary image for region of interest", exists=True, argstr="--inputBinaryROIVolume %s")
inputLabelVolume = File(desc="The Label Image", exists=True, argstr="--inputLabelVolume %s")
numberOfHistogramBins = traits.Int(desc=" the number of histogram levels", argstr="--numberOfHistogramBins %d")
outputJointHistogramData = traits.Str(desc=" output data file name", argstr="--outputJointHistogramData %s")
useROIAUTO = traits.Bool(desc=" Use ROIAUTO to compute region of interest. This cannot be used with inputLabelVolume", argstr="--useROIAUTO ")
useIntensityForHistogram = traits.Bool(desc=" Create Intensity Joint Histogram instead of Quantile Joint Histogram", argstr="--useIntensityForHistogram ")
verbose = traits.Bool(desc=" print debugging information, ", argstr="--verbose ")
class ImageRegionPlotterOutputSpec(TraitedSpec):
pass
class ImageRegionPlotter(SEMLikeCommandLine):
"""title: Write Out Image Intensities
category: Utilities.BRAINS
description: For Analysis
version: 0.1
contributor: University of Iowa Department of Psychiatry, http://www.psychiatry.uiowa.edu
"""
input_spec = ImageRegionPlotterInputSpec
output_spec = ImageRegionPlotterOutputSpec
_cmd = " ImageRegionPlotter "
_outputs_filenames = {}
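# End-of-module sketch: the SEM-wrapped interfaces above can also be chained in a nipype
# Workflow. Assumptions: nipype is installed, the BRAINS executables are on PATH, and
# 'raw_t1.nii.gz' is a placeholder input volume, not real data.
def _example_brains_workflow():
    import nipype.pipeline.engine as pe

    align = pe.Node(BRAINSAlignMSP(), name="align_msp")
    align.inputs.inputVolume = "raw_t1.nii.gz"
    align.inputs.OutputresampleMSP = True

    clip = pe.Node(BRAINSClipInferior(), name="clip_inferior")
    clip.inputs.acLowerBound = 80.0
    clip.inputs.outputVolume = True

    wf = pe.Workflow(name="brains_utils_example")
    wf.connect(align, "OutputresampleMSP", clip, "inputVolume")
    return wf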
# ---------------------------------------------------------------------------
# [end of file; author: hans-johnson@uiowa.edu]
# next file: umutnaderi/Constructing-a-3D-Model-by-Using-2D-Parameters
#            /Spine/img/Test Phases/1/test1.py (Python, no license, 16,253 bytes)
# ---------------------------------------------------------------------------
App.newDocument("Project")
App.setActiveDocument("Project")
from FreeCAD import Base
import Part, PartGui, Draft, Sketcher  # Sketcher is needed for the Constraint calls below
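# Script overview: five pairs of identical profile sketches are built below (an outer
# circle of radius 30, a concentric circle of radius 15, and a circle of radius 8 at
# (8, -21) mirrored about the vertical axis, then trimmed into one closed profile).
# Each pair is scaled with Draft.scale, lofted into a solid, and finally repositioned
# and rescaled at the end of the script; a helper capturing the repeated pattern is
# sketched after the last loft.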
sketch01 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch01')
sketch01.Placement = App.Placement(App.Vector(0.000000,0.000000,0.000000),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch01.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch01.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch01.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch01.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch01.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch01.addSymmetric([2],-2,0)
sketch01.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch01.trim(2,App.Vector(4.397662,-13.864904,0))
sketch01.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch01.trim(2,App.Vector(0.090503,-19.139975,0))
sketch01.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch01.trim(2,App.Vector(10.701625,-28.702028,0))
sketch01.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch01.trim(1,App.Vector(1.574561,15.313293,0))
sketch01.trim(1,App.Vector(0.207600,14.282619,0))
sketch02 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch02')
sketch02.Placement = App.Placement(App.Vector(0.000000,0.000000,-15),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch02.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch02.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch02.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch02.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch02.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch02.addSymmetric([2],-2,0)
sketch02.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch02.trim(2,App.Vector(4.397662,-13.864904,0))
sketch02.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch02.trim(2,App.Vector(0.090503,-19.139975,0))
sketch02.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch02.trim(2,App.Vector(10.701625,-28.702028,0))
sketch02.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch02.trim(1,App.Vector(1.574561,15.313293,0))
sketch02.trim(1,App.Vector(0.207600,14.282619,0))
App.ActiveDocument.recompute()
scale01 = Draft.scale([sketch01],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale01.Label = 'Scale01'
scale02 = Draft.scale([sketch02],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale02.Label = 'Scale02'
loft01 = App.getDocument('Project').addObject('Part::Loft','Loft01')
loft01.Sections=[scale01, scale02, ]
loft01.Solid=True
loft01.Ruled=False
loft01.Closed=False
FreeCAD.ActiveDocument.recompute()
FreeCAD.ActiveDocument.recompute()
sketch11 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch11')
sketch11.Placement = App.Placement(App.Vector(0.000000,0.000000,-20),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch11.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch11.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch11.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch11.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch11.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch11.addSymmetric([2],-2,0)
sketch11.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch11.trim(2,App.Vector(4.397662,-13.864904,0))
sketch11.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch11.trim(2,App.Vector(0.090503,-19.139975,0))
sketch11.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch11.trim(2,App.Vector(10.701625,-28.702028,0))
sketch11.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch11.trim(1,App.Vector(1.574561,15.313293,0))
sketch11.trim(1,App.Vector(0.207600,14.282619,0))
sketch12 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch12')
sketch12.Placement = App.Placement(App.Vector(0.000000,0.000000,-35),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch12.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch12.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch12.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch12.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch12.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch12.addSymmetric([2],-2,0)
sketch12.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch12.trim(2,App.Vector(4.397662,-13.864904,0))
sketch12.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch12.trim(2,App.Vector(0.090503,-19.139975,0))
sketch12.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch12.trim(2,App.Vector(10.701625,-28.702028,0))
sketch12.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch12.trim(1,App.Vector(1.574561,15.313293,0))
sketch12.trim(1,App.Vector(0.207600,14.282619,0))
scale11 = Draft.scale([sketch11],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale11.Label = 'Scale11'
scale12 = Draft.scale([sketch12],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale12.Label = 'Scale12'
loft11 = App.getDocument('Project').addObject('Part::Loft','Loft11')
loft11.Sections=[scale11, scale12, ]
loft11.Solid=True
loft11.Ruled=False
loft11.Closed=False
FreeCAD.ActiveDocument.recompute()
FreeCAD.ActiveDocument.recompute()
sketch21 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch21')
sketch21.Placement = App.Placement(App.Vector(0.000000,0.000000,-40),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch21.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch21.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch21.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch21.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch21.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch21.addSymmetric([2],-2,0)
sketch21.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch21.trim(2,App.Vector(4.397662,-13.864904,0))
sketch21.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch21.trim(2,App.Vector(0.090503,-19.139975,0))
sketch21.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch21.trim(2,App.Vector(10.701625,-28.702028,0))
sketch21.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch21.trim(1,App.Vector(1.574561,15.313293,0))
sketch21.trim(1,App.Vector(0.207600,14.282619,0))
sketch22 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch22')
sketch22.Placement = App.Placement(App.Vector(0.000000,0.000000,-55),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch22.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch22.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch22.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch22.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch22.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch22.addSymmetric([2],-2,0)
sketch22.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch22.trim(2,App.Vector(4.397662,-13.864904,0))
sketch22.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch22.trim(2,App.Vector(0.090503,-19.139975,0))
sketch22.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch22.trim(2,App.Vector(10.701625,-28.702028,0))
sketch22.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch22.trim(1,App.Vector(1.574561,15.313293,0))
sketch22.trim(1,App.Vector(0.207600,14.282619,0))
scale21 = Draft.scale([sketch21],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale21.Label = 'Scale21'
scale22 = Draft.scale([sketch22],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale22.Label = 'Scale22'
loft21 = App.getDocument('Project').addObject('Part::Loft','Loft21')
loft21.Sections=[scale21, scale22, ]
loft21.Solid=True
loft21.Ruled=False
loft21.Closed=False
FreeCAD.ActiveDocument.recompute()
FreeCAD.ActiveDocument.recompute()
sketch31 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch31')
sketch31.Placement = App.Placement(App.Vector(0.000000,0.000000,-60),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch31.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch31.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch31.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch31.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch31.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch31.addSymmetric([2],-2,0)
sketch31.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch31.trim(2,App.Vector(4.397662,-13.864904,0))
sketch31.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch31.trim(2,App.Vector(0.090503,-19.139975,0))
sketch31.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch31.trim(2,App.Vector(10.701625,-28.702028,0))
sketch31.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch31.trim(1,App.Vector(1.574561,15.313293,0))
sketch31.trim(1,App.Vector(0.207600,14.282619,0))
sketch32 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch32')
sketch32.Placement = App.Placement(App.Vector(0.000000,0.000000,-75),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch32.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch32.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch32.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch32.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch32.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch32.addSymmetric([2],-2,0)
sketch32.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch32.trim(2,App.Vector(4.397662,-13.864904,0))
sketch32.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch32.trim(2,App.Vector(0.090503,-19.139975,0))
sketch32.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch32.trim(2,App.Vector(10.701625,-28.702028,0))
sketch32.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch32.trim(1,App.Vector(1.574561,15.313293,0))
sketch32.trim(1,App.Vector(0.207600,14.282619,0))
scale31 = Draft.scale([sketch31],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale31.Label = 'Scale31'
scale32 = Draft.scale([sketch32],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale32.Label = 'Scale32'
loft31 = App.getDocument('Project').addObject('Part::Loft','Loft31')
loft31.Sections=[scale31, scale32, ]
loft31.Solid=True
loft31.Ruled=False
loft31.Closed=False
FreeCAD.ActiveDocument.recompute()
FreeCAD.ActiveDocument.recompute()
sketch41 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch41')
sketch41.Placement = App.Placement(App.Vector(0.000000,0.000000,-80),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch41.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch41.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch41.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch41.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch41.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch41.addSymmetric([2],-2,0)
sketch41.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch41.trim(2,App.Vector(4.397662,-13.864904,0))
sketch41.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch41.trim(2,App.Vector(0.090503,-19.139975,0))
sketch41.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch41.trim(2,App.Vector(10.701625,-28.702028,0))
sketch41.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch41.trim(1,App.Vector(1.574561,15.313293,0))
sketch41.trim(1,App.Vector(0.207600,14.282619,0))
sketch42 = App.activeDocument().addObject('Sketcher::SketchObject','Sketch42')
sketch42.Placement = App.Placement(App.Vector(0.000000,0.000000,-95),App.Rotation(0.000000,0.000000,0.000000,1.000000))
sketch42.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),30),False)
sketch42.addConstraint(Sketcher.Constraint('Coincident',0,3,-1,1))
sketch42.addGeometry(Part.Circle(App.Vector(0.000000,0.000000,0),App.Vector(0,0,1),15),False)
sketch42.addConstraint(Sketcher.Constraint('Coincident',1,3,-1,1))
sketch42.addGeometry(Part.Circle(App.Vector(8,-21,0),App.Vector(0,0,1),8),False)
sketch42.addSymmetric([2],-2,0)
sketch42.trim(3,App.Vector(-4.671036,-13.775558,0))
sketch42.trim(2,App.Vector(4.397662,-13.864904,0))
sketch42.trim(3,App.Vector(-1.186513,-16.455961,0))
sketch42.trim(2,App.Vector(0.090503,-19.139975,0))
sketch42.trim(3,App.Vector(-11.270746,-28.304337,0))
sketch42.trim(2,App.Vector(10.701625,-28.702028,0))
sketch42.trim(0,App.Vector(-17.697254,-24.176645,0))
sketch42.trim(1,App.Vector(1.574561,15.313293,0))
sketch42.trim(1,App.Vector(0.207600,14.282619,0))
scale41 = Draft.scale([sketch41],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale41.Label = 'Scale41'
scale42 = Draft.scale([sketch42],delta=FreeCAD.Vector(2,2,2),center=FreeCAD.Vector(0,0,0),copy=False) #values
scale42.Label = 'Scale42'
loft41 = App.getDocument('Project').addObject('Part::Loft','Loft41')
loft41.Sections=[scale41, scale42, ]
loft41.Solid=True
loft41.Ruled=False
loft41.Closed=False
FreeCAD.ActiveDocument.recompute()
FreeCAD.ActiveDocument.recompute()
Gui.SendMsgToActiveView("ViewFit")
Gui.activeDocument().activeView().viewAxonometric()
scale01.Placement.Base = App.Vector(373,376,492)
scale01.Placement.Rotation = App.Rotation(App.Vector(0,1,0),0)
scale01.Placement.Rotation = App.Rotation(App.Vector(1,0,0),-0.739259)
scale01.Scale = (32.5556,22.1447,9.57143)
scale02.Placement.Base = App.Vector(374,375,425)
scale02.Placement.Rotation = App.Rotation(App.Vector(0,1,0),-0.196892)
scale02.Placement.Rotation = App.Rotation(App.Vector(1,0,0),0)
scale02.Scale = (32.3335,22.1429,9.57143)
scale11.Placement.Base = App.Vector(374,377,400)
scale11.Placement.Rotation = App.Rotation(App.Vector(0,1,0),0.196218)
scale11.Placement.Rotation = App.Rotation(App.Vector(1,0,0),-0.729843)
scale11.Scale = (32.4446,22.4304,9.28571)
scale12.Placement.Base = App.Vector(375,374,335)
scale12.Placement.Rotation = App.Rotation(App.Vector(0,1,0),0)
scale12.Placement.Rotation = App.Rotation(App.Vector(1,0,0),0.360346)
scale12.Scale = (32.6667,22.7147,9.28571)
scale21.Placement.Base = App.Vector(374,376,309)
scale21.Placement.Rotation = App.Rotation(App.Vector(0,1,0),0.197571)
scale21.Placement.Rotation = App.Rotation(App.Vector(1,0,0),-0.369645)
scale21.Scale = (32.2224,22.1447,9.42857)
scale22.Placement.Base = App.Vector(373,375,243)
scale22.Placement.Rotation = App.Rotation(App.Vector(0,1,0),-0.586626)
scale22.Placement.Rotation = App.Rotation(App.Vector(1,0,0),0)
scale22.Scale = (32.5573,22.1429,9.42857)
scale31.Placement.Base = App.Vector(374,376,218)
scale31.Placement.Rotation = App.Rotation(App.Vector(0,1,0),0)
scale31.Placement.Rotation = App.Rotation(App.Vector(1,0,0),-0.372045)
scale31.Scale = (32.4444,22.0005,9.57143)
scale32.Placement.Base = App.Vector(375,376,151)
scale32.Placement.Rotation = App.Rotation(App.Vector(0,1,0),0)
scale32.Placement.Rotation = App.Rotation(App.Vector(1,0,0),0.372045)
scale32.Scale = (32.3333,22.0005,9.57143)
scale41.Placement.Base = App.Vector(374,375,126)
scale41.Placement.Rotation = App.Rotation(App.Vector(0,1,0),0.784825)
scale41.Placement.Rotation = App.Rotation(App.Vector(1,0,0),0)
scale41.Scale = (32.4475,22.4286,9.57143)
scale42.Placement.Base = App.Vector(375,375,59)
scale42.Placement.Rotation = App.Rotation(App.Vector(0,1,0),0)
scale42.Placement.Rotation = App.Rotation(App.Vector(1,0,0),0.720664)
scale42.Scale = (32.6667,22.7161,9.57143)
FreeCAD.ActiveDocument.recompute()
Gui.SendMsgToActiveView("ViewFit")
Gui.activeDocument().activeView().viewAxonometric()
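# The five sections above repeat the same profile construction with only the sketch name
# and Z offset changing. A condensed helper capturing that pattern (a refactoring sketch,
# not part of the original script) could look like this:
def _make_profile_sketch(doc, name, z_offset):
    """Build one trimmed profile sketch at the given Z offset, as done repeatedly above."""
    sk = doc.addObject('Sketcher::SketchObject', name)
    sk.Placement = App.Placement(App.Vector(0, 0, z_offset), App.Rotation(0, 0, 0, 1))
    sk.addGeometry(Part.Circle(App.Vector(0, 0, 0), App.Vector(0, 0, 1), 30), False)
    sk.addConstraint(Sketcher.Constraint('Coincident', 0, 3, -1, 1))
    sk.addGeometry(Part.Circle(App.Vector(0, 0, 0), App.Vector(0, 0, 1), 15), False)
    sk.addConstraint(Sketcher.Constraint('Coincident', 1, 3, -1, 1))
    sk.addGeometry(Part.Circle(App.Vector(8, -21, 0), App.Vector(0, 0, 1), 8), False)
    sk.addSymmetric([2], -2, 0)
    # Same nine trim operations applied to every sketch above.
    trims = [
        (3, App.Vector(-4.671036, -13.775558, 0)),
        (2, App.Vector(4.397662, -13.864904, 0)),
        (3, App.Vector(-1.186513, -16.455961, 0)),
        (2, App.Vector(0.090503, -19.139975, 0)),
        (3, App.Vector(-11.270746, -28.304337, 0)),
        (2, App.Vector(10.701625, -28.702028, 0)),
        (0, App.Vector(-17.697254, -24.176645, 0)),
        (1, App.Vector(1.574561, 15.313293, 0)),
        (1, App.Vector(0.207600, 14.282619, 0)),
    ]
    for geo_index, point in trims:
        sk.trim(geo_index, point)
    return sk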
# ---------------------------------------------------------------------------
# [end of file; author: naderiumut@gmail.com]
# next file: ankurvaishley/zentral
#            /zentral/contrib/monolith/management/commands/rebuild_manifest_enrollment_packages.py (Python, Apache-2.0, 484 bytes)
# ---------------------------------------------------------------------------
from django.core.management.base import BaseCommand
from zentral.contrib.monolith.models import ManifestEnrollmentPackage
from zentral.contrib.monolith.utils import build_manifest_enrollment_package
class Command(BaseCommand):
help = 'Rebuild monolith manifest enrollment packages.'
def handle(self, *args, **kwargs):
for mep in ManifestEnrollmentPackage.objects.all():
build_manifest_enrollment_package(mep)
print(mep.file.path, "rebuilt")
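# Usage sketch (assumes a configured Zentral Django project). The command is normally run
# from the shell as `python manage.py rebuild_manifest_enrollment_packages`; from Python
# it can be invoked through Django's management API:
#
#     from django.core.management import call_command
#     call_command("rebuild_manifest_enrollment_packages")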
# ---------------------------------------------------------------------------
# [end of file; author: eric.falconnier@112hz.com]
# next file: juankgp/pythonPoo2
#            /ejerciciosSql/Evaluacion2/Pregunta2_salarioEmpleado/empleadoSalario.py (Python, no license, 1,868 bytes)
# ---------------------------------------------------------------------------
# Question 2: Using the property and setter methods, the requirements are:
# Make sure the employee data is inherited by the salaried-employee (EmpleadoSalario) class
# Provide a method to compute the salary with and without overtime hours and print the results
# Provide another method to print the employee's information
# Use 2 objects to show the results
from empleado import Empleado
class EmpleadoSalario(Empleado):
def __init__(self, ci, nombre, edad,valorhora,horastrabajo,horasextras):
Empleado.__init__(self, ci, nombre, edad)
self.valorhora=valorhora
self.horastrabajo=horastrabajo
self.horasextras=horasextras
def __str__(self):
#return Empleado.__str__(self.nombre,self.edad)
return f"Nombre: {self.nombre} Edad: {self.edad}\nTotal Salario: {self.valorhora*self.horastrabajo}\nTotal Horas Extras: {self.horasextras}\nToal + Horas Extras: {self.valorhora*self.horastrabajo+self.horasextras}"
def calcular_salario(self):
salario = self.valorhora*self.horastrabajo
return salario
    def imprimir(self):
        # Print this employee's own data (works for any instance, e.g. e2 as well)
        print("Nombre: {} Edad: {}".format(self.nombre, self.edad))
        print("Total Salario: ", self.calcular_salario())
        print("Total Horas Extra: ", self.horasextras)
        print("Total + Horas Extras: ", self.calcular_salario() + self.horasextras)
e1 = EmpleadoSalario('1714574801','Paul Rosales',27,2,260,65)
e1.imprimir()
print("***********************")
print(e1)
print("-----------------------")
e2 = EmpleadoSalario('1714574801','Juan Gutierrez',37,3,260,80)
print(e2)
#e1 = EmpleadoSalario('1714574801','Paul Rosales',27,1,520,65)
# Expected output
'''
Empleado 1:
Nombre: Paul Rosales Edad: 27
Total Salario: 520
Total horas Extras: 65
Total + Horas Extras: 585
Empleado 2:
'''
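# The `empleado` module imported above is not included in this file. A minimal sketch of
# the assumed Empleado base class, consistent with how it is used here (the class name is
# hypothetical; only ci/nombre/edad with property/setter access are required):
class _EmpleadoSketch:
    def __init__(self, ci, nombre, edad):
        self._ci = ci
        self._nombre = nombre
        self._edad = edad

    @property
    def nombre(self):
        return self._nombre

    @nombre.setter
    def nombre(self, valor):
        self._nombre = valor

    @property
    def edad(self):
        return self._edad

    @edad.setter
    def edad(self, valor):
        self._edad = valor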
# ---------------------------------------------------------------------------
# [end of file; author: jukyarosinc@gmail.com]
# next file: 6desislava6/Hack-Bulgaria
#            /Hack-tenth-week/cinema/website/management/commands/populate_db.py (Python, no license, 2,035 bytes)
# ---------------------------------------------------------------------------
from django.core.management.base import BaseCommand
from website.models import Movie, Projection, Reservation
class Command(BaseCommand):
def _add_movies(self):
Movie.add_movie(name='The Green Mile', rating=9.0)
Movie.add_movie(name='Stay Alive', rating=6.0)
Movie.add_movie(name='Twenty-Seven Dresses', rating=5.0)
Movie.add_movie(name='Inception', rating=9.0)
Movie.add_movie(name='The Hunger Games: Catching Fire', rating=7.9)
Movie.add_movie(name='Wreck-It Ralph', rating=7.8)
Movie.add_movie(name='Her', rating=8.3)
def _delete_movies(self):
Movie.objects.all().delete()
def _delete_projections(self):
Projection.objects.all().delete()
def _add_projections(self):
Projection.add_projection(movie=Movie.objects.get(name='The Green Mile'), type_projection='3D', date='2015-05-19', time='18:00')
Projection.add_projection(movie=Movie.objects.get(name='Stay Alive'), type_projection='3D', date='2015-05-19', time='18:00')
Projection.add_projection(movie=Movie.objects.get(name='Twenty-Seven Dresses'), type_projection='3D', date='2015-05-19', time='18:00')
Projection.add_projection(movie=Movie.objects.get(name='Inception'), type_projection='3D', date='2015-05-19', time='18:00')
Projection.add_projection(movie=Movie.objects.get(name='The Hunger Games: Catching Fire'), type_projection='3D', date='2015-05-19', time='18:00')
Projection.add_projection(movie=Movie.objects.get(name='Wreck-It Ralph'), type_projection='3D', date='2015-05-19', time='18:00')
def _add_reservations(self):
Reservation.add_reservation(username='desi', row='1', col='1', projection=Projection.objects.get(movie__name='The Green Mile'))
Reservation.add_reservation(username='marmot', row='1', col='1', projection=Projection.objects.get(movie__name='Inception'))
def handle(self, *args, **options):
self._add_movies()
self._add_projections()
self._add_reservations()
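# The Movie.add_movie / Projection.add_projection / Reservation.add_reservation helpers used
# above are project-specific methods that are not shown in this file. A hypothetical
# equivalent for movies, assuming the model simply has `name` and `rating` fields:
def _add_movie_fallback(name, rating):
    """Create a Movie row the way Movie.add_movie is assumed to behave."""
    return Movie.objects.create(name=name, rating=rating)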
# ---------------------------------------------------------------------------
# [end of file; author: desislavatsvetkova@mail.bg]
# next file: raelyz/machinelearning
#            /numpy practice/validatetest.py (Python, MIT, 4,252 bytes)
# ---------------------------------------------------------------------------
#@title Import modules
import numpy as np
import pandas as pd
import tensorflow as tf
from matplotlib import pyplot as plt
pd.options.display.max_rows = 10
pd.options.display.float_format = "{:.1f}".format
train_df = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_train.csv")
test_df = pd.read_csv("https://download.mlcc.google.com/mledu-datasets/california_housing_test.csv")
scale_factor = 1000.0
# Scale the training set's label.
train_df["median_house_value"] /= scale_factor
# Scale the test set's label
test_df["median_house_value"] /= scale_factor
#@title Define the functions that build and train a model
def build_model(my_learning_rate):
"""Create and compile a simple linear regression model."""
# Most simple tf.keras models are sequential.
model = tf.keras.models.Sequential()
# Add one linear layer to the model to yield a simple linear regressor.
model.add(tf.keras.layers.Dense(units=1, input_shape=(1,)))
# Compile the model topography into code that TensorFlow can efficiently
# execute. Configure training to minimize the model's mean squared error.
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=my_learning_rate),
loss="mean_squared_error",
metrics=[tf.keras.metrics.RootMeanSquaredError()])
return model
def train_model(model, df, feature, label, my_epochs,
my_batch_size=None, my_validation_split=0.1):
"""Feed a dataset into the model in order to train it."""
history = model.fit(x=df[feature],
y=df[label],
batch_size=my_batch_size,
epochs=my_epochs,
validation_split=my_validation_split)
# Gather the model's trained weight and bias.
trained_weight = model.get_weights()[0]
trained_bias = model.get_weights()[1]
# The list of epochs is stored separately from the
# rest of history.
epochs = history.epoch
# Isolate the root mean squared error for each epoch.
hist = pd.DataFrame(history.history)
rmse = hist["root_mean_squared_error"]
return epochs, rmse, history.history
print("Defined the build_model and train_model functions.")
#@title Define the plotting function
def plot_the_loss_curve(epochs, mae_training, mae_validation):
"""Plot a curve of loss vs. epoch."""
plt.figure()
plt.xlabel("Epoch")
plt.ylabel("Root Mean Squared Error")
plt.plot(epochs[1:], mae_training[1:], label="Training Loss")
plt.plot(epochs[1:], mae_validation[1:], label="Validation Loss")
plt.legend()
# We're not going to plot the first epoch, since the loss on the first epoch
# is often substantially greater than the loss for other epochs.
merged_mae_lists = mae_training[1:] + mae_validation[1:]
highest_loss = max(merged_mae_lists)
lowest_loss = min(merged_mae_lists)
delta = highest_loss - lowest_loss
print(delta)
top_of_y_axis = highest_loss + (delta * 0.05)
bottom_of_y_axis = lowest_loss - (delta * 0.05)
plt.ylim([bottom_of_y_axis, top_of_y_axis])
plt.show()
print("Defined the plot_the_loss_curve function.")
# The following variables are the hyperparameters.
learning_rate = 0.08
epochs = 30
batch_size = 100
# Split the original training set into a reduced training set and a
# validation set.
validation_split=0.1
# Identify the feature and the label.
my_feature="median_income" # the median income on a specific city block.
my_label="median_house_value" # the median value of a house on a specific city block.
# That is, you're going to create a model that predicts house value based
# solely on the neighborhood's median income.
# Discard any pre-existing version of the model.
my_model = None
shuffled_train_df = train_df.reindex(np.random.permutation(train_df.index))
# Invoke the functions to build and train the model.
my_model = build_model(learning_rate)
epochs, rmse, history = train_model(my_model, shuffled_train_df, my_feature,
my_label, epochs, batch_size,
validation_split)
plot_the_loss_curve(epochs, history["root_mean_squared_error"],
history["val_root_mean_squared_error"])
train_df.head(n=500)
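# The test set loaded and rescaled at the top of this file is never used above. A short
# follow-up sketch (not part of the original exercise) that checks the trained model
# against it:
x_test = test_df[my_feature]
y_test = test_df[my_label]
results = my_model.evaluate(x_test, y_test, batch_size=batch_size)
print("Test-set loss and RMSE:", results)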
# ---------------------------------------------------------------------------
# [end of file; author: 73516102+raelyz@users.noreply.github.com]
# next file: svjt78/IntelliDataSmart
#            /IntelliDataSmart/groups/migrations/0011_remove_group_groupid.py (Python, no license, 387 bytes)
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2020-06-08 23:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('groups', '0010_group_groupid'),
]
operations = [
migrations.RemoveField(
model_name='group',
name='groupid',
),
]
# ---------------------------------------------------------------------------
# [end of file; author: svjt78@gmail.com]
# next file: Homewm/firmware-url-detection
#            /Firmware-url-Detection/url_classfication/trainer_zgd_TFIDF.py (Python, no license, 2,654 bytes)
# ---------------------------------------------------------------------------
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
import joblib  # joblib is used directly; sklearn.externals.joblib is not available in newer scikit-learn releases
import warnings
warnings.filterwarnings("ignore", category=FutureWarning, module="sklearn", lineno=433)
def getTokens(input):
tokensBySlash = str(input.encode('utf-8')).split('/') #get tokens after splitting by slash
# print tokensBySlash
allTokens = []
for i in tokensBySlash:
tokens = str(i).split('-') #get tokens after splitting by dash
tokensByDot = []
for j in range(0,len(tokens)):
            tempTokens = str(tokens[j]).split('_') #get tokens after splitting by underscore
tokensByDot = tokensByDot + tempTokens
allTokens = allTokens + tokens + tokensByDot
allTokens = list(set(allTokens)) #remove redundant tokens
# print allTokens
list_comm = ['http:','https:','com','www']
for i in list_comm:
if i in allTokens:
allTokens.remove(i) #removing .com since it occurs a lot of times and it should not be included in our feature
return allTokens
def TL():
allurls = 'all_url_label.csv' # path to our all urls file
allurlscsv = pd.read_csv(allurls,',',error_bad_lines=False) #reading file
    allurlsdata = pd.DataFrame(allurlscsv) #converting to a dataframe (normalize the data layout)
    allurlsdata = np.array(allurlsdata) #converting it into an array
    np.random.shuffle(allurlsdata) #shuffling (randomize the row order)
    y = [d[1] for d in allurlsdata] #all labels
    corpus = [d[0] for d in allurlsdata] #all urls corresponding to a label (either good or bad)
    vectorizer = TfidfVectorizer(tokenizer=getTokens) #get a vector for each url, using our customized tokenizer (the filtering rules above)
# count_vec = CountVectorizer(stop_words=None)
# X = count_vec.fit_transform(corpus) # get the X vector
X = vectorizer.fit_transform(corpus) #get the X vector
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) #split into training and testing set 80/20 ratio
lgs = LogisticRegression() #using logistic regression
lgs.fit(X_train, y_train)
print(lgs.score(X_test, y_test)) #pring the score. It comes out to be 98%
    joblib.dump(lgs, 'tfidf.pkl') #persist the trained model
    return vectorizer, lgs #return the fitted vectorizer and the model
# return count_vec, lgs
if __name__ == "__main__":
vectorizer, lgs = TL()
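    # Follow-up sketch (not in the original): score a couple of new URLs with the freshly
    # trained model. The URLs below are made-up examples for illustration only.
    sample_urls = ["example.com/downloads/firmware_v2.bin", "example.com/login-update_account"]
    X_sample = vectorizer.transform(sample_urls)
    print(list(zip(sample_urls, lgs.predict(X_sample))))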
# ---------------------------------------------------------------------------
# [end of file; author: 1663462979@qq.com]
# next file: TheLabbingProject/django_analyses
#            /tests/models/output/definitions/test_output_definition.py (Python, Apache-2.0, 5,861 bytes)
# ---------------------------------------------------------------------------
from django.core.exceptions import ValidationError
from django.test import TestCase
from django_analyses.models.input.definitions.file_input_definition import \
FileInputDefinition
from django_analyses.models.managers.output_definition import \
OutputDefinitionManager
from django_analyses.models.output.definitions.output_definition import \
OutputDefinition
from django_analyses.models.output.types.file_output import FileOutput
from tests.factories.output.definitions.output_definition import \
OutputDefinitionFactory
class OutputDefinitionTestCase(TestCase):
"""
Tests for the
:class:`~django_analyses.models.output.definitions.output_definition.OutputDefinition`
model.
"""
def setUp(self):
"""
Adds the created instances to the tests' contexts.
For more information see unittest's :meth:`~unittest.TestCase.setUp` method.
"""
self.output_definition = OutputDefinitionFactory()
##########
# Meta #
##########
def test_ordering(self):
"""
Test the `ordering`.
"""
self.assertTupleEqual(OutputDefinition._meta.ordering, ("key",))
def test_output_class_is_none(self):
"""
Tests that the *output_class* class attribute is set to None. This is
        meant to be overridden by a
:class:`~django_analyses.models.output.output.Output` instance.
"""
self.assertIsNone(OutputDefinition.output_class)
def test_custom_manager_is_assigned(self):
"""
Tests that the manager is assigned to be the custom
:class:`~django_analyses.models.managers.output_definition.OutputDefinitionManager`
class.
"""
self.assertIsInstance(OutputDefinition.objects, OutputDefinitionManager)
##########
# Fields #
##########
# key
def test_key_max_length(self):
"""
Test the max_length of the *key* field.
"""
field = self.output_definition._meta.get_field("key")
self.assertEqual(field.max_length, 50)
def test_key_is_not_unique(self):
"""
Tests that the *key* field is not unique.
"""
field = self.output_definition._meta.get_field("key")
self.assertFalse(field.unique)
def test_key_blank_and_null(self):
"""
Tests that the *key* field may not be blank or null.
"""
field = self.output_definition._meta.get_field("key")
self.assertFalse(field.blank)
self.assertFalse(field.null)
# description
def test_description_blank_and_null(self):
"""
Tests that the *description* field may be blank or null.
"""
field = self.output_definition._meta.get_field("description")
self.assertTrue(field.blank)
self.assertTrue(field.null)
###########
# Methods #
###########
def test_string(self):
"""
Test the string output.
"""
value = str(self.output_definition)
expected = self.output_definition.key
self.assertEqual(value, expected)
def test_create_output_instance_raises_type_error(self):
"""
Tests that calling the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.create_output_instance`
raises a ValidationError. This is the expected behavior as long as the
output_class attribute is not defined (or ill defined).
"""
with self.assertRaises(ValidationError):
self.output_definition.create_output_instance()
def test_create_output_instance_with_non_model_value_raises_type_error(self):
"""
Tests that calling the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.create_output_instance`
with a non-model value raises a ValidationError.
"""
self.output_definition.output_class = str
with self.assertRaises(ValidationError):
self.output_definition.create_output_instance()
def test_create_output_instance_with_non_output_subclass_value_raises_type_error(
self,
):
"""
Tests that calling the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.create_output_instance`
with a non-:class:`~django_analyses.models.output.output.Output`
model subclass value raises a ValidationError.
"""
self.output_definition.output_class = FileInputDefinition
with self.assertRaises(ValidationError):
self.output_definition.check_output_class_definition()
def test_resetting_output_class_to_valid_output_subclass(self):
"""
Tests that the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.check_output_class_definition`
method does not raise a ValidationError when setting *output_class* to
some valid Output model subclass.
"""
self.output_definition.output_class = FileOutput
try:
self.output_definition.check_output_class_definition()
except ValidationError:
self.fail(
"Failed to set output_definition output_class to a valid Output subclass!"
)
def test_create_output_instance_reraises_uncaught_exception(self):
"""
Tests that calling the
:meth:`~django_analyses.models.output.definitions.output_definition.OutputDefinition.create_output_instance`
        method when *output_class* is properly set but the kwargs are invalid
        still raises an exception.
"""
self.output_definition.output_class = FileOutput
with self.assertRaises(ValueError):
self.output_definition.create_output_instance()
|
[
"z.baratz@gmail.com"
] |
z.baratz@gmail.com
|
eecfca54a2533f8145259bdd583c8d2fe95b525a
|
16f647884c16426dd485dcf485341217dc4df8ce
|
/test.py
|
16567b4b7954afcdf7b3b59f72b7c95ddc37394f
|
[] |
no_license
|
digitechVidya1/test
|
5cba6f57a9d50fbf7f1490ae931820ff40a6ba36
|
44482956133fd5d01f9249a38bdba856a358100e
|
refs/heads/master
| 2023-02-15T22:54:32.208698 | 2021-01-09T15:03:29 | 2021-01-09T15:03:29 | 327,050,571 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
"""
TextBlob provides some very useful features for Machine Learning projects, like:
Noun phrase extraction
Part-of-speech tagging
Sentiment analysis
Classification
Tokenization
Word and phrase frequencies
"""
from textblob import TextBlob
words = ["deta","analyeis"]
correct_word = []
for i in words:
correct_word.append(TextBlob(i))
print("Wrong words: ",words)
print("Correct Words are: ")
for i in correct_word:
print(i.correct(),end=" ")
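# --- Hedged sketch (not part of the original script): the docstring above lists
# several TextBlob features beyond spelling correction; the attributes below are
# standard TextBlob APIs, though noun_phrases/tags may require the extra corpora
# installed via `python -m textblob.download_corpora`.
sample = TextBlob("Data analysis with TextBlob is surprisingly pleasant.")
print("\nNoun phrases:", sample.noun_phrases)          # noun phrase extraction
print("POS tags:", sample.tags)                        # part-of-speech tagging
print("Sentiment:", sample.sentiment)                  # polarity and subjectivity
print("Tokens:", sample.words)                         # tokenization
print("Word frequencies:", dict(sample.word_counts))   # word and phrase frequencies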
|
[
"digitechsocial@gmail.com"
] |
digitechsocial@gmail.com
|
af9bf4858b5793e1641a6963e2f7e683b1de3f12
|
1adc548f1865c0e4fcb3b3ff1049789fa0c72b12
|
/tests/observes/test_column_property.py
|
058383a5651f5433d39e0d4606bda3d52d6f5663
|
[] |
no_license
|
wujuguang/sqlalchemy-utils
|
ca826a81acdc70168e0b85820aaf8fe1604d6b0a
|
b6871980a412f2ebd16ec08be3127814b42ba64e
|
refs/heads/master
| 2021-01-12T20:59:48.692539 | 2016-01-15T08:06:48 | 2016-01-18T18:52:12 | 48,418,840 | 0 | 0 | null | 2015-12-22T08:05:48 | 2015-12-22T08:05:47 | null |
UTF-8
|
Python
| false | false | 1,582 |
py
|
import sqlalchemy as sa
from pytest import raises
from sqlalchemy_utils.observer import observes
from tests import TestCase
class TestObservesForColumn(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
price = sa.Column(sa.Integer)
@observes('price')
def product_price_observer(self, price):
self.price = price * 2
self.Product = Product
def test_simple_insert(self):
product = self.Product(price=100)
self.session.add(product)
self.session.flush()
assert product.price == 200
class TestObservesForColumnWithoutActualChanges(TestCase):
dns = 'postgres://postgres@localhost/sqlalchemy_utils_test'
def create_models(self):
class Product(self.Base):
__tablename__ = 'product'
id = sa.Column(sa.Integer, primary_key=True)
price = sa.Column(sa.Integer)
@observes('price')
def product_price_observer(self, price):
raise Exception('Trying to change price')
self.Product = Product
def test_only_notifies_observer_on_actual_changes(self):
product = self.Product()
self.session.add(product)
self.session.flush()
with raises(Exception) as e:
product.price = 500
self.session.commit()
assert str(e.value) == 'Trying to change price'
|
[
"konsta.vesterinen@gmail.com"
] |
konsta.vesterinen@gmail.com
|
1c067814153122d6afb026459cbc5c5d408445b1
|
5dfeb98f5fa0ff6717af22f034341e849a09abff
|
/Lesson_11/HW11_3.py
|
aff33f68b07f320f471cf24486dfd1db3940a03a
|
[] |
no_license
|
Krasniy23/Hillel_Krasnoshchok
|
2ceb85405fe25067f25ea992debd27d3f3c07a3f
|
66bb2e6c7c64f222201d01f1f5b712852b6d692f
|
refs/heads/master
| 2023-01-02T15:59:19.475009 | 2020-10-19T13:47:42 | 2020-10-19T13:47:42 | 291,098,178 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
st = input('enter string: ').split()
count = 0
for i in st:
if len(i) > count:
count = len(i)
word = i
print('the longest word is: ', word)
|
[
"54262807+Krasniy23@users.noreply.github.com"
] |
54262807+Krasniy23@users.noreply.github.com
|
7b4ace09f0a0ae5c2539329a63e377c94c153453
|
eed0308536458462df010ec02c7cd18276d9fb67
|
/curriculum/migrations/0007_merge_20180705_0012.py
|
dd42f20ebadc2df4d230b5853ffc3487320f767f
|
[] |
no_license
|
deep-ideas/taught-me-django
|
66e8a3ff711ef4141e6d821781a18f7021c50b35
|
3b7a9366f57009d7542fd2e1506c362dcc3b9065
|
refs/heads/master
| 2022-12-12T11:56:18.084214 | 2018-07-06T10:30:21 | 2018-07-06T10:30:21 | 138,504,265 | 0 | 0 | null | 2022-12-08T00:58:27 | 2018-06-24T18:02:59 |
JavaScript
|
UTF-8
|
Python
| false | false | 279 |
py
|
# Generated by Django 2.0.2 on 2018-07-05 00:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('curriculum', '0006_auto_20180703_0648'),
('curriculum', '0006_auto_20180629_2352'),
]
operations = [
]
|
[
"onyariantoprapanca@gmail.com"
] |
onyariantoprapanca@gmail.com
|
919218c42d0db4b6de64d6cb4572d3c055f3218f
|
8d135b4d19e4d08d813ae384fe05e79b3359f7da
|
/helpers.py
|
ee23b5e93e8f097d94991ae1d9d1cefdddc283f5
|
[] |
no_license
|
maxscheiber/projecteuler
|
429748abdf8fd6af19a16d933100db4c955a14b4
|
57a43c4094ab2a51ec303bd38d75478db3d15634
|
refs/heads/master
| 2020-04-11T09:46:41.519677 | 2013-07-28T20:19:36 | 2013-07-28T20:19:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 403 |
py
|
from math import log
def sieve(n):
primeBools = [True for i in range(n+2)] # primes[i] represents the number i
for i in range(2, n):
for j in range(2*i, n, i):
primeBools[j] = False
primes = []
for i in range(2, n):
if primeBools[i]:
primes.append(i)
return primes
def primeUpperBound(n):
return int(round(n * log(n) + n * log(log(n))))
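# --- Hedged usage sketch (not part of the original helpers): primeUpperBound(n)
# applies the classical bound p_n < n*(ln n + ln ln n) (valid for n >= 6), so
# sieving up to that bound yields at least the first n primes.
if __name__ == '__main__':
    n = 10
    first_primes = sieve(primeUpperBound(n))[:n]
    print(first_primes)  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]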
|
[
"keasbeynumb@gmail.com"
] |
keasbeynumb@gmail.com
|
69e05d4c935c7c51b0514fa07f636691f6091bbd
|
02fbc6a1358efee2bb9f2bad1375764d6977de73
|
/tensorflow_study3_placeholder.py
|
d0e3c3dce17d694dbf0c46fb5054ad09ae749743
|
[
"Apache-2.0"
] |
permissive
|
TerryBryant/tensorflow
|
6828510ad52c8723b0efbd9fc022301db64cfcf0
|
6e4727f861091d61dae00f4a79918e85750016d1
|
refs/heads/master
| 2021-05-07T01:03:07.953706 | 2017-11-15T00:55:50 | 2017-11-15T00:55:50 | 110,197,393 | 0 | 0 | null | 2017-11-10T03:23:06 | 2017-11-10T03:23:06 | null |
UTF-8
|
Python
| false | false | 311 |
py
|
import tensorflow as tf
#input1 = tf.placeholder(tf.float32, [2, 2])
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)
with tf.Session() as sess:
    print(sess.run(output, feed_dict={input1:[7.], input2:[2.]}))  # the actual values are supplied at run time via feed_dict
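# --- Hedged extension sketch (TensorFlow 1.x API, same as above): when a placeholder
# is created with an explicit shape, the fed value must match that shape.
input3 = tf.placeholder(tf.float32, [2, 2])
input4 = tf.placeholder(tf.float32, [2, 2])
output2 = tf.matmul(input3, input4)  # matrix product instead of element-wise multiply
with tf.Session() as sess:
    print(sess.run(output2, feed_dict={input3: [[1., 2.], [3., 4.]],
                                       input4: [[5., 6.], [7., 8.]]}))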
|
[
"noreply@github.com"
] |
TerryBryant.noreply@github.com
|
c2e9ac93f8629983cb977f8a65caf9dee5bfceaa
|
80760d4c8a6b2c45b4b529bdd98d33c9c5509438
|
/Practice/atcoder/ABC/054/src/c2.py
|
007ef8de5fd091ec21679eb96f94eb5ea1f9c5f2
|
[] |
no_license
|
prrn-pg/Shojin
|
f1f46f8df932df0be90082b475ec02b52ddd882e
|
3a20f1122d8bf7d95d9ecd205a62fc36168953d2
|
refs/heads/master
| 2022-12-30T22:26:41.020473 | 2020-10-17T13:53:52 | 2020-10-17T13:53:52 | 93,830,182 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 687 |
py
|
# Spanning tree? Is that what it's called? The cost doesn't matter here anyway.
# Just hold the graph as an adjacency list and DFS over it.
N, M = map(int, input().split())
Neighbor_list = [[] for _ in range(N)]
for _ in range(M):
s, t = map(int, input().split())
Neighbor_list[s-1].append(t-1)
Neighbor_list[t-1].append(s-1)
def dfs(cur, path):
if len(path) == N:
return 1
else:
ret = 0
for neighbor in Neighbor_list[cur]:
if neighbor not in path:
next_list = path[:]
next_list.append(neighbor)
ret += dfs(neighbor, next_list)
return ret
print(dfs(0, [0]))
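# --- Hedged example run (input format inferred from the parsing above):
#   input:   3 3
#            1 2
#            1 3
#            2 3
#   output:  2   (starting from vertex 1, the paths 1-2-3 and 1-3-2 visit every vertex)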
|
[
"hjod1172@yahoo.co.jp"
] |
hjod1172@yahoo.co.jp
|
cd53fdab752cc6628b086d089002c796748479b8
|
e09bbdc53af6be9281795189f26f6e59997abf68
|
/tests/test_forex.py
|
eeb783520060d238446a4a97fba67b6f1d7c96a9
|
[
"Apache-2.0"
] |
permissive
|
jag787/ppQuanTrade
|
620ce72c7875bb730708c48ae0481376b43e501b
|
9a6da7522d281da130a2c459e2e614a75daa543d
|
refs/heads/master
| 2021-01-11T13:53:40.583710 | 2013-12-20T10:43:58 | 2013-12-20T10:43:58 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,100 |
py
|
#
# Copyright 2013 Xavier Bruhiere
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Tests for the forex datasource
'''
from unittest import TestCase
from nose.tools import timed
from neuronquant.data.forex import ConnectTrueFX
#from neuronquant.utils.datautils import FX_PAIRS
DEFAULT_TIMEOUT = 15
EXTENDED_TIMEOUT = 90
class TestForex(TestCase):
'''
Forex access through TrueFX provider
    !! Beware that the TrueFX server will return an empty array
    if the currencies were not updated since the last call
'''
def setUp(self):
pass
def tearDown(self):
pass
def test_connection_credentials(self):
'''
        Use an explicit TrueFX username and password account for
        authentication
'''
client = ConnectTrueFX(user='Gusabi', password='quantrade')
        # If it succeeded, an authentication code for further use was returned
        # by the TrueFX server
assert client
assert client._code
assert client._code.find('Gusabi') == 0
def test_connection_default_auth_file(self):
'''
        If no credentials are given, the constructor tries to find them
        by reading config/default.json
'''
        # It's the default behavior, nothing to specify
client = ConnectTrueFX()
assert client
assert client._code
assert client._code.find('Gusabi') == 0
def test_connection_custom_auth_file(self):
'''
        If no credentials are given, the constructor tries to find them
        by reading the given JSON file
'''
client = ConnectTrueFX(auth_file='plugins.json')
assert client
assert client._code
assert client._code.find('Gusabi') == 0
def test_connection_without_auth(self):
''' TrueFX API can be used without credentials in a limited mode '''
#FIXME Fails to retrieve limited values
client = ConnectTrueFX(user=None, password=None, auth_file='fake.json')
assert client._code == 'not authorized'
def test_connection_with_pairs(self):
pairs = ['EUR/USD', 'USD/JPY']
client = ConnectTrueFX(pairs=pairs)
        ### The default call uses the pairs given during connection
dataframe = client.QueryTrueFX()
for p in pairs:
assert p in dataframe.columns
@timed(DEFAULT_TIMEOUT)
def test_query_default(self):
pass
def test_query_format(self):
pass
def test_query_pairs(self):
pass
def test_response_formating(self):
pass
def test_detect_active(self):
pass
def test_standalone_request(self):
pass
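# --- Hedged usage sketch (not an actual test): the class docstring above warns that
# TrueFX returns an empty array when no pair has updated since the last call, so a
# caller would typically re-query until the DataFrame is non-empty. Only calls already
# used in the tests above (ConnectTrueFX, QueryTrueFX) are assumed here.
def _poll_until_update(pairs, retries=3):
    client = ConnectTrueFX(pairs=pairs)
    for _ in range(retries):
        dataframe = client.QueryTrueFX()
        if len(dataframe):
            return dataframe
    return None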
|
[
"xavier.bruhiere@gmail.com"
] |
xavier.bruhiere@gmail.com
|
6af8eb5d346add6ed2018a0a659d580796d4f1cd
|
963cd89184cef2edb8c78dc349c81db7bdc32642
|
/Plots/Olympicbarchart.py
|
516bf1ea4922c2c4d313626672d80e3baa884fec
|
[] |
no_license
|
dpmessen/PythonDash
|
d458b6495dad055ca43bf4365157d52de1781e49
|
0d1633efe85600fd2b75b8ef638a0795a932fdf1
|
refs/heads/master
| 2021-05-18T22:16:16.762829 | 2020-04-02T20:44:05 | 2020-04-02T20:44:05 | 251,449,616 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 609 |
py
|
import pandas as pd
import plotly.offline as pyo
import plotly.graph_objs as go
# Load CSV file from Datasets folder
df = pd.read_csv('../Datasets/Olympic2016Rio.csv')
# Sort values and select the first 20 countries
df = df.sort_values(by=['Total'], ascending=[False]).head(20)
# Preparing data
data = [go.Bar(x=df['NOC'], y=df['Total'])]
# Preparing layout
layout = go.Layout(title='Top 20 countries', xaxis_title="Countries",
yaxis_title="Total Medals")
# Plot the figure and saving in a html file
fig = go.Figure(data=data, layout=layout)
pyo.plot(fig, filename='Olympicbarchart.html')
|
[
"noreply@github.com"
] |
dpmessen.noreply@github.com
|
838a1a224339fe920c49a50a2b316a3903af131c
|
fca7958875d4650c6daeec7049fef02139db9eb1
|
/mi/dataset/parser/test/test_parad_k_stc_imodem.py
|
e220343f8e3a8ebed36ef5cb7da6e5a3da97baf2
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
oceanobservatories/mi-dataset
|
36f08a076b24c40f91abd0a97e47a72ec85fc5e6
|
93aa7289f5f4788727f3b32f11d62f30ad88fd2f
|
refs/heads/master
| 2020-04-04T04:25:22.372472 | 2017-02-24T17:06:23 | 2017-02-24T17:06:23 | 24,067,634 | 1 | 9 | null | 2016-06-29T23:24:46 | 2014-09-15T18:15:01 |
Python
|
UTF-8
|
Python
| false | false | 26,149 |
py
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_parad_k_stc_imodem
@file marine-integrations/mi/dataset/parser/test/test_parad_k_stc_imodem.py
@author Mike Nicoletti, Steve Myerson (recovered)
@brief Test code for a Parad_k_stc_imodem data parser
"""
import struct, ntplib
from StringIO import StringIO
from nose.plugins.attrib import attr
from mi.core.log import get_logger ; log = get_logger()
from mi.core.exceptions import SampleException
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.parad_k_stc_imodem import \
Parad_k_stc_imodemParser,\
Parad_k_stc_imodemRecoveredParser, \
Parad_k_stc_imodemDataParticle, \
Parad_k_stc_imodemRecoveredDataParticle
from mi.dataset.parser.WFP_E_file_common import StateKey
@attr('UNIT', group='mi')
class Parad_k_stc_imodemParserUnitTestCase(ParserUnitTestCase):
"""
Parad_k_stc_imodem Parser unit test suite
"""
TEST_DATA_SHORT = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac" \
"\x1d\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00" \
"\x00\x00\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^" \
"\x00OR\x9d\xac*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
TEST_DATA = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d\x00" \
"\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00\x00" \
"\x00\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^" \
"\x00OR\x9d\xac*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00OR\x9d\xac/C\xb8COA6\xde" \
"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9d\x00p\x00QR\x9d\xac3C\x98\xe5TA733\x00\x00\x00\x00\x00\x00\x00\x00" \
"\x00\xa4\x00u\x00OR\x9d\xac8C\x9566A7!-\x00\x00\x00\x00\x00\x00\x00\x00\x00\x9a\x00o\x00OR\x9d\xac?C\xa1\xd7\xc3" \
"A6\xa6LB\x8bG\xae\x00\x00\x00\x00\x00\xb6\x00v\x00PR\x9d\xacECsS\xfeA7e\xfeB\x88\x00\x00\x00\x00\x00\x00\x00" \
"\x98\x00s\x00QR\x9d\xacKC\x89\x17\x8cA6\xe2\xecB\x84\x99\x9a\x00\x00\x00\x00\x00\xa4\x00\x81\x00PR\x9d\xacQC}\n" \
"\xbfA7\x00hB\x81G\xae\x00\x00\x00\x00\x00\xa2\x00|\x00NR\x9d\xacWCyW\xc7A6\x97\x8dB{\xe1H\x00\x00\x00\x00\x00\x9a" \
"\x00m\x00NR\x9d\xac]C\x8c!#A6\x9f\xbeBuQ\xec\x00\x00\x00\x00\x00\x97\x00s\x00QR\x9d\xaccC\x84!9A6h\nBn\x8f\\\x00" \
"\x00\x00\x00\x00\x9f\x00v\x00NR\x9d\xaciCE\xa5UA6a|Bh=q\x00\x00\x00\x00\x00\x97\x00l\x00PR\x9d\xacoC\xa5\xa5\xad" \
"A5\x94\xafBa\\)\x00\x00\x00\x00\x00\x9b\x00n\x00RR\x9d\xacuC\\\r\x08A6\x14{B[\n=\x00\x00\x00\x00\x00\x9a\x00s\x00" \
"OR\x9d\xac{C\xa3\x0b\xb8A5F\nBT33\x00\x00\x00\x00\x00\x98\x00q\x00NR\x9d\xac\x81CO\xc0+A5\xd7\xdcBM\xd7\n\x00\x00" \
"\x00\x00\x00\x97\x00n\x00PR\x9d\xac\x87Cxp\xd0A5#\xa3BGG\xae\x00\x00\x00\x00\x00\x9b\x00n\x00PR\x9d\xac\x8dC\x84" \
"\xdd\xd9A5X\x10B@\xae\x14\x00\x00\x00\x00\x00\xa5\x00v\x00OR\x9d\xac\x93C\xa0\x85\x01A4j\x7fB:\x14{\x00\x00\x00\x00" \
"\x00\x9c\x00t\x00QR\x9d\xac\x99Cq\xa4\xdbA5:\x92B3\xc2\x8f\x00\x00\x00\x00\x00\x9c\x00x\x00PR\x9d\xac\x9fCg\x07#A5" \
"\x18+B-\x00\x00\x00\x00\x00\x00\x00\x9e\x00m\x00QR\x9d\xac\xa5C\x9bw\x96A4FtB&z\xe1\x00\x00\x00\x00\x00\xd7\x00s" \
"\x00OR\x9d\xac\xabCmP5A4\x9dJB\x1f\xd7\n\x00\x00\x00\x00\x00\x99\x00s\x00PR\x9d\xac\xb1C\xad\x960A3\x8a\tB\x19" \
"(\xf6\x00\x00\x00\x00\x00\x95\x00n\x00OR\x9d\xac\xb7C\x0c\xce]A5\x0f\xfaB\x12\xe1H\x00\x00\x00\x00\x00\x9c\x00u" \
"\x00PR\x9d\xac\xbdC\xa1\xeb\x02A3Z\x85B\x0c=q\x00\x00\x00\x00\x00\x95\x00u\x00OR\x9d\xac\xc3C$\xafOA4\xa23B\x05" \
"\xe1H\x00\x00\x00\x00\x00\x99\x00r\x00PR\x9d\xac\xc9C\xae\xddeA3\x0f(A\xfe(\xf6\x00\x00\x00\x00\x00\x9a\x00o\x00O" \
"R\x9d\xac\xcfA\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P\xff\xff\xff\xff\x00\x00\x00\rR\x9d" \
"\xac\xd4R\x9d\xadQ"
# all flags set to zero
TEST_DATA_BAD_FLAGS = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d" \
"\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t\xf2\xf7A9A!\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00OR\x9d\xac" \
"*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
# took 5 bytes out of second engineering sample
TEST_DATA_BAD_ENG = "\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00R\x9d\xab\xa2R\x9d\xac\x19R\x9d\xac\x1d" \
"\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00NR\x9d\xac!C\t!\x00\x00\x00\x00\x00" \
"\x00\x00\x00\x00\xf2\x00c\x00OR\x9d\xac&C\xbc\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00OR\x9d\xac" \
"*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O"
# Has a NaN for par_value
TEST_DATA_NAN = \
'\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x01\x00\x00\x00\x00' \
'\x52\x9D\xAB\xA2\x52\x9D\xAC\x19' \
'\x52\x9D\xAC\x1D' \
'\x00\x00\x00\x00\x41\x3A\x36\xE3\x00\x00\x00\x00' \
'\xFF\xC0\x00\x00' \
'\x01\x03\x00\x68\x00\x4E'
def create_rec_parser(self, new_state, file_handle):
"""
This function creates a Parad_k_stc parser for recovered data.
"""
if new_state is None:
new_state = self.state
parser = Parad_k_stc_imodemRecoveredParser(self.rec_config, new_state, file_handle,
self.state_callback, self.pub_callback)
return parser
def state_callback(self, state, file_ingested):
""" Call back method to watch what comes in via the position callback """
self.file_ingested = file_ingested
self.state_callback_value = state
def pub_callback(self, pub):
""" Call back method to watch what comes in via the publish callback """
self.publish_callback_value = pub
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE:
'mi.dataset.parser.parad_k_stc_imodem',
DataSetDriverConfigKeys.PARTICLE_CLASS:
['Parad_k_stc_imodem_statusParserDataParticle',
'Parad_k_stc_imodem_startParserDataParticle',
'Parad_k_stc_imodem_engineeringParserDataParticle']
}
self.rec_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE:
'mi.dataset.parser.parad_k_stc_imodem',
DataSetDriverConfigKeys.PARTICLE_CLASS:
['Parad_k_stc_imodemRecoveredDataParticle']
}
self.start_state = {StateKey.POSITION: 0}
# Define test data particles and their associated timestamps which will be
# compared with returned results
self.timestamp1_eng = self.timestamp_to_ntp('R\x9d\xac\x1d')
log.debug("Converted timestamp #1: %s",self.timestamp1_eng)
self.particle_a_eng = Parad_k_stc_imodemDataParticle(b'R\x9d\xac\x1d' \
'\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00N',
internal_timestamp=self.timestamp1_eng)
self.timestamp2_eng = self.timestamp_to_ntp('R\x9d\xac!')
self.particle_b_eng = Parad_k_stc_imodemDataParticle(b'R\x9d\xac!C\t' \
'\xf2\xf7A9A!\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00c\x00O',
internal_timestamp=self.timestamp2_eng)
self.timestamp3_eng = self.timestamp_to_ntp('R\x9d\xac&')
self.particle_c_eng = Parad_k_stc_imodemDataParticle(b"R\x9d\xac&C\xbc" \
"\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00O",
internal_timestamp=self.timestamp3_eng)
self.timestamp4_eng = self.timestamp_to_ntp('R\x9d\xac*')
self.particle_d_eng = Parad_k_stc_imodemDataParticle(b'R\x9d\xac' \
'*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O',
internal_timestamp=self.timestamp4_eng)
self.timestamp_last_eng = self.timestamp_to_ntp('R\x9d\xac\xcf')
self.particle_last_eng = Parad_k_stc_imodemDataParticle(b'R\x9d\xac\xcfA' \
'\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P',
internal_timestamp=self.timestamp_last_eng)
# Recovered expected particles
self.particle_a_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b'R\x9d\xac\x1d' \
'\x00\x00\x00\x00A:6\xe3\x00\x00\x00\x00\x00\x00\x00\x00\x01\x03\x00h\x00N',
internal_timestamp=self.timestamp1_eng)
self.particle_b_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b'R\x9d\xac!C\t' \
'\xf2\xf7A9A!\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf2\x00c\x00O',
internal_timestamp=self.timestamp2_eng)
self.particle_c_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b"R\x9d\xac&C\xbc" \
"\x9f\xa7A7'\xbb\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2\x00^\x00O",
internal_timestamp=self.timestamp3_eng)
self.particle_d_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b'R\x9d\xac' \
'*C\xc5\xad\x08A6\xd5\xd0\x00\x00\x00\x00\x00\x00\x00\x00\x00\xb4\x00n\x00O',
internal_timestamp=self.timestamp4_eng)
self.particle_last_eng_rec = Parad_k_stc_imodemRecoveredDataParticle(b'R\x9d\xac\xcfA' \
'\xfa\xb2:A5\x0b\x0fA\xf2\x8f\\\x00\x00\x00\x00\x00\xaf\x00m\x00P',
internal_timestamp=self.timestamp_last_eng)
# uncomment the following to generate particles in yml format for driver testing results files
#self.particle_to_yml(self.particle_a_eng)
#self.particle_to_yml(self.particle_b_eng)
#self.particle_to_yml(self.particle_c_eng)
#self.particle_to_yml(self.particle_d_eng)
self.file_ingested = False
self.state_callback_value = None
self.publish_callback_value = None
self.state = None
def particle_to_yml(self, particle):
"""
        This is added as a testing helper, not actually part of the parser tests. Since the same particles
        will be used for the driver test, it is helpful to write them to .yml here in the same form they
        are needed in the results.yml files.
"""
particle_dict = particle.generate_dict()
        # open in write/append mode; to start from scratch, manually delete this file first
fid = open('particle.yml', 'a')
fid.write(' - _index: 0\n')
fid.write(' internal_timestamp: %f\n' % particle_dict.get('internal_timestamp'))
fid.write(' particle_object: %s\n' % particle.__class__.__name__)
fid.write(' particle_type: %s\n' % particle_dict.get('stream_name'))
for val in particle_dict.get('values'):
if isinstance(val.get('value'), float):
fid.write(' %s: %16.16f\n' % (val.get('value_id'), val.get('value')))
else:
fid.write(' %s: %s\n' % (val.get('value_id'), val.get('value')))
fid.close()
def test_simple(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT) #turn into a data stream to look like file ingestion
self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
self.state_callback, self.pub_callback) # last one is the link to the data source
# next get engineering records
result = self.parser.get_records(1)
self.assert_result(result, 50, self.particle_a_eng, False)
result = self.parser.get_records(1)
self.assert_result(result, 76, self.particle_b_eng, False)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng, True)
        # no data left, don't move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.parser._state[StateKey.POSITION], 128)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_d_eng)
def test_simple_recovered(self):
"""
Read recovered test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT) #turn into a data stream to look like file ingestion
self.parser = self.create_rec_parser(None, stream_handle)
# next get engineering records
result = self.parser.get_records(1)
self.assert_result(result, 50, self.particle_a_eng_rec, False)
result = self.parser.get_records(1)
self.assert_result(result, 76, self.particle_b_eng_rec, False)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_rec, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_rec, True)
# no data left, don't move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(self.parser._state[StateKey.POSITION], 128)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_d_eng_rec)
def timestamp_to_ntp(self, hex_timestamp):
fields = struct.unpack('>I', hex_timestamp)
timestamp = int(fields[0])
return ntplib.system_to_ntp_time(timestamp)
def assert_result(self, result, position, particle, ingested):
self.assertEqual(result, [particle])
self.assertEqual(self.file_ingested, ingested)
self.assertEqual(self.parser._state[StateKey.POSITION], position)
self.assertEqual(self.state_callback_value[StateKey.POSITION], position)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], particle)
def test_get_many(self):
"""
Read test data and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# start with the start time record
result = self.parser.get_records(4)
self.assertEqual(result, [self.particle_a_eng, self.particle_b_eng, self.particle_c_eng, self.particle_d_eng])
self.assertEqual(self.parser._state[StateKey.POSITION], 128)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
self.assertEqual(self.publish_callback_value[0], self.particle_a_eng)
self.assertEqual(self.publish_callback_value[1], self.particle_b_eng)
self.assertEqual(self.publish_callback_value[2], self.particle_c_eng)
self.assertEqual(self.publish_callback_value[3], self.particle_d_eng)
self.assertEqual(self.file_ingested, True)
def test_get_many_recovered(self):
"""
Read recovered test data and pull out multiple data particles at one time.
Assert that the results are those we expected.
"""
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = self.create_rec_parser(None, stream_handle)
# start with the start time record
result = self.parser.get_records(4)
self.assertEqual(result, [self.particle_a_eng_rec, self.particle_b_eng_rec,
self.particle_c_eng_rec, self.particle_d_eng_rec])
self.assertEqual(self.parser._state[StateKey.POSITION], 128)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 128)
self.assertEqual(self.publish_callback_value[0], self.particle_a_eng_rec)
self.assertEqual(self.publish_callback_value[1], self.particle_b_eng_rec)
self.assertEqual(self.publish_callback_value[2], self.particle_c_eng_rec)
self.assertEqual(self.publish_callback_value[3], self.particle_d_eng_rec)
self.assertEqual(self.file_ingested, True)
def test_long_stream(self):
"""
Test a long stream of data
"""
self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA)
self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(32)
self.assertEqual(result[0], self.particle_a_eng)
self.assertEqual(result[-1], self.particle_last_eng)
self.assertEqual(self.parser._state[StateKey.POSITION], 856)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 856)
self.assertEqual(self.publish_callback_value[-1], self.particle_last_eng)
def test_long_stream_recovered(self):
"""
Test a long stream of recovered data
"""
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA)
self.parser = self.create_rec_parser(None, stream_handle)
result = self.parser.get_records(32)
self.assertEqual(result[0], self.particle_a_eng_rec)
self.assertEqual(result[-1], self.particle_last_eng_rec)
self.assertEqual(self.parser._state[StateKey.POSITION], 856)
self.assertEqual(self.state_callback_value[StateKey.POSITION], 856)
self.assertEqual(self.publish_callback_value[-1], self.particle_last_eng_rec)
def test_after_header(self):
"""
Test starting the parser in a state in the middle of processing
"""
new_state = {StateKey.POSITION:24}
self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = Parad_k_stc_imodemParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
# get engineering records
result = self.parser.get_records(1)
self.assert_result(result, 50, self.particle_a_eng, False)
result = self.parser.get_records(1)
self.assert_result(result, 76, self.particle_b_eng, False)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng, True)
def test_after_header_recovered(self):
"""
Test starting the parser in a state in the middle of processing
"""
new_state = {StateKey.POSITION:24}
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = self.create_rec_parser(new_state, stream_handle)
# get engineering records
result = self.parser.get_records(1)
self.assert_result(result, 50, self.particle_a_eng_rec, False)
result = self.parser.get_records(1)
self.assert_result(result, 76, self.particle_b_eng_rec, False)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_rec, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_rec, True)
def test_mid_state_start(self):
"""
Test starting the parser in a state in the middle of processing
"""
new_state = {StateKey.POSITION:76}
self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = Parad_k_stc_imodemParser(self.config, new_state, self.stream_handle,
self.state_callback, self.pub_callback)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng, True)
def test_mid_state_start_recovered(self):
"""
Test starting the parser in a state in the middle of processing
"""
new_state = {StateKey.POSITION:76}
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = self.create_rec_parser(new_state, stream_handle)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_rec, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_rec, True)
def test_set_state(self):
"""
Test changing to a new state after initializing the parser and
reading data, as if new data has been found and the state has
changed
"""
new_state = {StateKey.POSITION:76}
self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
        # set the new state; this essentially skips engineering records a and b
self.parser.set_state(new_state)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng, True)
def test_set_state_recovered(self):
"""
Test changing to a new state after initializing the parser and
reading data, as if new data has been found and the state has
changed
"""
new_state = {StateKey.POSITION:76}
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_SHORT)
self.parser = self.create_rec_parser(None, stream_handle)
        # set the new state; this essentially skips engineering records a and b
self.parser.set_state(new_state)
result = self.parser.get_records(1)
self.assert_result(result, 102, self.particle_c_eng_rec, False)
result = self.parser.get_records(1)
self.assert_result(result, 128, self.particle_d_eng_rec, True)
def test_bad_flags(self):
"""
test that we don't parse any records when the flags are not what we expect
"""
with self.assertRaises(SampleException):
self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_BAD_FLAGS)
self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
def test_bad_flags_recovered(self):
"""
test that we don't parse any records when the flags are not what we expect
"""
with self.assertRaises(SampleException):
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_BAD_FLAGS)
self.parser = self.create_rec_parser(None, stream_handle)
def test_bad_data(self):
"""
Ensure that missing data causes us to miss records
TODO: This test should be improved if we come up with a more accurate regex for the data sample
"""
self.stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_BAD_ENG)
self.parser = Parad_k_stc_imodemParser(self.config, self.start_state, self.stream_handle,
self.state_callback, self.pub_callback)
# next get engineering records
result = self.parser.get_records(4)
if len(result) == 4:
self.fail("We got 4 records, the bad data should only make 3")
def test_bad_data_recovered(self):
"""
Ensure that missing data causes us to miss records
"""
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_BAD_ENG)
self.parser = self.create_rec_parser(None, stream_handle)
# next get engineering records
result = self.parser.get_records(4)
if len(result) == 4:
self.fail("We got 4 records, the bad data should only make 3")
def test_nan(self):
"""
Verify that an exception occurs when the par_value has a value of NaN.
"""
stream_handle = StringIO(Parad_k_stc_imodemParserUnitTestCase.TEST_DATA_NAN)
self.parser = self.create_rec_parser(None, stream_handle)
with self.assertRaises(SampleException):
self.parser.get_records(1)
|
[
"petercable@gmail.com"
] |
petercable@gmail.com
|
e41c3be1ab5cd471a4b71712a1195862fd907064
|
01b991bdae435e0651c73e2149834f1b9abf22f5
|
/ros_test/src/ball_follower/src/drive_wheels.py
|
60bff2a209da822138a537e6e969ac365fc1567c
|
[] |
no_license
|
virati/turtlebot_journeys
|
85b7f18787dad4b794d098bf3c2316107ecff81b
|
ae84a8078381747388aa59cb412f2d75c5428c29
|
refs/heads/master
| 2020-03-27T08:36:39.369793 | 2019-12-16T05:33:06 | 2019-12-16T05:33:06 | 146,270,516 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,605 |
py
|
#!/usr/bin/env python
#Vineet Tiruvadi
#Lab 2 for Intro Robotics
import rospy
from geometry_msgs.msg import Twist, Point
import numpy as np
import sys, select, termios, tty
class Driver:
sendVel = np.array([0,0,0])
def __init__(self):
self.ballSub = rospy.Subscriber("/ball_loc",Point,self.mover)
self.VelPub = rospy.Publisher("/cmd_vel",Twist,queue_size=5)
def mover(self,inPoint):
#INPUT HERE IS A POINT
#inCoord = np.array(inPoint)
#Check if the point we're looking for is normalized
#assert inCoord.any() <= 1
twist=Twist()
inX = inPoint.x
print(inX)
if inX <= 1:
#Center to the screen
inX = inX - 0.5
#since we're JUST TURNING FOR NOW, we'll focus on the x coord
targ = inX
print('X ball: ' + str(targ))
t_av = 0
c_av = 0
#set target_angular_vel; still just velocity
#if we want to go to the ball:
t_av -= np.sign(targ) * 0.1
            #if we want to run away from the ball; this could also be collapsed into a single var and multiplied above
#t_av += np.sign(targ) * 0.1
#is target Ang Vel > control ang vel?
c_av = t_av
else:
c_av = 0
twist.linear.x = 0; twist.linear.y = 0; twist.linear.z = 0;
        twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = c_av
print('Publishing ' + str(c_av))
self.pub_vel(twist)
def pub_vel(self,twist):
self.VelPub.publish(twist)
if __name__== "__main__":
try:
rospy.init_node('WheelDriver')
mainDrv = Driver()
rate = rospy.Rate(30)
while not rospy.is_shutdown():
rate.sleep()
except rospy.ROSInterruptException:
pass
|
[
"vtiruva@emory.edu"
] |
vtiruva@emory.edu
|
d33acdb878b87e27c9f0a589c4e886de95aab2ed
|
15f2f06a1261d9981d57fcf75db1ae1f456cbbe4
|
/blogProject/blogProject/settings.py
|
6498019bb31952909a949585aa9e5c9077aeb122
|
[] |
no_license
|
rigvedpatki/django-basics-to-advance
|
f4ffa372802d35e3a76057b189d9ce985a63ff24
|
d6d7864c34fa10e03668951384d6fbcb4e355d29
|
refs/heads/master
| 2021-04-15T10:11:06.399153 | 2018-05-04T11:31:41 | 2018-05-04T11:31:41 | 126,586,078 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,103 |
py
|
"""
Django settings for blogProject project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '&p)4xfchm4e5#pd2#1g5yo*v6dc4e2+zt++ll823d&*jedhz88'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'blogProject.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'blogProject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
|
[
"rigved.patki@gmail.com"
] |
rigved.patki@gmail.com
|
857f096c6a217514c93fef42ae3a2f36f97fa43d
|
84f5e405e3a8fd902b7d67c692c42ff966e1bdaf
|
/venv/Lib/site-packages/pandas/tests/resample/test_resampler_grouper.py
|
155d704ca64b0cf2dfc15eecf8812a41509db34a
|
[] |
no_license
|
Davey1993/FYP
|
da976feab1c524fac1db609fa230d000b35671e0
|
39b4a21085329528942273efec030441ff8f3230
|
refs/heads/master
| 2023-04-11T05:52:20.388934 | 2021-04-21T10:24:11 | 2021-04-21T10:24:11 | 298,893,919 | 1 | 0 | null | 2021-03-19T22:21:05 | 2020-09-26T20:24:36 |
Python
|
UTF-8
|
Python
| false | false | 11,248 |
py
|
from textwrap import dedent
import numpy as np
import pytest
from pandas.util._test_decorators import async_mark
import pandas as pd
from pandas import DataFrame, Series, Timestamp, compat
import pandas._testing as tm
from pandas.core.indexes.datetimes import date_range
test_frame = DataFrame(
{"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)},
index=date_range("1/1/2000", freq="s", periods=40),
)
@async_mark()
async def test_tab_complete_ipython6_warning(ip):
from IPython.core.completer import provisionalcompleter
code = dedent(
"""\
import pandas._testing as tm
s = tm.makeTimeSeries()
rs = s.resample("D")
"""
)
await ip.run_code(code)
# TODO: remove it when Ipython updates
# GH 33567, jedi version raises Deprecation warning in Ipython
import jedi
if jedi.__version__ < "0.17.0":
warning = tm.assert_produces_warning(None)
else:
warning = tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False)
with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("rs.", 1))
def test_deferred_with_groupby():
# GH 12486
# support deferred resample ops with groupby
data = [
["2010-01-01", "A", 2],
["2010-01-02", "A", 3],
["2010-01-05", "A", 8],
["2010-01-10", "A", 7],
["2010-01-13", "A", 3],
["2010-01-01", "B", 5],
["2010-01-03", "B", 2],
["2010-01-04", "B", 1],
["2010-01-11", "B", 7],
["2010-01-14", "B", 3],
]
df = DataFrame(data, columns=["date", "id", "score"])
df.date = pd.to_datetime(df.date)
def f(x):
return x.set_index("date").resample("D").asfreq()
expected = df.groupby("id").apply(f)
result = df.set_index("date").groupby("id").resample("D").asfreq()
tm.assert_frame_equal(result, expected)
df = DataFrame(
{
"date": pd.date_range(start="2016-01-01", periods=4, freq="W"),
"group": [1, 1, 2, 2],
"val": [5, 6, 7, 8],
}
).set_index("date")
def f(x):
return x.resample("1D").ffill()
expected = df.groupby("group").apply(f)
result = df.groupby("group").resample("1D").ffill()
tm.assert_frame_equal(result, expected)
def test_getitem():
g = test_frame.groupby("A")
expected = g.B.apply(lambda x: x.resample("2s").mean())
result = g.resample("2s").B.mean()
tm.assert_series_equal(result, expected)
result = g.B.resample("2s").mean()
tm.assert_series_equal(result, expected)
result = g.resample("2s").mean().B
tm.assert_series_equal(result, expected)
def test_getitem_multiple():
# GH 13174
# multiple calls after selection causing an issue with aliasing
data = [{"id": 1, "buyer": "A"}, {"id": 2, "buyer": "B"}]
df = DataFrame(data, index=pd.date_range("2016-01-01", periods=2))
r = df.groupby("id").resample("1D")
result = r["buyer"].count()
expected = Series(
[1, 1],
index=pd.MultiIndex.from_tuples(
[(1, Timestamp("2016-01-01")), (2, Timestamp("2016-01-02"))],
names=["id", None],
),
name="buyer",
)
tm.assert_series_equal(result, expected)
result = r["buyer"].count()
tm.assert_series_equal(result, expected)
def test_groupby_resample_on_api_with_getitem():
# GH 17813
df = pd.DataFrame(
{"id": list("aabbb"), "date": pd.date_range("1-1-2016", periods=5), "data": 1}
)
exp = df.set_index("date").groupby("id").resample("2D")["data"].sum()
result = df.groupby("id").resample("2D", on="date")["data"].sum()
tm.assert_series_equal(result, exp)
def test_groupby_with_origin():
# GH 31809
freq = "1399min" # prime number that is smaller than 24h
start, end = "1/1/2000 00:00:00", "1/31/2000 00:00"
middle = "1/15/2000 00:00:00"
rng = pd.date_range(start, end, freq="1231min") # prime number
ts = pd.Series(np.random.randn(len(rng)), index=rng)
ts2 = ts[middle:end]
# proves that grouper without a fixed origin does not work
# when dealing with unusual frequencies
simple_grouper = pd.Grouper(freq=freq)
count_ts = ts.groupby(simple_grouper).agg("count")
count_ts = count_ts[middle:end]
count_ts2 = ts2.groupby(simple_grouper).agg("count")
with pytest.raises(AssertionError):
tm.assert_index_equal(count_ts.index, count_ts2.index)
# test origin on 1970-01-01 00:00:00
origin = pd.Timestamp(0)
adjusted_grouper = pd.Grouper(freq=freq, origin=origin)
adjusted_count_ts = ts.groupby(adjusted_grouper).agg("count")
adjusted_count_ts = adjusted_count_ts[middle:end]
adjusted_count_ts2 = ts2.groupby(adjusted_grouper).agg("count")
tm.assert_series_equal(adjusted_count_ts, adjusted_count_ts2)
# test origin on 2049-10-18 20:00:00
origin_future = pd.Timestamp(0) + pd.Timedelta("1399min") * 30_000
adjusted_grouper2 = pd.Grouper(freq=freq, origin=origin_future)
adjusted2_count_ts = ts.groupby(adjusted_grouper2).agg("count")
adjusted2_count_ts = adjusted2_count_ts[middle:end]
adjusted2_count_ts2 = ts2.groupby(adjusted_grouper2).agg("count")
tm.assert_series_equal(adjusted2_count_ts, adjusted2_count_ts2)
    # both groupers use an adjusted timestamp that is a multiple of 1399 min;
    # they should be equal even if the adjusted timestamp is in the future
tm.assert_series_equal(adjusted_count_ts, adjusted2_count_ts2)
def test_nearest():
# GH 17496
# Resample nearest
index = pd.date_range("1/1/2000", periods=3, freq="T")
result = Series(range(3), index=index).resample("20s").nearest()
expected = Series(
[0, 0, 1, 1, 1, 2, 2],
index=pd.DatetimeIndex(
[
"2000-01-01 00:00:00",
"2000-01-01 00:00:20",
"2000-01-01 00:00:40",
"2000-01-01 00:01:00",
"2000-01-01 00:01:20",
"2000-01-01 00:01:40",
"2000-01-01 00:02:00",
],
dtype="datetime64[ns]",
freq="20S",
),
)
tm.assert_series_equal(result, expected)
def test_methods():
g = test_frame.groupby("A")
r = g.resample("2s")
for f in ["first", "last", "median", "sem", "sum", "mean", "min", "max"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
for f in ["size"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_series_equal(result, expected)
for f in ["count"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
# series only
for f in ["nunique"]:
result = getattr(r.B, f)()
expected = g.B.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_series_equal(result, expected)
for f in ["nearest", "backfill", "ffill", "asfreq"]:
result = getattr(r, f)()
expected = g.apply(lambda x: getattr(x.resample("2s"), f)())
tm.assert_frame_equal(result, expected)
result = r.ohlc()
expected = g.apply(lambda x: x.resample("2s").ohlc())
tm.assert_frame_equal(result, expected)
for f in ["std", "var"]:
result = getattr(r, f)(ddof=1)
expected = g.apply(lambda x: getattr(x.resample("2s"), f)(ddof=1))
tm.assert_frame_equal(result, expected)
def test_apply():
g = test_frame.groupby("A")
r = g.resample("2s")
# reduction
expected = g.resample("2s").sum()
def f(x):
return x.resample("2s").sum()
result = r.apply(f)
tm.assert_frame_equal(result, expected)
def f(x):
return x.resample("2s").apply(lambda y: y.sum())
result = g.apply(f)
tm.assert_frame_equal(result, expected)
def test_apply_with_mutated_index():
# GH 15169
index = pd.date_range("1-1-2015", "12-31-15", freq="D")
df = DataFrame(data={"col1": np.random.rand(len(index))}, index=index)
def f(x):
s = Series([1, 2], index=["a", "b"])
return s
expected = df.groupby(pd.Grouper(freq="M")).apply(f)
result = df.resample("M").apply(f)
tm.assert_frame_equal(result, expected)
# A case for series
expected = df["col1"].groupby(pd.Grouper(freq="M")).apply(f)
result = df["col1"].resample("M").apply(f)
tm.assert_series_equal(result, expected)
def test_apply_columns_multilevel():
# GH 16231
cols = pd.MultiIndex.from_tuples([("A", "a", "", "one"), ("B", "b", "i", "two")])
ind = date_range(start="2017-01-01", freq="15Min", periods=8)
df = DataFrame(np.array([0] * 16).reshape(8, 2), index=ind, columns=cols)
agg_dict = {col: (np.sum if col[3] == "one" else np.mean) for col in df.columns}
result = df.resample("H").apply(lambda x: agg_dict[x.name](x))
expected = DataFrame(
np.array([0] * 4).reshape(2, 2),
index=date_range(start="2017-01-01", freq="1H", periods=2),
columns=pd.MultiIndex.from_tuples(
[("A", "a", "", "one"), ("B", "b", "i", "two")]
),
)
tm.assert_frame_equal(result, expected)
def test_resample_groupby_with_label():
# GH 13235
index = date_range("2000-01-01", freq="2D", periods=5)
df = DataFrame(index=index, data={"col0": [0, 0, 1, 1, 2], "col1": [1, 1, 1, 1, 1]})
result = df.groupby("col0").resample("1W", label="left").sum()
mi = [
np.array([0, 0, 1, 2]),
pd.to_datetime(
np.array(["1999-12-26", "2000-01-02", "2000-01-02", "2000-01-02"])
),
]
mindex = pd.MultiIndex.from_arrays(mi, names=["col0", None])
expected = DataFrame(
data={"col0": [0, 0, 2, 2], "col1": [1, 1, 2, 1]}, index=mindex
)
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(not compat.IS64, reason="GH-35148")
def test_consistency_with_window():
# consistent return values with window
df = test_frame
expected = pd.Int64Index([1, 2, 3], name="A")
result = df.groupby("A").resample("2s").mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
result = df.groupby("A").rolling(20).mean()
assert result.index.nlevels == 2
tm.assert_index_equal(result.index.levels[0], expected)
def test_median_duplicate_columns():
# GH 14233
df = DataFrame(
np.random.randn(20, 3),
columns=list("aaa"),
index=pd.date_range("2012-01-01", periods=20, freq="s"),
)
df2 = df.copy()
df2.columns = ["a", "b", "c"]
expected = df2.resample("5s").median()
result = df.resample("5s").median()
expected.columns = result.columns
tm.assert_frame_equal(result, expected)
|
[
"david.dunleavy93@gmail.com"
] |
david.dunleavy93@gmail.com
|
e32fadc710671ee0d561a5192a3e0c6875072673
|
ac7e039a70ba627f6d9a7a02c9a8849ed5e18a89
|
/unep.project-database/tags/0.2/content/Project.py
|
d13c620db2288f39c6b8598a0df372dc144dd473
|
[] |
no_license
|
jean/project-database
|
65a2559844175350351ba87e820d25c3037b5fb2
|
e818d322ec11d950f2770cd5324fbcd1acaa734d
|
refs/heads/master
| 2021-01-01T06:27:24.528764 | 2014-01-31T11:11:45 | 2014-01-31T11:11:45 | 32,125,426 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,359 |
py
|
# -*- coding: utf-8 -*-
#
# File: Project.py
#
# Copyright (c) 2008 by []
# Generator: ArchGenXML Version 2.0
# http://plone.org/products/archgenxml
#
# GNU General Public License (GPL)
#
__author__ = """Jean Jordaan <jean.jordaan@gmail.com>, Jurgen Blignaut
<jurgen.blignaut@gmail.com>"""
__docformat__ = 'plaintext'
from AccessControl import ClassSecurityInfo
from Products.Archetypes.atapi import *
from zope.interface import implements
import interfaces
from Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin
from Products.ATVocabularyManager.namedvocabulary import NamedVocabulary
from Products.ProjectDatabase.config import *
# additional imports from tagged value 'import'
from Products.ProjectDatabase.widgets.SelectedLinesField import SelectedLinesField
from Products.CMFCore.utils import getToolByName
from Products.FinanceFields.MoneyField import MoneyField
from Products.FinanceFields.MoneyWidget import MoneyWidget
from Products.DataGridField import DataGridField, DataGridWidget, Column, SelectColumn, CalendarColumn
from Products.ATReferenceBrowserWidget.ATReferenceBrowserWidget import ReferenceBrowserWidget
import Project
import Financials
from Products.CMFCore.utils import getToolByName
from Products.FinanceFields.Money import Money
##code-section module-header #fill in your manual code here
del Project
from Products.ProjectDatabase.content.FMIFolder import FMIFolder
from Products.ProjectDatabase.content.MonitoringAndEvaluation import MonitoringAndEvaluation
from Products.ProjectDatabase.content.ProjectGeneralInformation import ProjectGeneralInformation
from Products.ProjectDatabase.content.MilestoneFolder import MilestoneFolder
import permissions
##/code-section module-header
schema = Schema((
),
)
##code-section after-local-schema #fill in your manual code here
##/code-section after-local-schema
Project_schema = BaseFolderSchema.copy() + \
schema.copy()
##code-section after-schema #fill in your manual code here
##/code-section after-schema
class Project(BaseFolder, BrowserDefaultMixin):
"""
"""
security = ClassSecurityInfo()
implements(interfaces.IProject)
meta_type = 'Project'
_at_rename_after_creation = True
schema = Project_schema
##code-section class-header #fill in your manual code here
##/code-section class-header
# Methods
security.declarePublic('getLeadAgencies')
def getLeadAgencies(self):
"""
"""
catalog = getToolByName(self, 'portal_catalog')
proxies = catalog(portal_type='Agency')
pl = [p.getObject().Title() for p in proxies]
return ','.join(pl)
security.declarePublic('getVocabulary')
def getVocabulary(self, vocabName):
"""
"""
pv_tool = getToolByName(self, 'portal_vocabularies')
vocab = pv_tool.getVocabularyByName(vocabName)
return vocab.getDisplayList(vocab)
security.declarePublic('getProjectGeneralInformation')
def getProjectGeneralInformation(self):
"""
"""
return self['project_general_info']
security.declarePublic('getAProject')
def getAProject(self):
"""
"""
return self
registerType(Project, PROJECTNAME)
# end of class Project
##code-section module-footer #fill in your manual code here
##/code-section module-footer
|
[
"jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d"
] |
jurgen.blignaut@61ed036f-b72b-0410-9ea5-b9ec1d72d98d
|
80cf0e2f70e7d2a6dede620d309918c419a3e68c
|
7f92f493d09cd69bb6f446f74bce0796d2d5918a
|
/fileSelfWrite/Others/test.py
|
5529c8205779f62486cec4ea0a34529fdf2a7e22
|
[
"MIT"
] |
permissive
|
usamaahsan93/mischief-managed
|
79efafde06cd57205e22d0bb200961a98df0e147
|
824022ecaeda46450ca1029bceb39f194c363138
|
refs/heads/main
| 2023-07-11T23:40:52.974430 | 2021-07-26T10:43:38 | 2021-07-26T10:43:38 | 387,379,497 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 574 |
py
|
import re, fileinput, time, subprocess,sys, os
time.sleep(1)
with open ("code2.py", "r+") as f:
for line in fileinput.input("code2.py"):
if "#counter:" in line :
t=int(re.search("[\d]+",line).group(0))
if t>=5:
print("greater than 5")
break
else:
f.write(line.replace("#counter:"+str(t),"#counter:"+str(t+1)))
with open("./text.txt","w+") as f:
f.write("This file has been opened "+str(t)+" times.")
os.system("notepad.exe ./text.txt")
subprocess.Popen('powershell Start-Sleep -Seconds 1; Remove-Item ./text.txt')
sys.exit()
|
[
"noreply@github.com"
] |
usamaahsan93.noreply@github.com
|
4f599b8dfbd69a5f176a51a7c15b40ac767c1900
|
caaf1b0754db1e676c37a6f1e58f19183754e654
|
/sdk/network/azure-mgmt-network/generated_samples/virtual_network_peering_delete.py
|
532bf47b34961b57f4acac61960084e27f172f18
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
rdomenzain/azure-sdk-for-python
|
45dfb39121a0abda048c22e7309733a56259f525
|
58984255aeb904346b6958c5ba742749a2cc7d1b
|
refs/heads/master
| 2023-07-07T06:53:12.967120 | 2023-07-04T16:27:37 | 2023-07-04T16:27:37 | 258,050,134 | 0 | 0 |
MIT
| 2020-04-23T00:12:14 | 2020-04-23T00:12:13 | null |
UTF-8
|
Python
| false | false | 1,583 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.network import NetworkManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-network
# USAGE
python virtual_network_peering_delete.py
    Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
client.virtual_network_peerings.begin_delete(
resource_group_name="peerTest",
virtual_network_name="vnet1",
virtual_network_peering_name="peer",
).result()
# x-ms-original-file: specification/network/resource-manager/Microsoft.Network/stable/2022-11-01/examples/VirtualNetworkPeeringDelete.json
if __name__ == "__main__":
main()
|
[
"noreply@github.com"
] |
rdomenzain.noreply@github.com
|
f65558cf904521d0f2d1c51ab51fa35f551c22b9
|
89696c8e71bb95df8ed4b870aaebcfb67d90407f
|
/tester.py
|
15c26d7694335b76781d7f0b7ce597a662ec3fb3
|
[
"MIT"
] |
permissive
|
darkLord19/GraphColoringPy
|
147b6d121e28f0dbf61aefc4a9f1241b7eb7e8c4
|
2cc2ef74ff6eb4eb53d6392dd6f18d5939a9a0ef
|
refs/heads/master
| 2020-03-29T15:42:33.775667 | 2018-10-07T17:42:16 | 2018-10-07T17:42:16 | 150,076,485 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,933 |
py
|
from greedy_coloring import *
from backtracking import *
from graph import Graph
import networkx as nx
import matplotlib.pyplot as plt
colors = ['red', 'green', 'blue', 'yellow', 'orange']
n = int(input('Enter number of vertices: '))
e = int(input('Enter number of edges: '))
# n = int(in_arr[0].split(' ')[0])
# e = int(in_arr[0].split(' ')[1])
g = Graph(n)
G = nx.Graph()
G.add_nodes_from(range(0, n))  # register all n vertices up front so isolated ones are drawn too
for i in range(0, e):
    s = input('Enter space separated vertices representing an edge:\n')
    u = int(s.split(' ')[0].strip())
    v = int(s.split(' ')[1].strip())
    g.add_edge(u, v)
    G.add_edge(u, v)
chromatic_num_wp, vertex_color_wp = get_chromatic_number(g)
chromatic_num_bt, vertex_color_bt = get_chromatic_number_backtracking(g)
print('Chromatic Number (Welsh Powell): ', chromatic_num_wp)
print('Chromatic Number (Backtracking): ', chromatic_num_bt)
nodelist_wp = []
nodelist_bt = []
for i in range(0,chromatic_num_wp):
nodelist_wp.append([])
for key, value in vertex_color_wp.items():
nodelist_wp[value].append(key)
for i in range(0,chromatic_num_bt):
nodelist_bt.append([])
for key, value in vertex_color_bt.items():
nodelist_bt[value].append(key)
pos = nx.spring_layout(G)
fig = plt.figure()
st = 'Chromatic Number is: ' + str(chromatic_num_bt)
fig.suptitle(st, fontsize=20, color='r')
plt.subplot(1, 2, 1)
plt.title('Welsh Powell')
for i in range(0, len(nodelist_wp)):
nx.draw_networkx_nodes(G, pos, nodelist=nodelist_wp[i], node_color=colors[i])
labels = {}
for i in range(0, n):  # label every vertex with its index
    labels[i] = i
nx.draw_networkx_edges(G, pos)
nx.draw_networkx_labels(G, pos, labels)
plt.axis('off')
plt.subplot(1, 2, 2)
plt.title('Backtracking')
for i in range(0, len(nodelist_bt)):
nx.draw_networkx_nodes(G, pos, nodelist=nodelist_bt[i], node_color=colors[i])
labels = {}
for i in range(0, n):  # label every vertex with its index
    labels[i] = i
nx.draw_networkx_edges(G, pos)
nx.draw_networkx_labels(G, pos, labels)
plt.axis('off')
plt.show()
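# Hedged usage sketch (added for illustration, not called by the script above):
# the same Graph / get_chromatic_number API can be exercised without input()
# on a small fixed graph, e.g. a 4-cycle whose chromatic number is 2.
def demo_four_cycle():
    g2 = Graph(4)
    for a, b in [(0, 1), (1, 2), (2, 3), (3, 0)]:
        g2.add_edge(a, b)
    k_wp, coloring_wp = get_chromatic_number(g2)
    k_bt, coloring_bt = get_chromatic_number_backtracking(g2)
    print('Welsh Powell:', k_wp, coloring_wp)
    print('Backtracking:', k_bt, coloring_bt)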
|
[
"umangjparmar@gmail.com"
] |
umangjparmar@gmail.com
|
4405bbc5d0120b144c0fd81f2acda78c3b3d1e57
|
b3c7a8a771ea108aacce1e35842766709a0e2c21
|
/2018/J1/J1.py
|
464798abab511c3e276bbc4bfeb7fcec5eb2da4b
|
[] |
no_license
|
vishnupsatish/CCC-practice
|
0e2a6baca0d64380c32afec8220232cbc046eb1b
|
a1837d56f734c352a050b37d60f591e9f5461cb5
|
refs/heads/master
| 2021-07-10T16:27:32.433858 | 2021-03-07T02:06:59 | 2021-03-07T02:06:59 | 236,358,317 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 163 |
py
|
numbers = [int(input()) for i in range(4)]
if (numbers[0] >= 8) and (numbers[3] >= 8) and (numbers[1] == numbers[2]):
print("ignore")
else:
print("answer")
|
[
"vishnupavan.satish@gmail.com"
] |
vishnupavan.satish@gmail.com
|
c6ecbc433345ab2e029640fa9fca42b81f210b42
|
33cba98daf992b91867be58de0c6cca14bd9c563
|
/knn/knn.py
|
61b5c94343d2203220f6a291967334d62cd43405
|
[] |
no_license
|
niucheng1991/Machine-Learning
|
c27d97d320a8b4130d7a071f6898ff43c644a0bb
|
1f8742e6ac270809d5808c13faeecf2a76a3eb9a
|
refs/heads/master
| 2020-04-07T05:17:57.415456 | 2018-11-25T16:03:50 | 2018-11-25T16:03:50 | 158,091,307 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 902 |
py
|
# -*- coding: utf-8 -*-
import numpy as np
def euclidean_distance(x1, x2):
    """Euclidean distance between two feature vectors (helper used by KNN.predict)."""
    return np.sqrt(np.sum((np.asarray(x1) - np.asarray(x2)) ** 2))
class KNN():
    def __init__(self, k=6):
        self.k = k
    def predict(self, X_test, X_train, y_train):
        y_pred = np.empty(X_test.shape[0])
        distance = []
        for i, test_sample in enumerate(X_test):
            for train_sample in X_train:
                # Euclidean distance between the test sample and each training sample
                distance.append(euclidean_distance(test_sample, train_sample))
            # Indices of the k nearest training samples
            idx = np.argsort(distance)[:self.k]
            # Labels of the k nearest neighbours
            k_nearest_neighbors = np.array([y_train[j] for j in idx])
            # Majority vote among the neighbour labels
            counts = np.bincount(k_nearest_neighbors.astype('int'))
            y_pred[i] = np.argmax(counts)
            distance = []
        return y_pred
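# Hedged usage sketch (toy data made up for illustration): two well-separated
# clusters, classified with the KNN.predict signature defined above.
if __name__ == '__main__':
    X_train = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.0], [5.2, 4.9]])
    y_train = np.array([0, 0, 1, 1])
    X_test = np.array([[0.05, 0.1], [5.1, 5.0]])
    clf = KNN(k=3)
    print(clf.predict(X_test, X_train, y_train))  # expected roughly [0., 1.]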
|
[
"niucheng1991@gmail.com"
] |
niucheng1991@gmail.com
|
24606d612bfe57df9133c52158fa43cb8df4b0fd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02554/s686857894.py
|
2b0827ac913366bf60ab8e65803058621790719e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 69 |
py
|
N = int(input())
print(((10**N) - 2*(9**N) + (8**N)) % ((10**9)+7))
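# The one-liner above is inclusion-exclusion: of the 10**N digit strings of
# length N, subtract those with no 0 and those with no 9 (9**N each), then add
# back those missing both digits (8**N). A hedged brute-force check for tiny N:
def brute_force(n):
    from itertools import product
    return sum(1 for s in product('0123456789', repeat=n) if '0' in s and '9' in s)
# e.g. brute_force(3) == 10**3 - 2*9**3 + 8**3 == 54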
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
beb2b9f87eefb7acd3e3c7aa875cf9145c98f7c6
|
1fe10b41ac29e0e4a419bc24b39879d27121f7f6
|
/lab4_11.py
|
0fe07bb98921399c5e3458a888fd74e2b9ea8a28
|
[] |
no_license
|
gibum1228/Python_Study
|
dad24328f01129fafafce687802fc0ffd6dd0bb2
|
e3e38f0041574d259e738960e51afc71e103248b
|
refs/heads/master
| 2020-04-03T16:51:12.367850 | 2019-05-09T03:46:09 | 2019-05-09T03:46:09 | 155,421,781 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 547 |
py
|
"""
챕터: Day 4
주제: 반복문(for 문)
문제:
사용자로부터 5개의 숫자를 입력받아, 이를 리스트에 저장한 후 합과 평균을 구하여 출력한다.
작성자: 김기범
작성일: 2018.09.27.
"""
l = [] # l 초기화
sum = 0 # sum 초기화
for i in range(0, 5) : # i는 0부터 4
l.append(int(input())) # l에 입력받은 수를 저장
for add in l : # add에 l 값 대입
sum += add # sum에 add 더하기
average = sum / len(l) # 평균 구하기
print("%d %f" %(sum, average)) # 합과 평균 출력
|
[
"gibum1228@naver.com"
] |
gibum1228@naver.com
|
3cc568bb219b3dd743add2ae47a099388fa0706c
|
b70d665dfd0bdd67c0c5a3285ee196ce95fa274f
|
/Image_Analysis/Mod1/Excercice03_/Matemathic_morphology.py
|
e595ad082ecb637c6e62eb1ee7d15389e0e40a5e
|
[
"MIT"
] |
permissive
|
PabloWually/computer_vision
|
ec6e8ecf34b85c72f05c9d0dd8dc9c185f67395b
|
bab05c39573341c931ea3d8b1f0ed4685e07a7ca
|
refs/heads/master
| 2022-07-30T13:47:24.694489 | 2020-05-20T15:24:14 | 2020-05-20T15:24:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,036 |
py
|
import numpy as np
from skimage import morphology as sk_mm
from matplotlib import pyplot as plt
square = np.array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=np.uint8)
struct_element = sk_mm.selem.diamond(1)
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" # Apply erosion
eroded_square = sk_mm.erosion(square, struct_element)
fig = plt.figure(figsize=(6, 6))
# Plot original image
a=fig.add_subplot(1, 2, 1)
plt.imshow(square, cmap="binary")
a.set_title("Original")
# Plot eroded image
a=fig.add_subplot(1, 2, 2)
plt.imshow(eroded_square, cmap="binary")
a.set_title("Eroded") """
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
""" #Apply dilation
dilated_square = sk_mm.dilation(square, struct_element)
# Display it
fig = plt.figure(figsize=(6, 6))
# Plot original image
a=fig.add_subplot(1, 2, 1)
plt.imshow(square, cmap="binary")
a.set_title("Original")
# Plot dilated image
a=fig.add_subplot(1, 2, 2)
plt.imshow(dilated_square, cmap="binary")
a.set_title("Dilated") """
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
# Apply closing and opening
closed_square = sk_mm.closing(square, struct_element)
opened_square = sk_mm.opening(square, struct_element)
# Display it
fig = plt.figure(figsize=(9, 6))
# Plot original image
a=fig.add_subplot(1, 3, 1)
image_plot_1 = plt.imshow(square, cmap="binary")
a.set_title("Original")
# Plot closed image
a=fig.add_subplot(1, 3, 2)
image_plot_2 = plt.imshow(closed_square, cmap="binary")
a.set_title("Closed")
# Plot opened image
a=fig.add_subplot(1, 3, 3)
image_plot_2 = plt.imshow(opened_square, cmap="binary")
a.set_title("Opened")
#"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
plt.show()
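# Hedged verification sketch (console-only, reusing the arrays defined above):
# printing the eroded and dilated squares shows the diamond structuring
# element shrinking and growing the foreground without needing a plot window.
print("eroded:\n", sk_mm.erosion(square, struct_element))
print("dilated:\n", sk_mm.dilation(square, struct_element))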
|
[
"pablomp250@gmail.com"
] |
pablomp250@gmail.com
|
e87143ae62be3d76aca75336e2253b797cd931a1
|
9fd800815a2502dc5405a4e4e144b0a86a07e908
|
/apps/user/migrations/0002_auto_20210225_1902.py
|
d5917ac86dea787d3bf4351b3a738b85e87e8892
|
[] |
no_license
|
ailvaanderson13/unir-dev
|
6c02c40ed6facb12f6567201c132852b24c8c6ce
|
0e12ce2de99d98671644f3a6451700c20bffaa57
|
refs/heads/master
| 2023-03-13T18:16:16.723085 | 2021-02-25T23:54:45 | 2021-02-25T23:54:45 | 341,449,016 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 530 |
py
|
# Generated by Django 3.1.7 on 2021-02-25 22:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='motorista',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='user',
name='passageiro',
field=models.BooleanField(default=False),
),
]
|
[
"asilva130498@gmail.com"
] |
asilva130498@gmail.com
|
b377218bd232e3b0882d187c3146fe6e98122d09
|
bd404cd2b293fe1dda4797b1c10c0691dea72541
|
/resources/plop_proj/web.wsgi
|
e6fc4e8132197b75fba48e8b1064d9150c3438ca
|
[] |
no_license
|
nubela/plop-plop
|
55f4af5bec320cbe3d885457e39b870a8abdb712
|
56a62f24071bb0fa3d62da3b0c58650cb74d02f7
|
refs/heads/master
| 2021-01-10T19:53:38.769944 | 2014-01-13T05:48:17 | 2014-01-13T05:48:17 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 248 |
wsgi
|
import sys, os
cwd = os.path.dirname(os.path.realpath(__file__))
sys.path.append(cwd)
# Activate the bundled virtualenv before importing the app (execfile implies this WSGI script targets Python 2).
activate_this = os.path.join(cwd, "v_env/bin/activate_this.py")
execfile(activate_this, dict(__file__=activate_this))
from web.plop import app
application = app
|
[
"nubela@gmail.com"
] |
nubela@gmail.com
|
66063ce1957d90287e9f2a1bee636fb5b0c4935c
|
d655d21069c05f9ce5924f38f0fc5b152d606172
|
/flight_processor.py
|
d7719400c52c7aee8f95d958bcf94ecb30f9dc79
|
[] |
no_license
|
mt4g16/IP-Flight-Protocol
|
28de7a4c3e74d5d830b51fe2f1a171f7d85fe2b6
|
6b804b4f61c3d43a6501e6a03622a9ab3df7f6b7
|
refs/heads/main
| 2023-03-06T23:11:08.363895 | 2021-02-07T15:46:01 | 2021-02-07T15:46:01 | 336,823,069 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 355 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 16 22:59:19 2020
@author: Matteo
"""
from tlog_interpreter import process_tlog
from data_processor import process_data
from data_plots import make_plots
from data_panels import make_panels
def process_flight():
process_tlog()
process_data()
make_plots()
make_panels()
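# Hedged entry point (an assumption, not in the original module): running the
# file directly executes the whole tlog -> data -> plots -> panels pipeline.
if __name__ == "__main__":
    process_flight()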
|
[
"noreply@github.com"
] |
mt4g16.noreply@github.com
|
fe01077c57dd86fca77c5595c967b8212455bb72
|
5a1c31771de586f2b55d689dfc49fae69e8181f6
|
/Desicion_tree/treePlotter.py
|
4046c194bc6cedc5effc663209ae583247298b14
|
[] |
no_license
|
Lv11223311/ML_combat
|
feea37cb99b65159827cc82db406e81b90863557
|
cb709fab9b9412890a8ed3dfaa087d47a92a8062
|
refs/heads/master
| 2020-04-18T18:26:22.291035 | 2019-02-20T11:49:11 | 2019-02-20T11:49:11 | 167,683,400 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,874 |
py
|
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
# Global style settings: decision-node box, leaf-node box, and connector (arrow) style
decisionNode = dict(boxstyle='sawtooth', fc='0.8')
leafNode = dict(boxstyle='round', fc='0.8')
arrow_args = dict(arrowstyle='<-')
# Draw one annotated node, styled with the global settings above
def plotNode(nodeTxt, centerPt, parentPt, nodeType):
createPlot.ax1.annotate(nodeTxt, xy=parentPt, xycoords='axes fraction',
xytext=centerPt, textcoords='axes fraction',
va='center', ha='center', bbox=nodeType, arrowprops=arrow_args)
# def createPlot():
# fig = plt.figure(1, facecolor='white')
# fig.clf()
# createPlot.ax1 = plt.subplot(111, frameon=False)
# plotNode('a decision node', (0.5, 0.1), (0.1, 0.5), decisionNode)
# plotNode('a leaf node', (0.8, 0.1), (0.3, 0.8), leafNode)
# plt.show()
def getNumLeafs(myTree):
    # Recursively count the leaf (terminal) nodes
numLeafs = 0
firstStr = next(iter(myTree))
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
numLeafs += getNumLeafs(secondDict[key])
else:
numLeafs += 1
return numLeafs
def getTreeDepth(myTree):
    # Recursively compute the depth of the tree
maxDepth = 0
firstStr = next(iter(myTree))
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
thisDepth = 1 + getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth:
maxDepth = thisDepth
return maxDepth
def retrieveTree(i):
    # Two hard-coded example trees for testing
listOfTrees = [{'no surfacing': {0:'no', 1:{'flippers':{0:'no', 1:'yes'}}}},
{'no surfacing':{0:'no', 1:{'flippers':{0:{'head':{0:'no', 1:'yes'}}, 1:'no'}}}}
]
return listOfTrees[i]
# Fill in the text placed between a child node and its parent
def plotMidText(cntrPt, parentPt, txtString):
xMid = (parentPt[0] - cntrPt[0]) / 2.0 + cntrPt[0]
yMid = (parentPt[1] - cntrPt[1]) / 2.0 + cntrPt[1]
createPlot.ax1.text(xMid, yMid, txtString)
# Plot the annotated tree
def plotTree(myTree, parentPt, nodeTxt):
    numLeafs = getNumLeafs(myTree)  # number of leaves in this subtree
    depth = getTreeDepth(myTree)  # depth of this subtree
    firstStr = next(iter(myTree))  # decision node label
    cntrPt = (plotTree.xOff + (1.0 + float(numLeafs)) /2.0/plotTree.totalW, plotTree.yOff)  # centre position of this node
    plotMidText(cntrPt, parentPt, nodeTxt)  # annotate the text on the connecting edge
    plotNode(firstStr, cntrPt, parentPt, decisionNode)  # draw the decision node with plotNode
    secondDict = myTree[firstStr]  # descend to the child nodes
    plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD  # shift y down one level
    for key in secondDict.keys():  # recurse over every child; same idea as the depth and leaf-count helpers above
if type(secondDict[key]).__name__ == 'dict':
plotTree(secondDict[key], cntrPt, str(key))
else:
plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
plotNode(secondDict[key], (plotTree.xOff, plotTree.yOff), cntrPt, leafNode)
plotMidText((plotTree.xOff, plotTree.yOff), cntrPt, str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
# Draw the full figure
def createPlot(inTree):
    fig = plt.figure(1, facecolor='white')  # create the canvas
    fig.clf()  # clear the canvas
    axprops = dict(xticks=[], yticks=[])  # axis keyword arguments (hide the ticks)
    createPlot.ax1 = plt.subplot(111, frameon=False, **axprops)  # drop the x and y axes
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5/plotTree.totalW  # initial x offset
plotTree.yOff = 1.0
plotTree(inTree,(0.5, 1.0), '')
plt.show()
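# Hedged usage sketch: render one of the built-in example trees defined by
# retrieveTree() above (opens a matplotlib window when run as a script).
if __name__ == '__main__':
    createPlot(retrieveTree(0))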
|
[
"2695484680@qq.com"
] |
2695484680@qq.com
|
2a4e109b863e0a4319bfe85562be993edc93a2a9
|
72aaf95c620add84cae41151a36e7e15de8a5cd4
|
/0618/py/test1.py
|
8f0431ee75ba524f31088d19d7c9200381c1bb7c
|
[] |
no_license
|
uZJl/CodeZjl
|
687b9324a59840702a2a970561e79262c02f5000
|
1355f7698ba8193cb05f1971bce25b6994ea0500
|
refs/heads/master
| 2023-07-29T08:35:08.280229 | 2021-08-29T14:58:44 | 2021-08-29T14:58:44 | 312,001,461 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 510 |
py
|
from selenium import webdriver
import os
import time
from selenium.webdriver.common.action_chains import ActionChains
driver = webdriver.Chrome()
file="file:///" + os.path.abspath("C:/Users/21173/Desktop/pyhtml/level_locate.html")
driver.get(file)
driver.maximize_window()
time.sleep(2)
driver.find_element_by_link_text("Link1").click()
ele = driver.find_element_by_id("dropdown1").find_element_by_link_text("Another action")
ActionChains(driver).move_to_element(ele).perform()
time.sleep(2)
driver.quit()
|
[
"1969643139@qq.com"
] |
1969643139@qq.com
|
212ee2ca759bdbfa84b1176d260a19504b87e9ac
|
1ccfc8fd6d502714e2c3059f346ccde635ca4a98
|
/discord/permissions.py
|
51c39fec25c6808cb2fbb16ade451480df18725d
|
[
"MIT"
] |
permissive
|
joymalya31/nextcord
|
0c58991f03cb200fe9cb742c9d9e1d1828ecf24b
|
c9a17374ac3568dc704e46bb856811d3a85038d3
|
refs/heads/master
| 2023-09-01T17:15:13.788281 | 2021-10-30T06:48:31 | 2021-10-30T06:48:31 | 422,811,073 | 0 | 0 |
NOASSERTION
| 2021-10-30T07:12:35 | 2021-10-30T07:12:34 | null |
UTF-8
|
Python
| false | false | 1,603 |
py
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Copyright (c) 2021-present tag-epic
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
--------------
Aliased module. See the same file in the nextcord folder for more information.
Autogenerated by aliasgen.py
"""
from nextcord.permissions import Any, BaseFlags, Callable, ClassVar, Dict, Iterator, Optional, P, PO, PermissionOverwrite, Permissions, Set, TYPE_CHECKING, Tuple, Type, TypeVar, _augment_from_permissions, alias_flag_value, annotations, fill_with_flags, flag_value, make_permission_alias, permission_alias
__all__ = ("Permissions", "PermissionOverwrite")
|
[
"noreply@github.com"
] |
joymalya31.noreply@github.com
|
7db66557bd3a0b24ca156b7d9d1adc8dd2bb1036
|
2f63746b12651e78c4ce8728289b2c3c619cc88b
|
/tests/test_rebuild_overlap_groups.py
|
0784cfeb5fa5bc63fa95d6c0c44ce772220c204f
|
[
"MIT"
] |
permissive
|
WillieMaddox/Airbus_SDC_dup
|
0729f42365ad30a857efc09de4710747ab37f7f6
|
09be904cf3c8050086f07538f5e2954282de5d62
|
refs/heads/master
| 2020-04-29T06:28:10.841952 | 2020-01-25T05:11:59 | 2020-01-25T05:11:59 | 175,917,002 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 521 |
py
|
import pytest
from pytest import fixture
import json
from sdcdup.rebuild_overlap_groups import PrettyEncoder
def test_PrettyEncoder():
test_dict = {"filename.jpg": {'d82542ac6.jpg': (0, 0), '7b836bdec.jpg': (1, 0)}}
true_json = '{\n "filename.jpg": {\n "7b836bdec.jpg": [1, 0], \n "d82542ac6.jpg": [0, 0]\n }\n}'
test_json = json.dumps(
test_dict,
cls=PrettyEncoder,
indent=2,
separators=(', ', ': '),
sort_keys=True,
)
assert test_json == true_json
|
[
"willie.maddox@gmail.com"
] |
willie.maddox@gmail.com
|
fa9c4e8d7c168d8362c39b5b10eb21a795cf526d
|
0a8ce2729cc5d4a0610899ce051c19dd1710c65f
|
/playground/stitching/stitch.py
|
bb81800763b4b7a1e18fad58c4e82b39c2424580
|
[] |
no_license
|
niarm/smart-mic
|
84f7b165b5d4f7b5e1eef3f0bcbc47d8356bbce2
|
2f4ab4966280ff99b32204ea168fcc81d5d21977
|
refs/heads/master
| 2020-04-01T13:22:56.821640 | 2018-10-16T08:35:55 | 2018-10-16T08:35:55 | 153,250,233 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 241 |
py
|
import cv2
# createStitcher(False) selects the default panorama mode (OpenCV 3.x API).
stitcher = cv2.createStitcher(False)
image_1 = cv2.imread("r0-c0.png")
image_2 = cv2.imread("r0-c1.png")
image_3 = cv2.imread("r0-c2.png")
# stitch() returns a (status, panorama) pair; only write the image on success.
status, panorama = stitcher.stitch((image_1, image_2, image_3))
if status == 0:  # 0 corresponds to Stitcher::OK
    cv2.imwrite("result.jpg", panorama)
else:
    print("Stitching failed with status", status)
|
[
"niklas.armbruster@deutschebahn.com"
] |
niklas.armbruster@deutschebahn.com
|
546a1f441b55eb2b590148a3340a3cf794aca817
|
f70b6d2a0a2b3beb4c416d90611254e68c7cd02e
|
/newspaper_project/users/forms.py
|
0ceb844abcd6bdd7d3ab1b242bd86f5d37b10bb8
|
[] |
no_license
|
poojaurkude/newsapp
|
11681d3b8b8f57b150f7acb8e0bb5f27fe19e48f
|
f42d95538c9c5025a5097481f85971ba5a591a82
|
refs/heads/master
| 2021-01-06T09:46:47.700196 | 2020-02-18T05:58:05 | 2020-02-18T05:58:05 | 241,284,873 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 428 |
py
|
from django import forms
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('username','email','age',)
class CustomUserChangeForm(UserChangeForm):
class Meta:
model = CustomUser
fields = ('username','email','age',)
|
[
"pooja11urkude@gmail.com"
] |
pooja11urkude@gmail.com
|
3e3160aa14fa7ceee7753a704491419a0573c1b1
|
853f732a548a78441164d55572f9ed2f822c8f6d
|
/src/tradutor.py
|
7df124c97ad7b50c1c13019bb808b7d0fa2abe17
|
[] |
no_license
|
schaiana/tradutor
|
460d5ea0b7377ba70554cd19474ea8a2d4dea8d3
|
c8b8541a23d039a5b24d7f9149102baf307c9229
|
refs/heads/master
| 2023-05-11T11:01:26.918070 | 2020-06-29T19:20:56 | 2020-06-29T19:20:56 | 222,506,664 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,824 |
py
|
import math
import re
algarismo_para_extenso = {
0: "zero",
1: "um",
2: "dois",
3: "três",
4: "quatro",
5: "cinco",
6: "seis",
7: "sete",
8: "oito",
9: "nove",
10: "dez",
11: "onze",
12: "doze",
13: "treze",
14: "quatorze",
15: "quinze",
16: "dezesseis",
17: "dezessete",
18: "dezoito",
19: "dezenove",
20: "vinte",
30: "trinta",
40: "quarenta",
50: "cinquenta",
60: "sessenta",
70: "setenta",
80: "oitenta",
90: "noventa",
100: "cento",
200: "duzentos",
300: "trezentos",
400: "quatrocentos",
500: "quinhentos",
600: "seiscentos",
700: "setecentos",
800: "oitocentos",
900: "novecentos"
}
def obtem_extenso(algarismo):
if (algarismo == 0):
return algarismo_para_extenso[algarismo]
prefixo = "menos " if algarismo < 0 else ""
    modulo_algarismo = math.fabs(algarismo)  # absolute value of the input number
sufixos = [" mil ", ""]
extenso = ""
while(modulo_algarismo != 0):
sufixo = sufixos.pop()
cento, extenso_parcial = traduz_cento(modulo_algarismo)
modulo_algarismo = (modulo_algarismo - cento) / 1000
if (cento == 1 and sufixo != ""):
if (extenso == ""):
extenso = sufixo.strip()
else:
extenso = sufixo.strip() + " e " + extenso
else:
if (sufixo != ""):
extenso_parcial = extenso_parcial + sufixo
if extenso == "":
extenso = extenso_parcial
else:
extenso = extenso_parcial + "e " + extenso
else:
extenso = extenso_parcial + sufixo + extenso
return (prefixo + extenso).strip()
def traduz_cento(modulo_algarismo):
unidade = modulo_algarismo % 10
#print(unidade)
dezena = (modulo_algarismo % 100) - unidade
#print(dezena)
cento = modulo_algarismo % 1000
#print(cento)
centena = cento - dezena - unidade
#print(centena)
extenso = ""
if (centena != 0):
if (cento == 100):
return cento, "cem"
else:
extenso += algarismo_para_extenso[centena]
if (dezena != 0):
if (centena != 0):
extenso += " e "
if (dezena == 10):
extenso += algarismo_para_extenso[dezena + unidade]
return cento, extenso
else:
extenso += algarismo_para_extenso[dezena]
if (unidade != 0):
if (dezena != 0 or centena != 0):
extenso += " e "
extenso += algarismo_para_extenso[unidade]
return cento, extenso
def valida_algarismo(algarismo):
if (re.match("^[-+]?[0-9]{1,5}$", algarismo) == None):
return False
return True
if __name__ == '__main__':
print(obtem_extenso(119000))
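    # Hedged usage sketch: run the validator and the written-out translation
    # together on a few sample inputs (strings chosen only for illustration).
    for entrada in ["15", "-302", "99999", "123456"]:
        if valida_algarismo(entrada):
            print(entrada, "->", obtem_extenso(int(entrada)))
        else:
            print(entrada, "-> rejected by valida_algarismo")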
|
[
"schaianasonaglio@gmail.com"
] |
schaianasonaglio@gmail.com
|
914ad708786f3d23eb277aa6f3ba40c9aaf15a81
|
92adc05640d66eec8fc3f2cda4ee7621af1c456a
|
/homeworks/markov-decision-processes/agents.py
|
afbb6c8101184947038cab5418c223e3d3993822
|
[] |
no_license
|
artificial-intelligence-class/artificial-intelligence-class.github.io
|
5ef0911f08754a9d4fae2f75555d25a7bb9724db
|
7e3383a803fe9a456dee73a6848f2cc02b0207c8
|
refs/heads/master
| 2023-02-21T08:43:36.430394 | 2023-02-12T14:02:27 | 2023-02-12T14:02:27 | 136,362,611 | 9 | 22 | null | 2020-11-02T09:35:25 | 2018-06-06T17:24:53 |
Python
|
UTF-8
|
Python
| false | false | 3,283 |
py
|
# Include your imports here, if any are used.
student_name = "Type your full name here."
# 1. Value Iteration
class ValueIterationAgent:
"""Implement Value Iteration Agent using Bellman Equations."""
def __init__(self, game, discount):
"""Store game object and discount value into the agent object,
initialize values if needed.
"""
... # TODO
def get_value(self, state):
"""Return value V*(s) correspond to state.
State values should be stored directly for quick retrieval.
"""
return 0 # TODO
def get_q_value(self, state, action):
"""Return Q*(s,a) correspond to state and action.
Q-state values should be computed using Bellman equation:
Q*(s,a) = Σ_s' T(s,a,s') [R(s,a,s') + γ V*(s')]
"""
return 0 # TODO
def get_best_policy(self, state):
"""Return policy π*(s) correspond to state.
Policy should be extracted from Q-state values using policy extraction:
π*(s) = argmax_a Q*(s,a)
"""
return None # TODO
def iterate(self):
"""Run single value iteration using Bellman equation:
V_{k+1}(s) = max_a Q*(s,a)
Then update values: V*(s) = V_{k+1}(s)
"""
... # TODO
# 2. Policy Iteration
class PolicyIterationAgent(ValueIterationAgent):
"""Implement Policy Iteration Agent.
The only difference between policy iteration and value iteration is at
their iteration method. However, if you need to implement helper function or
override ValueIterationAgent's methods, you can add them as well.
"""
def iterate(self):
"""Run single policy iteration.
Fix current policy, iterate state values V(s) until |V_{k+1}(s) - V_k(s)| < ε
"""
epsilon = 1e-6
... # TODO
# 3. Bridge Crossing Analysis
def question_3():
discount = ...
noise = ...
return discount, noise
# 4. Policies
def question_4a():
discount = ...
noise = ...
living_reward = ...
return discount, noise, living_reward
# If not possible, return 'NOT POSSIBLE'
def question_4b():
discount = ...
noise = ...
living_reward = ...
return discount, noise, living_reward
# If not possible, return 'NOT POSSIBLE'
def question_4c():
discount = ...
noise = ...
living_reward = ...
return discount, noise, living_reward
# If not possible, return 'NOT POSSIBLE'
def question_4d():
discount = ...
noise = ...
living_reward = ...
return discount, noise, living_reward
# If not possible, return 'NOT POSSIBLE'
def question_4e():
discount = ...
noise = ...
living_reward = ...
return discount, noise, living_reward
# If not possible, return 'NOT POSSIBLE'
# 5. Feedback
# Just an approximation is fine.
feedback_question_1 = 0
feedback_question_2 = """
Type your response here.
Your response may span multiple lines.
Do not include these instructions in your response.
"""
feedback_question_3 = """
Type your response here.
Your response may span multiple lines.
Do not include these instructions in your response.
"""
|
[
"admin@superfashi.com"
] |
admin@superfashi.com
|
66fa92e9025251b90129308bd92a3f521649690c
|
753a70bc416e8dced2853f278b08ef60cdb3c768
|
/models/research/domain_adaptation/domain_separation/dsn_test.py
|
3d687398a9b9356455f739417bc96ddb2ca5ad40
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
finnickniu/tensorflow_object_detection_tflite
|
ef94158e5350613590641880cb3c1062f7dd0efb
|
a115d918f6894a69586174653172be0b5d1de952
|
refs/heads/master
| 2023-04-06T04:59:24.985923 | 2022-09-20T16:29:08 | 2022-09-20T16:29:08 | 230,891,552 | 60 | 19 |
MIT
| 2023-03-25T00:31:18 | 2019-12-30T09:58:41 |
C++
|
UTF-8
|
Python
| false | false | 6,027 |
py
|
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DSN model assembly functions."""
import numpy as np
import tensorflow as tf
import dsn
class HelperFunctionsTest(tf.test.TestCase):
def testBasicDomainSeparationStartPoint(self):
with self.test_session() as sess:
# Test for when global_step < domain_separation_startpoint
step = tf.contrib.slim.get_or_create_global_step()
sess.run(tf.global_variables_initializer()) # global_step = 0
params = {'domain_separation_startpoint': 2}
weight = dsn.dsn_loss_coefficient(params)
weight_np = sess.run(weight)
self.assertAlmostEqual(weight_np, 1e-10)
step_op = tf.assign_add(step, 1)
step_np = sess.run(step_op) # global_step = 1
weight = dsn.dsn_loss_coefficient(params)
weight_np = sess.run(weight)
self.assertAlmostEqual(weight_np, 1e-10)
# Test for when global_step >= domain_separation_startpoint
step_np = sess.run(step_op) # global_step = 2
tf.logging.info(step_np)
weight = dsn.dsn_loss_coefficient(params)
weight_np = sess.run(weight)
self.assertAlmostEqual(weight_np, 1.0)
class DsnModelAssemblyTest(tf.test.TestCase):
def _testBuildDefaultModel(self):
images = tf.to_float(np.random.rand(32, 28, 28, 1))
labels = {}
labels['classes'] = tf.one_hot(
tf.to_int32(np.random.randint(0, 9, (32))), 10)
params = {
'use_separation': True,
'layers_to_regularize': 'fc3',
'weight_decay': 0.0,
'ps_tasks': 1,
'domain_separation_startpoint': 1,
'alpha_weight': 1,
'beta_weight': 1,
'gamma_weight': 1,
'recon_loss_name': 'sum_of_squares',
'decoder_name': 'small_decoder',
'encoder_name': 'default_encoder',
}
return images, labels, params
def testBuildModelDann(self):
images, labels, params = self._testBuildDefaultModel()
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'dann_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelDannSumOfPairwiseSquares(self):
images, labels, params = self._testBuildDefaultModel()
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'dann_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelDannMultiPSTasks(self):
images, labels, params = self._testBuildDefaultModel()
params['ps_tasks'] = 10
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'dann_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelMmd(self):
images, labels, params = self._testBuildDefaultModel()
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'mmd_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelCorr(self):
images, labels, params = self._testBuildDefaultModel()
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'correlation_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 6)
def testBuildModelNoDomainAdaptation(self):
images, labels, params = self._testBuildDefaultModel()
params['use_separation'] = False
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels, 'none',
params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 1)
self.assertEqual(len(tf.contrib.losses.get_regularization_losses()), 0)
def testBuildModelNoAdaptationWeightDecay(self):
images, labels, params = self._testBuildDefaultModel()
params['use_separation'] = False
params['weight_decay'] = 1e-5
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels, 'none',
params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 1)
self.assertTrue(len(tf.contrib.losses.get_regularization_losses()) >= 1)
def testBuildModelNoSeparation(self):
images, labels, params = self._testBuildDefaultModel()
params['use_separation'] = False
with self.test_session():
dsn.create_model(images, labels,
tf.cast(tf.ones([32,]), tf.bool), images, labels,
'dann_loss', params, 'dann_mnist')
loss_tensors = tf.contrib.losses.get_losses()
self.assertEqual(len(loss_tensors), 2)
if __name__ == '__main__':
tf.test.main()
|
[
"finn.niu@apptech.com.hk"
] |
finn.niu@apptech.com.hk
|
abde9955cdf401538f6a48140cc38c426eea896a
|
8e29c21c631d2b3a21f18a210a2c0bbab0d1f347
|
/python/pfs/drp/stella/datamodel/pfsTargetSpectra.py
|
37983859c2a5a41486dce2a196d966979720b153
|
[] |
no_license
|
Subaru-PFS/drp_stella
|
630d25118dcc074cf14629f2f1389fad21a023a8
|
85602eea2485ac24e0831046dc74f1b2d1a3d89f
|
refs/heads/master
| 2023-09-01T06:23:57.661286 | 2023-08-23T21:22:25 | 2023-08-23T21:22:25 | 53,125,359 | 3 | 1 | null | 2023-09-07T05:52:04 | 2016-03-04T09:51:39 |
Python
|
UTF-8
|
Python
| false | false | 13,208 |
py
|
from collections.abc import Mapping
from typing import Dict, Iterator, Iterable, List, Type
import astropy.io.fits
import numpy as np
import yaml
from astropy.io.fits import BinTableHDU, Column, HDUList, ImageHDU
from pfs.datamodel.drp import PfsSingleNotes, PfsSingle, PfsObjectNotes, PfsObject
from pfs.datamodel.masks import MaskHelper
from pfs.datamodel.observations import Observations
from pfs.datamodel.pfsConfig import TargetType
from pfs.datamodel.pfsTable import PfsTable
from pfs.datamodel.target import Target
from pfs.drp.stella.datamodel.fluxTable import FluxTable
from .pfsFiberArray import PfsFiberArray
__all__ = ["PfsTargetSpectra", "PfsCalibratedSpectra", "PfsObjectSpectra"]
class PfsTargetSpectra(Mapping):
"""A collection of `PfsFiberArray` indexed by target"""
PfsFiberArrayClass: Type[PfsFiberArray] # Subclasses must override
NotesClass: Type[PfsTable] # Subclasses must override
def __init__(self, spectra: Iterable[PfsFiberArray]):
super().__init__()
self.spectra: Dict[Target, PfsFiberArray] = {spectrum.target: spectrum for spectrum in spectra}
def __getitem__(self, target: Target) -> PfsFiberArray:
"""Retrieve spectrum for target"""
return self.spectra[target]
def __iter__(self) -> Iterator[Target]:
"""Return iterator over targets in container"""
return iter(self.spectra)
def __len__(self) -> int:
"""Return length of container"""
return len(self.spectra)
def __contains__(self, target: Target) -> bool:
"""Return whether target is in container"""
return target in self.spectra
@classmethod
def readFits(cls, filename: str) -> "PfsTargetSpectra":
"""Read from FITS file
Parameters
----------
filename : `str`
Filename of FITS file.
Returns
-------
self : ``cls``
Constructed instance, from FITS file.
"""
spectra = []
with astropy.io.fits.open(filename) as fits:
targetHdu = fits["TARGET"].data
targetFluxHdu = fits["TARGETFLUX"].data
observationsHdu = fits["OBSERVATIONS"].data
wavelengthHdu = fits["WAVELENGTH"].data
fluxHdu = fits["FLUX"].data
maskHdu = fits["MASK"].data
skyHdu = fits["SKY"].data
covarHdu = fits["COVAR"].data
covar2Hdu = fits["COVAR2"].data if "COVAR2" in fits else None
metadataHdu = fits["METADATA"].data
fluxTableHdu = fits["FLUXTABLE"].data
notesTable = cls.NotesClass.readHdu(fits)
for ii, row in enumerate(targetHdu):
targetId = row["targetId"]
select = targetFluxHdu.targetId == targetId
fiberFlux = dict(
zip(
("".join(np.char.decode(ss.astype("S"))) for ss in targetFluxHdu.filterName[select]),
targetFluxHdu.fiberFlux[select],
)
)
target = Target(
row["catId"],
row["tract"],
"".join(row["patch"]),
row["objId"],
row["ra"],
row["dec"],
TargetType(row["targetType"]),
fiberFlux=fiberFlux,
)
select = observationsHdu.targetId == targetId
observations = Observations(
observationsHdu.visit[select],
["".join(np.char.decode(ss.astype("S"))) for ss in observationsHdu.arm[select]],
observationsHdu.spectrograph[select],
observationsHdu.pfsDesignId[select],
observationsHdu.fiberId[select],
observationsHdu.pfiNominal[select],
observationsHdu.pfiCenter[select],
)
metadataRow = metadataHdu[ii]
assert metadataRow["targetId"] == targetId
metadata = yaml.load(
# This complicated conversion is required in order to preserve the newlines
"".join(np.char.decode(metadataRow["metadata"].astype("S"))),
Loader=yaml.SafeLoader,
)
flags = MaskHelper.fromFitsHeader(metadata, strip=True)
fluxTableRow = fluxTableHdu[ii]
assert fluxTableRow["targetId"] == targetId
fluxTable = FluxTable(
fluxTableRow["wavelength"],
fluxTableRow["flux"],
fluxTableRow["error"],
fluxTableRow["mask"],
flags,
)
notes = cls.PfsFiberArrayClass.NotesClass(
**{col.name: notesTable[col.name][ii] for col in notesTable.schema}
)
spectrum = cls.PfsFiberArrayClass(
target,
observations,
wavelengthHdu[ii],
fluxHdu[ii],
maskHdu[ii],
skyHdu[ii],
covarHdu[ii],
covar2Hdu[ii] if covar2Hdu is not None else [],
flags,
metadata,
fluxTable,
notes,
)
spectra.append(spectrum)
return cls(spectra)
def writeFits(self, filename: str):
"""Write to FITS file
This API is intended for use by the LSST data butler, which handles
translating the desired identity into a filename.
Parameters
----------
filename : `str`
Filename of FITS file.
"""
fits = HDUList()
targetId = np.arange(len(self), dtype=np.int16)
fits.append(
BinTableHDU.from_columns(
[
Column("targetId", "I", array=targetId),
Column("catId", "J", array=[target.catId for target in self]),
Column("tract", "J", array=[target.tract for target in self]),
Column("patch", "PA()", array=[target.patch for target in self]),
Column("objId", "K", array=[target.objId for target in self]),
Column("ra", "D", array=[target.ra for target in self]),
Column("dec", "D", array=[target.dec for target in self]),
Column("targetType", "I", array=[int(target.targetType) for target in self]),
],
name="TARGET",
)
)
numFluxes = sum(len(target.fiberFlux) for target in self)
targetFluxIndex = np.empty(numFluxes, dtype=np.int16)
filterName: List[str] = []
fiberFlux = np.empty(numFluxes, dtype=np.float32)
start = 0
for tt, target in zip(targetId, self):
num = len(target.fiberFlux)
stop = start + num
targetFluxIndex[start:stop] = tt
filterName += list(target.fiberFlux.keys())
fiberFlux[start:stop] = np.array(list(target.fiberFlux.values()))
start = stop
fits.append(
BinTableHDU.from_columns(
[
Column("targetId", "I", array=targetFluxIndex),
Column("filterName", "PA()", array=filterName),
Column("fiberFlux", "E", array=fiberFlux),
],
name="TARGETFLUX",
)
)
numObservations = sum(len(ss.observations) for ss in self.values())
observationsIndex = np.empty(numObservations, dtype=np.int16)
visit = np.empty(numObservations, dtype=np.int32)
arm: List[str] = []
spectrograph = np.empty(numObservations, dtype=np.int16)
pfsDesignId = np.empty(numObservations, dtype=np.int64)
fiberId = np.empty(numObservations, dtype=np.int32)
pfiNominal = np.empty((numObservations, 2), dtype=float)
pfiCenter = np.empty((numObservations, 2), dtype=float)
start = 0
for tt, spectrum in zip(targetId, self.values()):
observations = spectrum.observations
num = len(observations)
stop = start + num
observationsIndex[start:stop] = tt
visit[start:stop] = observations.visit
arm += list(observations.arm)
spectrograph[start:stop] = observations.spectrograph
pfsDesignId[start:stop] = observations.pfsDesignId
fiberId[start:stop] = observations.fiberId
pfiNominal[start:stop] = observations.pfiNominal
pfiCenter[start:stop] = observations.pfiCenter
start = stop
fits.append(
BinTableHDU.from_columns(
[
Column("targetId", "I", array=observationsIndex),
Column("visit", "J", array=visit),
Column("arm", "PA()", array=arm),
Column("spectrograph", "I", array=spectrograph),
Column("pfsDesignId", "K", array=pfsDesignId),
Column("fiberId", "J", array=fiberId),
Column("pfiNominal", "2D", array=pfiNominal),
Column("pfiCenter", "2D", array=pfiCenter),
],
name="OBSERVATIONS",
)
)
fits.append(ImageHDU(data=[spectrum.wavelength for spectrum in self.values()], name="WAVELENGTH"))
fits.append(ImageHDU(data=[spectrum.flux for spectrum in self.values()], name="FLUX"))
fits.append(ImageHDU(data=[spectrum.mask for spectrum in self.values()], name="MASK"))
fits.append(ImageHDU(data=[spectrum.sky for spectrum in self.values()], name="SKY"))
fits.append(ImageHDU(data=[spectrum.covar for spectrum in self.values()], name="COVAR"))
haveCovar2 = [spectrum.covar2 is not None for spectrum in self.values()]
if len(set(haveCovar2)) == 2:
raise RuntimeError("covar2 must be uniformly populated")
if any(haveCovar2):
fits.append(ImageHDU(data=[spectrum.covar2 for spectrum in self.values()], name="COVAR2"))
# Metadata table
metadata: List[str] = []
for spectrum in self.values():
md = spectrum.metadata.copy()
md.update(spectrum.flags.toFitsHeader())
metadata.append(yaml.dump(md))
fits.append(
BinTableHDU.from_columns(
[
Column("targetId", "I", array=targetId),
Column("metadata", "PA()", array=metadata),
],
name="METADATA",
)
)
fits.append(
BinTableHDU.from_columns(
[
Column("targetId", "I", array=targetId),
Column(
"wavelength",
"PD()",
array=[
spectrum.fluxTable.wavelength if spectrum.fluxTable else []
for spectrum in self.values()
],
),
Column(
"flux",
"PD()",
array=[
spectrum.fluxTable.flux if spectrum.fluxTable else []
for spectrum in self.values()
],
),
Column(
"error",
"PD()",
array=[
spectrum.fluxTable.error if spectrum.fluxTable else []
for spectrum in self.values()
],
),
Column(
"mask",
"PJ()",
array=[
spectrum.fluxTable.mask if spectrum.fluxTable else []
for spectrum in self.values()
],
),
],
name="FLUXTABLE",
)
)
notes = self.NotesClass.empty(len(self))
for ii, spectrum in enumerate(self.values()):
notes.setRow(ii, **spectrum.notes.getDict())
notes.writeHdu(fits)
with open(filename, "wb") as fd:
fits.writeto(fd)
class PfsCalibratedNotesTable(PfsTable):
"""Table of notes for PfsCalibratedSpectra"""
schema = PfsSingleNotes.schema
fitsExtName = "NOTES"
class PfsCalibratedSpectra(PfsTargetSpectra):
"""A collection of PfsSingle indexed by target"""
PfsFiberArrayClass = PfsSingle
NotesClass = PfsCalibratedNotesTable
class PfsObjectNotesTable(PfsTable):
"""Table of notes for PfsObjectSpectra"""
schema = PfsObjectNotes.schema
fitsExtName = "NOTES"
class PfsObjectSpectra(PfsTargetSpectra):
"""A collection of PfsObject indexed by target"""
PfsFiberArrayClass = PfsObject
NotesClass = PfsObjectNotesTable
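# Hedged usage sketch (filename is an assumption): the containers above round-
# trip through FITS, so writing, reading back, and indexing by Target looks
# roughly like this.
def _example_roundtrip(spectra: PfsCalibratedSpectra, filename: str = "pfsCalibrated.fits") -> None:
    spectra.writeFits(filename)
    restored = PfsCalibratedSpectra.readFits(filename)
    for target in restored:
        print(target, len(restored[target].observations))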
|
[
"price@astro.princeton.edu"
] |
price@astro.princeton.edu
|
a57b4e402cd6c093da4ebc82e7bcd1cd994a4a06
|
f7d9e3c2c31acc023335331ca1cce940b1d054a3
|
/demo_pythond_jango/booktest/views.py
|
f8c9f80431449bcce7d63e6193f890914b773a15
|
[] |
no_license
|
ht5678/yzh-learn
|
ed6fc6d1ef7497bcc44c18d0af3f017388da8521
|
c58ffe44b7b568c61164e1f9daf0ffea09ee3771
|
refs/heads/master
| 2023-02-25T12:09:04.037844 | 2022-08-23T16:19:21 | 2022-08-23T16:19:21 | 144,949,753 | 0 | 1 | null | 2023-02-22T02:43:39 | 2018-08-16T06:59:39 |
Java
|
UTF-8
|
Python
| false | false | 2,260 |
py
|
from django.shortcuts import render
from django.http import HttpResponse,HttpResponseRedirect
from django.template import loader,RequestContext
from django.shortcuts import render,redirect
from booktest.models import BookInfo
from datetime import date
def myRender(request, templatePath, contextDict={}):
    '''Render a template file manually'''
    # Using a template file:
    # 1. Load the template file and get a template object
    temp = loader.get_template(templatePath)
    # 2. Define the template context to pass data to the template
    context = RequestContext(request, {})
    # 3. Render the template to produce standard HTML content
    resHtml = temp.render(context)
    # 4. Return it to the browser
    return HttpResponse(resHtml)
# Create your views here.
# 1. Define view functions, which receive an HttpRequest object
# 2. Configure URLs to map a URL address to its view
# Test: http://localhost:8000/index
def index(request):
    # Do the processing, interacting with the Model and Template layers...
    # 1
    # return HttpResponse('ok')
    # 2
    # return myRender(request, 'booktest/index.html')
    # 3
    return render(request, 'booktest/index.html',
        {'content': 'hello world', 'list': list(range(1, 9))})
def showBooks(request):
    '''Display the list of books'''
    # Look up the book table through the model
    books = BookInfo.objects.all()
    # Use the template
    return render(request, 'booktest/showBooks.html', {'books': books})
def detail(request, bid):
    '''Show the heroes associated with a book'''
    # Look up the book by bid
    book = BookInfo.objects.get(id=bid)
    # Query the heroes related to this book
    heros = book.heroinfo_set.all()
    return render(request, 'booktest/detail.html', {'book': book, 'heros': heros})
def create(request):
    '''Add a new book'''
    # Create a BookInfo object
    b = BookInfo()
    b.btitle = '流星蝴蝶剑'
    b.bpub_date = date(1990, 1, 1)
    # Save it to the database
    b.save()
    # Respond by redirecting the browser to /books
    return HttpResponseRedirect('/books')
    # Shorthand:
    # return redirect('/books')
def delete(request, bid):
    '''Delete the selected book'''
    # 1. Get the book object by bid
    book = BookInfo.objects.get(id=bid)
    # 2. Delete it
    book.delete()
    # 3. Redirect the browser to /books
    return HttpResponseRedirect('/books')
|
[
"yuezh2@lenovo.com"
] |
yuezh2@lenovo.com
|
176d0b6229b4f26e00bbaa4c702c2b0b5598691c
|
1b9e4843268255b643fb365039fa69b4a9097b38
|
/src/pieltk/alphabet/xcr.py
|
cf2961091028b0f55f1a6e942331427db4d39d99
|
[
"MIT"
] |
permissive
|
caiogeraldes/pieltk
|
1e6e4ddbf30b03ef7f0947b12e4c83289df7472a
|
205c2c030fce5f82551fe36fb48eef1040c7e496
|
refs/heads/main
| 2023-08-04T14:44:02.932297 | 2021-09-15T13:21:47 | 2021-09-15T13:21:47 | 403,777,652 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,211 |
py
|
"""The Carian alphabet. Sources:
- `<https://www.unicode.org/charts/PDF/U102A0.pdf>`
- Adiego, Ignacio, J. (2007) The Carian Language
"""
__author__ = [
"Caio Geraldes <caio.geraldes@usp.br>"]
VOWELS = [
"\U000102A0", # 𐊠 CARIAN LETTER A
"\U000102A7", # 𐊧 CARIAN LETTER A2
"\U000102AB", # 𐊫 CARIAN LETTER O
"\U000102B2", # 𐊲 CARIAN LETTER U
"\U000102B9", # 𐊹 CARIAN LETTER I
"\U000102BA", # 𐊺 CARIAN LETTER E
"\U000102BF", # 𐊿 CARIAN LETTER UU
"\U000102C5", # 𐋅 CARIAN LETTER II
"\U000102C8", # 𐋈 CARIAN LETTER UUU2
"\U000102CF", # 𐋏 CARIAN LETTER E2
"\U000102D0", # 𐋐 CARIAN LETTER UUU3
]
CONSONANTS = [
"\U000102A1", # 𐊡 CARIAN LETTER P2
"\U000102A2", # 𐊢 CARIAN LETTER D
"\U000102A3", # 𐊣 CARIAN LETTER L
"\U000102A4", # 𐊤 CARIAN LETTER UUU
"\U000102A5", # 𐊥 CARIAN LETTER R
"\U000102A6", # 𐊦 CARIAN LETTER LD
"\U000102A8", # 𐊨 CARIAN LETTER Q
"\U000102A9", # 𐊩 CARIAN LETTER B
"\U000102AA", # 𐊪 CARIAN LETTER M
"\U000102AC", # 𐊬 CARIAN LETTER D2
"\U000102AD", # 𐊭 CARIAN LETTER T
"\U000102AE", # 𐊮 CARIAN LETTER SH
"\U000102AF", # 𐊯 CARIAN LETTER SH2
"\U000102B0", # 𐊰 CARIAN LETTER S
"\U000102B1", # 𐊱 CARIAN LETTER C-18
"\U000102B3", # 𐊳 CARIAN LETTER NN
"\U000102B4", # 𐊴 CARIAN LETTER X
"\U000102B5", # 𐊵 CARIAN LETTER N
"\U000102B6", # 𐊶 CARIAN LETTER TT2
"\U000102B7", # 𐊷 CARIAN LETTER P
"\U000102B8", # 𐊸 CARIAN LETTER SS
"\U000102BB", # 𐊻 CARIAN LETTER UUUU
"\U000102BC", # 𐊼 CARIAN LETTER K
"\U000102BD", # 𐊽 CARIAN LETTER K2
"\U000102BE", # 𐊾 CARIAN LETTER ND
"\U000102C0", # 𐋀 CARIAN LETTER G
"\U000102C1", # 𐋁 CARIAN LETTER G2
"\U000102C2", # 𐋂 CARIAN LETTER ST
"\U000102C3", # 𐋃 CARIAN LETTER ST2
"\U000102C4", # 𐋄 CARIAN LETTER NG
"\U000102C6", # 𐋆 CARIAN LETTER C-39
"\U000102C7", # 𐋇 CARIAN LETTER TT
"\U000102C9", # 𐋉 CARIAN LETTER RR
"\U000102CA", # 𐋊 CARIAN LETTER MB
"\U000102CB", # 𐋋 CARIAN LETTER MB2
"\U000102CC", # 𐋌 CARIAN LETTER MB3
"\U000102CD", # 𐋍 CARIAN LETTER MB4
"\U000102CE", # 𐋎 CARIAN LETTER LD2
]
# The i and r used at Hyllarima are not represented as glyphs
# of their own yet.
HYLLARIMA = [
"\U000102A0", # 𐊠 a
"\U000102A2", # 𐊢 d
"\U000102A3", # 𐊣 l
"\U000102A4", # 𐊤 y
"\U000102A5", # 𐊥 r
"\U000102CE", # 𐋎 λ
"\U000102A8", # 𐊨 q
"\U000102A9", # 𐊩 b
"\U000102AA", # 𐊪 m
"\U000102AB", # 𐊫 o
"\U000102AD", # 𐊭 t
"\U000102AE", # 𐊮 sh
"\U000102B0", # 𐊰 s
"\U000102B2", # 𐊲 u
"\U000102B3", # 𐊳 ñ
"\U000102B5", # 𐊵 n
"\U000102B7", # 𐊷 p
"\U000102B8", # 𐊸 ś
"\U000102B9", # 𐊹 i
"\U000102CF", # 𐋏 e
"\U000102BD", # 𐊽 k
"\U000102BE", # 𐊾 δ
"\U000102C3", # 𐋃 z
"\U000102C7", # 𐋇 τ
]
# The q and r used at Euromos are not represented as glyphs of their own yet.
EUROMOS = [
"\U000102A0", # 𐊠 a
"\U000102A2", # 𐊢 d
"\U000102A3", # 𐊣 l
"\U000102A4", # 𐊤 y
"\U000102A5", # 𐊥 r
"\U000102CE", # 𐋎 λ
"\U000102A8", # 𐊨 q
"\U000102A9", # 𐊩 b
"\U000102AA", # 𐊪 m
"\U000102AB", # 𐊫 o
"\U000102AD", # 𐊭 t
"\U000102B0", # 𐊰 s
"\U000102B2", # 𐊲 u
"\U000102B4", # 𐊴 ḱ
"\U000102B5", # 𐊵 n
"\U000102B8", # 𐊸 ś
"\U000102B9", # 𐊹 i
"\U000102CF", # 𐋏 e
"\U000102BD", # 𐊽 k
"\U000102BC", # 𐊼 k2
"\U000102BE", # 𐊾 δ
"\U000102C3", # 𐋃 z
]
# The β, i, q and z used at Mylasa are not represented as glyphs
# of their own yet.
MYLASA = [
"\U000102A0", # 𐊠 a
"\U000102A2", # 𐊢 d
"\U000102A3", # 𐊣 l
"\U000102D0", # 𐋐 y
"\U000102A5", # 𐊥 r
"\U000102A8", # 𐊨 q
"\U000102A9", # 𐊩 b
"\U000102AA", # 𐊪 m
"\U000102AB", # 𐊫 o
"\U000102AD", # 𐊭 t
"\U000102AE", # 𐊮 sh
"\U000102B0", # 𐊰 s
"\U000102B2", # 𐊲 u
"\U000102B4", # 𐊴 ḱ
"\U000102B5", # 𐊵 n
"\U000102B7", # 𐊷 p
"\U000102B8", # 𐊸 ś
"\U000102B9", # 𐊹 i
"\U000102CF", # 𐋏 e
"\U000102BD", # 𐊽 k
"\U000102BE", # 𐊾 δ
"\U000102C3", # 𐋃 z
]
# The ḱ and β used at Stratonikeia are not represented as glyphs
# of their own yet.
STRATONIKEIA = [
"\U000102A0", # 𐊠 a
"\U000102A2", # 𐊢 d
"\U000102A3", # 𐊣 l
"\U000102A4", # 𐊤 y
"\U000102A5", # 𐊥 r
"\U000102A6", # 𐊦 λ
"\U000102A8", # 𐊨 q
"\U000102AA", # 𐊪 m
"\U000102AB", # 𐊫 o
"\U000102AD", # 𐊭 t
"\U000102AE", # 𐊮 sh
"\U000102B0", # 𐊰 s
"\U000102B1", # 𐊱 ?
"\U000102B2", # 𐊲 u
"\U000102B3", # 𐊳 ñ
"\U000102B4", # 𐊴 ḱ
"\U000102B5", # 𐊵 n
"\U000102B7", # 𐊷 p
"\U000102B8", # 𐊸 ś
"\U000102B9", # 𐊹 i
"\U000102BA", # 𐊺 e
"\U000102BD", # 𐊽 k
"\U000102BE", # 𐊾 δ
"\U000102C3", # 𐋃 z
]
# The a used at Sinuri-Kildara is not represented as a glyph of its own yet.
SINURI_KILDARA = [
"\U000102A0", # 𐊠 a
"\U000102A2", # 𐊢 d
"\U000102A3", # 𐊣 l
"\U000102D0", # 𐋐 y
"\U000102A5", # 𐊥 r
"\U000102A6", # 𐊦 λ
"\U000102A8", # 𐊨 q
"\U000102A9", # 𐊩 b
"\U000102AA", # 𐊪 m
"\U000102AB", # 𐊫 o
"\U000102AD", # 𐊭 t
"\U000102AE", # 𐊮 sh
"\U000102B0", # 𐊰 s
"\U000102B1", # 𐊱 ?
"\U000102B2", # 𐊲 u
"\U000102B3", # 𐊳 ñ
"\U000102B4", # 𐊴 ḱ
"\U000102B5", # 𐊵 n
"\U000102B7", # 𐊷 p
"\U000102B8", # 𐊸 ś
"\U000102B9", # 𐊹 i
"\U000102BA", # 𐊺 e
"\U000102BC", # 𐊼 k
"\U000102BE", # 𐊾 δ
"\U000102C3", # 𐋃 z
"\U000102C4", # 𐋄 ŋ?
]
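# Hedged consistency sketch (added for illustration): each site-specific
# inventory above is expected to draw only on the master VOWELS/CONSONANTS
# lists; this helper reports any letter that falls outside them.
def _letters_outside_master():
    master = set(VOWELS) | set(CONSONANTS)
    sites = {"HYLLARIMA": HYLLARIMA, "EUROMOS": EUROMOS, "MYLASA": MYLASA,
             "STRATONIKEIA": STRATONIKEIA, "SINURI_KILDARA": SINURI_KILDARA}
    return {name: [ch for ch in letters if ch not in master]
            for name, letters in sites.items()}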
# Kaunos, C.series, Memphis
|
[
"caioaguida@protonmail.com"
] |
caioaguida@protonmail.com
|
a69b2c11900d6d7328f335f6420a6b344ad49c97
|
0ddcaee809d93e4d5b12d8269964cafd7dd8333d
|
/__init__.py
|
f2285368ce87d1d0884809647b24e4f7d8dca542
|
[] |
no_license
|
tin2tin/Text_Editor_Reworked
|
63db1ebde254104700158011228654f417f76a1a
|
5504b57d6e34a905cd009be825baacd8b5a2edb8
|
refs/heads/master
| 2020-06-06T10:12:28.655841 | 2019-06-19T12:25:58 | 2019-06-19T12:25:58 | 192,710,657 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,156 |
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
# support reloading sub-modules
if "bpy" in locals():
from importlib import reload
_modules_loaded[:] = [reload(val) for val in _modules_loaded]
del reload
_modules = [
"add_mesh_torus",
"anim",
"clip",
"console",
"constraint",
"file",
"image",
"mask",
"mesh",
"node",
"object",
"object_align",
"object_quick_effects",
"object_randomize_transform",
"presets",
"rigidbody",
"screen_play_rendered_anim",
"sequencer",
"text_editor",
"userpref",
"uvcalc_follow_active",
"uvcalc_lightmap",
"uvcalc_smart_project",
"vertexpaint_dirt",
"view3d",
"wm",
]
import bpy
if bpy.app.build_options.freestyle:
_modules.append("freestyle")
__import__(name=__name__, fromlist=_modules)
_namespace = globals()
_modules_loaded = [_namespace[name] for name in _modules]
del _namespace
def register():
from bpy.utils import register_class
for mod in _modules_loaded:
for cls in mod.classes:
register_class(cls)
def unregister():
from bpy.utils import unregister_class
for mod in reversed(_modules_loaded):
for cls in reversed(mod.classes):
if cls.is_registered:
unregister_class(cls)
|
[
"noreply@github.com"
] |
tin2tin.noreply@github.com
|
310a2ff7d5c25b08fd026424c91c406d6dce04a7
|
8e4a5e0a81fc9401fc0b6e55dd55e8d6e29c3ed6
|
/PycharmProjects/licamb/licamb/db.py
|
56e07023c14dd0a9ab4cc3e86d345f33321735e3
|
[] |
no_license
|
rogeriodelphi/portifolio
|
1fb16c8c723b97f20cdd305224b660a1657f3913
|
5c704305ce26576afb4efd1e410f691971f06fac
|
refs/heads/master
| 2023-08-11T05:33:37.539047 | 2021-09-26T01:57:02 | 2021-09-26T01:57:02 | 284,164,866 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 597 |
py
|
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SQLITE = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
# }
POSTGRESQL = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'db',
'USER': 'postgres',
'PASSWORD': '123456',
'HOST': 'localhost',
'PORT': '5432',
}
}
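# Hedged usage note (settings module layout is an assumption): the project's
# settings.py would import one of the dictionaries above as its DATABASES
# setting, e.g.
#
#   from licamb.db import SQLITE, POSTGRESQL
#   DATABASES = POSTGRESQL  # or SQLITE for local development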
|
[
"rogeriodelphi@gmail.com"
] |
rogeriodelphi@gmail.com
|
bd979fbfdf99268a25aa04c104f5e1b6c5f056af
|
beba93bfd1ef7506ffac19e93e7e14abfc6b9aa6
|
/category/admin.py
|
c0fb99b4ed7c5acc71ed8fc24bc55c05aa8b1bdc
|
[] |
no_license
|
Glazzko/reccomendation-system
|
9fde738ecfdefbba4fb13447bda8dd202869c5a7
|
199a7a1347d93955cdc3da5e50d005d8002b72e1
|
refs/heads/master
| 2023-05-31T22:56:17.872071 | 2021-06-12T22:18:34 | 2021-06-12T22:18:34 | 375,421,326 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
from django.contrib import admin
from category.models import Category
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
list_display = ("label",)
search_fields = ("label",)
|
[
"rimas.juzeliunas@panko.lt"
] |
rimas.juzeliunas@panko.lt
|
075d9d7617dfb64faaea57e301f6784de40b09c2
|
d78766f66557c424a2e3b85286a95e621ea1f345
|
/windaq.py
|
cacc8052fb2c78122754423b5095721be464a086
|
[] |
no_license
|
aguiarla/windaq3
|
d5550a3ee5b076a510ea29bca5d7c0deb65fca55
|
8b07a02f71efef8f3d749c3300491c6114ce2c69
|
refs/heads/master
| 2022-01-09T04:36:23.999147 | 2019-05-02T13:40:38 | 2019-05-02T13:40:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,181 |
py
|
'''
Created on February, 2019
@author: samper
Windaq class object to work directly with .wdq files
Python 3 Version
'''
#!/usr/bin/python
import struct
import datetime
class windaq(object):
'''
Read windaq files (.wdq extension) without having to convert them to .csv or other human readable text
Code based on http://www.dataq.com/resources/pdfs/misc/ff.pdf provided by Dataq, code and comments will refer to conventions from this file
and python library https://www.socsci.ru.nl/wilberth/python/wdq.py that does not appear to support the .wdq files created by WINDAQ/PRO+
'''
def __init__(self, filename):
''' Define data types based off convention used in documentation from Dataq '''
UI = "<H" # unsigned integer, little endian
I = "<h" # integer, little endian
B = "B" # unsigned byte, kind of reduntent but lets keep consistant with the documentation
UL = "<L" # unsigned long, little endian
D = "<d" # double, little endian
L = "<l" # long, little endian
F = "<f" # float, little endian
''' Open file as binary '''
with open(filename, 'rb') as self._file:
self._fcontents = self._file.read()
''' Read Header Info '''
if (struct.unpack_from(B, self._fcontents, 1)[0]): # max channels >= 144
self.nChannels = (struct.unpack_from(B, self._fcontents, 0)[0]) # number of channels is element 1
else:
self.nChannels = (struct.unpack_from(B, self._fcontents, 0)[0]) & 31 # number of channels is element 1 mask bit 5
self._hChannels = struct.unpack_from(B, self._fcontents, 4)[0] # offset in bytes from BOF to header channel info tables
self._hChannelSize = struct.unpack_from(B, self._fcontents, 5)[0] # number of bytes in each channel info entry
self._headSize = struct.unpack_from(I, self._fcontents, 6)[0] # number of bytes in data file header
self._dataSize = struct.unpack_from(UL, self._fcontents, 8)[0] # number of ADC data bytes in file excluding header
self.nSample = (self._dataSize/(2*self.nChannels)) # number of samples per channel
self._trailerSize = struct.unpack_from(UL, self._fcontents,12)[0] # total number of event marker, time and date stamp, and event marker commet pointer bytes in trailer
        self._annoSize = struct.unpack_from(UI, self._fcontents, 16)[0]        # total number of user annotation bytes including 1 null per channel
self.timeStep = struct.unpack_from(D, self._fcontents, 28)[0] # time between channel samples: 1/(sample rate throughput / total number of acquired channels)
e14 = struct.unpack_from(L, self._fcontents, 36)[0] # time file was opened by acquisition: total number of seconds since jan 1 1970
e15 = struct.unpack_from(L, self._fcontents, 40)[0] # time file was written by acquisition: total number of seconds since jan 1 1970
self.fileCreated = datetime.datetime.fromtimestamp(e14).strftime('%Y-%m-%d %H:%M:%S') # datetime format of time file was opened by acquisition
self.fileWritten = datetime.datetime.fromtimestamp(e15).strftime('%Y-%m-%d %H:%M:%S') # datetime format of time file was written by acquisition
self._packed = ((struct.unpack_from(UI, self._fcontents, 100)[0]) & 16384) >> 14 # bit 14 of element 27 indicates packed file. bitwise & e27 with 16384 to mask all bits but 14 and then shift to 0 bit place
self._HiRes = ((struct.unpack_from(UI, self._fcontents, 100)[0]) & 1) # bit 1 of element 27 indicates a HiRes file with 16-bit data
''' read channel info '''
self.scalingSlope = []
self.scalingIntercept = []
self.calScaling = []
self.calIntercept = []
self.engUnits = []
self.sampleRateDivisor = []
self.phyChannel = []
for channel in range(0,self.nChannels):
channelOffset = self._hChannels + (self._hChannelSize * channel) # calculate channel header offset from beginning of file, each channel header size is defined in _hChannelSize
self.scalingSlope.append(struct.unpack_from(F, self._fcontents, channelOffset)[0]) # scaling slope (m) applied to the waveform to scale it within the display window
self.scalingIntercept.append(struct.unpack_from(F,self._fcontents, channelOffset + 4)[0]) # scaling intercept (b) applied to the waveform to scale it within the display window
self.calScaling.append(struct.unpack_from(D, self._fcontents, channelOffset + 4 + 4)[0]) # calibration scaling factor (m) for waveform value display
self.calIntercept.append(struct.unpack_from(D, self._fcontents, channelOffset + 4 + 4 + 8)[0]) # calibration intercept factor (b) for waveform value display
self.engUnits.append(struct.unpack_from("cccccc", self._fcontents, channelOffset + 4 + 4 + 8 + 8)) # engineering units tag for calibrated waveform, only 4 bytes are used, the last two are null
if self._packed: # if file is packed then item 7 is the sample rate divisor
self.sampleRateDivisor.append(struct.unpack_from(B, self._fcontents, channelOffset + 4 + 4 + 8 + 8 + 6 + 1)[0])
else:
self.sampleRateDivisor.append(1)
self.phyChannel.append(struct.unpack_from(B, self._fcontents, channelOffset + 4 + 4 + 8 + 8 + 6 + 1 + 1)[0]) # describes the physical channel number
''' read user annotations '''
aOffset = self._headSize + self._dataSize + self._trailerSize
aTemp = ''
for i in range(0, self._annoSize):
aTemp += struct.unpack_from('c', self._fcontents, aOffset + i)[0].decode("utf-8")
self._annotations = aTemp.split('\x00')
def data(self, channelNumber):
''' return the data for the channel requested
data format is saved CH1tonChannels one sample at a time.
each sample is read as a 16bit word and then shifted to a 14bit value
'''
dataOffset = self._headSize + ((channelNumber -1) * 2)
data = []
for i in range(0, int(self.nSample)):
channelIndex = dataOffset + (2*self.nChannels * i)
if self._HiRes:
temp = struct.unpack_from("<h", self._fcontents, channelIndex)[0] * 0.25 # multiply by 0.25 for HiRes data
else:
temp = struct.unpack_from("<h", self._fcontents, channelIndex)[0] >> 2 # bit shift by two for normal data
temp2 = self.calScaling[channelNumber-1]*temp + self.calIntercept[channelNumber-1]
data.append(temp2)
return data
def time(self):
''' return time '''
t = []
for i in range(0, int(self.nSample)):
t.append(self.timeStep * i)
return t
def unit(self, channelNumber):
''' return unit of requested channel '''
unit = ''
for b in self.engUnits[channelNumber-1]:
unit += b.decode('utf-8')
''' Was getting \x00 in the unit string after decodeing, lets remove that and whitespace '''
unit = unit.replace('\x00', '').strip()
return unit
def chAnnotation(self, channelNumber):
''' return user annotation of requested channel '''
return self._annotations[channelNumber-1]
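# Minimal usage sketch (illustrative only; 'example.wdq' is a hypothetical file name):
#   wdq = windaq('example.wdq')
#   t = wdq.time()                      # shared time axis for all channels
#   ch1 = wdq.data(1)                   # calibrated samples of channel 1
#   print(wdq.unit(1), wdq.chAnnotation(1))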
|
[
"17242713+sdp8483@users.noreply.github.com"
] |
17242713+sdp8483@users.noreply.github.com
|
e610e2ff68b9264be3b2f2e6659c8a516cad7e27
|
eb136fec7f6dfcb11834cc0cd4d3daec1d7a4dc6
|
/fiasco_api/expenses/migrations/0001_initial.py
|
40ad410a30d5561dfacbc245e35bd26e587ef388
|
[
"MIT"
] |
permissive
|
xelnod/fiasco_backend
|
4635cff2fd220585c4433010e64208dfebbf2441
|
edeca8cac8c7b1a1cc53051d4443cc2996eba37c
|
refs/heads/master
| 2020-09-21T13:37:37.971952 | 2020-09-15T19:38:37 | 2020-09-15T19:38:37 | 224,804,463 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,396 |
py
|
# Generated by Django 3.1.1 on 2020-09-13 21:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('categories', '0001_initial'),
('channels', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ExpenseProto',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255)),
('comment', models.TextField(blank=True, null=True)),
('amount', models.IntegerField(default=0)),
('channel', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='channels.channel')),
('kit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='categories.kit')),
],
),
migrations.CreateModel(
name='Expense',
fields=[
('expenseproto_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='expenses.expenseproto')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('is_fulfilled', models.BooleanField(default=True)),
('money_stored', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
bases=('expenses.expenseproto', models.Model),
),
migrations.CreateModel(
name='OngoingExpense',
fields=[
('expenseproto_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='expenses.expenseproto')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('scope', models.IntegerField(choices=[(0, 'Month'), (1, 'Year')], default=0)),
],
options={
'abstract': False,
},
bases=('expenses.expenseproto', models.Model),
),
]
|
[
"s.zench@yandex.ru"
] |
s.zench@yandex.ru
|
9a703574fa19dba5c5f356bbf2eb597785ed8295
|
99f46a281afb98f76ac4cc2bd6e7d419d99baa5b
|
/train.py
|
1fa353c2c5c881b119e28c0e10e986703e02eef2
|
[
"MIT"
] |
permissive
|
mattdangerw/keras-text-generation
|
5fc1951760851cdd9678ce963cdad9e0cf43c277
|
b04ac44bf68e9f68520b1492d7f30864b1862fb3
|
refs/heads/master
| 2023-04-06T16:23:01.756861 | 2020-11-09T07:53:02 | 2020-11-09T08:32:00 | 94,829,643 | 24 | 10 |
MIT
| 2023-03-25T00:21:17 | 2017-06-19T23:33:23 |
Python
|
UTF-8
|
Python
| false | false | 1,932 |
py
|
# -*- coding: utf-8 -*-
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from model import MetaModel, save
def main():
parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--data-dir', type=str, default='data/tinyshakespeare',
help='data directory containing input.txt')
parser.add_argument('--live-sample', action='store_true',
help='live sample the model after each epoch')
parser.add_argument('--word-tokens', action='store_true',
help='whether to model the rnn at word level or char level')
parser.add_argument('--pristine-input', action='store_true',
help='do not lowercase or attempt fancy tokenization of input')
parser.add_argument('--pristine-output', action='store_true',
help='do not detokenize output (word-tokens only)')
parser.add_argument('--embedding-size', type=int, default=64,
help='size of the embedding')
parser.add_argument('--rnn-size', type=int, default=128,
help='size of RNN layers')
parser.add_argument('--num-layers', type=int, default=2,
help='number of layers in the RNN')
parser.add_argument('--batch-size', type=int, default=32,
help='minibatch size')
parser.add_argument('--seq-length', type=int, default=50,
help='training sequence length')
parser.add_argument('--seq-step', type=int, default=25,
help='how often to pull a training sequence from the data')
parser.add_argument('--num-epochs', type=int, default=50,
help='number of epochs')
args = parser.parse_args()
model = MetaModel()
model.train(**vars(args))
save(model, args.data_dir)
if __name__ == '__main__':
main()
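# Example invocation (illustrative; the values shown are the parser defaults above):
#   python train.py --data-dir data/tinyshakespeare --rnn-size 128 --num-epochs 50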
|
[
"mattdangerw@gmail.com"
] |
mattdangerw@gmail.com
|
236dd011739d70280f592f42aa12d08407006032
|
2ea8277bf5a0bb496616c35cfecdf7ad9871c361
|
/examples/TimoBeam/homo-nn-T3-mesh1/calculate_traction.py
|
10e8deb7ddad73be670938164b288110fb4aff48
|
[] |
no_license
|
J-Mounir/multiscale-homogenization
|
f7f7c9e9f2567bc07c4ab0b830e85dc539f709f3
|
36b51abc91847ad10726c94600f57577eca27d02
|
refs/heads/master
| 2023-06-16T12:45:52.768027 | 2021-07-14T09:31:44 | 2021-07-14T09:31:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,541 |
py
|
import numpy as np
thickness = 1
nodes = np.array([
[713, 20.0, 0.0],
[712, 20.0, 0.384615391],
[711, 20.0, 0.769230783],
[710, 20.0, 1.15384614],
[709, 20.0, 1.53846157],
[708, 20.0, 1.92307687],
[707, 20.0, 2.30769229],
[706, 20.0, 2.69230771],
[705, 20.0, 3.07692313],
[704, 20.0, 3.46153855],
[703, 20.0, 3.84615374],
[702, 20.0, 4.23076916],
[701, 20.0, 4.61538458],
[700, 20.0, 5.0]])
traction = np.array([
[713, 0.0, -0.25],
[712, 0.0, -0.25],
[711, 0.0, -0.25],
[710, 0.0, -0.25],
[709, 0.0, -0.25],
[708, 0.0, -0.25],
[707, 0.0, -0.25],
[706, 0.0, -0.25],
[705, 0.0, -0.25],
[704, 0.0, -0.25],
[703, 0.0, -0.25],
[702, 0.0, -0.25],
[701, 0.0, -0.25],
[700, 0.0, -0.25]]) # A sequence of node must be in this list
rows, columns = np.shape(traction)
loading = np.zeros([rows, columns-1])
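# Distribute the edge traction to nodal loads segment by segment: each segment of length L
# contributes L/2 * thickness * traction to each of its two end nodes (trapezoidal lumping).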
for i in range(rows-1):
node1 = nodes[i, 1:]
node2 = nodes[i+1, 1:]
L = np.sqrt((node2[0] - node1[0])**2 + (node2[1] - node1[1])**2)
T1 = L/2.0 * thickness * traction[i, 1:]
T2 = L/2.0 * thickness * traction[i+1, 1:]
loading[i, :] += T1
loading[i+1, :] += T2
# Check input
outputpath = './traction.dat'
# Open output file to write
outputfile = open(outputpath, 'a+')
outputfile.write('<ExternalForces>\r')
for i in range(rows):
strg1 = 'u[' + str(int(traction[i, 0])) + '] = ' + str(loading[i, 0]) + ';\r'
outputfile.write(strg1)
strg2 = 'v[' + str(int(traction[i, 0])) + '] = ' + str(loading[i, 1]) + ';\r'
outputfile.write(strg2)
outputfile.write('</ExternalForces>\r')
# Close the output file once all nodal forces have been written
outputfile.close()
|
[
"ntvminh286@gmail.com"
] |
ntvminh286@gmail.com
|
bcb6a7fd57fa30d196aa43d973ea34ee9f191570
|
96d0a7fc3deee438749fc59fcb6951eb0bc1affe
|
/blockchain.py
|
451f012e9c3fa3bd6957343f25d09fdefe8a5fb8
|
[] |
no_license
|
Kay-Wilkinson/smallBlockchainProject
|
5530d8ef4794d4f42736b6933c64a27ae3f96ad7
|
730ef4af6b6de2094c61a22afd2e17b201fbd5af
|
refs/heads/master
| 2020-03-28T18:48:19.998887 | 2018-09-15T15:22:38 | 2018-09-15T15:22:38 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,818 |
py
|
import hashlib
from hashlib import sha256
import json
from time import time
from urllib.parse import urlparse
from uuid import uuid4
from textwrap import dedent
import requests
from flask import Flask, jsonify, request
class Blockchain(object):
def __init__(self):
self.chain = []
self.current_transactions = []
# create the genesis block
self.new_block(previous_hash=1, proof=100)
def new_block(self, proof, previous_hash=None):
#create a new block and add it to the chain
"""
:param proof: <int> The proof given by the Proof of Work algorithm
:param previous_hash: (Optional) <str> Hash of previous Block
:return: <dict> New Block
"""
block = {
'index': len(self.chain) + 1,
'timestamp': time(),
'transactions': self.current_transactions,
'proof': proof,
'previous_hash': previous_hash or self.hash(self.chain[-1]),
}
#reset the current list of transactions
self.current_transactions = []
self.chain.append(block)
return block
# pass
def new_transaction(self, sender, recipient, amount):
"""
#creates a new transaction to the list of transactions
:param sender: <str> Address of the Sender
:param recipient: <str> Address of the Recipient
:param amount: <int> Amount
:return: <int> The index of the Block that will hold this transaction
"""
self.current_transactions.append({
'sender': sender,
'recipient': recipient,
'amount': amount,
})
return self.last_block['index'] + 1
# returns index of the block that the transaction was added to - the next one to mined.
# pass
def proof_of_work(self, last_proof):
"""
Simple PoW algorithm:
- Find a number p' such that hash(pp') contains leading 4 zeros, where p is the previous p'
- p is the previous proof, and p' is the new proof
:param last_proof: <int>
:return: <int>
"""
proof = 0
while self.valid_proof(last_proof, proof) is False:
proof += 1
return proof
@staticmethod
def valid_proof(last_proof, proof):
"""
Validates the proof : Does hash(last_proof, proof) contain 4 leading zeroes?
:param last_proof: <int> Previous proof
:param proof: <int> Current proof
:return: <bool> True if correct, False if not.
"""
guess = f'{last_proof}{proof}'.encode()
guess_hash = hashlib.sha256(guess).hexdigest()
return guess_hash[:4] == "0000"
#changing the hash to a different integer set will change the difficulty of the POW.
#This change would have a quadratic impact upon computational power to mine
@property
def last_block(self):
#returns the tail of the chain
#pass
return self.chain[-1]
@staticmethod
# no implicit arguments of the class it is called from. Can refactor this to method?
def hash(block):
#hashes the block
"""
Creates a SHA-256 hash of the Block
:param block: <dict> Block
:return: <str>
"""
#Dict must be ordered or inconsistent hashes D:
block_string = json.dumps(block, sort_keys=True).encode()
return hashlib.sha256(block_string).hexdigest()
#Instantiate our Node
app = Flask(__name__)
#Generate a globally unique address for this node
node_identifier = str(uuid4()).replace('-', '')
#Instantiate the Blockchain
blockchain = Blockchain()
# @app.route('/mine', methods=['GET'])
# def mine():
# return "Mining new Block"
@app.route('/transactions/new', methods=['POST'])
def new_transaction():
values = request.get_json()
#Form verification of the POST data
required = ['sender', 'recipient', 'amount']
if not all(k in values for k in required):
return 'Missing values', 400
#create a new transaction
index = blockchain.new_transaction(values['sender'], values['recipient'], values['amount'])
response = {'message': f'Transaction will be added to Block {index}'}
return jsonify(response), 201
@app.route('/chain', methods=["GET"])
def full_chain():
response = {
'chain': blockchain.chain,
'length': len(blockchain.chain),
}
return jsonify(response), 200
@app.route('/mine', methods=['GET'])
def mine():
#We run the proof of work algorithm to get the next proof...
last_block = blockchain.last_block
last_proof = last_block['proof']
proof = blockchain.proof_of_work(last_proof)
#receive reward
#Sender is "0" to signify that this node has mined a new coin.
blockchain.new_transaction(
sender="0",
recipient=node_identifier,
amount=1,
)
#forge a new block by adding it to the chain
previous_hash = blockchain.hash(last_block)
block = blockchain.new_block(proof, previous_hash)
response = {
'message': "New Block Forged",
'index': block['index'],
'transactions': block['transactions'],
'proof': block['proof'],
'previous_hash': block['previous_hash'],
}
return jsonify(response), 200
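# Example interaction once the server is running (illustrative; assumes the default host/port below):
#   curl http://localhost:5000/mine
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"sender": "abc", "recipient": "def", "amount": 5}' http://localhost:5000/transactions/new
#   curl http://localhost:5000/chain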
if __name__ == '__main__':
app.run(host='0.0.0.0', port=5000)
|
[
"noreply@github.com"
] |
Kay-Wilkinson.noreply@github.com
|
b39ad6bd4903fbdc886c9de6c9f08c51f8e9bf92
|
81a1e1f5907f34126cf26468e93c9b03d564a139
|
/blog/views.py
|
286b0ee315dcaa0bfa2e5021a85d5ccb3ec15ac5
|
[] |
no_license
|
cxrlover/Email
|
27e559b7135e67aba93ae3ca3cc5fe8c8cd8999f
|
54b685ab5feab4d7075cd08088a395b087e9632a
|
refs/heads/master
| 2022-02-01T11:15:32.147519 | 2019-07-21T03:46:50 | 2019-07-21T03:46:50 | 198,010,755 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,402 |
py
|
from django.core.mail import send_mail
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Post
from .forms import EmailPostForm
# Create your views here.
def share_post(req, post_id):
post = get_object_or_404(Post, id=post_id, status='published')
sent = False
if req.method == "POST":
form = EmailPostForm(req.POST)
if form.is_valid():
# Get the validated form data (a dict)
cd = form.cleaned_data
# Build the absolute URL of the post
post_url = req.build_absolute_uri(post.get_absolute_url())
subject = '{}({}) recommends you reading "{}"'.format(cd['name'],
cd['email'],
post.title)
message = "Read'{}' at {}\n\n{}\'s comments:{}".format(post.title,
post_url,
cd['name'],
cd['comments'])
send_mail(subject, message, '1107849083@qq.com', [cd['to'],])
sent = True
else:
form = EmailPostForm()  # show an empty, unbound form on GET requests
return render(req, 'blog/share.html', {'form': form,
'post': post,
})
# get_object_or_404 raises Http404 when no matching object is found
def post_list(req):
object_list = Post.published.all()
paginator = Paginator(object_list, 3)
page = req.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
# posts = Post.published.all()
# return render(req, 'blog/list.html', {'posts': posts})
return render(req, 'blog/list.html', {'page': page, 'posts': posts})
def post_detail(req, year, month, day, post):
post = get_object_or_404(Post,
publish__year=year,
publish__month=month,
publish__day=day,
slug=post, )
return render(req, 'blog/detail.html', {'post': post})
|
[
"1107849083@qq.com"
] |
1107849083@qq.com
|
92d34b6fea3257a43a8012c3d157b1864b4eb4cf
|
1893db5fbfb57a6d73c1e3c684e8e5dbcd4b58cb
|
/textRnn_embedding_tf2.py
|
2723ac4c6fe46b5514517b1491a2184efafa036f
|
[] |
no_license
|
crespo18/tf2.0
|
69a72097e34239d49a0eca9b5e7dcf14172a0f3a
|
36cea7b11b3bff04116ef482845eed0cba9c3cb8
|
refs/heads/main
| 2023-04-03T13:33:09.967634 | 2021-03-23T07:05:54 | 2021-03-23T07:05:54 | 325,563,049 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,485 |
py
|
#!/usr/bin/env python
#!-*- coding:utf-8 -*-
import jieba
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import Input,Model
from tensorflow.keras import preprocessing
class TextRnnTag:
def __init__(self, user_name):
self.user_name = user_name
def load_jiedai_data(self, file_name):
wordcnt_dict = {}
black_num = 0
white_num = 0
with open(file_name) as fp:
lines = fp.readlines()
for line in lines:
label,desc=line.split("@@@@@@@@@@")[0],line.split("@@@@@@@@@@")[1]
seg_list = self.cut_word(desc)
wordcnt_dict = self.generate_wordcnt_dict(wordcnt_dict, seg_list)
if int(label) == 1:
black_num += 1
elif int(label) == 0:
white_num += 1
#print('wordcnt_dict len: ', len(wordcnt_dict))
fp.close()
return black_num,white_num,wordcnt_dict
def cut_word(self, line):
seg_list = jieba.cut(line, cut_all=True, HMM=True)
return seg_list
def generate_wordcnt_dict(self, wordcnt_dict, seg_list):
for seg in seg_list:
if len(seg)>=1 and seg != '\n':
if not seg in wordcnt_dict.keys():
wordcnt_dict[seg] = 1
else:
wordcnt_dict[seg] += 1
return wordcnt_dict
def encode_word(self, wordcnt_dict):
word_index_dict = {}
wordcnt_list = sorted(wordcnt_dict.items(),key = lambda x:x[1], reverse=True)
idx = 0
word_index = 3
for item in wordcnt_list:
word_index_dict[item[0]] = word_index
#if idx <= 100:
# print('word: ', item[0], 'word_cnt: ', item[1], 'word_index: ', word_index)
word_index += 1
idx += 1
return word_index_dict
def encode_train_data(self, file_name, sample_num, word_index_dict, word_num, max_len):
lenp = len(range(0,sample_num))
train_data = [0]*lenp
train_labels = [0]*lenp
train_sequences = [0]*lenp
idx = 0
with open(file_name) as fp:
lines = fp.readlines()
for line in lines:
label,desc=line.split("@@@@@@@@@@")[0],line.split("@@@@@@@@@@")[1]
train_labels[idx] = int(label)
data = []
seq_list = self.cut_word(desc)
for seq in seq_list:
if not seq in word_index_dict.keys():
data.append(2)
else:
if word_index_dict[seq] < word_num:
data.append(word_index_dict[seq])
else:
data.append(3)
train_data[idx] = data
idx += 1
fp.close()
train_sequences = preprocessing.sequence.pad_sequences(train_data, max_len)
return ([train_data,train_labels, train_sequences])
def load_need_pred_data(self, file_name, word_index_dict, word_num, max_len):
lenp = len(range(0,100000))
need_pred_data = [0]*lenp
need_pred_sequences = [0]*lenp
need_pred_apk = [0]*lenp
need_pred_desc = {}
idx = 0
with open(file_name) as fp:
lines = fp.readlines()
for line in lines:
if len(line.split("@@@@@@@@@@")) != 2:
print('lines: ', lines)
else:
apk,desc = line.split("@@@@@@@@@@")[0], line.split("@@@@@@@@@@")[1]
#print('apk: ', apk, 'desc: ', desc)
need_pred_desc[apk] = desc
need_pred_apk[idx] = apk
data = []
seq_list = self.cut_word(desc)
for seq in seq_list:
if not seq in word_index_dict.keys():
data.append(2)
else:
if word_index_dict[seq] < word_num:
data.append(word_index_dict[seq])
else:
data.append(3)
#print('idx:', idx, 'data: \n', data)
need_pred_data[idx] = data
idx += 1
fp.close()
#print('need_pred_data_len:\n', len(need_pred_data))
#print('need_pred_data[0]:\n', need_pred_data[0])
#print('need_pred_data[99]:\n', need_pred_data[99])
need_pred_apk = need_pred_apk[0:idx]
need_pred_sequences = preprocessing.sequence.pad_sequences(need_pred_data[0:idx], max_len)
print('pred_data len: ', len(need_pred_sequences))
return([need_pred_apk, need_pred_desc, need_pred_sequences])
def text_rnn_model(self, train_sequences, train_labels, word_num, embedding_dim, max_len, model_file):
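# Architecture: Embedding -> bidirectional LSTM -> 2-way softmax classifier, compiled with
# sparse categorical cross-entropy so the integer labels (0/1) can be used directly.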
input = Input((max_len,))
embedding = layers.Embedding(word_num, embedding_dim, input_length = max_len)(input)
bi_lstm = layers.Bidirectional(layers.LSTM(128))(embedding)
output = layers.Dense(2, activation='softmax')(bi_lstm)
model = Model(inputs = input, outputs = output)
print(model.summary())
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_sequences, train_labels, batch_size = 512, epochs = 5)
# save the trained model
model.save(model_file)
#input = Input((max_len,))
#embedding = layers.Embedding(word_num, embedding_dim, input_length=max_line_len)(input)
#convs = []
#for kernel_size in [ 3, 4, 5]:
# c = layers.Conv1D(128, kernel_size, activation='relu')(embedding)
# c = layers.GlobalMaxPooling1D()(c)
# convs.append(c)
#x = layers.Concatenate()(convs)
#output = layers.Dense(2, activation='softmax')(x)
#model = Model(inputs = input, outputs = output)
#print(model.summary())
#model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
#model.fit(train_sequences, train_labels, batch_size = 512, epochs = 5)
return(model)
def model(self, train_sequences, train_labels, word_num, embedding_dim):
model = tf.keras.Sequential()
model.add(layers.Embedding(word_num, embedding_dim))
model.add(layers.GlobalAveragePooling1D())
model.add(layers.Dense(128, activation=tf.nn.relu))
model.add(layers.Dense(2, activation='softmax'))
#model.add(layers.Dense(1))
print(model.summary())
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(train_sequences, train_labels, batch_size = 512, epochs = 10)
return model
def predict_with_model_file(self, model_file, need_pred_sequences):
model = tf.keras.models.load_model(model_file)
pred_result = model.predict(need_pred_sequences)
#print('predict_result: ', pred_result, pred_result.shape)
print('predict_result.shape: ', pred_result.shape)
return(pred_result)
def predict_new(self, model, need_pred_sequences):
pred_result = model.predict(need_pred_sequences)
#print('predict_result: ', pred_result, pred_result.shape)
print('predict_result.shape: ', pred_result.shape)
return(pred_result)
def predict(self, file_name, model, need_pred_apk, need_pred_sequences):
idx = 0
with open(file_name, "w") as fp:
for sequence in need_pred_sequences:
data = [0]*1
data[0] = sequence
pred_result = model.predict(data)
if idx <= 2:
print('idx: ', idx,'apk: ', need_pred_apk[idx], 'sequences: ', len(data),sequence)
print('predict_result: ', pred_result, pred_result.shape)
idx += 1
fp.close()
def save_predict_result(self, file_name, need_pred_apk, need_pred_desc, predict_result):
with open(file_name, "w") as fp:
for idx in range(0,len(need_pred_apk)):
apk = need_pred_apk[idx]
if apk in need_pred_desc.keys():
desc = need_pred_desc[apk]
white_pred_score = predict_result[idx][0]
black_pred_score = predict_result[idx][1]
fp.write("%.3f\t%s\t%s" % (black_pred_score, apk, desc))
fp.close()
def print_data(self, train_data, train_labels, train_sequences):
print('train len: ', len(train_data), len(train_labels), len(train_sequences))
for idx in range(0,3):
print('train_data: \n', len(train_data[idx]), train_data[idx])
print('train_sequences: \n', len(train_sequences[idx]), train_sequences[idx])
print('train_labels: \n', train_labels[idx])
if __name__ == '__main__':
app_name_tag = TextRnnTag('text rnn model')
print('load train_data file')
black_num,white_num,wordcnt_dict = app_name_tag.load_jiedai_data("../train_data.txt")
print("black_num: ", black_num, "white_num: ", white_num, "word_cnt: ", len(wordcnt_dict))
word_index_dict = app_name_tag.encode_word(wordcnt_dict)
word_num = 10000
embedding_dim = 100
max_len = 256
max_line_len = 1000000
model_file = 'text_rnn.model'
sample_num = black_num + white_num
train_data,train_labels,train_sequences = app_name_tag.encode_train_data("../train_data.txt", sample_num, word_index_dict, word_num, max_len)
app_name_tag.print_data(train_data, train_labels, train_sequences)
model = app_name_tag.text_rnn_model(train_sequences, train_labels, word_num, embedding_dim, max_len, model_file)
#model = app_name_tag.model(train_sequences, train_labels, word_num, embedding_dim)
need_pred_apk,need_pred_desc,need_pred_sequences = app_name_tag.load_need_pred_data("../need_pred_data.txt", word_index_dict, word_num, max_len)
#predict_result = app_name_tag.predict_with_model_file(model_file, need_pred_sequences)
predict_result = app_name_tag.predict_new(model, need_pred_sequences)
app_name_tag.save_predict_result("predict_result.txt", need_pred_apk, need_pred_desc, predict_result)
|
[
"noreply@github.com"
] |
crespo18.noreply@github.com
|
2837c0365e573379cc39e4415dca0ab8792aed7e
|
a54e2d0b5edb4be2a1cb676b124f6f3b18d02728
|
/hxzxLogin.py
|
7bd2d090b5b50e796ffe16a1e931c5bf5cd9b73d
|
[] |
no_license
|
lz023231/untitled
|
1161baec0a311bdc782ca5f373a9b17e2bdb9db0
|
9a43d2b428eec41d13559f22c6f08e5367cf9fc2
|
refs/heads/master
| 2020-09-10T23:12:25.643632 | 2020-03-16T09:12:55 | 2020-03-16T09:12:55 | 221,407,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,057 |
py
|
from selenium.webdriver.common.keys import Keys
import time
import re
import requests
import pytesseract
from PIL import Image,ImageEnhance
from selenium import webdriver
from selenium.webdriver.common.by import By
class Login():
def login(self, driver, username, password):
driver.find_element_by_xpath('//*[(@id = "username")]').clear()
driver.find_element_by_xpath('//*[(@id = "username")]').send_keys(username)
driver.find_element_by_xpath('//*[(@id = "passwd")]').clear()
driver.find_element_by_xpath('//*[(@id = "passwd")]').send_keys(password)
#screenImg = "C:/image/screenImg.png"
#driver.find_element_by_name("username").send_keys(username)
#driver.find_element_by_name("username").send_keys(Keys.TAB)
#driver.find_element_by_name("password").clear()
#driver.find_element_by_name("password").send_keys(password)
#driver.find_element_by_name("password").send_keys(Keys.TAB)
#driver.find_element_by_xpath('//div[contains(text(),"登 录")]').click()
|
[
"1449775115@qq.com"
] |
1449775115@qq.com
|
52de1d8032b9889325355b2972e6a94348c16981
|
23f59d8c524be424bd5d5b8047f22341c769fd3e
|
/Week 02/id_624/LeetCode_105_624.py
|
1010d927a4eff43a32d927da021088efcc84d881
|
[] |
no_license
|
cboopen/algorithm004-04
|
59ef7609eb0f8d5f36839c546b0943e84d727960
|
f564806bd8e18831eeb20f2fd4bdd2d4aaa829ce
|
refs/heads/master
| 2020-08-11T12:30:04.843364 | 2019-12-08T13:21:38 | 2019-12-08T13:21:38 | 214,565,309 | 2 | 0 | null | 2019-10-12T02:44:08 | 2019-10-12T02:44:08 | null |
UTF-8
|
Python
| false | false | 1,258 |
py
|
#
# @lc app=leetcode.cn id=105 lang=python3
#
# [105] Construct Binary Tree from Preorder and Inorder Traversal
#
# https://leetcode-cn.com/problems/construct-binary-tree-from-preorder-and-inorder-traversal/description/
#
# algorithms
# Medium (61.89%)
# Likes: 261
# Dislikes: 0
# Total Accepted: 28K
# Total Submissions: 45.1K
# Testcase Example: '[3,9,20,15,7]\n[9,3,15,20,7]'
#
# Construct a binary tree from its preorder and inorder traversals.
#
# Note:
# You may assume that the tree contains no duplicate elements.
#
# For example, given
#
# preorder = [3,9,20,15,7]
# inorder = [9,3,15,20,7]
#
# return the following binary tree:
#
# 3
# / \
# 9 20
# / \
# 15 7
#
#
# @lc code=start
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def buildTree(self, preorder: [int], inorder: [int]) -> TreeNode:
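# The first preorder element is the root of the current subtree; its index in the
# inorder list splits the remaining values into the left and right subtrees.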
if inorder:
index = inorder.index(preorder.pop(0))
root = TreeNode(inorder[index])
root.left = self.buildTree(preorder, inorder[0:index])
root.right = self.buildTree(preorder, inorder[index+1:])
return root
# @lc code=end
|
[
"haozhenyi@58.com"
] |
haozhenyi@58.com
|
ce380d3589392eb45c41c9531c47bd45cd60d350
|
31d43b73e8104cd8aef3d97e39666022f2946223
|
/run_all_banim.py
|
ef93f88c9cad345baeb4a1f638669943af4a7b6e
|
[] |
no_license
|
kgelber1/SSX-Python
|
2ed6b5e6b7b3775779464a7f624a70155ec8f657
|
4f5cded3acec68e24206af90ef5611db9adb1ac3
|
refs/heads/master
| 2020-06-24T07:08:33.486962 | 2019-10-24T18:11:18 | 2019-10-24T18:11:18 | 198,890,544 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 829 |
py
|
from __future__ import division, print_function, absolute_import
import anim_bfield_merging_nBT as an
import anim_bfield_merging as a
import numpy as np
def main():
"""Just a place to specify variables"""
day ='073019'
first_shot = 12
# last_shot = 44
last_shot = 43
bad_shots = [27]
all_shots = np.arange(first_shot,last_shot+1)
shots = [shot for shot in all_shots if shot not in bad_shots]
sample_Freq = 5# sampling frequency - turn up for faster animations
t0 = 20
tf = 60
for shot in shots:
# will save each file in the Analyzed folder.
print("shot", shot)
try:
an.run(day, shot, t0, tf, sample_Freq, show = False)
except:
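# fall back to the basic B-field animation if the nBT version fails for this shot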
a.run(day, shot, t0, tf, sample_Freq, show = False)
if __name__ == '__main__':
main()
|
[
"kgelber1@swarthmore.edu"
] |
kgelber1@swarthmore.edu
|
12ef2b38944a242344c1c84f46b2cbb486d937cf
|
b8dc89452b3c42a38e027d0344ba13f2850563cd
|
/Models/model-NN-reg-all.py
|
d2916e4d4428d977520fc8a30f3bd15445dd3786
|
[] |
no_license
|
sn06/Stock_Efficiency_Fundamental
|
c925d2bd55ad0fa8ca1fe16d62490052e7cc4c29
|
1e1abba14a1fc97ab3263c9abe5fe01520c1fa50
|
refs/heads/master
| 2020-03-29T13:45:02.453979 | 2018-09-23T12:03:00 | 2018-09-23T12:03:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,785 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 05 19:56:14 2018
@author: sn06
"""
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import date
from sklearn import preprocessing
from sklearn.metrics import r2_score
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import Adam
le = preprocessing.LabelEncoder()
adm = Adam(lr=0.0001)
def encode(y):
out = []
for i in y:
if i == 0:
out.append([0,1,0])
elif i == -1:
out.append([1,0,0])
elif i == 1:
out.append([0,0,1])
out = np.array(out)
return out
def decode(y):
out = []
for i in y:
if (i == [0,1,0]).all():
out.append(0)
elif (i == [1,0,0]).all():
out.append(-1)
elif (i == [0,0,1]).all():
out.append(1)
out = np.array(out)
return out
def decode_classes(y):
out = []
for i in y:
if i == 0:
out.append(-1)
elif i == 1:
out.append(0)
elif i == 2:
out.append(1)
out = np.array(out)
return out
def create_model():
model = Sequential()
model.add(Dense(280,input_dim=X_train.shape[1],activation='relu'))
model.add(Dense(280,activation='relu'))
model.add(Dense(280,activation='relu'))
model.add(Dense(280,activation='relu'))
model.add(Dense(280,activation='relu'))
model.add(Dense(1,activation='relu'))
model.compile(optimizer=adm, loss='mean_squared_error', metrics=['mae'])
return model
def remove_anomalies():
finaldata = pd.DataFrame(columns=data.columns)
for j in data['Company'].unique():
data_comp = data[data['Company']==j]
print(j)
for k in data['Quarter end'].unique():
data_comp = data_comp[data_comp['Quarter end']==k]
for i in list(data_comp):
if i=='PriceChange':
break
if data_comp[i].dtype!='object':
if data_comp[i].dtype!='int64':
quantile_val = data_comp[i].dropna().quantile(0.999)
if quantile_val > 0:
data_comp = data_comp[data_comp[i] < quantile_val]
finaldata = finaldata.append(data_comp)
finaldata.to_csv('finaldata.csv')
data = finaldata.copy()
del(finaldata)
data = pd.read_csv('2018-08-17-AllCompanyQuarterly.csv')
data['Quarter end'] = pd.to_datetime(data['Quarter end'])
data['Quarter end'] = data['Quarter end'].dt.date
data = data[data['Company']!='BRK.A']
data = data[data['Company']!='RRD']
data['NextPrice']=data.groupby('Company')['Price'].shift(-1)
data = data.sort_values(by=['Company','Quarter end'])
data = data.fillna(data.interpolate())
data = data.dropna(subset=['Shares-6'])
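# Walk-forward evaluation: for each quarter (q, w), train on all data strictly before that
# quarter and predict the next-quarter price for the companies reporting in that quarter.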
for q in range(2002,2018):
for w in [1,4,7,10]:
if w==1:
ww = 7
qq = q-2
if w==4:
ww = 10
qq = q-2
if w==7:
ww = 1
qq = q-1
if w==10:
ww = 4
qq = q-1
datatrain = data[data['Quarter end'] < date(q,w,1)]
#datatrain = datatrain[datatrain['Quarter end'] >= date(qq,ww,1)]
datatrain = datatrain.drop(['Quarter end','Company','PriceChange','Shares'],axis=1)
for i in range(1,7):
datatrain = datatrain.drop(['Quarter end-%s' % i],axis=1)
datatrain = datatrain.drop(['Company-%s' % i],axis=1)
datatrain = datatrain.drop(['PriceChange-%s' % i],axis=1)
datatrain = datatrain.drop(['Buy/Sell-%s' % i],axis=1)
datatrain = datatrain.drop(['Shares-%s' % i],axis=1)
datatrain = datatrain.drop(datatrain.columns[0],axis=1)
datatrain = datatrain.dropna()
datatest = data[data['Quarter end'] <= date(q,w,1)]
datatest = datatest[datatest['Quarter end'] >= date(q,w,1)]
testcompany= datatest[['Quarter end','Company','PriceChange']]
datatest = datatest.drop(['Quarter end','Company','PriceChange','Shares'],axis=1)
for i in range(1,7):
datatest = datatest.drop(['Quarter end-%s' % i],axis=1)
datatest = datatest.drop(['Company-%s' % i],axis=1)
datatest = datatest.drop(['PriceChange-%s' % i],axis=1)
datatest = datatest.drop(['Buy/Sell-%s' % i],axis=1)
datatest = datatest.drop(['Shares-%s' % i],axis=1)
datatest = datatest.drop(datatest.columns[0],axis=1)
datatest = datatest.dropna()
X_train = datatrain.drop(['Buy/Sell','NextPrice'],axis=1)
X_train = X_train.values
y_train = datatrain[['NextPrice']].values
y_train = y_train[:,0]
X_test = datatest.drop(['Buy/Sell','NextPrice'],axis=1)
X_test = X_test.values
y_test = datatest['NextPrice'].values
scaler = preprocessing.StandardScaler().fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
model = create_model()
history = model.fit(X_train,y_train,validation_data=(X_test,y_test),epochs=60,verbose=0)
y_pred = model.predict(X_test)
testout = datatest.merge(testcompany,left_index=True,right_index=True)
testout['y_pred'] = y_pred
testout['y_test']=y_test
testout['y_test'][testout['y_test']==0] = 0.1
testout['y_size'] = testout['y_pred'] / testout['Price']
testout['y_size'][testout['y_pred']<testout['Price']] = testout['Price'] / testout['y_pred']
testout = testout[testout['y_pred']<100]
print('%s-%s' % (q,w))
print(r2_score(testout['y_test'],testout['y_pred']))
plt.plot(history.history['mean_absolute_error'],label='mae')
plt.plot(history.history['val_mean_absolute_error'],label='v_mae')
plt.legend()
plt.show()
plt.scatter(testout['y_test'].values,testout['y_pred'].values)
plt.show()
testout = testout[['Quarter end','Company','Price','PriceChange','Shares split adjusted','y_test','y_pred','y_size']]
testwin = testout.copy()
testwin['y_pred']=testwin['y_test']
testwin['y_size'] = testwin['y_pred'] / testwin['Price']
testwin['y_size'][testwin['y_pred']<testwin['Price']] = testwin['Price'] / testwin['y_pred']
testout.to_csv('testout-NN-reg-%s-%s.csv' % (q,w))
testwin.to_csv('testwin-WIN-reg-%s-%s.csv' % (q,w))
|
[
"noreply@github.com"
] |
sn06.noreply@github.com
|
95f80931ce3e1950630bdcb0e3f55094940a17ea
|
772c8cba17bcb20e8b143c17f7858e028c6b7890
|
/backend/src/lambdas/http/getPlayer.py
|
9f9b178e38fb0d372a92c2c7ade4889b9b99db03
|
[] |
no_license
|
carlos4ndre/xadrez
|
9ff22ca09639b73d15e4358f8403e17a4867c828
|
853a8c629715b9f3280ca9283e95eceebe7ceef4
|
refs/heads/master
| 2023-02-16T11:41:03.308833 | 2021-05-08T19:29:28 | 2021-05-08T21:41:44 | 222,162,846 | 2 | 0 | null | 2023-01-24T01:00:51 | 2019-11-16T21:42:10 |
TypeScript
|
UTF-8
|
Python
| false | false | 751 |
py
|
import logging
from src.helpers.aws import create_aws_lambda_response
from src.bussiness_logic.player import get_player
logger = logging.getLogger(__name__)
def handler(event, context):
logger.info("Parse event")
data, err = parse_event(event)
if err:
return create_aws_lambda_response(500, err)
player_id = data["id"]
logger.info("Get player")
player, err = get_player(player_id)
if err:
return create_aws_lambda_response(500, err)
return create_aws_lambda_response(200, {"player": player})
def parse_event(event):
try:
data = {"id": event["pathParameters"]["id"]}
return data, ""
except KeyError as e:
logger.error(e)
return {}, "Failed to parse event"
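# Expected event shape (illustrative): {"pathParameters": {"id": "<player id>"}}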
|
[
"carlos.ulrich@gmail.com"
] |
carlos.ulrich@gmail.com
|
039c153355a6c5ae71a3ea378489b51370de832a
|
be474fede1befd306ff40b99b0941832ef358b06
|
/setup.py
|
738abc2333b1b8488c194521c75e09b4102d1cdd
|
[] |
no_license
|
tomcusack1/peer
|
0ce26ac30212181e035a3620747fb10757907149
|
39ade61afb22756d337aecc7d3619f012543634f
|
refs/heads/develop
| 2020-03-29T14:00:15.188911 | 2018-09-23T15:15:44 | 2018-09-23T15:15:44 | 149,993,884 | 2 | 0 | null | 2018-09-23T15:16:27 | 2018-09-23T14:48:55 |
Python
|
UTF-8
|
Python
| false | false | 212 |
py
|
from setuptools import setup
setup(
name='peer',
version='0.0.1',
description='',
author='Tom Cusack',
author_email='tom@cusack-huang.com',
packages=['peer'],
install_requires=[],
)
|
[
"tom@tom-cusack.com"
] |
tom@tom-cusack.com
|
d261e5485de52a7c82d1b984a5572442fe270d2e
|
1186a5add1e1d5688f2de34980fbb8bfbb0f07a7
|
/onlineshop/onlineshop/urls.py
|
45075ce5732e4a6c8afca7909c96634e71e7fd16
|
[] |
no_license
|
wzj1143/Einkaufsseit
|
9c381ba0ca04552f16a426c688f1207750b6c8d3
|
a2c528881f943ae510033ea3b46509704ec718ba
|
refs/heads/master
| 2023-02-28T18:10:52.574042 | 2021-02-10T08:00:45 | 2021-02-10T08:00:45 | 314,256,996 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,455 |
py
|
"""onlineshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.views.static import serve
from django.contrib import admin
from django.urls import path, re_path
from django.conf import settings
from django.conf.urls.static import static
from Einkaufswagen import views
from Einkaufswagen.views import Wagen_legen, Wagen_Seite, Waren_delete, Bestellung_abgeben_Seite, \
Bestellung_abegeben_fertig, Bestellung_erfolgreich
from Users.views import User_Register, User_abmelden, User_anmelden, user_Bestellungen
from Waren.views import index, Waren_Seite, Waren_katg, angemeldete_homepage
urlpatterns = [
path('', views.index),
path('admin/', admin.site.urls),
# Homepage, not logged in
re_path(r'^index/$', index),
# Waren_Seite
re_path(r'^Waren_Seite/$', Waren_Seite),
# Add an item to the shopping cart
re_path(r'^Wagen_legen/$', Wagen_legen),
# Category (Waren_katg) page
re_path(r'^Waren_katg/$', Waren_katg),
# Shopping cart page
re_path(r'^Wagen_Seite/$', Wagen_Seite),
# Delete an item from the shopping cart
re_path(r'^Waren_delete/$', Waren_delete),
# Place-order page (recipient information is still empty)
re_path(r'^Bestellung_abgeben_Seite/$', Bestellung_abgeben_Seite),
# Order has been placed (recipient information is saved)
re_path(r'^Bestellung_abegeben_fertig/$', Bestellung_abegeben_fertig),
# Show the order-success page
re_path(r'^Bestellung_erfolgreich/$', Bestellung_erfolgreich),
# User registration
re_path(r'^User_Register/$', User_Register),
# User login
re_path(r'^User_anmelden/$', User_anmelden),
# User logout
re_path(r'^User_abmelden/$', User_abmelden),
# User orders
re_path(r'^user_Bestellungen/$', user_Bestellungen),
re_path(r'^static/(?P<path>.*)$', serve, {'document_root': settings.STATIC_ROOT}, name='static'),
]
|
[
"zwang@campus.uni-paderborn.de"
] |
zwang@campus.uni-paderborn.de
|
895a6ff291a61e66f00fd311bf599cf8fdb80ba1
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-dataartsstudio/huaweicloudsdkdataartsstudio/v1/model/list_workspaceusers_request.py
|
03fe5cb9797831a7a33f080679f078e6c5bedd22
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 4,633 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListWorkspaceusersRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'workspace_id': 'str',
'limit': 'str',
'offset': 'str'
}
attribute_map = {
'workspace_id': 'workspace_id',
'limit': 'limit',
'offset': 'offset'
}
def __init__(self, workspace_id=None, limit=None, offset=None):
"""ListWorkspaceusersRequest
The model defined in huaweicloud sdk
:param workspace_id: Workspace ID
:type workspace_id: str
:param limit: Maximum number of records to return
:type limit: str
:param offset: Offset into the result set
:type offset: str
"""
self._workspace_id = None
self._limit = None
self._offset = None
self.discriminator = None
self.workspace_id = workspace_id
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
@property
def workspace_id(self):
"""Gets the workspace_id of this ListWorkspaceusersRequest.
Workspace ID
:return: The workspace_id of this ListWorkspaceusersRequest.
:rtype: str
"""
return self._workspace_id
@workspace_id.setter
def workspace_id(self, workspace_id):
"""Sets the workspace_id of this ListWorkspaceusersRequest.
Workspace ID
:param workspace_id: The workspace_id of this ListWorkspaceusersRequest.
:type workspace_id: str
"""
self._workspace_id = workspace_id
@property
def limit(self):
"""Gets the limit of this ListWorkspaceusersRequest.
Maximum number of records to return
:return: The limit of this ListWorkspaceusersRequest.
:rtype: str
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListWorkspaceusersRequest.
Maximum number of records to return
:param limit: The limit of this ListWorkspaceusersRequest.
:type limit: str
"""
self._limit = limit
@property
def offset(self):
"""Gets the offset of this ListWorkspaceusersRequest.
Offset into the result set
:return: The offset of this ListWorkspaceusersRequest.
:rtype: str
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListWorkspaceusersRequest.
Offset into the result set
:param offset: The offset of this ListWorkspaceusersRequest.
:type offset: str
"""
self._offset = offset
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListWorkspaceusersRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
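# Minimal usage sketch (illustrative; the workspace id value is hypothetical):
#   request = ListWorkspaceusersRequest(workspace_id='my-workspace-id', limit='10', offset='0')
#   print(request.to_dict())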
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
a35ab8bb6703f50081e6b72e486a8a6e088e5397
|
08dec3427326ce10e694aa7f27ac4a3a47c1a7b4
|
/zulip_bots/zulip_bots/bots/merels/test/test_interface.py
|
a8e571d64c87fe42d4a5cf3fc2dd8085ff8c9bd0
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0"
] |
permissive
|
Soumi7/python-zulip-api
|
967b289eb635a5a8c339d9a9109de8200ca3e126
|
dffc09fe9f60f7dac277bb65e476138fa1d620fe
|
refs/heads/master
| 2023-04-07T23:59:54.655885 | 2020-04-27T07:35:44 | 2020-04-27T07:35:44 | 257,053,623 | 1 | 0 |
Apache-2.0
| 2023-04-04T00:25:13 | 2020-04-19T16:55:24 |
Python
|
UTF-8
|
Python
| false | false | 3,180 |
py
|
import unittest
from libraries import interface
class BoardLayoutTest(unittest.TestCase):
def test_empty_layout_arrangement(self):
grid = interface.construct_grid("NNNNNNNNNNNNNNNNNNNNNNNN")
self.assertEqual(interface.graph_grid(grid), '''` 0 1 2 3 4 5 6
0 [ ]---------------[ ]---------------[ ]
| | |
1 | [ ]---------[ ]---------[ ] |
| | | | |
2 | | [ ]---[ ]---[ ] | |
| | | | | |
3 [ ]---[ ]---[ ] [ ]---[ ]---[ ]
| | | | | |
4 | | [ ]---[ ]---[ ] | |
| | | | |
5 | [ ]---------[ ]---------[ ] |
| | |
6 [ ]---------------[ ]---------------[ ]`''')
def test_full_layout_arrangement(self):
grid = interface.construct_grid("NXONXONXONXONXONXONXONXO")
self.assertEqual(interface.graph_grid(grid), '''` 0 1 2 3 4 5 6
0 [ ]---------------[X]---------------[O]
| | |
1 | [ ]---------[X]---------[O] |
| | | | |
2 | | [ ]---[X]---[O] | |
| | | | | |
3 [ ]---[X]---[O] [ ]---[X]---[O]
| | | | | |
4 | | [ ]---[X]---[O] | |
| | | | |
5 | [ ]---------[X]---------[O] |
| | |
6 [ ]---------------[X]---------------[O]`''')
def test_illegal_character_arrangement(self):
grid = interface.construct_grid("ABCDABCDABCDABCDABCDXXOO")
self.assertEqual(interface.graph_grid(grid), '''` 0 1 2 3 4 5 6
0 [ ]---------------[ ]---------------[ ]
| | |
1 | [ ]---------[ ]---------[ ] |
| | | | |
2 | | [ ]---[ ]---[ ] | |
| | | | | |
3 [ ]---[ ]---[ ] [ ]---[ ]---[ ]
| | | | | |
4 | | [ ]---[ ]---[ ] | |
| | | | |
5 | [ ]---------[ ]---------[X] |
| | |
6 [X]---------------[O]---------------[O]`''')
class ParsingTest(unittest.TestCase):
def test_consistent_parse(self):
boards = ["NNNNOOOOXXXXNNNNOOOOXXXX",
"NOXNXOXNOXNOXOXOXNOXONON",
"OOONXNOXNONXONOXNXNNONOX",
"NNNNNNNNNNNNNNNNNNNNNNNN",
"OOOOOOOOOOOOOOOOOOOOOOOO",
"XXXXXXXXXXXXXXXXXXXXXXXX"]
for board in boards:
self.assertEqual(board, interface.construct_board(
interface.construct_grid(
interface.construct_board(
interface.construct_grid(board)
)
)
)
)
|
[
"robhoenig@gmail.com"
] |
robhoenig@gmail.com
|
fb387c1fd927c61dc479cb1b8fee7dd4729268d6
|
a73635203629fef49ac22b8c9fabb2e6728f502f
|
/connect.py
|
f16adb45cfec42e0ccbdca8768b8890dfc93c606
|
[] |
no_license
|
megavas/PersonalAutoparking
|
82cab852b459f3f2e916006b760872317fe933f8
|
d7c499d9f14424748d2b8dfc20ea5708301e14e7
|
refs/heads/main
| 2023-08-24T20:49:33.475324 | 2021-09-23T04:59:25 | 2021-09-23T04:59:25 | 409,298,104 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 936 |
py
|
import psycopg2
from config import config
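# config() is assumed to return a dict of libpq connection parameters (e.g. host, database, user, password)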
def connect(query):
""" Connect to the PostgreSQL database server """
conn = None
try:
# read connection parameters
params = config()
# connect to the PostgreSQL server
print('Connecting to the PostgreSQL database...')
conn = psycopg2.connect(**params)
# create a cursor
cur = conn.cursor()
# execute a statement
print('PostgreSQL database version:')
cur.execute(query)
# display the PostgreSQL database server version
db_version = cur.fetchall()
print(db_version)
# close the communication with the PostgreSQL
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
print('Database connection closed.')
if __name__ == '__main__':
connect('SELECT version()')  # connect() expects a query; version() matches the prints above
|
[
"megavas@outlook.com"
] |
megavas@outlook.com
|
a80bce838dadbf03c0eef1018dc976737190f523
|
af5e059dfab3b81ba349f7d2c1206ef3f2dba88f
|
/Solutions/Objective 03 - Portfolio grade problem.py
|
3a01bf12f3a834b224ae81024e2969543df84686
|
[] |
no_license
|
Wither-Bane/intro-to-python
|
e5518f341589a58a8a54f115888d68af2db069cc
|
61233f32bd7edca2d23db76f9cdaaa4c8c980983
|
refs/heads/master
| 2022-12-31T04:18:53.758872 | 2020-10-24T19:24:53 | 2020-10-24T19:24:53 | 297,266,660 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,516 |
py
|
#Objective 4 - Portfolio grade challenge
analysis = int(input("Enter the analysis mark:"))
design = int(input("Enter the design mark:"))
implementation = int(input("Enter the implementation mark:"))
evaluation = int(input("Enter the evaluation mark:"))
total = analysis + design + implementation + evaluation
if total < 2:
print("Grade: U")
print("You needed",2 - total,"more marks to get to the next mark band.")
if total >= 2 and total < 4:
print("Grade: 1")
print("You needed",4 - total,"more marks to get to the next mark band.")
if total >= 4 and total < 13:
print("Grade: 2")
print("You needed",13 - total,"more marks to get to the next mark band.")
if total >= 13 and total < 22:
print("Grade: 3")
print("You needed",22 - total,"more marks to get to the next mark band.")
if total >= 22 and total < 31:
print("Grade: 4")
print("You needed",31 - total,"more marks to get to the next mark band.")
if total >= 31 and total < 41:
print("Grade: 5")
print("You needed",41 - total,"more marks to get to the next mark band.")
if total >= 41 and total < 54:
print("Grade: 6")
print("You needed",54 - total,"more marks to get to the next mark band.")
if total >= 54 and total < 67:
print("Grade: 7")
print("You needed",67 - total,"more marks to get to the next mark band.")
if total >= 67 and total < 80:
print("Grade: 8")
print("You needed",80 - total,"more marks to get to the next mark band.")
if total >= 80:
print("Grade: 9")
|
[
"dipo106@gmail.com"
] |
dipo106@gmail.com
|
b5bfc185e3c0e76fb33a254d444155ab0931f2c8
|
f723b36a64d7c5ccd2a4937d02f05279fc9e907c
|
/calls/urls.py
|
48317b35fa4b2d6bacf2ee72c3c3734774b5c08e
|
[] |
no_license
|
DmitrySham/grand-django-site
|
92259098d209954ee5f5c994989f6c1f7c9826f4
|
e65988c441e9fb37fd15126d28301c47643b501d
|
refs/heads/master
| 2023-01-22T08:37:08.921212 | 2023-01-13T15:05:30 | 2023-01-13T15:05:30 | 184,014,992 | 0 | 0 | null | 2022-12-04T20:45:03 | 2019-04-29T06:44:37 |
JavaScript
|
UTF-8
|
Python
| false | false | 145 |
py
|
from django.urls import path
from calls import views
urlpatterns = [
path('ajax/call/request/', views.call_request, name='calls_request')
]
|
[
"tggrmi@gmail.com"
] |
tggrmi@gmail.com
|
13a72a827d1ac449f36e7b71d4401cc38f34e16e
|
2e4c0c2dfc17156b293dd70fe03587351e6b7da9
|
/src/execute_setting.py
|
cbb33da75b01a41cf89d1eef888465202249c3c2
|
[] |
no_license
|
MarCheMath/thesis-code
|
7f76f410976396daeeba75e1fe53c37407965a77
|
0fb2883dcdf2ae1b92f180fb2eb13311e1df8255
|
refs/heads/master
| 2020-09-17T08:21:12.812587 | 2019-11-25T21:35:16 | 2019-11-25T21:35:16 | 224,050,315 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,140 |
py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import os
from argparse import ArgumentParser
import time
import itertools
def main(hparams):
base = [
" --num-input-images 64",
" --batch-size 64",
" --mloss1_weight 0.0",
" --mloss2_weight 1.0",
" --dloss1_weight 0",
" --dloss2_weight 0.0",
" --lmbd 0",
" --sparsity 0",
" --optimizer-type adam",
" --momentum 0.9",
" --save-images",
" --save-stats",
" --print-stats",
" --checkpoint-iter 1",
" --image-matrix 0",
]
submit_mode = hparams.submit_mode
qsub_time = hparams.qsub_time
del hparams.submit_mode
del hparams.qsub_time
fields = {field:getattr(hparams,field) for field in dir(hparams) if not field.startswith('_')}
fields = {v:field if type(field)==list else [field] for v,field in fields.iteritems() }
b = ''.join(base)
for setting in itertools.product(*fields.values()):
setting = dict(zip(fields.keys(),setting))
head = ''.join(['--'+str(v1).replace('_','-')+' ' +str(v2)+' ' for v1,v2 in setting.iteritems()])
#head=head.replace('_','-')
if submit_mode == 'qsub':
head = head.replace(" '",""" "'""")
head = head.replace("' ","""'" """)
print(head)
string = './src/compressed_sensing_mod.py'+ b+' '+head
ex_string = 'python -u '+string
print(submit_mode)
if submit_mode == 'tmux':
print('tmux new-session -d '+ex_string)
os.system('tmux new-session -d '+ex_string)
elif submit_mode == 'qsub':
# print("qsub -cwd -N 'CS_VAE' -j y -l h_rt=7200 "+string)
# os.system("qsub -cwd -N 'CS_VAE' -j y -l h_rt=7200 "+string)
print("qsub -cwd -N 'CS_VAE' -j y -l h_rt={} ".format(qsub_time)+string)
os.system("echo "+string+ " | qsub -cwd -N 'CS_VAE' -j y -l h_rt={}".format(qsub_time))
elif submit_mode == 'cluster':
print("Cluster "+ex_string)
os.system("Cluster "+ex_string)
elif submit_mode == 'vanilla':
print(ex_string)
os.system(ex_string)
else:
raise NotImplementedError
#time.sleep(3)#For batch systems, which are not well configured
# print(string)
# os.system(string)
if __name__ == '__main__':
PARSER = ArgumentParser()
PARSER.add_argument('--submit-mode', type=str, default='tmux', help='Selected process mode')
PARSER.add_argument('--n-z', type=int, nargs = '+', default=-1, help='hidden dimension n_z')
PARSER.add_argument('--zprior-weight', type=float, nargs = '+', default=0, help='hidden dimension n_z')
PARSER.add_argument('--stdv', type=float, nargs = '+', default=1, help='hidden dimension n_z')
PARSER.add_argument('--mean', type=float, nargs = '+', default=0, help='hidden dimension n_z')
PARSER.add_argument('--max-update-iter', type=int, nargs = '+', default=1000, help='hidden dimension n_z')
PARSER.add_argument('--num-measurements', type=int, nargs = '+', default=100, help='hidden dimension n_z')
PARSER.add_argument('--measurement-type', type=str, nargs = '+', default="'gaussian'", help='hidden dimension n_z')
PARSER.add_argument('--model-types', type=str, nargs = '+', default="'vae'", help='hidden dimension n_z')
PARSER.add_argument('--num-random-restarts', type=int, nargs = '+', default=10, help='hidden dimension n_z')
PARSER.add_argument('--pretrained-model-dir', type=str, nargs = '+', default='./mnist_vae/models/mnist-vae/mnist-vae-flex-100/', help='directory to pretrained model')
PARSER.add_argument('--grid', type=str, nargs = '+', default="NoGrid", help='directory to pretrained model')
PARSER.add_argument('--eps', type=float, default=0.01, nargs ='+', help='eps for measurement for flex vae (weighted with norm of A)')
PARSER.add_argument('--qsub-time', type=int, default=50000, help='Time for qsub')
PARSER.add_argument('--tol', type=int, default=5, help='tolerance for binary search in vae flex')
PARSER.add_argument('--init-mode', type=str, default='random', help='mode for the initialization in estimator')
PARSER.add_argument('--flex-chosen', type=str, nargs = '+',default='flexible', help='fixed dimension of the VAE flex (e.g. good for projection)')
PARSER.add_argument('--use-gpu', action='store_true', help='Whether to use GPUs')
PARSER.add_argument('--lmbd', type=float, default=0.0, help='Whether to use GPUs')
PARSER.add_argument('--lasso-solver', type=str, default='sklearn', help='Solver for LASSO')
PARSER.add_argument('--tv-or-lasso-mode', type=str, default='nada', nargs = '+', help='cvxopt-constr, cvxopt-reg,cvxopt-reg-param10, fista')
PARSER.add_argument('--batch-size', type=int, default=64, help='Batch size of images')
PARSER.add_argument('--reproducible', type=str, default='None', help='Whether the measurement matrix A is drawn with fixed seed')
PARSER.add_argument('--omp-k', type=int, default=300, help='Orthogonal Matching Pursuit sparsity parameter')
PARSER.add_argument('--noise-std', type=float, default=0.0, nargs = '+', help='std dev of noise')
PARSER.add_argument('--kterm', type=int, default=-1, nargs = '+', help='For representation system to make incomplete')
PARSER.add_argument('--input-type', type=str, default='full-input', nargs = '+', help='input type')
PARSER.add_argument('--dataset', type=str, default='mnist', nargs = '+', help='Dataset to use')
PARSER.add_argument('--emd-bol', type=str, default = 'True', help='emd loss logged')
PARSER.add_argument('--tolerance-checking', type=str, default='non-squared', nargs = '+', help='Tolerance checking w.r.t. euclidian norm or squared euclidian norm')
PARSER.add_argument('--strict-checking', type=str, default='strict', nargs = '+', help='When using alternating checking, use only the grid points')
PARSER.add_argument('--repetition-bol', type=str, default = 'False', nargs = '+', help='Whether to repeat generator training as many times as grid points')
PARSER.add_argument('--fair-counter', type=str, default='unequal', help='If and how many times the fixed version is reiterated to make up for the additional optimization')
PARSER.add_argument('--input-seed', type=str, default='no_seed', help='For random-test input mode fixes a seed')
PARSER.add_argument('--fair-counter-end', type=int, default=1, help='If and how many times the final iteration is reiterated to improve the optimization')
PARSER.add_argument('--learning-rate', type=float, default=0.1, help='learning rate')
PARSER.add_argument('--wavelet-type', type=str, default='db1selected',nargs = '+', help='Which wavelet type to use')
PARSER.add_argument('--matlab', type=str, default='nada', help='Wavelet case: Should use python generated or matlab generated wavelet systems')
PARSER.add_argument('--class-bol', type=str, default = 'True', help='emd loss logged')
HPARAMS = PARSER.parse_args()
main(HPARAMS)
|
[
"50325379+MarCheMath@users.noreply.github.com"
] |
50325379+MarCheMath@users.noreply.github.com
|
00fbc42cca0c3841578397974668d121651b7c39
|
4a1a33ad18d87dde4b13261e2cda21cdda959ee7
|
/app/api/wrappers/post.py
|
0956130b554c3cb0eb10b644236851be27ebf977
|
[] |
no_license
|
lazarusvc/microservices_template
|
8fb8e807576f3a72df3d11cdffc38d95eabbed5a
|
d50a6e9bee678ad84aff68fb0ac7308ad931367d
|
refs/heads/master
| 2023-04-28T00:59:45.867699 | 2017-07-31T17:52:18 | 2017-07-31T17:52:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 502 |
py
|
from flask import session
from app import db
from app.models import Api
#************************
#=> POST manager
class PostManager(object):
def __init__(self):
pass
@staticmethod
def post_req(request):
data = request.args.get('data')
meta_data = request.args.get('meta_data')
status = request.args.get('status')
post = Api( status, meta_data, data)
db.session.add(post)
db.session.commit()
return dict( data, meta_data, status)
|
[
"austin.lazarus@gmail.com"
] |
austin.lazarus@gmail.com
|
018026601e8c6997eb1a8665d1c7bc1d5937f020
|
ee02370d1870bd5b19a1c96ef9fd73c563d2a003
|
/run_coupled_fault_SP.py
|
79d563abc23c4200b17324281b0e12502ed3a809
|
[] |
no_license
|
SiccarPoint/stochastic-delta
|
df956c8a7af7324a9d053939d2b36bfda22f6601
|
68b8d1667fb3e333e8a6364a27d6d52c96c44e99
|
refs/heads/master
| 2021-01-19T11:02:11.786856 | 2018-10-25T09:31:07 | 2018-10-25T09:31:07 | 41,691,352 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,953 |
py
|
from delta_obj2 import delta
import numpy as np
from matplotlib.pyplot import figure, plot, show, legend
import matplotlib.pyplot as plt
from matplotlib import cm
from copy import deepcopy
import cPickle as pickle
# import the real params
# all are in standard units of meters; fluxes in m**3
num_pres_strata = 1600
channel_prop_median = 0.2 # arbitrary. Prob too high
font = {'size': 16, }
numnodes = 50000
delr = 0.05
ST = 0.01
SF = 0.65
theta = 1.571
drift = 0.1
dt_true = 200.
Uinit = 0.0005
flux_scaling = 0.5 # arbitrary accounting for basin size
# make a dummy counter for plotting
figcounter = 0
mydeltasims = [] # store each sim here
Q_options = [np.loadtxt('SPU5e-4Acc5dt200_12.30.44_sedfluxout.txt'),
np.loadtxt('SPU5e-4Acc10dt200_12.30.44_sedfluxout.txt'),
np.loadtxt('SPU5e-4Acc20dt200_12.30.44_sedfluxout.txt')]
# screwed up the normalization in the SP runs, so apply janky but robust
# normalization:
Q_options_sde = [np.loadtxt('U5e-4Acc5dt200_13.40.11_sedfluxout.txt'),
np.loadtxt('U5e-4Acc10dt200_13.40.11_sedfluxout.txt'),
np.loadtxt('U5e-4Acc20dt200_13.40.11_sedfluxout.txt')]
for SP, SDE in zip(Q_options, Q_options_sde):
ratio = SDE[0]/SP[0]
SP *= ratio
accel_options = [5., 10., 20.]
def load_mydelta_and_strat(fname, accel):
if np.isclose(accel, 5.):
Q_in = Q_options[0]
elif np.isclose(accel, 10.):
Q_in = Q_options[1]
elif np.isclose(accel, 20.):
Q_in = Q_options[2]
else:
raise NameError
len_drift = 800.
Q_real = np.concatenate((Q_in[0]*np.ones(400), Q_in,
Q_in[-1]*np.ones(len_drift)))
Q_real *= flux_scaling
# ^does this need adjusting for dt?
# add the subsidence curve. Same idea
SL_rate = np.concatenate((Uinit*np.ones(400), accel*Uinit*np.ones(1200)))
SL_trajectory = np.cumsum(SL_rate)
# DO NOT multiply by dt. Let's just scale everything to dt = 1 for now.
# add an initial water depth if necessary here:
SL_trajectory += 0.1
# load it up
f = open(fname, 'rb')
mydelta = pickle.load(f)
f.close()
# do the plot
color = [item/Q_real.max() for item in Q_real]
for i in xrange(num_pres_strata):
plot(mydelta.radial_distances, mydelta.strata[i, :],
c=cm.plasma(color[i]))
# pick the rollovers & add them to the fig:
for i in xrange(num_pres_strata):
# fit a (SF-ST) angle line to each point. rollover is point with
# largest intersect
# remove unmodified floor:
notfloor = np.where(
np.logical_not(np.isclose(mydelta.strata[i, :], 0.)))[0]
c = ((SF - ST) * mydelta.radial_distances[notfloor] +
mydelta.strata[i, notfloor])
diff = np.diff(c)
rollover_subindexes = np.where(
np.diff((diff < 0.).astype(int)) == 1)[0]
rollover_index = notfloor[rollover_subindexes]
plot(mydelta.radial_distances[rollover_index],
mydelta.strata[i, rollover_index], 'k,')
for (Q_in, accel) in zip(Q_options, accel_options):
# add buffers before and after to get to nt = 1600
len_drift = 800.
drift_up = np.arange(len_drift, dtype=float)/len_drift*(
accel*Uinit - Q_in[-1]) + Q_in[-1]
Q_real = np.concatenate((Q_in[0]*np.ones(400), Q_in,
Q_in[-1]*np.ones(len_drift)))
Q_real *= flux_scaling
# ^does this need adjusting for dt?
# add the subsidence curve. Same idea
SL_rate = np.concatenate((Uinit*np.ones(400), accel*Uinit*np.ones(1200)))
SL_trajectory = np.cumsum(SL_rate)
# DO NOT multiply by dt. Let's just scale everything to dt = 1 for now.
# add an initial water depth if necessary here:
SL_trajectory += 0.1
mydelta = delta()
ins = {}
# build the input dict:
ins['nt'] = num_pres_strata
ins['n'] = numnodes
ins['delr'] = delr
ins['delt'] = dt_true
    ins['Q'] = 0.  # superseded by the Q passed in directly
ins['ST'] = ST
ins['SF'] = SF
ins['theta'] = theta
ins['activity_py'] = channel_prop_median
ins['erosion_py_width'] = channel_prop_median
ins['depo_py_width'] = channel_prop_median
ins['drift'] = drift
completenesses = []
tscales, completenesses = mydelta.execute(
ins, SL_trajectory, completeness_records=completenesses, graphs=False,
Q=Q_real)
figure(figcounter)
color = [item/Q_real.max() for item in Q_real]
for i in xrange(num_pres_strata):
plot(mydelta.radial_distances, mydelta.strata[i, :],
c=cm.plasma(color[i]))
# pick the rollovers & add them to the fig:
for i in xrange(num_pres_strata):
# fit a (SF-ST) angle line to each point. rollover is point with
# largest intersect
# remove unmodified floor:
notfloor = np.where(
np.logical_not(np.isclose(mydelta.strata[i, :], 0.)))[0]
c = ((SF - ST) * mydelta.radial_distances[notfloor] +
mydelta.strata[i, notfloor])
diff = np.diff(c)
rollover_subindexes = np.where(
np.diff((diff < 0.).astype(int)) == 1)[0]
rollover_index = notfloor[rollover_subindexes]
plot(mydelta.radial_distances[rollover_index],
mydelta.strata[i, rollover_index], 'k,')
f = open('mydeltaSP' + str(int(accel)) + '.save', 'wb')
pickle.dump(mydelta, f, protocol=pickle.HIGHEST_PROTOCOL)
f.close()
figcounter += 1
# final_pres = mydelta.final_preserved
# completeness_subsampled = []
# for i in xrange(1):
# condition = np.random.rand(final_pres.size) < (float(num_pres_strata)/nt)
# new_pres_strata = np.logical_and(final_pres, condition)
# tsc, comp = mydelta.full_completeness(record=new_pres_strata)
# completeness_subsampled.append(comp.copy())
# figure(7)
# plt.gca().set_xscale('log')
# plt.xlabel('Timescale (s)')
# plt.ylabel('Completeness')
# for i in completeness_subsampled:
# plot(tsc, i, '0.5', ls='--')
# plot(sect_comps[54:,0], sect_comps[54:,1], 'k', lw=3)
# plot(sect_comps[54:,0], sect_comps[54:,2], 'k', lw=3) # only >2000 s
# plot([],[],'0.5', ls='--', label='resampled completenesses')
# plot([],[],'k', lw=3, label='real sections')
# plt.gca().set_ybound(0,1)
# plt.rc('font', **font)
# legend(loc=4)
#
# # now the restricted channel version
# completeness_surface_walk = [] # forced with the mean "deep channel" proportion
# completeness_synth_walk = [] # this one is forced with py (num_pres_strata-1)/nt == 0.0258
# completeness_surface_rand = []
# completeness_synth_rand = []
# num_roll_pres_surf_w = []
# num_roll_pres_synth_w = []
# num_roll_pres_surf_r = []
# num_roll_pres_synth_r = []
# synth_py = float((num_pres_strata-1)/nt)
# mydelta1 = delta()
# mydelta2 = delta()
# mydelta3 = delta()
# mydelta4 = delta()
# for i in xrange(30):
# tscales, completeness_surface_walk = mydelta1.execute('real_inputs.txt', SL_trajectory,
# completeness_records=completeness_surface_walk, graphs=False, Q=Q_real,
# walking_erosion_depo=True)
# num_roll_pres_surf_w.append(mydelta1.final_preserved.sum())
# tscales, completeness_synth_walk = mydelta2.execute('real_inputs_frfixed.txt', SL_trajectory,
# completeness_records=completeness_synth_walk, graphs=False, Q=Q_real,
# walking_erosion_depo=True)
# num_roll_pres_synth_w.append(mydelta2.final_preserved.sum())
# tscales, completeness_surface_rand = mydelta3.execute('real_inputs.txt', SL_trajectory,
# completeness_records=completeness_surface_rand, graphs=False, Q=Q_real,
# restricted_channel_mass_conserved=True)
# num_roll_pres_surf_r.append(mydelta3.final_preserved.sum())
# tscales, completeness_synth_rand = mydelta4.execute('real_inputs_frfixed.txt', SL_trajectory,
# completeness_records=completeness_synth_rand, graphs=False, Q=Q_real,
# restricted_channel_mass_conserved=True)
# num_roll_pres_synth_r.append(mydelta4.final_preserved.sum())
# ### for some reason the initial topo breaks these!!! run it without...
#
# figure(8)
# for i in xrange(len(completeness_surface_walk)):
# plot(tscales, completeness_surface_walk[i],'darkblue', ls='--')
# #plot(tscales, completeness_synth_walk[i],'skyblue', ls='-')
# plot(tscales, completeness_surface_rand[i],'firebrick', ls='--')
# #plot(tscales, completeness_synth_rand[i],'lightsalmon', ls='-')
# plot([],[],'darkblue', ls='--', label='random walk')
# #plot([],[],'skyblue', ls='-', label='random walk, forced py')
# plot([],[],'firebrick', ls='--', label='no system memory')
# #plot([],[],'lightsalmon', ls='-', label='no system memory, forced py')
# plot(sect_comps[54:,0], sect_comps[54:,1], 'k', lw=3)
# plot(sect_comps[54:,0], sect_comps[54:,2], 'k', lw=3) # only >2000 s
# plot([],[],'k', lw=3, label='real sections')
# plt.gca().set_xscale('log')
# plt.xlabel('Timescale (s)')
# plt.ylabel('Completeness')
# plt.rc('font', **font)
# legend(loc=4)
#
# figure('8b')
# for i in xrange(len(completeness_surface_walk)):
# plot(tscales, completeness_surface_walk[i],'darkblue', ls='--')
# plot(tscales, completeness_surface_rand[i],'firebrick', ls='--')
# plot(tscales, completeness_synth_walk[i],'skyblue', ls='-')
# #plot(tscales, completeness_synth_rand[i],'lightsalmon', ls='-')
# plot([],[],'darkblue', ls='--', label='random walk, real py')
# plot([],[],'firebrick', ls='--', label='no system memory, real py')
# plot([],[],'skyblue', ls='-', label='random walk, forced py')
# #plot([],[],'lightsalmon', ls='-', label='no system memory, forced py')
# plot(sect_comps[54:,0], sect_comps[54:,1], 'k', lw=3)
# plot(sect_comps[54:,0], sect_comps[54:,2], 'k', lw=3) # only >2000 s
# plot([],[],'k', lw=3, label='real sections')
# plt.gca().set_xscale('log')
# plt.xlabel('Timescale (s)')
# plt.ylabel('Completeness')
# plt.rc('font', **font)
# legend(loc=4)
#
# figure(9)
# for i in xrange(len(completeness_surface_walk)):
# plot(tscales, completeness_surface_walk[i],'darkblue', ls='--')
# plot(tscales, completeness_synth_walk[i],'skyblue', ls='-')
# plot(tscales, completeness_surface_rand[i],'firebrick', ls='--')
# plot(tscales, completeness_synth_rand[i],'lightsalmon', ls='-')
# plot([],[],'darkblue', ls='--', label='random walk, real py')
# plot([],[],'skyblue', ls='-', label='random walk, forced py')
# plot([],[],'firebrick', ls='--', label='no system memory, real py')
# plot([],[],'lightsalmon', ls='-', label='no system memory, forced py')
# plot([],[],'k', lw=3, label='real sections')
# plt.gca().set_xscale('log')
# plt.xlabel('Timescale (s)')
# plt.ylabel('Completeness')
# legend(loc=4)
#
# figure(10)
# plt.gca().set_xscale('log')
# plt.xlabel('Timescale (s)')
# plt.ylabel('Completeness')
# for i in completenesses:
# plot(tsc, i, '0.5', ls='--')
# plot(sect_comps[54:,0], sect_comps[54:,1], 'k', lw=3)
# plot(sect_comps[54:,0], sect_comps[54:,2], 'k', lw=3) # only >2000 s
# plot([],[],'0.5', ls='--', label='simulated completeness')
# plot([],[],'k', lw=3, label='real sections')
# plt.gca().set_ybound(0,1)
# plt.rc('font', **font)
# legend(loc=4)
#
# figure(11)
# for i in xrange(mydelta.strat_eta.shape[0]):
# plot(mydelta.radial_distances, mydelta.strat_eta[i,:], 'k')
# plt.xlabel('Radial distance')
# plt.ylabel('Height')
# plt.gca().set_xbound(0,3.5)
#
# figure(12)
# for i in xrange(len(completeness_surface_walk)):
# plot(tscales, completeness_synth_walk[i],'skyblue', ls='-')
# plot([],[],'skyblue', ls='-', label='random walk, forced py')
# plot([],[],'k', lw=3, label='real sections')
# plot(sect_comps[54:,0], sect_comps[54:,1], 'k', lw=3)
# plot(sect_comps[54:,0], sect_comps[54:,2], 'k', lw=3) # only >2000 s
# plt.gca().set_xscale('log')
# plt.xlabel('Timescale (s)')
# plt.ylabel('Completeness')
# legend(loc=4)
#
# figure(13)
# for i in xrange(mydelta2.strat_eta.shape[0]):
# plot(mydelta2.radial_distances, mydelta2.strat_eta[i,:], 'k')
# plt.xlabel('Radial distance (m)')
# plt.ylabel('Height (m)')
# plt.gca().set_xbound(0,3.5)
#
# figure(14)
# mystrata = np.where(np.random.rand(mydelta.strat_eta.shape[0])
# <0.025831564048124558)[0]
# for i in mystrata:
# plot(mydelta.radial_distances, mydelta.strat_eta[i,:], 'k')
# plt.xlabel('Radial distance')
# plt.ylabel('Height')
# plt.gca().set_xbound(0,3.5)
#
# figure(15)
# plot(sect_comps[54:,0], sect_comps[54:,1], 'k--', lw=3, label='Section A')
# plot(sect_comps[54:,0], sect_comps[54:,2], 'k:', lw=3, label='Section B') # only >2000 s
# plt.gca().set_xscale('log')
# plt.xlabel('Timescale (s)')
# plt.ylabel('Completeness')
# plt.gca().set_ybound(0,1)
# plt.rc('font', **font)
# legend(loc=4)
#
# figure(16)
# plot(mydelta.output_times, SL_trajectory, 'k', lw=3)
# plt.xlabel('Time (s)')
# plt.ylabel('Water surface elevation (m)')
# plt.ticklabel_format(style='sci', axis='x', scilimits=(0,0))
# plt.gca().set_ybound(0.,0.3)
# plt.rc('font', **font)
#
# show()
|
[
"daniel@dhmac.geol.cf.ac.uk"
] |
daniel@dhmac.geol.cf.ac.uk
|
059a6e527608807195b529860ad584d8239d5b7c
|
6d3720fdd723710a4e2f0c0f41a8329b959a3be4
|
/database/subject.py
|
34ea275614e3a59e864ca4b5f55119a266a48f43
|
[] |
no_license
|
lazyplatypus/natcar-server
|
f5b86675ffc118aae1ff9f002d8f1d22c02fee40
|
31c824c3466d8605c0c335db913448e4e84c4a4e
|
refs/heads/master
| 2020-04-21T23:32:47.871440 | 2019-02-10T08:01:14 | 2019-02-10T08:01:14 | 169,950,387 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,901 |
py
|
# TODO all saves should go to ES
import re
import uuid
from schemas.subject import schema as subject_schema
from database.util import deliver_fields
from database.entity_base import save_entity_to_es
from database.util import insert_row, save_row, get_row, list_rows
from modules.util import convert_slug_to_uuid, convert_uuid_to_slug
def is_valid_members(db_conn, data):
"""
"""
from database.unit import does_unit_exist
# TODO-3 this is going to be slow
for member_desc in data['members']:
entity_id, kind = member_desc['id'], member_desc['kind']
entity = None
if kind == 'unit':
entity = does_unit_exist(db_conn, entity_id)
elif kind == 'subject':
entity = does_subject_exist(db_conn, entity_id)
if not entity:
return [{
'name': 'id',
'message': 'Not a valid entity.',
'value': entity_id,
'ref': 'qKUTNkDuSiGLh0PZdhu9Xw',
}]
return []
def ensure_no_cycles(db_conn, data):
"""
Ensure no membership cycles form.
"""
seen = set()
found = {'cycle': False}
def _(members):
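        # Depth-first walk over subject-type members; flag a cycle if an entity is visited twice.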
entity_ids = [
convert_slug_to_uuid(member['id'])
for member in members
if member['kind'] == 'subject'
]
entities = list_latest_accepted_subjects(db_conn, entity_ids)
for entity in entities:
if entity['entity_id'] in seen:
found['cycle'] = True
break
seen.add(entity['entity_id'])
_(entity['members'])
seen.add(data['entity_id'])
_(data['members'])
if found['cycle']:
return [{
'name': 'members',
'message': 'Found a cycle in membership.',
'ref': 'PfEdjTllRpqh_bKGM9oyTA',
}]
return []
def insert_subject(db_conn, data):
"""
Create a new version of a new a subject, saving to ES.
"""
schema = subject_schema
query = """
INSERT INTO subjects_entity_id (entity_id)
VALUES (%(entity_id)s);
INSERT INTO subjects
( entity_id , name , user_id ,
body , members )
VALUES
(%(entity_id)s, %(name)s, %(user_id)s,
%(body)s, %(members)s)
RETURNING *;
"""
data = {
'entity_id': uuid.uuid4(),
'name': data['name'],
'user_id': convert_slug_to_uuid(data['user_id']),
'body': data['body'],
'members': data.get('members', []),
}
errors = is_valid_members(db_conn, data) + ensure_no_cycles(db_conn, data)
if errors:
return None, errors
data, errors = insert_row(db_conn, schema, query, data)
if not errors:
save_entity_to_es('subject', deliver_subject(data, access='view'))
return data, errors
def insert_subject_version(db_conn, current_data, next_data):
"""
Create a new version of an existing subject.
"""
schema = subject_schema
query = """
INSERT INTO subjects
( entity_id , previous_id , name , user_id ,
body , members )
VALUES
(%(entity_id)s, %(previous_id)s, %(name)s, %(user_id)s,
%(body)s, %(members)s)
RETURNING *;
"""
data = {
'entity_id': current_data['entity_id'],
'previous_id': current_data['version_id'],
'user_id': convert_slug_to_uuid(next_data['user_id']),
'name': next_data.get('name') or current_data.get('name'),
'body': next_data.get('body') or current_data.get('body'),
'members': (next_data.get('members') or
current_data.get('members') or
[]),
}
errors = is_valid_members(db_conn, data) + ensure_no_cycles(db_conn, data)
if errors:
return None, errors
data, errors = insert_row(db_conn, schema, query, data)
if not errors:
save_entity_to_es('subject', deliver_subject(data, access='view'))
return data, errors
def update_subject(db_conn, version_id, status):
"""
Update a subject version's status and available. [hidden]
"""
query = """
UPDATE subjects
SET status = %(status)s
WHERE version_id = %(version_id)s
RETURNING *;
"""
data = {
'version_id': convert_slug_to_uuid(version_id),
'status': status,
}
data, errors = save_row(db_conn, query, data)
if not errors:
save_entity_to_es('subject', deliver_subject(data, access='view'))
return data, errors
def deliver_subject(data, access=None):
"""
Prepare a response for JSON output.
"""
schema = subject_schema
return deliver_fields(schema, data, access)
def does_subject_exist(db_conn, entity_id):
"""
Just... is this a valid subject entity_id.
"""
query = """
SELECT entity_id
FROM subjects_entity_id
WHERE entity_id = %(entity_id)s
LIMIT 1;
"""
params = {
'entity_id': convert_slug_to_uuid(entity_id),
}
return get_row(db_conn, query, params)
def get_latest_accepted_subject(db_conn, entity_id):
"""
Get Latest Accepted Subject Version by EID
"""
query = """
SELECT DISTINCT ON (entity_id) *
FROM subjects
WHERE status = 'accepted' AND entity_id = %(entity_id)s
ORDER BY entity_id, created DESC;
/* TODO LIMIT */
"""
params = {
'entity_id': convert_slug_to_uuid(entity_id),
}
return get_row(db_conn, query, params)
def list_latest_accepted_subjects(db_conn, entity_ids):
"""
List Latest Accepted Subject Versions by EIDs
"""
if not entity_ids:
return []
query = """
SELECT DISTINCT ON (entity_id) *
FROM subjects
WHERE status = 'accepted' AND entity_id in %(entity_ids)s
ORDER BY entity_id, created DESC;
/* TODO LIMIT OFFSET */
"""
params = {'entity_ids': tuple([
convert_slug_to_uuid(entity_id)
for entity_id in entity_ids
])}
return list_rows(db_conn, query, params)
def list_many_subject_versions(db_conn, version_ids):
"""
List Subject Versions by VIDs
"""
if not version_ids:
return []
query = """
SELECT *
FROM subjects
WHERE version_id in %(version_ids)s
ORDER BY created DESC;
/* TODO LIMIT OFFSET */
"""
params = {'version_ids': tuple(
convert_slug_to_uuid(vid)
for vid in version_ids
)}
return list_rows(db_conn, query, params)
def get_subject_version(db_conn, version_id):
"""
Get a subject version.
"""
query = """
SELECT *
FROM subjects
WHERE version_id = %(version_id)s
ORDER BY created DESC;
/* TODO LIMIT OFFSET */
"""
params = {'version_id': convert_slug_to_uuid(version_id)}
return get_row(db_conn, query, params)
def list_one_subject_versions(db_conn, entity_id):
"""
List Subjects Versions by EID
"""
query = """
SELECT *
FROM subjects
WHERE entity_id = %(entity_id)s
ORDER BY created DESC;
/* TODO LIMIT OFFSET */
"""
params = {'entity_id': convert_slug_to_uuid(entity_id)}
return list_rows(db_conn, query, params)
def list_subjects_by_unit_flat(db_conn, unit_id):
"""
List Subjects by Unit EID
"""
unit_id = convert_uuid_to_slug(unit_id)
# ENSURE THIS IS SQL SAFE
unit_id = re.sub(r'[^a-zA-Z0-9\-\_]', '', unit_id)
query = """
WITH temp AS (
SELECT DISTINCT ON (entity_id) *
FROM subjects
WHERE status = 'accepted'
ORDER BY entity_id, created DESC
)
SELECT *
FROM temp
WHERE members @> '[{"id":"%(unit_id)s"}]'
ORDER BY created DESC;
/* TODO limit offset */
""" % {'unit_id': unit_id}
params = {}
return list_rows(db_conn, query, params)
def list_subject_parents(db_conn, subject_id):
"""
List the direct parents of the subject specified.
"""
subject_id = convert_uuid_to_slug(subject_id)
# ENSURE THIS IS SQL SAFE
subject_id = re.sub(r'[^a-zA-Z0-9\-\_]', '', subject_id)
query = """
WITH temp AS (
SELECT DISTINCT ON (entity_id) *
FROM subjects
WHERE status = 'accepted'
ORDER BY entity_id, created DESC
)
SELECT *
FROM temp
WHERE members @> '[{"id":"%(subject_id)s"}]'
ORDER BY created DESC;
/* TODO limit offset */
""" % {'subject_id': subject_id}
params = {}
return list_rows(db_conn, query, params)
def list_my_recently_created_subjects(db_conn, user_id):
"""
List My Recently Created Subjects (by User ID)
"""
query = """
SELECT DISTINCT ON (entity_id) *
FROM subjects
WHERE user_id = %(user_id)s
ORDER BY entity_id, created DESC;
/* TODO LIMIT OFFSET */
"""
params = {'user_id': user_id}
return list_rows(db_conn, query, params)
def list_all_subject_entity_ids(db_conn):
"""
List all subject entity ids.
"""
query = """
SELECT entity_id
FROM subjects;
"""
params = {}
return [
row['entity_id']
for row in list_rows(db_conn, query, params)
]
def get_recommended_subjects(db_conn):
"""
list recommended subjects
"""
query = """
SELECT DISTINCT ON (entity_id) *
FROM subjects
WHERE status = 'accepted' AND name = %(name)s
ORDER BY entity_id, created DESC;
/* TODO LIMIT OFFSET */
"""
params = {
'name': 'An Introduction to Electronic Music',
}
return list_rows(db_conn, query, params)
|
[
"dgkim@ucdavis.edu"
] |
dgkim@ucdavis.edu
|
86b082d38e2f308f0a9eb3f9b74eb82523828273
|
b478d1e63cce432b6fd3692c0aa7a84f411ae9dc
|
/meta_py3/main.py
|
b2fcdb9da12e44315b927e032eb6c0442104b5d4
|
[] |
no_license
|
yiqing95/py_study
|
8d414aa00b4ac31070fe5667a98815980eee46d0
|
6ce6b46ad729a795bc9253d6339169e62ef47766
|
refs/heads/master
| 2016-09-06T17:45:26.081269 | 2015-01-12T15:22:29 | 2015-01-12T15:22:29 | 20,810,777 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 198 |
py
|
from meta_py3 import example2
__author__ = 'yiqing'
from meta_py3.example import *
from meta_py3.helper import printHr
p = Point(3,4)
print(p.x)
printHr()
obj = example2.MyClass(3)
print(obj.x)
|
[
"yiqing-95@qq.com"
] |
yiqing-95@qq.com
|
6dd32ff3379c39cecf5e238ed5eb616c60e199dd
|
3f574dc4937965029c5b342849a71afe45e89e5d
|
/blog/migrations/0002_auto_20200911_1005.py
|
18b15546d7373514a47ade34b9fb391720120443
|
[] |
no_license
|
geihar/mini_blog
|
a5fa326c513444d3788ee9c327d9faedcea72a4f
|
2ae22f6765a20182565d5b66722ff311fe0242b0
|
refs/heads/master
| 2022-12-17T14:59:53.336269 | 2020-09-17T12:08:41 | 2020-09-17T12:08:41 | 294,717,947 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 789 |
py
|
# Generated by Django 3.1.1 on 2020-09-11 10:05
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("blog", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="post",
name="author",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL
),
),
migrations.AddField(
model_name="post",
name="tags",
field=models.ManyToManyField(related_name="post_tag", to="blog.Tag"),
),
]
|
[
"l0635968488@gmail.com"
] |
l0635968488@gmail.com
|
bfb2219c3b7f01806926849541c2ebcaa6f0d79c
|
9de5c748145962520a0180521acdec063e14f789
|
/DJango-FLOR-CERDAN/Semana06/Caso Biblioteca/EliminarRegistrosEditorial.py
|
c3933a9ee57ab1746e70feaa68f3ebc12ad91afd
|
[] |
no_license
|
CritianChipana/DJango-
|
fe83dd09b923be380906e74bc5d4db3940b18cfa
|
930f74570e1a5e1f6f476c51a4fe56310d7a123c
|
refs/heads/master
| 2023-05-29T03:54:26.690202 | 2021-01-14T05:10:09 | 2021-01-14T05:10:09 | 322,924,955 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 296 |
py
|
import sqlite3
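# Delete the publisher row with IDEDITORIAL = 5 from the bdbiblioteca.db SQLite database.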
conexion = sqlite3.connect("bdbiblioteca.db")
cursor = conexion.cursor()
consulta = """
DELETE FROM EDITORIAL
WHERE
IDEDITORIAL = 5
"""
cursor = conexion.cursor()
cursor.execute(consulta)
conexion.commit()
conexion.close()
|
[
"cristianchipanahuaman@gmail.com"
] |
cristianchipanahuaman@gmail.com
|
47c04dbadee3061503e71a0c7e9a0557ba1e95e6
|
9495498e3275aef4de17963f679dab1ada3148a2
|
/experiments/karla/diplomski-rad/blade/pb/datasets/n3-all/done-model-testing-4.py
|
66098df78fd968879613bad54f51c5496e1962e8
|
[
"MIT"
] |
permissive
|
lvrcek/consensus-net
|
131a4b378b8cc7704c033882560953442773fd74
|
560957f315751822e1ddf8c097eb7b712ceadff3
|
refs/heads/master
| 2020-04-13T02:09:00.435155 | 2018-12-17T18:53:43 | 2018-12-17T18:53:43 | 162,894,761 | 0 | 0 |
MIT
| 2018-12-23T13:50:29 | 2018-12-23T13:50:28 | null |
UTF-8
|
Python
| false | false | 1,496 |
py
|
from comet_ml import Experiment
experiment = Experiment(api_key="oda8KKpxlDgWmJG5KsYrrhmIV", project_name="consensusnet")
import numpy as np
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Input
from keras.layers import Conv1D, MaxPooling1D, Conv2D, MaxPool2D
import sys
module_path = '/home/diplomski-rad/consensus-net/src/python/dataset/'
if module_path not in sys.path:
print('Adding dataset module.')
sys.path.append(module_path)
import dataset
X_train = np.load('./pysam-all-dataset-n3-X-reshaped-train.npy')
X_validate = np.load('./pysam-all-dataset-n3-X-reshaped-validate.npy')
y_train = np.load('./pysam-all-dataset-n3-y-reshaped-train.npy')
y_validate = np.load('./pysam-all-dataset-n3-y-reshaped-validate.npy')
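# Small convolutional network over inputs of shape (7, 1, 4): two Conv2D layers,
# max-pooling, dropout, and a 4-way softmax output.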
input_layer = Input(shape=(7, 1, 4))
conv_1 = Conv2D(filters=40, kernel_size=3, padding='same', activation='relu')(input_layer)
pool_1 = MaxPool2D(pool_size=(2, 1))(conv_1)
conv_2 = Conv2D(filters=20, kernel_size=3, padding='same', activation='relu')(pool_1)
drop_1 = Dropout(0.25)(conv_2)
flatten = Flatten()(drop_1)
predictions = Dense(4, activation='softmax')(flatten)
model = Model(input_layer, predictions)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.summary())
batch_size = 10000
epochs = 50
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_validate, y_validate))
|
[
"juric.antonio@hotmail.com"
] |
juric.antonio@hotmail.com
|
17e37b200e4daabdb7bde731b5f7ece860ff30f5
|
9f440599da392a55d7d5b2b7ce571bc3f2dc881e
|
/rhea/cores/usbext/fpgalink/__init__.py
|
40502351eaf29688fab9e182e67fd1cd214d5167
|
[
"MIT"
] |
permissive
|
zignig/rhea
|
713559f688f85e1304ab43c2b871553da3bf01ae
|
e0d04ff4fcbd57dfeb6f84fa8f87d6b03caee590
|
refs/heads/master
| 2020-04-06T06:53:33.541215 | 2016-03-15T12:45:23 | 2016-03-15T12:45:23 | 53,943,632 | 1 | 0 | null | 2016-03-15T12:42:06 | 2016-03-15T12:42:06 | null |
UTF-8
|
Python
| false | false | 196 |
py
|
from __future__ import absolute_import
from . import _fpgalink_fx2 as fpgalink
from ._fpgalink_fx2 import get_interfaces
from ._fpgalink_fx2 import fpgalink_fx2
from ._fl_convert import convert
|
[
"chris.felton@gmail.com"
] |
chris.felton@gmail.com
|
fd214189a62abb5078e581b8a3d09b1ae4134238
|
b7f6108f9105a169250c824e6db407f6f45b5aa9
|
/Fizzbuzz.py
|
ef1b851a7b813a65fd28cb4b08e26cda02109e0f
|
[] |
no_license
|
FlowerbeanAnsh/python_problems_2
|
fa478a6443a0e25fee4f30fc3b761abc22304e14
|
e5ccb94034f88215462711bfe42f8a3bbf124c08
|
refs/heads/master
| 2023-02-10T11:07:01.984622 | 2021-01-07T05:07:42 | 2021-01-07T05:07:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 273 |
py
|
def fizzbuzz(n):
if n % 3 == 0 and n % 5 == 0:
return 'FizzBuzz'
elif n % 3 == 0:
return 'Fizz'
elif n % 5 == 0:
return 'Buzz'
else:
return str(n)
n=int(input("enter number"))
print([fizzbuzz(i) for i in range(1, 15)])
|
[
"60190254+anshsaxena5621@users.noreply.github.com"
] |
60190254+anshsaxena5621@users.noreply.github.com
|
f911e8ca80e57210a5d75e86e8fabdcae495d772
|
1ee5e693c0e5054682d625d1cfd68b26bb0ae3a6
|
/test_sql1.py
|
4b41cf49230d46006e79070599a5ec68a18300c4
|
[] |
no_license
|
Genskill2/03-bootcamp-sql-Praveen45-max
|
792e2ce711523a0e13d95cbf871159a9b6efa818
|
2fc91ef6a597ef30b87f4e49ceffbf569aec781a
|
refs/heads/master
| 2023-06-01T16:36:28.672536 | 2021-06-20T11:14:21 | 2021-06-20T11:14:21 | 377,354,235 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,470 |
py
|
import sqlite3
import os.path
import pytest
@pytest.fixture(scope="package")
def db():
if os.path.exists("db.sqlite"):
os.unlink("db.sqlite")
f = sqlite3.connect("db.sqlite")
c = f.cursor()
c.execute("PRAGMA foreign_keys = ON;")
c.close()
return f
def run_query(dbconn, statement):
cur = dbconn.cursor()
cur.execute(statement)
items = cur.fetchall()
cur.close()
return items
def test_create_and_insert(db):
cur = db.cursor()
with open("create.sql") as f:
cur.executescript(f.read())
cur.close()
cur = db.cursor()
with open("insert.sql") as f:
cur.executescript(f.read())
cur.close()
items = run_query(db, "select name from publisher")
assert set (x[0] for x in items) == set(["PHI","Harper","GCP","Avery","Del Rey","Vintage"]), "Publisher mismatch"
items = run_query(db, "select title from books")
assert set(x[0] for x in items) == set(["The C Programming Language","The Go Programming Language","The UNIX Programming Environment","Cryptonomicon","Deep Work","Atomic Habits","The City and The City","The Great War for Civilisation"]), "Book titles mismatch"
items = run_query(db, "select name from subjects")
assert set(x[0] for x in items) == set(["C","UNIX","Technology","Science Fiction","Productivity","Psychology","Politics","History","Go"]), "Subjects mismatch"
def test_run_query1(db):
with open("query1.sql") as f:
query = f.read()
items = run_query(db, query)
assert set(x[0] for x in items) == set(["The C Programming Language", "The Go Programming Language", "The UNIX Programming Environment"])
def test_run_query2(db):
with open("query2.sql") as f:
query = f.read()
items = run_query(db, query)
expected = set([("The City and The City", "Del Rey"),
("The Great War for Civilisation","Vintage")])
assert set(items) == expected
def test_run_query3(db):
with open("query3.sql") as f:
query = f.read()
items = run_query(db, query)
expected = set(['The C Programming Language', 'The Go Programming Language', 'The UNIX Programming Environment', 'Cryptonomicon', 'Deep Work', 'The City and The City', 'The Great War for Civilisation'])
assert set(x[0] for x in items) == expected
def test_run_query4(db):
with open("query4.sql") as f:
query = f.read()
items = run_query(db, query)
expected = set(["Productivity", "Psychology"])
assert set(x[0] for x in items) == expected
def test_run_update1(db):
cur = db.cursor()
with open("update1.sql") as f:
cur.executescript(f.read())
cur.close()
items = run_query(db, "select name from publisher")
assert set (x[0] for x in items) == set(["Prentice Hall","Harper","GCP","Avery","Del Rey","Vintage"]), "Publisher mismatch"
def test_run_delete(db):
cur = db.cursor()
with open("delete1.sql") as f:
cur.executescript(f.read())
cur.close()
items = run_query(db, "select s.name from books b, subjects s, books_subjects bs where b.id = bs.book and s.id = bs.subject and b.title = 'The Great War for Civilisation'");
expected = set(["Politics"])
assert set(x[0] for x in items) == expected
items = run_query(db, "select name from subjects")
assert set(x[0] for x in items) == set(["C","UNIX","Technology","Science Fiction","Productivity","Psychology","Politics","Go"]), "Subjects mismatch"
|
[
"noreply@github.com"
] |
Genskill2.noreply@github.com
|
09fdfef1f34be76e6985aa38562d9fca58cd5f9d
|
9cfc9f2b1401f172fd67a136cee2f6a47de397f9
|
/python_oop/player.py
|
c2385cc357290dd8953dad831d43cc3cae6858f1
|
[] |
no_license
|
wathiwut193/project_code_backup-
|
523668080ee8c175b584943cfc7fd61445da4e63
|
b4ba2e73ffb3e463fcf4a8a42c94037df9785530
|
refs/heads/master
| 2020-05-15T11:27:27.087387 | 2019-04-19T08:13:34 | 2019-04-19T08:13:34 | 182,227,898 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 369 |
py
|
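# Two minimal variants: Player is filled in attribute-by-attribute after construction,
# while Player2 receives its values through the constructor.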
class Player:
def __init__(self):
self.fname = ""
self.lname = ""
self.number = ""
class Player2:
def __init__(self, fname, lname, number):
self.fname = fname
self.lname = lname
self.number = number
if __name__ == '__main__':
p1 = Player()
p1.fname = "Loris"
p1.lname = "Karius"
p1.number = 1
|
[
"wathiwut193@gmail.com"
] |
wathiwut193@gmail.com
|
4781a8d60651487049551717065223d80d10326a
|
3b2e96799191cfe33a9a26415f81b750098a188f
|
/recipe/app/management/commands/wait_for_db.py
|
a846e1e393454f8db81ffab96ed09371398170f6
|
[
"MIT"
] |
permissive
|
DeMT/django-rest-API-recipe-project
|
75c1e226c1e3acbdbf88c479ee583ae66b402a2f
|
f19c4a6a124927780ae5029a6fdbd96325d2567c
|
refs/heads/master
| 2020-08-05T21:03:01.547847 | 2019-12-29T14:17:31 | 2019-12-29T14:17:31 | 212,709,274 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 699 |
py
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
""" Django command to pause excution until database is available."""
def handle(self, *args, **options):
self.stdout.write('waiting for database...')
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write(
'database unavailable, waiting for one second.')
time.sleep(1)
self.stdout.write(self.style.SUCCESS('database up and ready.'))
|
[
"gn00468461@gmail.com"
] |
gn00468461@gmail.com
|
383b72a994b731a49eb57b0fb8c8e8ce459d58b8
|
01187998930aa81bb15729cc7883a4a55cf64bce
|
/article/views.py
|
873e860f24c208a4bb964130b908877fe6daf287
|
[] |
no_license
|
todokku/django_blog_heroku
|
1c8af4a0f678c0a182423ca6e8c0f2a2632e996d
|
d16c90908ec2cb39ae4699641303bfef9116775e
|
refs/heads/master
| 2022-06-10T12:19:11.201293 | 2020-05-10T07:38:37 | 2020-05-10T07:38:37 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,221 |
py
|
import markdown
from markdown.extensions.toc import TocExtension
from django.shortcuts import render
from django.views.generic.base import View
from django.utils.text import slugify
from .models import Post, Category, Tag
# Create your views here.
class IndexView(View):
"""
    Index page view.
"""
def get(self, request):
post_list = Post.objects.all().order_by('-created_time')
return render(request, 'index.html', context={
'post_list': post_list
})
class DetailView(View):
"""
    Article detail page view.
"""
def get(self, request, id):
post = Post.objects.get(id=id)
md = markdown.Markdown(extensions=[
'markdown.extensions.extra',
'markdown.extensions.fenced_code',
TocExtension(slugify=slugify),
])
post.body = md.convert(post.body)
post.toc = md.toc
return render(request, 'detail.html', context={
'post': post
})
class DateView(View):
"""
    Sidebar archive by date.
"""
def get(self, request, year, month):
post_list = Post.objects.filter(created_time__year=year,
created_time__month=month
).order_by('-created_time')
return render(request, 'index.html', context={
'post_list': post_list
})
class CategoryView(View):
"""
    Sidebar category filter.
"""
def get(self, request, id):
        # Remember to import the Category class at the top of the module
category = Category.objects.get(id=id)
post_list = Post.objects.filter(category=category).order_by('-created_time')
return render(request, 'index.html', context={
'post_list': post_list
})
class TagView(View):
"""
    Sidebar tag filter.
"""
def get(self, request, id):
        # Remember to import the Tag class at the top of the module
tag = Tag.objects.get(id=id)
post_list = Post.objects.filter(tags=tag).order_by('-created_time')
return render(request, 'index.html', context={
'post_list': post_list
})
|
[
"893821635@qq.com"
] |
893821635@qq.com
|
cda9bea59192db09772dbd4a7ed517e620d10c63
|
54b562f272e6d9759256f1f0d5219940aff0d6e3
|
/modules/Message.py
|
9141a1b0f8c9f81a51dc2406efc71ea481689c45
|
[
"Apache-2.0"
] |
permissive
|
duan602728596/48Live
|
e5c93248dd8de40f7c057e1c05bf348289438da4
|
ccf68a05bf504612132c16d741d901a013a480cb
|
refs/heads/master
| 2021-09-10T14:46:47.912114 | 2018-03-28T01:57:01 | 2018-03-28T01:57:11 | 86,576,208 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 294 |
py
|
"""
Message box helper class.
warn: show a warning dialog
"""
from PyQt5 import QtWidgets
class Message:
def __init__(self):
self.messageBox = QtWidgets.QMessageBox
    # Warning dialog box
def warn(self, text):
msg = self.messageBox(self.messageBox.Warning, u'警告', text)
msg.exec_()
message = Message()
|
[
"duanhaochen@126.com"
] |
duanhaochen@126.com
|
8ca2092b35c528787dbaeb02d872ef9ddec3146c
|
04ba0f47c055a6839b12e62a6c50e885d041124a
|
/internshipcontract/urls.py
|
a4272dcf147a34c4b5afc510999f4871453c8327
|
[] |
no_license
|
Anurodhyadav/contractofreduct
|
cd7aa760659064eef5cf2565c361cdad78588f05
|
769176017f44b29ddf94847841e3174342f5b5dd
|
refs/heads/master
| 2022-11-04T14:44:20.258156 | 2020-06-18T05:41:20 | 2020-06-18T05:41:20 | 273,150,246 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,034 |
py
|
"""internshipcontract URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.contrib.staticfiles.urls import static
from django.conf import settings
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('reductcontract.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
|
[
"anurodhyadav072@gmail.com"
] |
anurodhyadav072@gmail.com
|
14f72ef3edf0e3c9535949575581519b8e43e50c
|
31b7c7e67095ca779dbf26d9dc5a26d4fd57b054
|
/JOHNY.py
|
1a9fe3b34b920231de70528e41b4c817ddc77a96
|
[] |
no_license
|
ayusinghi96/CodeChefPrograms
|
938d2aafb4c2b6f72d47a1318703f314c2e9d00d
|
471dc369bb59ea6a8d2bc4c1931a3166a3100a80
|
refs/heads/master
| 2020-03-16T08:17:28.332269 | 2018-05-27T18:41:58 | 2018-05-27T18:41:58 | 132,593,727 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 230 |
py
|
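# For each test case: remember the element at (1-based) index k, sort the list,
# and print the new 1-based position(s) of that value.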
t = int(input())
for i in range(t):
n = int(input())
a = list(map(int,input().split()))
k = int(input())
store = a[k-1]
a.sort()
for i in range(n):
if a[i] == store:
print(i+1)
|
[
"ayusinghi96@gmail.com"
] |
ayusinghi96@gmail.com
|
63a704c73f1c4393d8e54d3e901f86d996919a73
|
565875f01529123b23f4f80fc3368542f1096b99
|
/crawler.py
|
c0ee88df6d2a78770c5a85db52c1e6753f3e6078
|
[] |
no_license
|
infinitewhim/python_collection
|
3a2c9062b9c703380aa5b38c7b85d91c9915f226
|
71b7fc27adb4d2fcccdc66887270848d8630add2
|
refs/heads/master
| 2021-05-05T09:42:30.737764 | 2018-04-19T15:48:38 | 2018-04-19T15:48:38 | 103,911,026 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 509 |
py
|
import requests
import os
from bs4 import BeautifulSoup
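# Scrape the headline text (span.h2 elements) from the Irish Times front page,
# append it to news.txt, then upload the file to HDFS.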
url = 'https://www.irishtimes.com/'
source = requests.get(url).text
soup = BeautifulSoup(source,'lxml')
f = open('/home/vagrant/Desktop/python/news.txt', 'a')
for link in soup.findAll('span',class_='h2'):
#strip() can remove whitespace from the beginning and end of a string
str1 = link.string.encode('utf-8').strip()
f.write(str1+'\n\n')
f.close()
os.system('/usr/local/hadoop/bin/hdfs dfs -put /home/vagrant/Desktop/python/news.txt input')
|
[
"noreply@github.com"
] |
infinitewhim.noreply@github.com
|
da05e683451316dbe9fccd0198ed9243a80cc710
|
831f8e39c95d706fa440054d53ed61aa8f0e435c
|
/portfolio/settings.py
|
39801b7a1ed2c84b40bcdc61b4aa010db759badc
|
[] |
no_license
|
x14119641/portfolio_project
|
274d56503cea8ed3863bd805b85f86fd5cf5c43c
|
f114ecd7f0a25307b3289a6bcd68b954c50558dd
|
refs/heads/master
| 2020-04-07T00:11:18.564673 | 2018-11-19T18:22:58 | 2018-11-19T18:22:58 | 157,894,156 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,436 |
py
|
"""
Django settings for portfolio project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q781+uwq33@##m4_j^aunie1vwe$xp9qvm2ka0j$l19@n0fn^c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jobs.apps.JobsConfig',
'blog.apps.BlogConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'portfoliodb',
'USER': 'postgres',
'PASSWORD': 'Barbera123+',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'portfolio/static')
]
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
[
"dgilromero@bloomberg.net"
] |
dgilromero@bloomberg.net
|
c084c779769711bfd5a13c5d81c33ad53c92a8e4
|
3357cf3ac92b582aeacab694b325110ddffcc2fe
|
/app.py
|
28ffd59854dddd72d1846ed3d8c3a49ea56c1391
|
[] |
no_license
|
reficashabong/appforheroku
|
eef687becef3a7cbb6b9c7ce8b40fd4eb9b93969
|
4aeeb517f2bb63a2bb2794a0caa3fbbdfd76137f
|
refs/heads/master
| 2022-12-16T22:56:31.738859 | 2020-09-05T08:31:13 | 2020-09-05T08:31:13 | 293,031,993 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 993 |
py
|
import pandas as pd
import numpy as np
from flask import Flask, request, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model.pkl','rb'))
@app.route('/')
def home():
return render_template('form.html')
@app.route('/predict',methods=['POST','GET'])
def predict():
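    # Collect the form values in a fixed feature order, build a one-row DataFrame,
    # and run the pickled model on it.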
input_features = [float(x) for x in request.form.values()]
features_value = [np.array(input_features)]
features_name = ["Married","Dependents","Education","Self_Employed","ApplicantIncome",
"CoapplicantIncome","LoanAmount","Loan_Amount_Term","Credit_History","Property_Area"]
df = pd.DataFrame(features_value, columns=features_name)
output = model.predict(df)
if output == 1:
res_val = "** a higher probalility of getting a Loan**"
else:
res_val = "very low changes of getting a Loan"
return render_template('form.html', pred='Applicant has {}'.format(res_val))
if __name__ == "__main__":
app.run()
|
[
"noreply@github.com"
] |
reficashabong.noreply@github.com
|
7f2c7d176ea3859cfed71a6a2c41bf13a5fd8d3c
|
61ac1b89ad090a41d41be1de178a26ae89711dd2
|
/solutions/985.py
|
5c16323850249b54e3e27b5d3cf1406ad6b81290
|
[] |
no_license
|
zszzlmt/leetcode
|
ea0bdc24248075e35094d036520f729e699f7d7e
|
daee4df1850740438e3860b993aa32b45c03812e
|
refs/heads/master
| 2023-07-16T18:55:21.418378 | 2021-05-10T07:24:05 | 2021-05-10T07:24:05 | 119,399,280 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,083 |
py
|
class Solution:
def sumEvenAfterQueries(self, A: List[int], queries: List[List[int]]) -> List[int]:
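        # Keep a running sum of the even values in A and adjust it incrementally
        # for each query instead of recomputing the sum.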
def get_sum(A):
result = 0
for value in A:
if abs(value) % 2 == 0:
result += value
return result
def is_even(num):
return abs(num) % 2 == 0
results = list()
init_sum = get_sum(A)
results.append(init_sum)
for value, index in queries:
previous_value = A[index]
now_value = A[index] + value
result_base = results[-1]
if is_even(previous_value) and is_even(now_value):
results.append(result_base + value)
elif is_even(previous_value) and not is_even(now_value):
results.append(result_base - previous_value)
elif not is_even(previous_value) and is_even(now_value):
results.append(result_base + now_value)
else:
results.append(result_base)
A[index] = now_value
return results[1:]
|
[
"zpu@pku.edu.cn"
] |
zpu@pku.edu.cn
|