| blob_id (string, 40..40) | directory_id (string, 40..40) | path (string, 2..616) | content_id (string, 40..40) | detected_licenses (list, 0..69) | license_type (2 classes) | repo_name (string, 5..118) | snapshot_id (string, 40..40) | revision_id (string, 40..40) | branch_name (string, 4..63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k..686M, nullable) | star_events_count (int64, 0..209k) | fork_events_count (int64, 0..110k) | gha_license_id (23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (220 classes) | src_encoding (30 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2..10.3M) | extension (257 classes) | content (string, 2..10.3M) | authors (list, 1..1) | author_id (string, 0..212) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
74b4d5ca9ebb3d0cd459690448d060ca8e81b9b5
|
22178b74fa7715f94f702b8e62bff881b72f9a79
|
/we.py
|
ec6fdc308795a9bbacc0d96a556a624ffb651030
|
[] |
no_license
|
dominion-ubah/firstgitfile
|
3fb6727756dc218fa930ead7f915cb3e65982b3d
|
8b31ff21c256f742bcb0b927fc8d4c5e4cfbb26b
|
refs/heads/master
| 2022-11-06T09:28:53.081996 | 2020-06-13T18:00:11 | 2020-06-13T18:00:11 | 272,060,553 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 767 |
py
|
# T = ['test1', 'test2a', 'test2b', 'test3a', 'test3b']
# R = ['Ok', 'No', 'No', 'Ok', 'No']
# # def solution(T, R):
# T_new= []
# T_v= []
# for i in range(len(T)):
# test = filter(lambda x: x.isdigit(), T[i])
# # if R[i].lower() == 'ok':
# if test in T_v:
# for i in range(len(T_new)):
# if T_new[i][i] == test:
# T_new.append({int(test): R[i].lower()})
# else:
# T_v.append(int(test))
# T_new.append({int(test): R[i].lower()})
# for i in range(len(T_new)):
# T_new[i][i]
# print(T_new, R[i].lower())
# print(T[i])
import numpy as np
new_array = np.array([1,2,3,4])
new_rand_array = np.random.rand(5,5)
print('hi baby')
print(new_array)
print(new_rand_array)
|
[
"domee.ubah@gmail.com"
] |
domee.ubah@gmail.com
|
272ba43a0b22e419b2d7635c267bfdbe10e79df4
|
7ad70705a5b2b853a1e51c5e7c7bc1a8a576f22b
|
/lib/app.py
|
cf89a82823ee8179986efef267058759f74c93a3
|
[] |
no_license
|
kwansupp/solid-pancovid-19
|
1e76c21a13fcb32293f04acef3f0153000c97270
|
50c3b76bab01c76cd3b639e1f4a6ae5b755db286
|
refs/heads/master
| 2021-05-17T02:40:42.408239 | 2020-04-15T15:51:59 | 2020-04-15T15:51:59 | 250,580,345 | 0 | 0 | null | 2020-03-27T16:06:56 | 2020-03-27T16:06:56 | null |
UTF-8
|
Python
| false | false | 3,536 |
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import pandas as pd
from datahandler import dummyDataHandler
import plotly.express as px
import numpy as np
from datetime import datetime
import traceback
'''
This is a prototype webapp for the CORD19 challenge on Kaggle
This involves a demo pandas dataframe, and sample visualisations
All data here is fictional!
'''
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
dh = dummyDataHandler()
def make_bubbleplot(dh):
    return px.scatter(dh.get_pivot(), x="publish_time_month", y="phase", color="tag", size='count',
                      hover_name="tag", title='Occurrence of research tag per month per phase, sized by #Occurrences')
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(children='COVID-19: Visual Research Exploration Tool'),
html.Marquee('The data in this tool is fictional!', draggable='true'),
dcc.Tabs([
dcc.Tab(label='Overview', children=[
dcc.Graph(
id='phase-plot',
figure=make_bubbleplot(dh)
)]),
dcc.Tab(label='Discover', children=[
html.Div(id='selected-element'),
dcc.Dropdown(
id=f'dropdown-tag',
options=[{'label': k, 'value':k} for k in dh.data.tag.unique() if not pd.isna(k)],
multi=True,
value=[k for k in dh.data.tag.unique()]
),
dcc.Dropdown(
id=f'dropdown-phase',
options=[{'label': k, 'value':k} for k in dh.data.phase.unique()],
multi=True,
value=[k for k in dh.data.phase.unique()]
),
dcc.DatePickerRange(
id='date-range',
min_date_allowed=min(dh.data.publish_time),
max_date_allowed=max(dh.data.publish_time),
initial_visible_month=datetime(2020, 1, 1),
start_date=datetime(2020, 1, 1),
end_date = datetime(2020, 1, 31)
),
dcc.Graph(
id='discover-plot',
figure=None,
)
]
)
]),
])
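# Callback: clicking a bar in the discover plot looks up the selected paper's
# 'sha' (carried in the figure's customdata) and displays its abstract.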
@app.callback(
Output('selected-element', 'children'),
[Input('discover-plot', 'clickData')]
)
def show_point_data(data_dict):
try:
print(data_dict)
sha = data_dict['points'][0]['customdata'][0]
abstract = dh.data.loc[sha]['abstract']
return f'{abstract}'
except Exception as e:
print(e)
traceback.print_exc()
return ''
@app.callback(
Output('discover-plot', 'figure'),
[Input('dropdown-tag', 'value'),
Input('dropdown-phase', 'value'),
Input('date-range', 'start_date'),
Input('date-range', 'end_date')
]
)
def discover_plot(tag, phase, start, end):
try:
start = datetime.strptime(start.split('T')[0], '%Y-%m-%d')
end = datetime.strptime(end.split('T')[0], '%Y-%m-%d')
df = dh.get_date_range_data(start, end)
df = df[(df.tag.isin(tag)) & (df.phase.isin(phase))]
df['count'] = 1
df= df.reset_index()
fig = px.bar(df, x='phase', y='count', color='tag', hover_data=['sha', 'publish_time_month'])
return fig
except Exception as e:
print(e)
print('failed...')
return None
if __name__ == '__main__':
app.run_server(host='0.0.0.0', port=8080, debug=True)
|
[
"levivdheijden@hotmail.nl"
] |
levivdheijden@hotmail.nl
|
739ca7dea2b81c1a4463926742729e375c715cbd
|
58fa3041a188f457332d1f83d16ceb7e492e3e17
|
/uniGrades.py
|
81000d2530e8e5fc6f542be768c6e07247c94b46
|
[] |
no_license
|
osama-aboukoura/My-Uni-Grades-Calculator
|
c2f292541fd1faca7f6d3a3e6887c930e2563e02
|
90a372c8085b3d6fa3a54f67f3e6270f8f951411
|
refs/heads/master
| 2020-03-20T21:42:47.334614 | 2018-09-24T17:40:24 | 2018-09-24T17:40:24 | 137,756,089 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,250 |
py
|
from tkinter import *
from tkinter import messagebox
from Database import *
def get_year_average(array_of_entries):
total = 0
try:
for number in array_of_entries:
if int(number.get()) < 40:
info_label_text.set("Scoring less than 40 in any of the modules "
"means you have failed that module and the degree.")
return 0
if int(number.get()) > 100:
info_label_text.set("You cannot have a module with a score greater than 100.")
return 0
total += int(number.get())
except ValueError:
info_label_text.set("ValueError: invalid input in one of the entries.")
return 0
return round(total / len(array_of_entries), 2)
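# The final grade weights the three year averages 1:3:5 (total weight 9),
# matching the explanatory note shown in the results panel below.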
def calculate_grade(first_year_average, second_year_average, third_year_average):
if first_year_average < 40 or second_year_average < 40 or third_year_average < 40:
return 0
total = first_year_average + (second_year_average * 3) + (third_year_average * 5)
return round(total / 9, 2)
def degree_classification(final_grade):
if final_grade >= 70:
return "1st"
elif final_grade >= 60:
return "2.1"
elif final_grade >= 50:
return "2.2"
elif final_grade >= 40:
return "3rd"
else:
return "Fail"
def calculate_grade_and_classification(event):
year1average = get_year_average(year1Entries)
year2average = get_year_average(year2Entries)
year3average = get_year_average(year3Entries)
grade = calculate_grade(year1average, year2average, year3average)
classification = degree_classification(grade)
if classification != "Fail":
info_label_text.set("Congratulations! You have passed with a " + classification)
year1score_label.set(year1average)
year2score_label.set(year2average)
year3score_label.set(year3average)
grade_label.set(grade)
classification_label.set(classification)
def add_year_modules(yearNumber, yearPanel, database_grades):
year_title = Label(yearPanel, text="Year " + str(yearNumber) + " Modules", font=boldFont)
year_title.grid(row=0, column=0, columnspan=2, pady=10)
year_entries = []
label = Label(yearPanel, text="Module 1 (30 credits)")
v = StringVar()
v.set(database_grades[0]) # value from database
entry = Entry(yearPanel, textvariable=v, width=6, justify='center')
year_entries.append(entry)
year_entries.append(entry)
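    # The 30-credit module's entry is appended twice on purpose so it counts
    # double in the year average (on_close skips index 0 when saving).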
label.grid(row=1, column=0, pady=7)
entry.grid(row=1, column=1, pady=7)
for index in range(2, 8):
label = Label(yearPanel, text='Module ' + str(index) + " (15 credits)")
v = StringVar()
v.set(database_grades[index-1])
entry = Entry(yearPanel, textvariable=v, width=6, justify='center')
year_entries.append(entry)
label.grid(row=index, column=0, pady=7)
entry.grid(row=index, column=1, pady=7)
return year_entries
def on_close():
answer = messagebox.askquestion("Quit", "Do you want to save grades before you quit?")
if answer == 'yes':
year1table = create_table('year1')
year2table = create_table('year2')
year3table = create_table('year3')
# grade index 0 is omitted because it was added twice to the year entry arrays (30 credits)
for index in range(1, len(year1Entries)):
insert_grades_into_table(year1table, "Module " + str(index), year1Entries[index].get())
for index in range(1, len(year2Entries)):
insert_grades_into_table(year2table, "Module " + str(index), year2Entries[index].get())
for index in range(1, len(year3Entries)):
insert_grades_into_table(year3table, "Module " + str(index), year3Entries[index].get())
root.destroy()
root = Tk()
root.title("My Uni Grades Calculator")
boldFont = ("Helvetica", 16, "bold")
# creating the 4 main containers
top_frame = Frame(root, bg='#E67E22', width=200, pady=5, padx=5)
center_frame = Frame(root, bg='#E67E22', width=200)
info_frame = Frame(root, bg='gray', width=200, height=100, pady=20, padx=10)
btm_frame = Frame(root, bg='#E67E22', width=200, pady=5)
# adding the 4 main containers to the root
top_frame.grid(row=0, sticky="ew")
center_frame.grid(row=1, sticky="nsew")
info_frame.grid(row=2, sticky="ew")
btm_frame.grid(row=3, sticky="ew")
# adding the main title to the top frame
mainTitle = Label(top_frame, text="Welcome to My Uni Grades Calculator", fg="red", font=("Helvetica", 20, "bold"))
message = Label(top_frame, text="Enter your marks in each of the following modules then " +
"click submit to find out your final grade and degree classification")
mainTitle.pack(expand=YES, fill=X)
message.pack(expand=YES, fill=X)
# creating the 3 middle vertical containers
firstYearPanel = Frame(center_frame, bg='#5499C7', padx=30, pady=10)
secondYearPanel = Frame(center_frame, bg='#F4D03F', padx=30, pady=10)
thirdYearPanel = Frame(center_frame, bg='#2ECC71', padx=30, pady=10)
# adding the 3 middle containers to the centre frame
firstYearPanel.grid(row=0, column=0, sticky="ns")
secondYearPanel.grid(row=0, column=1, sticky="nsew")
thirdYearPanel.grid(row=0, column=2, sticky="ns")
# getting grades from database. if no tables exist, 60s are the default
year1grades = get_grades_for_year('year1')
year2grades = get_grades_for_year('year2')
year3grades = get_grades_for_year('year3')
# adding all 7 modules to the year panels
year1Entries = add_year_modules(1, firstYearPanel, year1grades)
year2Entries = add_year_modules(2, secondYearPanel, year2grades)
year3Entries = add_year_modules(3, thirdYearPanel, year3grades)
year1score_label = StringVar()
Label(firstYearPanel, text="Year 1 Average:", font=boldFont).grid(row=8, column=0, pady=7, padx=7)
Label(firstYearPanel, textvariable=year1score_label, font=boldFont).grid(row=8, column=1, pady=7, padx=7)
year2score_label = StringVar()
Label(secondYearPanel, text="Year 2 Average:", font=boldFont).grid(row=8, column=0, pady=7, padx=7)
Label(secondYearPanel, textvariable=year2score_label, font=boldFont).grid(row=8, column=1, pady=7, padx=7)
year3score_label = StringVar()
Label(thirdYearPanel, text="Year 3 Average:", font=boldFont).grid(row=8, column=0, pady=7, padx=7)
Label(thirdYearPanel, textvariable=year3score_label, font=boldFont).grid(row=8, column=1, pady=7, padx=7)
# add information label to the info container
info_label_text = StringVar()
info_label_text.set('No information available.')
information = Label(info_frame, text="INFO:", font=boldFont, anchor="w")
info_label = Label(info_frame, textvariable=info_label_text, wraplength=800, anchor="w")
information.pack(expand=YES, fill=X)
info_label.pack(expand=YES, fill=X)
# creating 2 containers for the result labels and submit button
resultsPanel = Frame(btm_frame, bg='#E74C3C', height=250, padx=20, pady=10)
buttonPanel = Frame(btm_frame, bg='#E74C3C', height=250, padx=30, pady=10)
# adding the 2 containers to the bottom frame
buttonPanel.pack(side=RIGHT)
resultsPanel.pack(side=RIGHT, fill=BOTH, expand=YES)
# mock data
grade_label = StringVar()
classification_label = StringVar()
# adding labels and submit button to their containers
Label(resultsPanel, text='Final Grade', font=boldFont).grid(row=0, column=0, pady=5, padx=5, sticky="E")
Label(resultsPanel, text='Degree Classification', font=boldFont).grid(row=1, column=0, pady=5, padx=5, sticky="E")
Label(resultsPanel, textvariable=grade_label, font=boldFont).grid(row=0, column=1, pady=5, padx=5, sticky="W")
Label(resultsPanel, textvariable=classification_label, font=boldFont).grid(row=1, column=1, pady=5, padx=5, sticky="W")
Label(resultsPanel, text="Note the weight of the modules of each year is higher than\n" +
"the weight of previous years' modules and that the ratio is \n" +
"1st-year : 2nd-year : 3rd-year \n1 : 3 : 5") \
.grid(row=0, column=2, columnspan=2, rowspan=2, pady=5, padx=5, sticky="W")
submitButton = Button(buttonPanel, text='Submit')
submitButton.grid(row=0, column=0)
submitButton.bind("<Button-1>", calculate_grade_and_classification)
root.protocol("WM_DELETE_WINDOW", on_close)
root.mainloop()
|
[
"osama.aboukoura@kcl.ac.uk"
] |
osama.aboukoura@kcl.ac.uk
|
424765d18851d58e16c21419b2da37af0fadb2c1
|
18d7dc6d87ec867d22d1f2d78e45b947ba39ae75
|
/backend/cwc/migrations/0009_auto_20170122_1230.py
|
ec6bb11a494b8215c3e25befc2e9d4e4d754e3c1
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
gmacciocca/coffee-with-congress
|
ac712f954c41d99d1b409670434f650694b1cb30
|
dd6554615d51b9972c24a75c0c3f0adcc04b9949
|
refs/heads/master
| 2021-03-27T17:03:39.947221 | 2017-12-17T01:58:46 | 2017-12-17T01:58:46 | 73,728,044 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,772 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-22 12:30
from __future__ import unicode_literals
from django.db import migrations, models
template_states = {}
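# pull_template_states collapses duplicate one-state-per-row Template records
# into a single row per (level, issue), remembering their state ids in the
# module-level dict above; push_template_states then recreates those links
# through the new ManyToMany 'states' field.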
def pull_template_states(apps, schema_editor):
# We can't import the Template model directly as it may be a newer
# version than this migration expects. We use the historical version.
Template = apps.get_model("cwc", "Template")
for template in Template.objects.all():
ts = template_states.get(template.level, None)
if ts is None:
ts = template_states[template.level] = {}
ti = ts.get(template.issue_id, None)
delete_template = True
if ti is None:
ti = ts[template.issue_id] = []
delete_template = False
ti.append(template.state_id)
if delete_template:
template.delete()
def push_template_states(apps, schema_editor):
Template = apps.get_model("cwc", "Template")
for template in Template.objects.all():
states = set(template_states[template.level][template.issue_id])
for state_id in states:
link = template.states.through()
link.state_id = state_id
link.template_id = template.id
link.save()
class Migration(migrations.Migration):
dependencies = [
('cwc', '0008_auto_20170102_0807'),
]
operations = [
migrations.RunPython(pull_template_states),
migrations.RemoveField(
model_name='template',
name='state',
),
migrations.AddField(
model_name='template',
name='states',
field=models.ManyToManyField(to='cwc.State'),
),
migrations.RunPython(push_template_states)
]
|
[
"senad.uka@gmail.com"
] |
senad.uka@gmail.com
|
e055ffb448a0c217d7a2d9c1dee3cbb124ee1579
|
2f2113154f78443433f5ac5429ef70e3de33339b
|
/dataset/dataset.py
|
edd6fd471ed52841d525b43a6177ffd13575ebc2
|
[] |
no_license
|
jjRen-xd/Remote-Sensing-Image-Classification
|
fe4317e7b5cb7a0397463b11b6f74451cda53f2e
|
a6269cfc799747031f0e52a4277bc05dbf6fec90
|
refs/heads/master
| 2022-05-07T23:21:04.768404 | 2019-04-09T00:50:03 | 2019-04-09T00:50:03 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,682 |
py
|
# -*- coding:utf-8 -*-
import sys
sys.path.append('/home/gfx/Projects/remote_sensing_image_classification')
import os
import cv2
import pandas as pd
import numpy as np
from PIL import Image, ImageFilter
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
from config import config
def read_txt(path):
ims, labels = [], []
with open(path, 'r') as f:
for line in f.readlines():
im, label = line.strip().split(' ')
ims.append(im)
labels.append(int(label))
return ims, labels
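# Each line of the index txt file is "<relative_image_path> <integer_label>".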
class RSDataset(Dataset):
def __init__(self, txt_path, width=256, height=256, transform=None, test=False):
self.ims, self.labels = read_txt(txt_path)
self.width = width
self.height = height
self.transform = transform
self.test = test
def __getitem__(self, index):
im_path = self.ims[index]
label = self.labels[index]
im_path = os.path.join(config.data_root, im_path)
im = Image.open(im_path)
#im = im.resize((self.width, self.height))
if self.transform is not None:
im = self.transform(im)
return im, label
def __len__(self):
return len(self.ims)
if __name__ == '__main__':
transform = transforms.Compose([transforms.ToTensor()])
dst_train = RSDataset('./data/train.txt', width=256, height=256, transform=transform)
dataloader_train = DataLoader(dst_train, shuffle=True, batch_size=1, num_workers=0)
#for im, loc, cls in dataloader_train:
for data in dataloader_train:
        print(data)
        #print(loc, cls)
|
[
"fuxungao@163.com"
] |
fuxungao@163.com
|
dd63d8ff2276e64b1806676cac723baf74f0ecb7
|
306afd5282d9c24d58297478a1728a006c29e57e
|
/lintcode/lintcode_0547_Intersection_of_Two_Arrays.py
|
ddcecd3100ee335a8a14d3b209fb6a19895c1786
|
[] |
no_license
|
ytatus94/Leetcode
|
d2c1fe3995c7a065139f772569485dc6184295a9
|
01ee75be4ec9bbb080f170cb747f3fc443eb4d55
|
refs/heads/master
| 2023-06-08T17:32:34.439601 | 2023-05-29T04:33:19 | 2023-05-29T04:33:19 | 171,921,974 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 600 |
py
|
from typing import (
List,
)
class Solution:
"""
@param nums1: an integer array
@param nums2: an integer array
@return: an integer array
we will sort your return value in output
"""
def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:
# write your code here
if not nums1 or not nums2:
return []
hash_set = set()
for i in nums1:
hash_set.add(i)
result = set()
for i in nums2:
if i in hash_set:
result.add(i)
return list(result)
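# Example: Solution().intersection([1, 2, 2, 1], [2, 2]) == [2]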
|
[
"noreply@github.com"
] |
ytatus94.noreply@github.com
|
16a5eb29d0899f0a366ee71b2a0b3b5f51f1a41d
|
029c35c8057fb3d836c929a9d13c8ed68e25a076
|
/app/main/forms.py
|
55bea2a21b526c7f9287d077f53b9ed1f320f7c0
|
[
"BSD-3-Clause"
] |
permissive
|
alexstennet/presidents
|
5df663a177e9598ab80d7fc41f79f334c84a73d0
|
224265a8db9cd7a51f8ac40b8519dfa30ffbab61
|
refs/heads/master
| 2020-03-20T17:14:02.190101 | 2018-06-04T02:24:41 | 2018-06-04T02:24:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 354 |
py
|
from flask_wtf import FlaskForm
from wtforms.fields import StringField, SubmitField
from wtforms.validators import Required
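# NB: wtforms' Required validator is a deprecated alias of DataRequired and
# was removed in WTForms 3, so this code assumes an older WTForms release.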
class LoginForm(FlaskForm):
"""Accepts a nickname and a room."""
name = StringField('Name', validators=[Required()])
room = StringField('Room', validators=[Required()])
submit = SubmitField('Enter Game')
|
[
"senavi@berkeley.edu"
] |
senavi@berkeley.edu
|
e12977316ee1cd441006009c3cc4ba67dd14fc76
|
d0a3a04657af45bfa159819e593445e7a82a22e2
|
/frames_to_video.py
|
9d171f1dae7d493ec3cdb1d3c7f9aaffbd9a8349
|
[] |
no_license
|
grant81/ParticleFilterMultipleObjectTracking
|
cc9b5a2df7a50150635d07a5ca3cb42e697b23b5
|
e7830892d7d078b98ff7a83555863ebfab5a6ccb
|
refs/heads/master
| 2020-05-03T19:46:31.549365 | 2019-04-17T18:07:29 | 2019-04-17T18:07:29 | 178,790,132 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 530 |
py
|
import cv2
import hyperparameters
image_folder = hyperparameters.bounded_image_out
video_name = 'traking_result_100_particles_high_performance_tracker.avi'
images = [image_folder+ 'frame_{0:04}.jpg'.format(i) for i in range(hyperparameters.number_of_frames) ]
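# Frame files are expected to be named frame_0000.jpg, frame_0001.jpg, ...
# inside hyperparameters.bounded_image_out, as the pattern above assumes.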
frame = cv2.imread(images[0])
height, width, layers = frame.shape
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
video = cv2.VideoWriter(video_name, fourcc, 15, (width,height))
for image in images:
video.write(cv2.imread(image))
cv2.destroyAllWindows()
video.release()
|
[
"grantzhao75@gmail.com"
] |
grantzhao75@gmail.com
|
0da00b03f57e23e33671671f5fe8c1b3f063c51d
|
7a914f1d1e7a4b4d9757145cac8376bf1f7099d4
|
/jsk_recognition/jsk_perception/node_scripts/fisheye_ray.py
|
e54f0bc80e9af02b5d47f0dcf8377129aa1caeba
|
[
"Apache-2.0"
] |
permissive
|
usdot-fhwa-stol/autoware.ai
|
eed15471180f92900413b1a20213263b40acdb3f
|
35c2be547269f2843de2ad907cec85abcf598577
|
refs/heads/carma-develop
| 2023-08-14T07:59:11.546253 | 2023-08-01T20:51:01 | 2023-08-01T20:51:01 | 186,024,768 | 2 | 12 |
Apache-2.0
| 2023-08-01T20:51:02 | 2019-05-10T17:03:33 |
C++
|
UTF-8
|
Python
| false | false | 2,670 |
py
|
#!/usr/bin/env python
import cv2
import rospy
import cv_bridge
from sensor_msgs.msg import Image, CameraInfo
from geometry_msgs.msg import Vector3, PoseStamped, PointStamped
import math
import tf
import numpy as np
def camera_info_cb(msg):
    global latest_camera_info
    latest_camera_info = msg
def image_cb(msg):
    global latest_image
    latest_image = msg
def cloud_cb(msg):
global latest_camera_info, latest_image, frame_id
# if latest_camera_info:
if latest_image is not None:
# r = math.sqrt((msg.point.x - latest_camera_info.width/2)*(msg.point.x - latest_camera_info.width/2)+ (msg.point.y - latest_camera_info.height/2.0) * (msg.point.y - latest_camera_info.height/2.0))
r = math.sqrt((msg.point.x - latest_image.width/2)*(msg.point.x - latest_image.width/2)+ (msg.point.y - latest_image.height/2.0) * (msg.point.y - latest_image.height/2.0))
phi=r/341.0
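        # Equidistant fisheye model: r = f * phi, so phi = r / f above, with a
        # hard-coded focal length of roughly 341 pixels (assumed calibration).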
# x = -(msg.point.y - latest_camera_info.height/2.0) / r * math.sin(phi)
# y = (msg.point.x - latest_camera_info.width/2.0) / r * math.sin(phi)
x = -(msg.point.y - latest_image.height/2.0) / r * math.sin(phi)
y = (msg.point.x - latest_image.width/2.0) / r * math.sin(phi)
z = 1.0 * math.cos(phi)
pose = PoseStamped()
pose.pose.position.x = 0
pose.pose.position.y = 0
pose.pose.position.z = 0
first_vec = [x, -y,-z]
second_vec = [1,0,0]
dot = np.dot(first_vec, second_vec)/(tf.transformations.vector_norm(first_vec)*tf.transformations.vector_norm(second_vec))
c = np.arccos(dot)
M = tf.transformations.rotation_matrix(c,np.cross(first_vec, second_vec))
quat = tf.transformations.quaternion_from_matrix(M)
pose.pose.orientation.x = quat[0]
pose.pose.orientation.y = quat[1]
pose.pose.orientation.z = quat[2]
pose.pose.orientation.w = quat[3]
pose.header.frame_id=frame_id
pub.publish(pose)
point = PointStamped()
point.header.stamp = rospy.Time.now()
point.header.frame_id= frame_id
point.point.x = x
point.point.y = y
point.point.z = z
pub_p.publish(point)
if __name__ == "__main__":
rospy.init_node("click_to_pose")
latest_image = None
pub = rospy.Publisher("~output", PoseStamped, queue_size=1)
pub_p = rospy.Publisher("~output_point", PointStamped, queue_size=1)
frame_id = rospy.get_param("~frame_id", "fisheye")
rospy.Subscriber("clicked_point", PointStamped, cloud_cb)
rospy.Subscriber("camera_info", CameraInfo, camera_info_cb)
rospy.Subscriber("image", Image, image_cb)
rospy.spin()
|
[
"Michael.McConnell-2@leidos.com"
] |
Michael.McConnell-2@leidos.com
|
35e895394f6115b675e968c03a9dbcbbecb43f50
|
14c0b8a6fb44fcd6a2bedf0395c436ab3f8657e2
|
/heedpoint/heedpoint/settings.py
|
0e56641638ff8c79c278c7fb259545028b262478
|
[] |
no_license
|
UncleKhab/heedpointone
|
ebe3743c98bc59dd53005b7637d91f7cdcc39b3f
|
9c659ee545455cce3cbb44ee12e999dbf999f6d3
|
refs/heads/main
| 2023-02-17T13:17:52.055169 | 2021-01-12T20:59:27 | 2021-01-12T20:59:27 | 327,727,047 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,185 |
py
|
"""
Django settings for heedpoint project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*)nf@%ouhg-z4&)2a(kiop7tfdu(n(ocd!!u%oyr%van39=7ok'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# REST FRAMEWORK
'rest_framework',
# MY APPS
'api',
'frontend',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'heedpoint.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'heedpoint.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_USER_MODEL = 'api.User'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"cobrosoffice@gmail.com"
] |
cobrosoffice@gmail.com
|
4b8ca9506c9d6487556e760845de5e745d2d87de
|
5b1dd935ccebea61fcb5ee2c28a59c2943784123
|
/layout/layout_body.py
|
bb1e6f9600e95888789071f94fd5aafe9281e8ee
|
[] |
no_license
|
pmecchia/covid19-dashboard-france
|
0715b1674e57ec86d5d6c3c723984dbd51bde6a7
|
15d3a8ade688f09ae2b0c5c8ba9f57044ecda092
|
refs/heads/master
| 2023-01-20T02:56:32.435024 | 2020-11-17T11:17:19 | 2020-11-17T11:17:19 | 309,733,455 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,564 |
py
|
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from static import DEP_LABELS,TIMESTAMP_LABELS
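# Layout overview: a top bar (department dropdown + daily figures), a middle
# row (results table + France map) and a bottom row with the confirmed and
# deaths charts, assembled in build_layout below.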
# Confirmed/deaths results table
departments_tabs = dbc.Card(
[
dbc.CardBody(
html.Div(
id="departments-table",className="results-table",
),
),
],
className="results-table-div",
)
#Map
france_map=dbc.Card(
dbc.CardBody(
[
html.Div(
dcc.Loading(
dcc.Graph(
id="france-map",
style={"height": "50vh",
},
),
id="map-container",
),
),
]
),
className="map-card",
),
className="france-map-container",
#Chart Confirmed Cases
confirmed_chart=dbc.Card(
dbc.CardBody(
[
html.Div(
id="confirmed-chart-title",
className="chart-h1-title",
),
html.Div(
dcc.Dropdown(
id="confirmed-lastdays-dropdown",
options=TIMESTAMP_LABELS,
value="30",
clearable=False,
searchable=False,
className="lastdays-dropdown",
),
),
html.Div(
dcc.Loading(
dcc.Graph(
id="confirmed-timeline",
# figure=cases_chart(),
config={"responsive": False},
style={"height": "40vh"},
                    className='left-chart-figure',
),
),
id="confirmed-chart-container",
),
]
)
)
#Deaths
deaths_chart=dbc.Card(
dbc.CardBody(
[
html.Div(
id="deaths-chart-title",
className="chart-h1-title",
),
html.Div(
dcc.Dropdown(
id="deaths-lastdays-dropdown",
options=TIMESTAMP_LABELS,
value="30",
clearable=False,
searchable=False,
className="lastdays-dropdown",
),
className="lastdays-dropdown-container"
),
html.Div(
dcc.Loading(
dcc.Graph(
id="deaths-timeline",
# figure=cases_chart(),
config={"responsive": False},
style={"height": "40vh"},
                    className='right-chart-figure',
),
style={"padding-top": "8px",},
color="#32a852",
),
id="deaths-chart-container",
),
]
)
)
build_layout = dbc.Container(fluid=True, children=[
dbc.Row( #TOP BAR Dayly results
[
dbc.Col(
dcc.Dropdown(
id="departments-dropdown",
options=DEP_LABELS,
value="France",
clearable=False,
searchable=False,
className="departments-dropdown",
),
className="departments-dropdown-container",
width=4,
),
dbc.Col(
dbc.Row(id="daily-results", className="top-bar-content"),
width=8,
className="top-bar-content-col",
),
],
className="row-top",
),
dbc.Row(#TABLE AND MAP
[
dbc.Col(
html.Div(
dbc.Row(
[
#TABLE
dbc.Col(
departments_tabs,
className="left-column-table-content",
width=4,
),
#MAP
dbc.Col(
france_map,
className="middle-column-map-content",
width=8,
),
],
),
className="middle-content",
),
className="middle-row",
),
],
),
dbc.Row(#CHARTS
[
dbc.Col(
html.Div(
dbc.Row(
[
#Confirmed Chart
dbc.Col(
confirmed_chart,
className="top-bottom-left-chart",
width=6,
),
#Deaths Chart
dbc.Col(
deaths_chart,
className="top-bottom-right-chart",
width=6,
),
],
#no_gutters=True,
#className="test"
),
className="bottom-charts"
),
className="bottom-row"
),
],
),
],
)
|
[
"mecchia.pierre@gmail.com"
] |
mecchia.pierre@gmail.com
|
293b6a144a299f957a5b76a6c6d6d8346289148a
|
77b3ccdd47989c7090a16b971fde977ebc698661
|
/proxyAndUrl.py
|
4df1aa2dccd7b8a1e69ac4e56132a9844b416fcb
|
[] |
no_license
|
ben-jones/cookie-injection
|
2f50ae37daaacbf337596d5cce0b3f950bf95659
|
248fc7c5c76b45128898439dedb9ba5b6723b703
|
refs/heads/master
| 2016-09-08T01:14:39.265885 | 2013-11-19T03:25:18 | 2013-11-19T03:25:18 | 14,512,954 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,339 |
py
|
#proxyAndUrl.py
# Ben Jones
# ECE 649 Fall 2012
# proxyAndUrl.py: sets up a proxy server and parses out the tracking cookies,
# inserts a new ones, then send the requests to the proxy server. After the
# requests have been returned, tracking cookies are inserted again, then
# the page is returned to the user
#get the
from twisted.python import log
from twisted.web import proxy, http
from random import randint
import re
class Data():
def __init__(self):
self.trackerAdded = False
data = Data()
def set_data(new_data):
global data
data = new_data
return new_data
def get_data():
return data
class MyProxyClient(proxy.ProxyClient):
def __init__(self,*args,**kwargs):
self.buffer = ""
proxy.ProxyClient.__init__(self,*args,**kwargs)
def handleResponsePart(self, buffer):
# Here you will get the data retrieved from the web server
# In this example, we will buffer the page while we shuffle it.
        self.buffer = self.buffer + buffer
# log.msg(get_data().trackerAdded)
# log.msg(self.buffer)
# if(get_data().trackerAdded == False):
# self.buffer = self.buffer + "Set-Cookie: tracker=" + str(get_data().trackID) + "\n"
# get_data().trackerAdded = True
# log.msg("Added tracker: %s" % get_data().trackID)
def handleResponseEnd(self):
if not self._finished:
# We might have increased or decreased the page size. Since we have not written
# to the client yet, we can still modify the headers.
lines = self.buffer.split("\n")
if(len(lines) > 1):
newBuff = lines[0:2]
if(re.search("Set\-Cookie", lines[2]) == None):
trackerStr = "Set-Cookie: tracker=" + str(get_data().trackID)
newBuff.append(trackerStr)
newBuff.append(lines[2])
newBuff[4:] = lines[3:]
else:
newline = lines[2] + "; tracker=" + str(get_data().trackID)
newBuff.append(newline)
                    newBuff[3:] = lines[3:]
self.buffer = "\n".join(newBuff)
log.msg("Response End: %s" % self.buffer)
            self.father.responseHeaders.setRawHeaders("content-length", [str(len(self.buffer))])
self.father.write(self.buffer)
proxy.ProxyClient.handleResponseEnd(self)
class MyProxyClientFactory(proxy.ProxyClientFactory):
protocol = MyProxyClient
class ProxyRequest(proxy.ProxyRequest):
protocols = {'http':MyProxyClientFactory}
ports = {'http':80 }
def process(self):
tracker = self.getCookie('tracker')
log.msg('Request')
data = get_data()
#add the tracker onto the list of equivalent cookies
if(tracker != str(data.trackID)):
trackList = data.trackers[data.trackID]
if(not(tracker in trackList)):
data.trackers[data.trackID].append(tracker)
proxy.ProxyRequest.process(self)
class MyProxy(http.HTTPChannel):
requestFactory = ProxyRequest
class ProxyFactory(http.HTTPFactory):
protocol = MyProxy
portstr = "tcp:8080:interface=localhost" # serve on localhost:8080
if __name__ == '__main__':
data = get_data()
data.trackID = randint(1, 400000000)
data.trackerAdded = False
data.trackers = {}
data.trackers[data.trackID] = []
import sys
from twisted.internet import endpoints, reactor
def shutdown(reason, reactor, stopping=[]):
"""Stop the reactor."""
if stopping: return
stopping.append(True)
if reason:
log.msg(reason.value)
reactor.callWhenRunning(reactor.stop)
log.startLogging(sys.stdout)
endpoint = endpoints.serverFromString(reactor, portstr)
d = endpoint.listen(ProxyFactory())
d.data = data
d.addErrback(shutdown, reactor)
reactor.run()
else: # $ twistd -ny proxy_modify_request.py
data = get_data()
data.trackID = randint(1, 400000000)
data.trackerAdded = False
data.trackers = {}
data.trackers[data.trackID] = []
from twisted.application import service, strports
application = service.Application("proxy_modify_request")
strports.service(portstr, ProxyFactory()).setServiceParent(application)
|
[
"benjonesece@gmail.com"
] |
benjonesece@gmail.com
|
042d0eb712afa10e91d7c10c75dd79dcb1625148
|
67e316dd2de39dfc3df6b5c106f4a3ea956337e5
|
/utils/priority_queue_test.py
|
e8d57651a05d676a79869c2b545fc0de0fdc5e00
|
[] |
no_license
|
pasmargo/t2t-qa
|
cd1f0e0703819bbf38a88c9c440eaecd066f112d
|
3e75bca99aa6cafffb0dafaaca6592aefd4aafdb
|
refs/heads/master
| 2020-04-15T14:42:52.385470 | 2016-11-04T23:37:49 | 2016-11-04T23:37:49 | 68,193,360 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,871 |
py
|
import unittest
# from pudb import set_trace; set_trace()
from extraction.extractor_beam import Transformation
from linguistics.similarity import Similarity
from utils.priority_queue import PriorityQueue
class PriorityQueueTestCase(unittest.TestCase):
def setUp(self):
self.similarity = Similarity(1.0, 'dummy', None, None)
t0_src_path = (0,)
t0_trg_path = (10,)
t0_src_subpaths = ((0, 0), (0, 1))
t0_trg_subpaths = ((10, 0), (10, 1))
self.t0 = Transformation(t0_src_path, t0_trg_path,
t0_src_subpaths, t0_trg_subpaths, self.similarity)
t1_src_path = (1,)
t1_trg_path = (11,)
t1_src_subpaths = ((1, 0), (1, 1))
t1_trg_subpaths = ((11, 0), (11, 1))
self.t1 = Transformation(t1_src_path, t1_trg_path,
t1_src_subpaths, t1_trg_subpaths, self.similarity)
t2_src_path = (2,)
t2_trg_path = (12,)
t2_src_subpaths = ((2, 0), (2, 1))
t2_trg_subpaths = ((12, 0), (12, 1))
self.t2 = Transformation(t2_src_path, t2_trg_path,
t2_src_subpaths, t2_trg_subpaths, self.similarity)
t0bis_src_path = (0,)
t0bis_trg_path = (10,)
t0bis_src_subpaths = ((3, 0), (3, 1))
t0bis_trg_subpaths = ((13, 0), (13, 1))
self.t0bis = Transformation(t0bis_src_path, t0bis_trg_path,
t0bis_src_subpaths, t0bis_trg_subpaths, self.similarity)
self.q_costs = PriorityQueue(2)
self.q_probs = PriorityQueue(2, reverse=True)
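        # q_costs keeps the two lowest scores; q_probs (reverse=True) keeps
        # the two highest, as the overflow tests below exercise.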
def test_RegularInsertions(self):
self.q_costs.Push(0.3, self.t0)
self.q_costs.Push(0.2, self.t1)
self.assertEqual(2, len(self.q_costs.queue))
self.assertIn(self.t0, self.q_costs.GetItems())
self.assertIn(self.t1, self.q_costs.GetItems())
self.q_probs.Push(0.3, self.t0)
self.q_probs.Push(0.2, self.t1)
self.assertEqual(2, len(self.q_probs.queue))
self.assertIn(self.t0, self.q_probs.GetItems())
self.assertIn(self.t1, self.q_probs.GetItems())
def test_InsertionsOverflow(self):
self.q_costs.Push(0.2, self.t0)
self.q_costs.Push(0.3, self.t1)
self.q_costs.Push(0.1, self.t2)
self.assertEqual(2, len(self.q_costs.queue))
self.assertIn(self.t0, self.q_costs.GetItems())
self.assertIn(self.t2, self.q_costs.GetItems())
self.assertNotIn(self.t1, self.q_costs.GetItems())
self.q_probs.Push(0.2, self.t0)
self.q_probs.Push(0.3, self.t1)
self.q_probs.Push(0.1, self.t2)
self.assertEqual(2, len(self.q_probs.queue))
self.assertIn(self.t0, self.q_probs.GetItems())
self.assertIn(self.t1, self.q_probs.GetItems())
self.assertNotIn(self.t2, self.q_probs.GetItems())
def test_InsertionsSameElementOverflow(self):
self.q_costs.Push(0.1, self.t0)
self.q_costs.Push(0.2, self.t1)
self.q_costs.Push(0.1, self.t0)
self.assertEqual(2, len(self.q_costs.queue))
self.assertIn(self.t0, self.q_costs.GetItems())
self.assertIn(self.t1, self.q_costs.GetItems())
self.q_probs.Push(0.1, self.t0)
self.q_probs.Push(0.2, self.t1)
self.q_probs.Push(0.1, self.t0)
self.assertEqual(2, len(self.q_probs.queue))
self.assertIn(self.t0, self.q_probs.GetItems())
self.assertIn(self.t1, self.q_probs.GetItems())
self.q_probs.Push(0.2, self.t0)
self.q_probs.Push(0.1, self.t1)
self.q_probs.Push(0.2, self.t0)
self.assertEqual(2, len(self.q_probs.queue))
self.assertIn(self.t0, self.q_probs.GetItems())
self.assertIn(self.t1, self.q_probs.GetItems())
def test_InsertionsCostReplacement(self):
self.q_costs.Push(0.3, self.t0)
self.q_costs.Push(0.2, self.t1)
self.q_costs.Push(0.1, self.t0)
self.assertEqual(2, len(self.q_costs.queue))
self.assertIn(self.t1, self.q_costs.GetItems())
self.assertIn(self.t0, self.q_costs.GetItems())
self.assertEqual(0.1, self.q_costs.GetBestScore())
self.assertEqual(self.t0, self.q_costs.GetBestScoreItem())
self.q_probs.Push(0.1, self.t0)
self.q_probs.Push(0.2, self.t1)
self.q_probs.Push(0.3, self.t0)
self.assertEqual(2, len(self.q_probs.queue))
self.assertIn(self.t0, self.q_probs.GetItems())
self.assertIn(self.t1, self.q_probs.GetItems())
self.assertEqual(0.3, self.q_probs.GetBestScore())
self.assertEqual(self.t0, self.q_probs.GetBestScoreItem())
"""
def test_InsertionsCostReplacementSimilarItem(self):
self.q_costs.Push(0.3, self.t0)
self.q_costs.Push(0.2, self.t1)
self.q_costs.Push(0.1, self.t0bis)
self.assertEqual(2, len(self.q_costs.queue))
self.assertIn(self.t1, self.q_costs.GetItems())
self.assertNotEqual(self.t0.src_subpaths,
self.q_costs.GetBestScoreItem().src_subpaths)
self.assertEqual(self.t0bis.src_subpaths,
self.q_costs.GetBestScoreItem().src_subpaths)
self.q_probs.Push(0.3, self.t0)
self.q_probs.Push(0.2, self.t1)
self.q_probs.Push(0.4, self.t0bis)
self.assertEqual(2, len(self.q_probs.queue))
self.assertIn(self.t1, self.q_probs.GetItems())
self.assertNotEqual(self.t0.src_subpaths,
self.q_probs.GetBestScoreItem().src_subpaths)
self.assertEqual(self.t0bis.src_subpaths,
self.q_probs.GetBestScoreItem().src_subpaths)
"""
def test_InsertionsCostNotReplacementSimilarItem(self):
self.q_costs.Push(0.1, self.t0)
self.q_costs.Push(0.2, self.t1)
self.q_costs.Push(0.4, self.t0bis)
self.assertEqual(2, len(self.q_costs.queue))
self.assertIn(self.t1, self.q_costs.GetItems())
self.assertEqual(self.t0.src_subpaths,
self.q_costs.GetBestScoreItem().src_subpaths)
self.assertNotEqual(self.t0bis.src_subpaths,
self.q_costs.GetBestScoreItem().src_subpaths)
self.q_probs.Push(0.2, self.t0)
self.q_probs.Push(0.1, self.t1)
self.q_probs.Push(0.0, self.t0bis)
self.assertEqual(2, len(self.q_probs.queue))
self.assertIn(self.t1, self.q_probs.GetItems())
self.assertEqual(self.t0.src_subpaths,
self.q_probs.GetBestScoreItem().src_subpaths)
self.assertNotEqual(self.t0bis.src_subpaths,
self.q_probs.GetBestScoreItem().src_subpaths)
def test_FilterPreservesHeapStructure(self):
big_q = PriorityQueue(3)
big_q.min_threshold = 5
big_q.Push(5.0, self.t1)
big_q.Push(0.1, self.t0)
big_q.Push(0.2, self.t2)
self.assertEqual(3, len(big_q.queue))
big_q.FilterCache()
self.assertEqual(2, len(big_q.queue))
self.assertIn(self.t0, big_q.GetItems())
self.assertIn(self.t2, big_q.GetItems())
self.assertEqual(max(big_q.queue, key=lambda x: abs(x[0])), big_q.queue[0])
if __name__ == '__main__':
suite1 = unittest.TestLoader().loadTestsFromTestCase(PriorityQueueTestCase)
suites = unittest.TestSuite([suite1])
unittest.TextTestRunner(verbosity=2).run(suites)
|
[
"pasmargo@users.noreply.github.com"
] |
pasmargo@users.noreply.github.com
|
f24f3df3ccad16e8d375931cc68915706b7789d7
|
4e264176ebbd27996275d80b9a07e0957960546d
|
/apps/zones/migrations/0013_alter_profile_role.py
|
01ccbc927122427a6cb206e232fb9b798c83d165
|
[
"MIT"
] |
permissive
|
Naz758/GITS-zones
|
53458899ae2f8313363bd1d85000aeec5ddfc2fa
|
d4bf07ba72464c79c0130814ac764c5af50b7739
|
refs/heads/main
| 2023-03-31T01:57:44.011669 | 2021-04-06T23:45:57 | 2021-04-06T23:45:57 | 355,206,125 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 472 |
py
|
# Generated by Django 3.2 on 2021-04-06 22:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('zones', '0012_alter_profile_ext'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='role',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='zones.role'),
),
]
|
[
"swelanauguste@gmail.com"
] |
swelanauguste@gmail.com
|
1a770f79fd81c17269f4ed63636862fb554d30ca
|
0f205fa73d927a15e27f065c6a198935f90d3ada
|
/src/pycones/proposals/migrations/0001_initial.py
|
50994be6deb17fc6ba0539338afcce03c1f8e433
|
[] |
no_license
|
python-spain/web-pycones
|
b27bfb630cb6eafb8e1a5aadfa7b35368f81325a
|
942516169738689f542b0856842372088f34fc2f
|
refs/heads/2020
| 2023-03-30T06:01:30.809205 | 2020-03-23T22:08:27 | 2020-03-23T22:08:27 | 80,434,627 | 3 | 4 | null | 2021-04-08T20:55:32 | 2017-01-30T15:36:49 |
CSS
|
UTF-8
|
Python
| false | false | 4,690 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-31 12:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import markupfield.fields
import model_utils.fields
import taggit_autosuggest.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('speakers', '0001_initial'),
('taggit', '0002_auto_20150616_2121'),
]
operations = [
migrations.CreateModel(
name='Proposal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('audience_level', models.CharField(choices=[('basic', 'Básico'), ('intermediate', 'Intermedio'), ('advanced', 'Avanzado')], default='basic', max_length=32, null=True, verbose_name='Nivel de la audiencia')),
('language', models.CharField(choices=[('es', 'Español'), ('en', 'Inglés')], default='es', max_length=2, verbose_name='Idioma')),
('duration', models.PositiveIntegerField(blank=True, choices=[(15, '15 minutos'), (30, '30 minutos')], default=30, null=True, verbose_name='Duración')),
('title', models.CharField(max_length=100, verbose_name='Título')),
('description', models.TextField(help_text='Si tu propuesta se acepta esto se hará público, y se incluirá en el programa. Debería ser un párrafo, con un máximo de 500 caracteres.', max_length=500, verbose_name='Breve descripción')),
('abstract', markupfield.fields.MarkupField(blank=True, default='', help_text="Resumen detallado. Se hará pública si la propuesta se acepta. Edita usando <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", rendered_field=True, verbose_name='Resumen detallado')),
('abstract_markup_type', models.CharField(choices=[('', '--'), ('markdown', 'markdown')], default='markdown', max_length=30)),
('additional_notes', markupfield.fields.MarkupField(blank=True, default='', help_text="Cualquier cosa que te gustaría hacer saber a los revisores para que la tengan en cuenta al ahora de hacer la selección. Esto no se hará público. Edita usando <a href='http://daringfireball.net/projects/markdown/basics' target='_blank'>Markdown</a>.", rendered_field=True, verbose_name='Notas adicionales')),
('_abstract_rendered', models.TextField(editable=False)),
('additional_notes_markup_type', models.CharField(choices=[('', '--'), ('markdown', 'markdown')], default='markdown', max_length=30)),
('cancelled', models.BooleanField(default=False)),
('_additional_notes_rendered', models.TextField(editable=False)),
('notified', models.BooleanField(default=False)),
('accepted', models.NullBooleanField(default=None, verbose_name='Aceptada')),
('accepted_notified', models.BooleanField(default=False, verbose_name='Notificación de aceptación enviada')),
('code', models.CharField(blank=True, max_length=64, null=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProposalKind',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100, verbose_name='Name')),
('slug', models.SlugField()),
],
),
migrations.AddField(
model_name='proposal',
name='kind',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='proposals.ProposalKind', verbose_name='Tipo de propuesta'),
),
migrations.AddField(
model_name='proposal',
name='speakers',
field=models.ManyToManyField(related_name='proposals', to='speakers.Speaker'),
),
migrations.AddField(
model_name='proposal',
name='tags',
field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='Lista de etiquetas separadas por comas.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Etiquetas'),
),
]
|
[
"marcosgabarda@gmail.com"
] |
marcosgabarda@gmail.com
|
da1b0bc7a7be8eda1f40832ad6927fe1e2952394
|
b566710c45a83b9ec86b8617b8816b16871bc3e7
|
/convertAllCSVsToGs.py
|
1dcd1cc32b39eca3058f2b64ef73398b435ba434
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
okfde/2030-watch-dataprocessing
|
76221d849220a210b3219f588ee6c22188fb4a4a
|
40b1950e324fa23850515e2bec33eee90e46a201
|
refs/heads/master
| 2021-07-09T17:06:26.369133 | 2020-11-03T20:25:33 | 2020-11-03T20:25:33 | 64,477,170 | 1 | 2 |
NOASSERTION
| 2021-04-30T20:36:55 | 2016-07-29T11:57:39 |
Python
|
UTF-8
|
Python
| false | false | 3,754 |
py
|
import httplib2
import os
import unicodecsv as csv
import json
import StringIO
from apiclient import discovery, errors
import oauth2client
from oauth2client import client
from oauth2client import tools
from time import sleep
try:
import argparse
flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()
except ImportError:
flags = None
# If modifying these scopes, delete your previously saved credentials
# at ~/.credentials/drive-python-quickstart.json
SCOPES = 'https://www.googleapis.com/auth/drive'
CLIENT_SECRET_FILE = 'client_secret.json'
APPLICATION_NAME = 'Drive API Python Quickstart'
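# NB: this script targets Python 2 (print statements, StringIO, comma-style
# except clauses) and the legacy Google Drive API v2 client.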
def getValueGeneric(stringvalue):
try:
val = int(stringvalue)
return val
except ValueError:
try:
val = float(stringvalue)
return val
except ValueError:
val = stringvalue
return val
def get_credentials():
"""Gets valid user credentials from storage.
If nothing has been stored, or if the stored credentials are invalid,
the OAuth2 flow is completed to obtain the new credentials.
Returns:
Credentials, the obtained credential.
"""
home_dir = os.path.expanduser('~')
credential_dir = os.path.join(home_dir, '.credentials')
if not os.path.exists(credential_dir):
os.makedirs(credential_dir)
credential_path = os.path.join(credential_dir,
'drive-python-quickstart.json')
store = oauth2client.file.Storage(credential_path)
credentials = store.get()
if not credentials or credentials.invalid:
flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
flow.user_agent = APPLICATION_NAME
if flags:
credentials = tools.run_flow(flow, store, flags)
else: # Needed only for compatibility with Python 2.6
credentials = tools.run(flow, store)
print('Storing credentials to ' + credential_path)
return credentials
def get_csv_data(service, folder_id):
"""Print files belonging to a folder.
Args:
service: Drive API service instance.
folder_id: ID of the folder to print files from.
"""
page_token = None
while True:
try:
param = {}
if page_token:
param['pageToken'] = page_token
children = service.children().list(
folderId=folder_id, **param).execute()
items = children.get('items', [])
allcsvs = []
counter = 0
for item in items:
filedata = {}
files_resource = service.files().get(fileId=item['id']).execute()
filedata['name'] = files_resource['title']
counter += 1
if files_resource['mimeType'] == u"text/csv":
print 'Updating ' + str(counter) + ' of ' + str(len(items))
body = {'mimeType': 'application/vnd.google-apps.spreadsheet'}
files_resource = service.files().copy(fileId=item['id'],body=body, convert=True, **param).execute()
else:
print 'Skipping ' + str(counter) + ' of ' + str(len(items)) + '- not a CSV file'
page_token = children.get('nextPageToken')
if page_token == None:
print "Finished converting CSVs to GS in Google Drive folder"
return allcsvs
except errors.HttpError, error:
break
def main():
"""Shows basic usage of the Google Drive API.
Creates a Google Drive API service object and outputs the names and IDs
for up to 10 files.
"""
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
service = discovery.build('drive', 'v2', http=http)
get_csv_data(service, "0B06K0pSAyW1gMi11dk1tdUp6Ylk")
if __name__ == '__main__':
main()
|
[
"matt.fullerton@gmail.com"
] |
matt.fullerton@gmail.com
|
bad7021cc9cd22822dc285a2e2ac2f3fd1bcccf0
|
6861a04db56dd89b774aa933e9604b741da092b3
|
/NeighbourhoodOverlap/NeighbourhoodOverlap/NeighbourhoodOverlap.py
|
4ae2775d1acb2e1af5a9547f15aa376de64e4d3f
|
[] |
no_license
|
nayangondaliya/NetworkTheory
|
48ed9a5977d231c5fbb8fedd674dbf1d683628f1
|
0edcb703abe834dbe21b2498a2bc7164511e3dd1
|
refs/heads/main
| 2023-06-19T06:52:26.992021 | 2021-07-21T15:22:42 | 2021-07-21T15:22:42 | 387,580,955 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,309 |
py
|
#get matrix
def getMatrix(row, column, initialiser):
matrix = []
for i in range(row):
row = []
for j in range(column):
row.append(initialiser)
matrix.append(row)
return matrix
#generate adjacency matrix
def generateMatrix(fileName, vertices, totalVertices):
matrix = getMatrix(totalVertices, totalVertices, 0)
file = open(fileName, "r")
for line in file:
if line:
node_detail = line.split("=")
node = node_detail[0].strip()
edge_list = node_detail[1].split(",")
node_index = vertices.index(node)
for edge in edge_list:
edge = edge.strip()
edge_index = vertices.index(edge)
matrix[node_index][edge_index] = 1
matrix[edge_index][node_index] = 1 #graph is undirected
return matrix
#get node connection
def getConnection(graph, node, relatenode, vertices):
result = []
edges = graph[vertices.index(node)]
relateNodeIndex = vertices.index(relatenode)
if edges[relateNodeIndex] == 0:
return result
for edge in range(len(edges)):
if edges[edge] == 1 and edge != relateNodeIndex:
result.append(vertices[edge])
return result
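# Neighbourhood overlap of an edge (u, v): |N(u) ∩ N(v)| divided by the number
# of nodes adjacent to u or v, excluding u and v themselves. Below,
# t_edges - c_common is exactly that union size.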
vertices = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n"]
#totalVertices = 12
#graph = generateMatrix("graph.txt", vertices, totalVertices)
#overlap = [{"e":"f"}, {"g":"l"}, {"j":"k"}, {"f":"g"}]
totalVertices = 14
graph = generateMatrix("slide.txt", vertices, totalVertices)
overlap = [{"a":"b"}, {"l":"m"}, {"c":"i"}, {"d":"e"}]
for pair in overlap:
for key in pair:
f_edges = getConnection(graph, key, pair[key], vertices)
s_edges = getConnection(graph, pair[key], key, vertices)
common = list(set(f_edges).intersection(s_edges))
c_common = len(common)
t_edges = len(f_edges) + len(s_edges)
if c_common > 0 or t_edges > 0:
c_overlap = round(c_common / (t_edges - c_common), 2)
print(f'Overlap between {key} and {pair[key]} is: {c_overlap}')
else:
print(f'Overlap between {key} and {pair[key]} is: -1')
|
[
"noreply@github.com"
] |
nayangondaliya.noreply@github.com
|
3601271129a1c739e30ecb04126dea97e0c24895
|
4d9b2420ca30795c328df58ec87ae346f873e059
|
/EqSolver/eqsolver.py
|
a858e6b1dc872947fca926aa4ddb7d77d3babda8
|
[] |
no_license
|
CCALITA/CNNthings
|
1f196d304daed9e819ee55cbe099ff665b7278ca
|
c75ce0844219004bcfe307b4cfd17df04a657df5
|
refs/heads/master
| 2021-02-18T16:33:36.260238 | 2020-06-30T12:56:39 | 2020-06-30T12:56:39 | 245,213,548 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 398 |
py
|
# import numpy as np
# x=np.roots(input().split(' '))
# if np.iscomplex(x[0]):print("no real solution")
# else:print(x)
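# The commented block above would solve a polynomial from its coefficients
# with numpy.roots; the TensorFlow code below is an unrelated hello-world.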
import tensorflow as tf
import os
import threading
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF C++ info/warning logs
# TF2 executes eagerly by default; switch to graph mode so the v1 Session works
tf.compat.v1.disable_eager_execution()
# This op is added to the graph as a node
hello = tf.constant('122!')
# Start a TF session
sess = tf.compat.v1.Session()
# Run the op and print the result
print(sess.run(hello))
|
[
"32668177+kirakura@users.noreply.github.com"
] |
32668177+kirakura@users.noreply.github.com
|
81d476ae8ca741ee79612982b6649f6987bd6dd6
|
8339f8990c366f76043c420f0da269661e588229
|
/hub_template.py
|
b076f178cbdebd102e11aa1a115cb3a49d69fe50
|
[] |
no_license
|
davepkennedy/service-templates
|
293ead21df70819f3e2891207da0ce6e60ed39f5
|
64c65442373cdb8e1431af3e74ae6f3367e4fd86
|
refs/heads/master
| 2020-03-30T10:56:30.977075 | 2018-10-04T21:03:44 | 2018-10-04T21:03:44 | 151,144,967 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,262 |
py
|
from github import Github
import json
class GitTemplates(object):
__slots__ = ['client', 'user']
def __init__(self, user):
self.client = Github()
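        # Github() without a token is an anonymous client: fine for public
        # repos, but subject to GitHub's low unauthenticated rate limit.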
self.user = user
def find_templates(self, repo='service-templates'):
user_data = self.client.get_user(self.user)
repos = user_data.get_repos()
l = list(filter(lambda x: x.name == repo, repos))
if len(l):
return Templates(l[0])
return Templates(False)
class Templates(object):
__slots__ = ['repo']
def __init__ (self, repo):
self.repo = repo
def list_templates(self):
if self.repo:
return json.loads(self.repo.get_contents('manifest.json').decoded_content)
return []
def load_template(self, template):
if self.repo:
return Template(self.repo,
json.loads(self.repo.get_contents(template + '/manifest.json').decoded_content))
return Template(False,False)
class Template(object):
__slots__ = ['repo', 'manifest']
def __init__ (self, repo, manifest):
self.repo = repo
self.manifest = manifest
def dependencies(self):
return self.manifest['dependencies']
def fetch(self):
pass
|
[
"noreply@github.com"
] |
davepkennedy.noreply@github.com
|
a8c0c33925a46eaf12503d5bc1b49d8df02e7c13
|
0f79df41295eca76c63acab672f21610beaf33cc
|
/main/queries.py
|
0034e2c167049612c661c891042cb379c97e6234
|
[] |
no_license
|
kusalkumar/cli-app
|
8504dbe876787325e3acf910ecb04458bea00db0
|
3d1448c012cb191f2db38e57a97185dd2ed2110b
|
refs/heads/main
| 2023-04-15T05:13:37.570202 | 2021-04-24T06:08:21 | 2021-04-24T06:08:21 | 360,888,642 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,462 |
py
|
"""
module where all the project specific functions available
"""
import json
import requests
import params
def get_token():
"""
To get latest token to make different api requests
Parameters
----------
None
Returns
-------
True, token / False, error
"""
try:
url = params.BASE_URL
payload={}
headers = {}
response = requests.request("GET", url, headers=headers, data=payload)
response = json.loads(response.text)
base_url = response.get(params.CONTEXT)
token = base_url.split("/")[-2]
return (True, token)
except Exception as e:
return (False, str(e))
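# The token is embedded in the context URL returned by the base endpoint
# (".../<token>/..."); get_token recovers it by splitting the URL on "/".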
def get_entity_url(token, entity_id):
"""
To get api url to get the entity information
Parameters
----------
token, str
token to get authenticated
entity_id: str,
entity to which we need to get info
Returns
-------
True, url / False, error
"""
try:
api_url = params.GET_ENTITY_URL.format(token, entity_id)
return (True, api_url)
except Exception as e:
return (False, str(e))
def get_filter_fname_url(token, firstname):
"""
To get api url to filter by firstname
Parameters
----------
token, str
token to get authenticated
firstname: str,
firstname to get all the entry with that specific firstname
    Returns
    -------
    True, url / False, error
"""
try:
api_url = params.FILTER_BY_FNAME_URL.format(token, firstname)
return (True, api_url)
except Exception as e:
return (False, str(e))
def get_filter_gender_url(token, gender):
"""
To get api url to filter by gender
Parameters
----------
token, str
token to get authenticated
gender: str,
gender to get all the entry with that specific gender
    Returns
    -------
    True, url / False, error
"""
try:
api_url = params.FILTER_BY_GENDER_URL.format(token, gender)
return (True, api_url)
except Exception as e:
return (False, str(e))
def get_create_entity_url(token):
"""
To get api url to create new entity
Parameters
----------
token, str
token to get authenticated
    Returns
    -------
    True, url / False, error
"""
try:
api_url = params.CREATE_ENTITY_URL.format(token)
return (True, api_url)
except Exception as e:
return (False, str(e))
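# Illustrative flow (a sketch; assumes the URLs in the params module are configured):
if __name__ == '__main__':
    ok, token = get_token()
    if ok:
        ok, url = get_entity_url(token, '1')
        print(ok, url)
    else:
        print('token error:', token)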
|
[
"kusal@criterionnetworks.com"
] |
kusal@criterionnetworks.com
|
94aa51e0ae62adb24339c6fea1a38ccd7b23a9e5
|
6500ebe0bf75ce2b639b3545b64647a19e948491
|
/Answers/admin.py
|
f0cf58af87aa885d1ff7fd78ac3ce606ffdd680b
|
[] |
no_license
|
IreneSaenko/PIFAGOR
|
619d1a971632c75e03bfdc7d13eb94e0e0de17b1
|
ac67d5daf132ec1c476919031649d3796085623c
|
refs/heads/master
| 2020-06-11T03:54:59.383701 | 2019-06-26T07:08:27 | 2019-06-26T07:08:27 | 193,839,473 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 98 |
py
|
from django.contrib import admin
from Answers.models import Answers
admin.site.register(Answers)
|
[
"taka.ir@yandex.ru"
] |
taka.ir@yandex.ru
|
84f88719bf89642be3764e8047d815444457cb74
|
8a1944e7b8ff2bb02476003af5aaf06533ce7a57
|
/五一策略.py
|
e35436e087d91d17b3fccf6e7bfb39824c7ab8f8
|
[] |
no_license
|
wzt9332/-jqData-
|
ad86220d4b67620eece10e0c63bc457d81031ccb
|
3808b8e9e6f16156d6ffd7e05a148e49d5910f4e
|
refs/heads/master
| 2022-11-20T09:36:47.732176 | 2020-07-26T05:16:27 | 2020-07-26T05:16:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 9,643 |
py
|
# Cloned from JoinQuant article: https://www.joinquant.com/post/25496
# Title: Soaring returns, 100% annualized, 1700x over 11 years, with no look-ahead functions
# Author: jqz1226
# Import function library
from jqdata import *
import numpy as np
# Initialization: set the benchmark, etc.
def initialize(context):
    # Use the CSI 300 index as the benchmark
    set_benchmark('000300.XSHG')
    # Enable dynamic adjustment mode (real prices)
    set_option('use_real_price', True)
    # Write output to the log with log.info()
    log.info('initialize() starts running; it runs only once globally')
    # Filter out logs below error level produced by the order-series APIs
    log.set_level('order', 'error')
    # Stock pool
    g.security_universe_index = "399101.XSHE"  # SME board composite index
    g.buy_stock_count = 70
    # Stocks selected by the screen
    g.check_out_lists = []
    g.days = 30
    ### Stock-related settings ###
    # Per-trade stock costs: 0.03% commission on buys; 0.03% commission plus
    # 0.1% stamp tax on sells; minimum commission of 5 yuan per trade
    set_order_cost(OrderCost(close_tax=0.001, open_commission=0.0003, close_commission=0.0003, min_commission=5),
                   type='stock')
    # run_daily(my_trade, time='before_open', reference_security='000852.XSHG')
    ## Scheduled functions (reference_security is only a timing reference for the
    ## security type, so passing '000300.XSHG' or '510300.XSHG' is equivalent)
    # Run at a fixed time
    # run_daily(my_trade, time='14:40', reference_security='000300.XSHG')
    # Run after the market close
    run_daily(after_market_close, time='after_close', reference_security='000300.XSHG')
## Function run at the market open
def my_trade(context):
    # Pick the smallest-cap names from the SME board
    check_out_lists = get_index_stocks(g.security_universe_index)
    q = query(valuation.code).filter(
        valuation.code.in_(check_out_lists)
    ).order_by(
        valuation.circulating_market_cap.asc()
    ).limit(
        10 * 30
    )
    check_out_lists = list(get_fundamentals(q).code)
    # Filter: suspended, limit-up and limit-down names, plus ST/*ST/delisting
    check_out_lists = filter_st_stock(check_out_lists)
    check_out_lists = filter_limitup_stock(context, check_out_lists)
    check_out_lists = filter_limitdown_stock(context, check_out_lists)
    check_out_lists = filter_paused_stock(check_out_lists)
    check_out_lists = filter_old_stock(context, check_out_lists)
    check_out_lists = Score(check_out_lists)
    # Keep only as many names as we need
    check_out_lists = check_out_lists[:g.buy_stock_count]
    log.info('Stock list: ' + str(check_out_lists))
    return check_out_lists
    # Trading
    # adjust_position(context, check_out_lists)
## Function run after the market close
def after_market_close(context):
    log.info(str('Run time (after_market_close): ' + str(context.current_dt.time())))
    # Fetch all of today's trade records
    trades = get_trades()
    for _trade in trades.values():
        log.info('Trade record: ' + str(_trade))
    log.info('End of day')
    log.info('##############################################################')
# Custom order placement
# Per the JoinQuant docs, order functions block: once an order function such as
# order_target_value returns, order placement is complete.
# On success it returns the order (which may still not fill); otherwise None.
def order_target_value_(security, value):
    if value == 0:
        log.debug("Selling out %s" % (security))
    else:
        log.debug("Order %s to value %f" % (security, value))
    # If the stock is suspended, order creation fails and order_target_value returns None.
    # If the stock is limit-up/limit-down, order creation succeeds and an Order is
    # returned, but the order will be cancelled.
    # Partially-filled-then-cancelled orders show as cancelled in JoinQuant with
    # filled quantity > 0, so fills can be detected from the filled amount.
    return order_target_value(security, value)
# Open a position: buy the given value of a security.
# Returns True if the order was placed and filled, fully or partially (filled > 0).
# Returns False if the order failed, or was placed but cancelled with zero fill.
def open_position(security, value):
    order = order_target_value_(security, value)
    if order != None and order.filled > 0:
        return True
    return False
# Close a position: sell out the given holding.
# Returns True only if the close order was placed and fully filled.
# Returns False if the order failed, was cancelled with zero fill, or filled only partially.
def close_position(position):
    security = position.security
    order = order_target_value_(security, 0)  # may fail if the stock is suspended
    if order != None:
        if order.status == OrderStatus.held and order.filled == order.amount:
            return True
    return False
# Bollinger band strategy
def Bolling(context, current_price, stock):
    # price holds the last g.days daily closes of the stock
    price = history(g.days, '1d', 'close', stock)
    # convert to an array for easier processing
    price = np.array(price)
    # middle band
    middle = price.sum() / g.days
    # standard deviation
    std = np.std(price)
    # upper band
    up = middle + 2 * std
    # lower band
    down = middle - 2 * std
    # buy when the price falls below the lower band
    if current_price < down:
        return 1
    # sell when it breaks above the upper band
    elif current_price > up:
        return -1
# Called once per time unit (daily backtest: once a day; minute backtest: once a minute)
# Trading
def handle_data(context, data):
    stockList = my_trade(context)
    # stockList.extend(context.portfolio.positions)
    # stockList = list(set(stockList))
    buyStock = []
    for stock in context.portfolio.positions:
        current_price = data[stock].price
        if Bolling(context, current_price, stock) == -1:
            position = context.portfolio.positions[stock]
            close_position(position)
    for stock in stockList:
        if context.portfolio.positions[stock].total_amount == 0:
            current_price = data[stock].price
            if Bolling(context, current_price, stock) == 1:
                buyStock.append(stock)
    position_count = len(buyStock)
    print(buyStock, 'selected stocks')
    if position_count > 0:
        # value = context.portfolio.cash / position_count
        for stock in buyStock:
            if context.portfolio.positions[stock].total_amount == 0:
                open_position(stock, 10000)
# def adjust_position(context, buy_stocks):
#     for stock in context.portfolio.positions:
#         if stock not in buy_stocks:
#             log.info("stock [%s] in position is not buyable" % (stock))
#             position = context.portfolio.positions[stock]
#             close_position(position)
#         else:
#             log.info("stock [%s] is already in position" % (stock))
#     # Size positions by the target stock count.
#     # Only the available cash is split evenly here, so equal weighting
#     # across positions is not guaranteed.
#     position_count = len(context.portfolio.positions)
#     if g.buy_stock_count > position_count:
#         value = context.portfolio.cash / (g.buy_stock_count - position_count)
#         for stock in buy_stocks:
#             if context.portfolio.positions[stock].total_amount == 0:
#                 if open_position(stock, value):
#                     if len(context.portfolio.positions) == g.buy_stock_count:
#                         break
# Filter out suspended stocks
def filter_paused_stock(stock_list):
    current_data = get_current_data()
    return [stock for stock in stock_list if not current_data[stock].paused]
# Truncate a number to n decimal places, returned as a string
def get_two_float(f_str, n):
    f_str = str(f_str)  # '{}'.format(f_str) would also convert it to a string
    a, b, c = f_str.partition('.')
    c = (c + "0" * n)[:n]  # pad with zeros so there are always n decimal digits
    return ".".join([a, c])
# Slope of the 120-day moving average
def MA120_xielv(security):
    array_bars = get_bars(security, 121, '1d', fields=['close'], include_now=True)
    ma_now = array_bars[1:121]   # the most recent 120 bars
    ma_last = array_bars[0:120]  # the 120 bars shifted back one day
    k_ma = np.mean(ma_now['close']) / np.mean(ma_last['close'])
    return get_two_float(k_ma, 2) == '1.00'
# Keep only stocks whose MA120 slope truncates to 1.00 (i.e. a flat MA120)
def Score(stock_list):
    newlist = filter(MA120_xielv, stock_list)
    return list(newlist)
# Filter out stocks listed for fewer than 120 trading days
def filter_old_stock(context, stock_list):
    tmpList = []
    for stock in stock_list:
        days_public = len(get_trade_days(get_security_info(stock).start_date, context.current_dt.date(), count=None))
        # days_public = (context.current_dt.date() - get_security_info(stock).start_date).days
        # keep names listed for at least 120 trading days
        if days_public >= 120:
            tmpList.append(stock)
    return tmpList
# Filter out ST names and anything else flagged for delisting
def filter_st_stock(stock_list):
    current_data = get_current_data()
    return [stock for stock in stock_list
            if not current_data[stock].is_st
            and 'ST' not in current_data[stock].name
            and '*' not in current_data[stock].name
            and '退' not in current_data[stock].name]
# Filter out limit-up stocks
def filter_limitup_stock(context, stock_list):
    last_prices = history(1, unit='1m', field='close', security_list=stock_list)
    current_data = get_current_data()
    # Names already held are kept even at limit-up, so that a held stock stays
    # buyable instead of being filtered out in favour of a different stock
    return [stock for stock in stock_list if stock in context.portfolio.positions.keys()
            or last_prices[stock][-1] < current_data[stock].high_limit]
    # return [stock for stock in stock_list if stock in context.portfolio.positions.keys()
    #         or last_prices[stock][-1] < current_data[stock].high_limit * 0.995]
# Filter out limit-down stocks
def filter_limitdown_stock(context, stock_list):
    last_prices = history(1, unit='1m', field='close', security_list=stock_list)
    current_data = get_current_data()
    return [stock for stock in stock_list if stock in context.portfolio.positions.keys()
            or last_prices[stock][-1] > current_data[stock].low_limit]
|
[
"910933955@qq.com"
] |
910933955@qq.com
|
643ff60586427861267b2eb9c8e880763094d83e
|
4609ee89172d6f5f0b0bb59faf13f67f8a4bad28
|
/gclient/mark_as_read.py
|
21fb6040a8bf72d2e955bbfe3a497187132b5985
|
[] |
no_license
|
QuentinDuval/GmailClient
|
82cf53f4d412280af608b9d90d50eded75b393e1
|
c0a69fe75d22d1ddd932de16107d799473c68e6b
|
refs/heads/master
| 2020-06-10T21:17:02.591884 | 2019-06-25T17:09:40 | 2019-06-25T17:09:40 | 193,750,874 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,861 |
py
|
from __future__ import print_function
from gclient.authentication import *
from googleapiclient.discovery import build
from typing import List
class Classification:
def __init__(self):
self.creds = get_credentials()
self.service = build('gmail', 'v1', credentials=self.creds)
    def get_all_labels(self) -> List[dict]:
"""
Returns all the labels used to classify mails
"""
results = self.service.users().labels().list(userId='me').execute()
return list(results.get('labels', []))
    def list_unread_messages(self, batch_size=500):
        """
        Query GMAIL API to get the list of messages matching the "is unread" criteria
        """
        answer = self.service.users().messages().list(userId='me', q='is:unread', maxResults=batch_size).execute()
        while answer.get('messages'):
            yield answer
            if 'nextPageToken' not in answer:
                break
            next_page_token = answer['nextPageToken']
            # Keep the same query when paginating; the page token continues the
            # original "is:unread" search.
            answer = self.service.users().messages().list(
                userId='me', q='is:unread', maxResults=batch_size, pageToken=next_page_token).execute()
def mark_as_read(self, message_ids: List[str]):
"""
Ask the GMAIL API to mark as "read" all the messages given as parameters
"""
return self.service.users().messages().batchModify(userId='me', body={
"removeLabelIds": ["UNREAD"],
"ids": message_ids,
"addLabelIds": []
}).execute()
def mark_all_as_read(self):
for answer in self.list_unread_messages():
message_ids = [message['id'] for message in answer['messages']]
print("Marked", message_ids)
self.mark_as_read(message_ids)
if __name__ == '__main__':
classifier = Classification()
print(classifier.get_all_labels())
print(classifier.mark_all_as_read())
|
[
"senmenty@gmail.com"
] |
senmenty@gmail.com
|
2f204f0ae40f767e8123b70b8bde66c2de447642
|
59a27d1244c372d72e2124e634330a62a40d504b
|
/aoc_21/05.py
|
a05bbe7afbea6b204f9021473b59745d3fa5ef85
|
[] |
no_license
|
rliffredo/advent-of-code
|
2037813d6765a2c650c240190267c944e4d5148d
|
3dccb51e252d0e0cc2f627476db69a38be7686de
|
refs/heads/master
| 2022-12-22T05:02:14.708180 | 2022-12-15T08:44:58 | 2022-12-15T08:44:58 | 160,335,685 | 0 | 0 | null | 2021-05-13T21:07:08 | 2018-12-04T09:53:42 |
Python
|
UTF-8
|
Python
| false | false | 1,953 |
py
|
import re
from collections import defaultdict
from common import read_data, print_map
def parse_data():
raw_line_defs = read_data("05", True)
def_pattern = re.compile(r"(\d+),(\d+) -> (\d+),(\d+)")
line_defs = [def_pattern.match(raw_def).groups() for raw_def in raw_line_defs]
int_line_defs = [[int(n) for n in line_def] for line_def in line_defs]
return [((p[0], p[1]), (p[2], p[3])) for p in int_line_defs]
def draw_lines(hv_lines_defs, include_diagonal, print_result):
wind_map = defaultdict(int)
for (start_x, start_y), (end_x, end_y) in hv_lines_defs:
delta_x = end_x - start_x
step_x = 1 if delta_x > 0 else -1
delta_y = end_y - start_y
step_y = 1 if delta_y > 0 else -1
if delta_x == 0 or delta_y == 0:
for x in range(0, delta_x + step_x, step_x):
for y in range(0, delta_y + step_y, step_y):
wind_map[(start_x + x, start_y + y)] += 1
if include_diagonal and abs(delta_x) == abs(delta_y):
for n in range(abs(delta_y) + 1):
wind_map[(start_x + n * step_x, start_y + n * step_y)] += 1
if print_result:
map_sizes = (0, max(c[0] for c in wind_map), 0, max(c[1] for c in wind_map))
print_map(map_sizes, lambda x, y: str(wind_map[(x, y)]).replace("0", "."))
return wind_map
def part_1(print_result: bool = True) -> int:
line_defs = parse_data()
wind_map = draw_lines(line_defs, False, False)
points_with_strong_winds = len([p for p in wind_map.values() if p >= 2])
return points_with_strong_winds
def part_2(print_result: bool = True) -> int:
line_defs = parse_data()
wind_map = draw_lines(line_defs, True, print_result)
points_with_strong_winds = len([p for p in wind_map.values() if p >= 2])
return points_with_strong_winds
SOLUTION_1 = 6710
SOLUTION_2 = 20121
if __name__ == "__main__":
print(part_1())
print(part_2())
|
[
"rliffredo@outlook.com"
] |
rliffredo@outlook.com
|
d4c912b0c3a10a426fe1bfd18cdb628d0d4b164b
|
5e3c74f9b4e8a689a0d558d3958eec54d96eda6a
|
/auto_ml/utils_ensembling.py
|
35ce35936f3eb5c72e4e1139f7b467b4f54af142
|
[
"MIT"
] |
permissive
|
Asdil/auto_ml
|
cff33d953aad8bec7e980d805587c8332782202a
|
1097e4f6417806b1251c61f63c7d9a330e0ddca9
|
refs/heads/master
| 2021-03-22T05:08:02.308109 | 2018-01-21T23:29:17 | 2018-01-21T23:29:17 | 119,356,650 | 1 | 0 | null | 2018-01-29T08:49:45 | 2018-01-29T08:49:45 | null |
UTF-8
|
Python
| false | false | 6,913 |
py
|
import os
import numpy as np
import pandas as pd
import pathos
from sklearn.base import BaseEstimator, TransformerMixin
class Ensembler(BaseEstimator, TransformerMixin):
def __init__(self, ensemble_predictors, type_of_estimator, ensemble_method='average', num_classes=None):
self.ensemble_predictors = ensemble_predictors
self.type_of_estimator = type_of_estimator
self.ensemble_method = ensemble_method
self.num_classes = num_classes
# ################################
# Get a dataframe that is all the predictions from all the sub-models
# ################################
# Note that we will get these predictions in parallel (relatively quick)
def get_all_predictions(self, X):
def get_predictions_for_one_estimator(estimator, X):
estimator_name = estimator.name
if self.type_of_estimator == 'regressor':
predictions = estimator.predict(X)
else:
# For classifiers
predictions = list(estimator.predict_proba(X))
return_obj = {estimator_name: predictions}
return return_obj
# Don't bother parallelizing if this is a single dictionary
if X.shape[0] == 1:
predictions_from_all_estimators = map(lambda predictor: get_predictions_for_one_estimator(predictor, X), self.ensemble_predictors)
else:
# Open a new multiprocessing pool
pool = pathos.multiprocessing.ProcessPool()
# Since we may have already closed the pool, try to restart it
try:
pool.restart()
except AssertionError as e:
pass
# Pathos doesn't like datasets beyond a certain size. So fall back on single, non-parallel predictions instead.
# try:
if os.environ.get('is_test_suite', False) == 'True':
predictions_from_all_estimators = map(lambda predictor: get_predictions_for_one_estimator(predictor, X), self.ensemble_predictors)
else:
predictions_from_all_estimators = pool.map(lambda predictor: get_predictions_for_one_estimator(predictor, X), self.ensemble_predictors)
# except:
# predictions_from_all_estimators = map(lambda predictor: get_predictions_for_one_estimator(predictor, X), self.ensemble_predictors)
# predictions_from_all_estimators = list(predictions_from_all_estimators)
# Once we have gotten all we need from the pool, close it so it's not taking up unnecessary memory
pool.close()
try:
pool.join()
except AssertionError:
pass
predictions_from_all_estimators = list(predictions_from_all_estimators)
results = {}
for result_dict in predictions_from_all_estimators:
results.update(result_dict)
# if this is a single row we are getting predictions from, just return a dictionary with single values for all the predictions
if X.shape[0] == 1:
return results
else:
predictions_df = pd.DataFrame.from_dict(results, orient='columns')
return predictions_df
def fit(self, X, y):
return self
# ################################
# Public API to get a single prediction from each row, where that single prediction is somehow an ensemble of all our trained subpredictors
# ################################
def predict(self, X):
predictions = self.get_all_predictions(X)
# If this is just a single dictionary we're getting predictions from:
if X.shape[0] == 1:
# predictions is just a dictionary where all the values are the predicted values from one of our subpredictors. we'll want that as a list
predicted_vals = list(predictions.values())
if self.ensemble_method == 'median':
return np.median(predicted_vals)
elif self.ensemble_method == 'average' or self.ensemble_method == 'mean' or self.ensemble_method == 'avg':
return np.average(predicted_vals)
elif self.ensemble_method == 'max':
return np.max(predicted_vals)
elif self.ensemble_method == 'min':
return np.min(predicted_vals)
else:
if self.ensemble_method == 'median':
return predictions.apply(np.median, axis=1).values
elif self.ensemble_method == 'average' or self.ensemble_method == 'mean' or self.ensemble_method == 'avg':
return predictions.apply(np.average, axis=1).values
elif self.ensemble_method == 'max':
return predictions.apply(np.max, axis=1).values
elif self.ensemble_method == 'min':
return predictions.apply(np.min, axis=1).values
def get_predictions_by_class(self, predictions):
predictions_by_class = []
for class_idx in range(self.num_classes):
class_preds = [pred[class_idx] for pred in predictions]
predictions_by_class.append(class_preds)
return predictions_by_class
def predict_proba(self, X):
predictions = self.get_all_predictions(X)
# If this is just a single dictionary we're getting predictions from:
if X.shape[0] == 1:
# predictions is just a dictionary where all the values are the predicted values from one of our subpredictors. we'll want that as a list
predicted_vals = list(predictions.values())
predicted_vals = self.get_predictions_by_class(predicted_vals)
if self.ensemble_method == 'median':
return [np.median(class_preds) for class_preds in predicted_vals]
elif self.ensemble_method == 'average' or self.ensemble_method == 'mean' or self.ensemble_method == 'avg':
return [np.average(class_preds) for class_preds in predicted_vals]
elif self.ensemble_method == 'max':
return [np.max(class_preds) for class_preds in predicted_vals]
elif self.ensemble_method == 'min':
return [np.min(class_preds) for class_preds in predicted_vals]
else:
classed_predictions = predictions.apply(self.get_predictions_by_class, axis=1)
if self.ensemble_method == 'median':
return classed_predictions.apply(np.median, axis=1)
elif self.ensemble_method == 'average' or self.ensemble_method == 'mean' or self.ensemble_method == 'avg':
return classed_predictions.apply(np.average, axis=1)
elif self.ensemble_method == 'max':
return classed_predictions.apply(np.max, axis=1)
elif self.ensemble_method == 'min':
return classed_predictions.apply(np.min, axis=1)
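# Illustrative usage (a sketch; model_a/model_b stand for already-fitted
# estimators exposing .predict/.predict_proba and a .name attribute):
#
#   ens = Ensembler(ensemble_predictors=[model_a, model_b],
#                   type_of_estimator='regressor', ensemble_method='median')
#   preds = ens.predict(X_test)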
|
[
"ClimbsBytes@gmail.com"
] |
ClimbsBytes@gmail.com
|
bde188897443859c283a0e04d1e9bf1a77dd0ec7
|
a2ea43c06bce621ff0ebe1f6c01784667cf66150
|
/notes/migrations/0008_auto__add_field_category_description.py
|
484f83f8556022245d712006cae1a9e34ff53bc2
|
[] |
no_license
|
abhishekkarnani/my-blog
|
c62682bce7c0a0be079658aa68d4284519990365
|
ce0024e5c98f8e68564f37d6b42c8bedcf702c4d
|
refs/heads/master
| 2020-12-31T07:20:00.194492 | 2016-04-11T21:20:35 | 2016-04-11T21:20:35 | 56,007,650 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,476 |
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Category.description'
db.add_column(u'notes_category', 'description',
self.gf('django.db.models.fields.TextField')(default='', null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Category.description'
db.delete_column(u'notes_category', 'description')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'notes.article': {
'Meta': {'ordering': "('-published_at',)", 'object_name': 'Article'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['notes.Category']", 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'ru'", 'max_length': '2'}),
'preview': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'preview_image': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'published_at': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'views_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'notes.category': {
'Meta': {'object_name': 'Category'},
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['notes.Category']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['notes']
|
[
"abhishek.karnani224@gmail.com"
] |
abhishek.karnani224@gmail.com
|
a57fa2b7364d63958571fb4e0853f4351b795d94
|
0add7953d3e3ce2df9e8265102be39b758579753
|
/built-in/TensorFlow/Research/reinforcement-learning/ModelZoo_QMIX_TensorFlow/xt/benchmark/configs/default_xt.py
|
3f596108539b438663d6aaf4a5988cd8513e8366
|
[
"Apache-2.0"
] |
permissive
|
Huawei-Ascend/modelzoo
|
ae161c0b4e581f8b62c77251e9204d958c4cf6c4
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
refs/heads/master
| 2023-04-08T08:17:40.058206 | 2020-12-07T08:04:57 | 2020-12-07T08:04:57 | 319,219,518 | 1 | 1 |
Apache-2.0
| 2023-03-24T22:22:00 | 2020-12-07T06:01:32 |
Python
|
UTF-8
|
Python
| false | false | 428 |
py
|
"""
default configure for benchmark function
"""
class XtBenchmarkConf(object):
"""benchmark conf, user also can re-set it"""
    default_db_root = "/tmp/.xt_data/sqlite"  # you can set this path yourself
default_id = "xt_default_benchmark"
defalut_log_path = "/tmp/.xt_data/logs"
default_tb_path = "/tmp/.xt_data/tensorboard"
default_plot_path = "/tmp/.xt_data/plot"
default_train_interval_per_eval = 200
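# Illustrative read of a default value (a sketch; the import path is an
# assumption based on this file living at xt/benchmark/configs/default_xt.py):
#
#   from xt.benchmark.configs.default_xt import XtBenchmarkConf
#   print(XtBenchmarkConf.default_db_root)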
|
[
"1571856591@qq.com"
] |
1571856591@qq.com
|
751d4ae0e181ca9676e4f39d66901bcf49adcfce
|
a4d8fcfa8084c5d36a862aeb0978327ff4cfe50f
|
/tools/romutil.py
|
21a81fa99d4558f851102beaa543b3581ea8997d
|
[
"Artistic-2.0"
] |
permissive
|
cahirwpz/demoscene
|
a0b548527d89a354b5b8dfd922f39d8b14d61643
|
cd4517ba69e26c96a69e505e305a6d0152972982
|
refs/heads/master
| 2023-03-17T13:06:43.731158 | 2023-03-13T19:48:47 | 2023-03-13T19:48:47 | 3,242,770 | 105 | 21 |
Artistic-2.0
| 2022-10-18T09:43:25 | 2012-01-22T23:03:06 |
C
|
UTF-8
|
Python
| false | false | 2,256 |
py
|
#!/usr/bin/env python3
import argparse
import os
from struct import pack
from io import BytesIO
from fsutil import SECTOR, write_pad, Filesystem
#
# In memory format description:
#
# sector 0..1: startup code
# [LONG] initial stack pointer
# [LONG] initial program counter
# [LONG] ROM address of executable file, sector aligned
# [LONG] size of executable file, in bytes
# ... startup code
#
# sector 2..: file system image
#
ROMADDR = 0xf80000
ROMSIZE = 0x080000
def write_startup(rom, startup, exe):
startup = BytesIO(startup)
# Overwrite rom startup hunk file setup
startup.seek(8, os.SEEK_SET)
startup.write(pack('>II', exe.offset + 2 * SECTOR + ROMADDR, exe.size))
# Move to the end and pad it so it takes 2 sectors
startup.seek(0, os.SEEK_END)
write_pad(startup, 2 * SECTOR)
# Write startup to ROM image
rom.write(startup.getvalue())
def write_footer(rom):
rom.seek(-16, os.SEEK_END)
rom.write(bytes.fromhex('471848194f1a531b541c4f1d571e4e1f'))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Create ROM file from file system image.')
parser.add_argument(
'startup', metavar='STARTUP', type=str,
help='ROM startup code')
parser.add_argument(
'image', metavar='IMAGE', type=str,
help='File system image file')
parser.add_argument(
'rom', metavar='ROM', type=str,
help='ROM output file')
args = parser.parse_args()
startup = None
if not os.path.isfile(args.startup):
        raise SystemExit('ROM startup code file does not exist!')
if os.path.getsize(args.startup) > 2 * SECTOR:
raise SystemExit('ROM startup code is larger than 1024 bytes!')
with open(args.startup, 'rb') as fh:
startup = fh.read()
executable = Filesystem.find_exec(args.image)
if not executable:
raise SystemExit('No AmigaHunk executable found!')
with open(args.rom, 'wb') as rom:
write_startup(rom, startup, executable)
# Write file system image
with open(args.image, 'rb') as img:
rom.write(img.read())
# Complete ROM disk image
write_pad(rom, ROMSIZE)
write_footer(rom)
|
[
"noreply@github.com"
] |
cahirwpz.noreply@github.com
|
b12492e8c5ab34be86a7628f831744f414fe7c0a
|
c8645c4b185097c369070ebc6a476a1a336c0eaa
|
/mpce/src/crawler.py
|
546b50c5d7e29c7d7a828e2d67b3f522f8219292
|
[
"MIT"
] |
permissive
|
dadosjusbr/coletores
|
efc7407da3fdaa562c6c0359e5e2556f68bddbf2
|
3ea228db357b57344d7d742f96c7ca4667e968c2
|
refs/heads/master
| 2022-05-28T15:50:36.572772 | 2022-05-21T13:05:33 | 2022-05-21T13:05:33 | 218,156,279 | 18 | 10 |
MIT
| 2022-05-21T13:05:34 | 2019-10-28T22:23:26 |
Python
|
UTF-8
|
Python
| false | false | 3,326 |
py
|
import requests
import sys
import os
import pathlib
from bs4 import BeautifulSoup
from urllib.request import urlopen
base_url = 'http://wapp.mpce.mp.br/PortalTransparenciaConsultas/Visao/extratos.aspx{}'
url_formats = {
'remu':'?opt=1',
'vi':'?opt=9'
}
url_params = {
'remu': (('opt','1'),),
'vi': (('opt', '9'),)
}
# Stores the information needed to send requests for each report format
class request:
    def __init__(self, url, params):
        self.url = url
        self.params = params
    def make_payload(self, viewstate, viewstate_gen, event_validation, ddlmes, ddlano, txtNome):
        self.payload = {
            # Fields that identify the table
            '__VIEWSTATE': str(viewstate),
            '__VIEWSTATEGENERATOR': str(viewstate_gen),
            '__EVENTVALIDATION': str(event_validation),
            # Fields holding the month and year of the table being collected
            'ddlMes': str(ddlmes),
            'ddlAno': str(ddlano),
            # Ensures the search covers all members rather than a specific one
            'txtNome': str(txtNome),
            'btnPesquisar': 'Pesquisar'
        }
def download(request, file_path):
    try:
        response = requests.post(request.url, params=request.params, data=request.payload, allow_redirects=True)
    except Exception as excep:
        sys.stderr.write("Could not download file: " + file_path + ". The request was sent to url: " + request.url + ". The following error was raised: " + str(excep))
        os._exit(1)
    try:
        with open(file_path, "wb") as file:
            file.write(response.content)
    except Exception as excep:
        sys.stderr.write("Could not write file: " + file_path + " to disk. The following error was raised: " + str(excep))
        os._exit(1)
# Sets up the values needed to send the HTTP requests
def init_requests(year, month):
    requests = {}
    for key in url_formats:
        # Fetch the values of __VIEWSTATEGENERATOR, __VIEWSTATE and __EVENTVALIDATION
        url_key = base_url.format(url_formats[key])
        page = urlopen(url_key)
        soup = BeautifulSoup(page, features='lxml')
        view_generator = soup.body.find('input', {'id': '__VIEWSTATEGENERATOR'}).get('value')
        view_state = soup.body.find('input', {'id': '__VIEWSTATE'}).get('value')
        event_validation = soup.body.find('input', {'id': '__EVENTVALIDATION'}).get('value')
        # Build the request object
        params = url_params[key]
        request_obj = request(base_url.format(''), params)
        request_obj.make_payload(view_state, view_generator, event_validation, month, year, '')
        requests[key] = request_obj
    return requests
def crawl(year, month, output_path):
files = []
    # Download the simple remuneration sheets
requests = init_requests(year, month)
for key in requests:
pathlib.Path(output_path).mkdir(exist_ok=True)
filename = year + '_' + month + '_' + key
file_path = output_path + '/' + filename + '.html'
download(requests[key], file_path)
files.append(file_path)
return files
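# Illustrative usage (a sketch; year and month are strings, as crawl() expects):
if __name__ == '__main__':
    print(crawl('2020', '01', './output'))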
|
[
"thyago.pereira.silva@ccc.ufcg.edu.br"
] |
thyago.pereira.silva@ccc.ufcg.edu.br
|
0b6a29f79ee9f85fa2b2f650ea17ed3798adbcb2
|
6716eb8116dc5a0d9947e759289c434effabe909
|
/module1/lesson6_step8.py
|
d1ee1429149d55aecaa70de70aa1a36ceecfa2b6
|
[] |
no_license
|
L1verp00ler/stepik-auto-tests-course
|
085e05b0780122f3c2ebadb2e8934ab93875c62a
|
0b89f41b3f809f55d854c9bd43aa477ec6ae4645
|
refs/heads/master
| 2022-12-30T01:55:08.083944 | 2020-10-16T13:44:39 | 2020-10-16T13:44:39 | 287,219,598 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 900 |
py
|
from selenium import webdriver
import time
link = "http://suninjuly.github.io/find_xpath_form"
try:
browser = webdriver.Chrome()
browser.get(link)
input1 = browser.find_element_by_tag_name("input")
input1.send_keys("Ivan")
input2 = browser.find_element_by_name("last_name")
input2.send_keys("Petrov")
input3 = browser.find_element_by_class_name("form-control.city")
input3.send_keys("Smolensk")
input4 = browser.find_element_by_id("country")
input4.send_keys("Russia")
button = browser.find_element_by_xpath("//button[@type='submit']")
button.click()
finally:
    # leave time to copy the answer code from the page
    time.sleep(15)
    # close the browser after all the steps
    browser.quit()
# remember to leave a blank line at the end of the file
|
[
"andrey.golubev.1991@mail.ru"
] |
andrey.golubev.1991@mail.ru
|
67ffa6937e05e7704b90376cbd3bb100ea85a51e
|
901944f407f4a06a4c4027d6139ce21165976857
|
/neural_net/Neural_Net_CC/main.py
|
41bce1152dcd33c242b3bd32556f6a2f50ee5a48
|
[] |
no_license
|
chriscremer/Other_Code
|
a406da1d567d63bf6ef9fd5fbf0a8f177bc60b05
|
7b394fa87523803b3f4536b316df76cc44f8846e
|
refs/heads/master
| 2021-01-17T02:34:56.215047 | 2020-05-26T13:59:05 | 2020-05-26T13:59:05 | 34,680,279 | 7 | 4 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,354 |
py
|
import numpy as np
import csv
import random
import pickle
from NN_cc import Network
from costs import *
from activations import *
from sklearn import preprocessing
if __name__ == "__main__":
####################################
#Load data
####################################
MY_DATASET = '/data1/morrislab/ccremer/simulated_data/simulated_classification_data_100_samps_1000_feats_3_distinct.csv'
X = []
y = []
header = True
with open(MY_DATASET, 'r') as f:
csvreader = csv.reader(f, delimiter=',', skipinitialspace=True)
for row in csvreader:
if header:
header = False
continue
X.append(map(float,row[1:-1]))
if str(row[-1]) == '0.0':
y.append([1.0,0.0])
else:
y.append([0.0,1.0])
X = np.array(X)
y = np.array(y)
#preprocess
preprocessor = preprocessing.StandardScaler()
preprocessor.fit(X)
X = preprocessor.transform(X)
#X_test = preprocessor.transform(X_test)
training_data= []
for i in range(0,70):
training_data.append((np.array(X[i], ndmin=2).T, np.array(y[i], ndmin=2).T))
evaluation_data= []
for i in range(70,100):
evaluation_data.append((np.array(X[i], ndmin=2).T, np.array(y[i], ndmin=2).T))
    print 'Num of Samples: ' + str(len(training_data))
print 'X shape: ' + str(training_data[0][0].shape)
print 'y shape: ' + str(training_data[0][1].shape)
####################################
#Train Model
####################################
#load pickled model
weights_n_biases = pickle.load( open( "saved/w_n_b.p", "rb" ) )
#weights_n_biases = None
#dimension of input, hidden layer, dimension of output
net = Network(layer_sizes=[len(X[0]), 3, len(y[0])],
activations=[Sigmoid_Activation, Sigmoid_Activation],
cost=CrossEntropyCost,
regularization='l1',
weights_n_biases=weights_n_biases)
evaluation_cost, evaluation_accuracy, training_cost, training_accuracy = net.SGD(training_data=training_data,
epochs=200,
mini_batch_size=2,
learn_rate=0.001,
lmbda=0.001,
monitor_training_cost=True,
monitor_training_accuracy=True,
evaluation_data=evaluation_data,
monitor_evaluation_cost=True,
monitor_evaluation_accuracy=True
)
|
[
"chris.a.cremer@gmail.com"
] |
chris.a.cremer@gmail.com
|
a924f6b20b628e2b9ea28c0fc417daa6d15e58f1
|
dadefab2deac66c1d242af3468e81c4d53ac223b
|
/steps/step21.py
|
d6987ce866bb3a84638b0215ea9759be52075883
|
[] |
no_license
|
SooDevv/DeZero3
|
500df02bdcd6ba50045fc4490053e6419769c656
|
dbda633c4403151b1f6feb59a5e1c970268bcc0d
|
refs/heads/main
| 2023-04-21T23:28:50.528753 | 2021-05-09T15:09:14 | 2021-05-09T15:09:14 | 324,681,879 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,712 |
py
|
import numpy as np
import weakref
class Variable:
    # Give Variable priority over ndarray in mixed arithmetic, so that
    # e.g. np.array(1.0) + Variable dispatches to Variable.__radd__.
    __array_priority__ = 200
    def __init__(self, data, name=None):
        if data is not None and not isinstance(data, np.ndarray):
            raise TypeError('{} is not supported'.format(type(data)))
        self.data = data
        self.grad = None
        self.generation = 0
        self.creator = None
        self.name = name
def set_creator(self, func):
self.creator = func
self.generation = func.generation + 1
def backward(self, retain_grad=False):
if self.grad is None:
self.grad = np.ones_like(self.data)
funcs = []
seen_set = set()
def add_func(f):
if f not in seen_set:
funcs.append(f)
seen_set.add(f)
funcs.sort(key=lambda x: x.generation)
add_func(self.creator)
while funcs:
f = funcs.pop()
gys = [output().grad for output in f.outputs]
gxs = f.backward(*gys)
if not isinstance(gxs, tuple):
gxs = (gxs,)
for x, gx in zip(f.inputs, gxs):
x.grad = gx if x.grad is None else x.grad + gx
if x.creator is not None:
add_func(x.creator)
if not retain_grad:
for y in f.outputs:
y().grad = None
def clear_grad(self):
self.grad = None
@property
def shape(self):
return self.data.shape
@property
def ndim(self):
return self.data.ndim
@property
def size(self):
return self.data.size
@property
def dtype(self):
return self.data.dtype
def __len__(self):
return len(self.data)
def __repr__(self):
if self.data is None:
return 'variable(None)'
p = str(self.data).replace('\n', '\n' + ' ' * 9)
return 'variable(' + p + ')'
def __mul__(self, other):
return mul(self, other)
def __add__(self, other):
return add(self, other)
def __radd__(self, other):
return add(self, other)
def __rmul__(self, other):
return mul(self, other)
def as_array(x):
if np.isscalar(x):
return np.array(x)
return x
def as_variable(obj):
if isinstance(obj, Variable):
return obj
return Variable(obj)
class Config:
enable_backprop = True
class Function:
def __call__(self, *inputs):
inputs = [as_variable(x) for x in inputs]
xs = [x.data for x in inputs]
ys = self.forward(*xs)
if not isinstance(ys, tuple):
ys = (ys,)
outputs = [Variable(as_array(y)) for y in ys]
if Config.enable_backprop:
self.generation = max([x.generation for x in inputs])
for output in outputs:
output.set_creator(self)
self.inputs = inputs
self.outputs = [weakref.ref(output) for output in outputs]
return outputs if len(outputs) > 1 else outputs[0]
def forward(self, *xs):
raise NotImplementedError
def backward(self, gy):
raise NotImplementedError
class Add(Function):
def forward(self, x0, x1):
return x0 + x1
def backward(self, gy):
return gy, gy
class Mul(Function):
def forward(self, x0, x1):
return x0 * x1
def backward(self, gy):
x0, x1 = self.inputs[0].data, self.inputs[1].data
return x1 * gy, x0 * gy
def add(x0, x1):
x1 = as_array(x1)
return Add()(x0, x1)
def mul(x0, x1):
x1 = as_array(x1)
return Mul()(x0, x1)
if __name__ == '__main__':
a = Variable(np.array(2.0))
y = 3.0 * a + 1.0
y1 = np.array([1.0]) + a
print(y)
print(y1)
|
[
"soojung.dev@gmail.com"
] |
soojung.dev@gmail.com
|
8413449ea7eca7950d1df03ee831671e6f6b836c
|
f157ecb6b5f5dc59504babd1ba87f0e46a5c0667
|
/Week_07/G20200343030585/LeetCode_773_585.py
|
47e4a41b048237379318c87e1f1b310047b3fe9e
|
[] |
no_license
|
algorithm006-class01/algorithm006-class01
|
891da256a54640cf1820db93380f4253f6aacecd
|
7ad9b948f8deecf35dced38a62913e462e699b1c
|
refs/heads/master
| 2020-12-27T14:43:13.449713 | 2020-04-20T06:56:33 | 2020-04-20T06:56:33 | 237,936,958 | 19 | 140 | null | 2020-04-20T06:56:34 | 2020-02-03T10:13:53 |
Java
|
UTF-8
|
Python
| false | false | 6,653 |
py
|
# -*- coding:utf-8 -*-
# @lc app=leetcode.cn id=773 lang=python
#
# [773] Sliding Puzzle
#
# Approach
# 1. BFS
#    1. Build a map of where the 0 tile can move from each index; flatten the
#       whole board into a string for easy handling
#    2. Each step, try moves in all four directions, add the resulting states
#       to a new list, then process that list as the next level
# 2. A*
#    1. Same as BFS, except using a priority queue and a heuristic function
# @lc code=start
# A* manhattan distance
# import heapq
# class Solution(object):
# def slidingPuzzle(self, board):
# """
# :type board: List[List[int]]
# :rtype: int
# """
#         # Board score: sum over every tile of the Manhattan distance from its current cell to its goal cell
# def get_score(board):
# return sum([scores[int(board[i * 3 + j])][i][j] for i in range(2) for j in range(3)])
# goal_pos = {1:(0,0), 2:(0,1), 3:(0,2), 4:(1,0), 5:(1,1), 0:(1,2)}
# scores = [0] * 6
#         # The heuristic table: for each tile value and each cell, the Manhattan distance to that value's goal cell
# for num in range(6):
# scores[num] = [[abs(goal_pos[num][0] - i)+abs(goal_pos[num][1] - j) for j in range(3)] for i in range(2)]
# moves = {0:{1,3}, 1:{0,2,4}, 2:{1,5},3:{0,4},4:{1,3,5}, 5:{2,4}}
# used = set()
# q = []
# s = "".join([str(c) for row in board for c in row])
# heapq.heappush(q, (0, (0, s, s.index('0'))))
# while len(q):
# _, (distance, s, i) = heapq.heappop(q)
# if s == '123450':
# return distance
# arr = [c for c in s]
# for move in moves[i]:
# new_arr = arr[:]
# new_arr[move], new_arr[i] = new_arr[i], new_arr[move]
# new_s = "".join(new_arr)
# if new_s not in used:
# used.add(new_s)
# # print(new_s, s)
# heapq.heappush(q, (get_score(new_s) + distance + 1, (distance + 1, new_s, move)))
# return -1
#BFS
# class Solution(object):
# def slidingPuzzle(self, board):
# """
# :type board: List[List[int]]
# :rtype: int
# """
# # [[1,2,3],[4,0,5]]
# moves = {0:{1,3}, 1:{0,2,4},2:{1,5},3:{0,4},4:{1,3,5}, 5:{2,4}}
# used = set()
# cnt = 0
# s = "".join(str(c) for row in board for c in row)
# q = [(s, s.index("0"))]
# while q:
# new = []
# for s, i in q:
# used.add(s)
# if s == '123450':
# return cnt
# arr = [c for c in s]
# for move in moves[i]:
# new_arr = arr[:]
# new_arr[move], new_arr[i] = s[i], s[move]
# new_s = "".join(new_arr)
# if new_s not in used:
# new.append((new_s, move))
# q = new
# cnt += 1
# return -1
# class Solution:
#     def slidingPuzzle(self, board):
#         # A dict describing where the 0 tile can move from each index
#         moves, used, cnt = {0: {1, 3}, 1:{0, 2, 4}, 2:{1, 5}, 3:{0, 4}, 4:{1, 3, 5}, 5:{2, 4}}, set(), 0
#         # Flatten the board into a string
#         s = "".join(str(c) for row in board for c in row)
#         # The string plus the index of 0
#         q = [(s, s.index("0"))]
#         while q:
#             new = []
#             for s, i in q:
#                 used.add(s)
#                 if s == "123450":
#                     return cnt
#                 arr = [c for c in s]
#                 for move in moves[i]:
#                     new_arr = arr[:]
#                     # New state after swapping 0 with the neighbour
#                     new_arr[i], new_arr[move] = new_arr[move], new_arr[i]
#                     new_s = "".join(new_arr)
#                     if new_s not in used:
#                         new.append((new_s, move))
#             cnt += 1
#             q = new
#         return -1
# 3*3 board
class Solution:
    def slidingPuzzle(self, board):
        # A dict describing where the 0 tile can move from each index
        moves = {0: {1, 3}, 1:{0, 2, 4}, 2:{1, 5}, 3:{0, 4, 6}, 4:{1, 3, 5, 7}, 5:{2, 4, 8}, 6:{3,7}, 7:{4,6,8}, 8:{5,7}}
        used = set()
        cnt = 0
        # Flatten the board into a string
        s = "".join(str(c) for row in board for c in row)
        # The string plus the index of 0
        q = [(s, s.index("0"))]
        while q:
            new = []
            for s, i in q:
                used.add(s)
                if s == "123456780":
                    return cnt
                arr = [c for c in s]
                for move in moves[i]:
                    new_arr = arr[:]
                    # New state after swapping 0 with the neighbour
                    new_arr[i], new_arr[move] = new_arr[move], new_arr[i]
                    new_s = "".join(new_arr)
                    if new_s not in used:
                        new.append((new_s, move))
            cnt += 1
            q = new
        return -1
# import itertools
# import collections
# class Solution(object):
#     def slidingPuzzle(self, board):
#         R, C = len(board), len(board[0])
#         # Flatten the board lists into a tuple of single digits
#         start = tuple(itertools.chain(*board))
#         # start is a tuple; start.index(0) locates the blank; depth starts at 0
#         queue = collections.deque([(start, start.index(0), 0)])
#         seen = {start}
#         # Python 2 only: builds the goal tuple ending in 0
#         target = tuple(range(1, R*C) + [0])
#         while queue:
#             board, posn, depth = queue.popleft()
#             if board == target: return depth
#             # -1/1 move left/right; -C/C move up/down (C is the column count)
#             for d in (-1, 1, -C, C):
#                 nei = posn + d
#                 # check the move stays on the board and does not wrap across rows
#                 if abs(nei/C - posn/C) + abs(nei%C - posn%C) != 1:
#                     continue
#                 if 0 <= nei < R*C:
#                     newboard = list(board)
#                     newboard[posn], newboard[nei] = newboard[nei], newboard[posn]
#                     newt = tuple(newboard)
#                     if newt not in seen:
#                         seen.add(newt)
#                         queue.append((newt, nei, depth+1))
#         return -1
# @lc code=end
if __name__ == "__main__":
obj = Solution()
# ret = obj.slidingPuzzle([[1,2,3],[4,5,6],[7,8,0]])
ret = obj.slidingPuzzle([[0,1,2],[3,4,5],[6,7,8]])
print(ret)
|
[
"zhaoyong-lc@gmail.com"
] |
zhaoyong-lc@gmail.com
|
3bbf9d9b115044f96a486fc02b991a3a7d9d8876
|
0a6f9be53c8f809fe34b8a53bf38b5c5445f60ca
|
/python/pangram/pangram.py
|
0267e25e702b7c4696c42811bb403d6c5a610ce1
|
[] |
no_license
|
JanPretzel/exercism
|
1802b1257cab6add78e557f391457d9be3fd2a4d
|
b53bb402e50514a21bfb4891742df16c38440a4e
|
refs/heads/master
| 2020-05-21T07:05:23.134163 | 2017-03-11T17:05:56 | 2017-03-11T17:05:56 | 84,591,663 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 160 |
py
|
import re
ALPHA_COUNT = 26
def is_pangram(text):
    clean = re.sub('[^a-z]+', '', text, flags=re.IGNORECASE)
    return len(set(clean.lower())) == ALPHA_COUNT
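# Quick illustrative check:
if __name__ == '__main__':
    assert is_pangram('The quick brown fox jumps over the lazy dog')
    assert not is_pangram('Hello, world')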
|
[
"jan.pretzel@gmail.com"
] |
jan.pretzel@gmail.com
|
bd61c651a5dcd9583cb47782169dbcf572a79d33
|
2b0e87920a4db074ea10520becc60c3afce2a48e
|
/Python_term/aes_script/crypto_demo/des3_cbc.py
|
45012801f4fca99750dcc606d7f967efae1cb1bd
|
[] |
no_license
|
zhajg/zhajg_term
|
1a90d6eb4a4e1acf1afacb46673c38dca2efaa5e
|
2727d5137c9fa8e19c3e7d883b50ba2f1c33029e
|
refs/heads/master
| 2021-01-13T18:08:46.160882 | 2020-02-23T04:13:06 | 2020-02-23T04:13:06 | 242,451,781 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,232 |
py
|
from Crypto.Cipher import DES3
import binascii
# DES3 CBC
key=("\x01\x23\x45\x67\x89\xab\xcd\xef"
"\x23\x45\x67\x89\xab\xcd\xef\x01"
"\x45\x67\x89\xab\xcd\xef\x01\x23")
iv="\x12\x34\x56\x78\x90\xab\xcd\xef"
raw_buf=("\x4e\x6f\x77\x20\x69\x73\x20\x74"
"\x43\xe9\x34\x00\x8c\x38\x9c\x0f"
"\x68\x37\x88\x49\x9a\x7c\x05\xf6")
iv_length = len(iv)
key_length = len(key)
plain_length = len(raw_buf)
print 'The length of IV is:', iv_length
print 'The length of Key is:', key_length
print 'The length of Plain is:', plain_length
real_plain = binascii.b2a_hex(raw_buf)
print 'The result of plain is:', real_plain
# Encrypt part
des3_obj=DES3.new(key, DES3.MODE_CBC, iv)
encrypt_buf=des3_obj.encrypt(raw_buf)
real_encrypt = binascii.b2a_hex(encrypt_buf)
print 'The result of encrypt is:', real_encrypt
encrypt_text = real_encrypt.decode('hex')
# Decrypt part
des3_obj=DES3.new(key, DES3.MODE_CBC, iv)
decrypt_buf=des3_obj.decrypt(encrypt_text)
real_decrypt = binascii.b2a_hex(decrypt_buf)
print 'The result of decrypt is:', real_decrypt
if real_decrypt == real_plain:
    print 'Encrypt and Decrypt test success!'
else:
    print 'Encrypt and Decrypt test fail!'
|
[
"nuaaJulian@163.com"
] |
nuaaJulian@163.com
|
0fe492315d95c78217ef6568b723e832915624b0
|
aa46eecc3f501435338fa37210d2749b3132b1e4
|
/XL216G7/pyscript/G7_packit.py
|
4fd523d192d81b01c72cabde07581e99043428bc
|
[] |
no_license
|
fallingelf/1DSpect
|
fcfe40947733e3c475e75e3145d9e9a2f165d23a
|
3d8bbb614a7995907493c9f54e999d4d013dd292
|
refs/heads/master
| 2020-04-26T11:41:40.263968 | 2019-11-22T12:54:55 | 2019-11-22T12:54:55 | 173,525,392 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,005 |
py
|
from pyraf import iraf
import os,shutil
import numpy as np
def expand_name(Files):
if '-' in Files:
part_Files=Files.replace('-',' ').split()
N=1-int(part_Files[0][-3:])+int(part_Files[1])
name_expand=[]
for i in range(N):
name_expand.append([])
name_expand[-1]=part_Files[0][:-3]+'-'+str(int(part_Files[0][-3:])+i).zfill(4)+'.fit'
else:
name_expand=Files[:-3]+'-'+str(Files[-3:]).zfill(4)+'.fit';
N=1
return N,name_expand
def expand_inf_file(inf_file):
N=expand_name(inf_file[-1][0])[0]
name_expand=expand_name(inf_file[-1][0])[1]
list_inf_tail=inf_file[-1][1:]
if N > 1:
for i in range(N):
inf_file[-1]=[name_expand[i]]+list_inf_tail
if i<N-1:
inf_file.append([])
else:
inf_file[-1]=[name_expand]+list_inf_tail
return inf_file
def find_files(PATH,key_name,flag=-1): # filter filenames in folder PATH containing key_name
files_all=os.listdir(PATH);file_object=[]
if flag==0:
for j in range(len(files_all)):
if key_name == files_all[j][:len(key_name)]:
file_object.append(files_all[j])
if flag==-1:
for j in range(len(files_all)):
if key_name == files_all[j][flag*len(key_name):]:
file_object.append(files_all[j])
return sorted(file_object)
def reflesh(list_str,str_unvalu,flag):
j=0
    while j < len(list_str):
        if list_str[j][flag] == str_unvalu:
            iraf.imdel(list_str[j][0],veri=0) # delete the associated image
            del list_str[j] # delete the term from the list
        else:
            j+=1
return list_str
def distance_points(x1,y1,x2,y2):
x1=x1.replace(':',' ').split();x1=float(x1[0])+float(x1[1])/60+float(x1[2])/3600
y1=y1.replace(':',' ').split();y1=float(y1[0])+float(y1[1])/60+float(y1[2])/3600
x2=x2.replace(':',' ').split();x2=float(x2[0])+float(x2[1])/60+float(x2[2])/3600
y2=y2.replace(':',' ').split();y2=float(y2[0])+float(y2[1])/60+float(y2[2])/3600
return (x1-x2)**2+(y1-y2)**2
def find_stand_path(stand_name,PATH):
stand_star_txt=open(PATH+'stand_stars','r')
stand_star_list=stand_star_txt.readlines()
stand_star_txt.close()
for j in range(len(stand_star_list)):
if stand_name.lower() in stand_star_list[j]:
return '/iraf/iraf/noao/lib/onedstds/'+stand_star_list[j].split()[0]
def mkdir(folder):
if not os.path.exists(folder):
os.makedirs(folder)
def write_files2file(file_name,key_words,rw='w'):
key_words=key_words.split(',')
txt_file_pn=open(file_name,rw)
for i in range(len(key_words)):
txt_file_pn.write('\n'.join(find_files(os.getcwd(),key_words[i])))
if i !=len(key_words):
txt_file_pn.write('\n')
txt_file_pn.close()
return
def write_list2file(file_name,list_name):
with open(file_name,'w') as f:
f.write('\n'.join(list_name))
f.close()
# all you need to revise to suit your machine.
#--------------------------------------------------------------------
Path_Folder_script='/home/host-name/IRAF/rfscript/'
Path_Folder_Data='/home/host-name/IRAF/spectrum/'
#--------------------------------------------------------------------
i=0;Path_Folder_Obj=Path_Folder_Data+os.listdir(Path_Folder_Data)[i]
os.chdir(Path_Folder_Obj);os.system('cd '+Path_Folder_Obj)
iraf.noao.imred()
iraf.noao.imred.kpnoslit();iraf.noao.onedspec()
result_path=Path_Folder_Obj+'/'+os.listdir(Path_Folder_Data)[i]+'_result';mkdir(result_path)
need_files=find_files(Path_Folder_Obj,'_ca.fits')+find_files(Path_Folder_Obj,'_co.fits')
[shutil.copy(Path_Folder_Obj+'/'+need_files[j],result_path) for j in range(len(need_files))]
iraf.noao.imred()
iraf.noao.imred.kpnoslit();iraf.noao.onedspec()
os.chdir(result_path);os.system('cd '+result_path)
[iraf.noao.imred.kpnoslit.scopy(result_path+'/'+need_files[j],result_path+'/'+need_files[j],format='onedspec',rebin=0) for j in range(len(need_files))]
[iraf.noao.onedspec.wspectext(need_files[j]+'.0001.fits',need_files[j].replace('fits','dat'),header=0,wformat="%0.2f") for j in range(len(need_files))]
[iraf.noao.onedspec.wspectext(need_files[j]+'.0001.fits',need_files[j].replace('fits','txt'),header=1,wformat="%0.2f") for j in range(len(need_files))]
garbage_files=find_files(result_path,'001.fits')
[os.remove(result_path+'/'+garbage_files[j]) for j in range(len(garbage_files))]
mkdir(result_path+'/dat/');mkdir(result_path+'/fits/');mkdir(result_path+'/txt/');
fits_files=find_files(result_path,'.fits');dat_files=find_files(result_path,'.dat');txt_files=find_files(result_path,'.txt')
[shutil.move(result_path+'/'+fits_files[j],result_path+'/fits/') for j in range(len(fits_files))]
[shutil.move(result_path+'/'+dat_files[j],result_path+'/dat/') for j in range(len(dat_files))]
[shutil.move(result_path+'/'+txt_files[j],result_path+'/txt/') for j in range(len(txt_files))]
|
[
"noreply@github.com"
] |
fallingelf.noreply@github.com
|
90abd98297a2ec0223dd17d5387e6b4e56471e66
|
10b4ebc101ac9f829b62ed21aadaa5f80f245114
|
/Lokapróf/1 Basics/Population estimation.py
|
3b88f8512498fcf6d58e128484e886c523676347
|
[] |
no_license
|
nonnikb/verkefni
|
c80537ff904026a6cb508c29f84f3ff0ab16a226
|
0a1654e0c22bb16990da0fdcecae008c69be4c12
|
refs/heads/master
| 2020-03-27T23:31:11.820242 | 2019-01-05T21:54:49 | 2019-01-05T21:54:49 | 147,324,316 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 525 |
py
|
"""Assume that the current US population is 307,357,870
- a birth every 7 seconds
- a death every 13 seconds
- a new immigrant every 35 seconds
Write a program that takes years as input (as an integer) and prints
out estimated population (as integer).
Assume that there are exactly 365 days in a year."""
years = input("Input years: ")
years_int = int(years)
br = 1 / 7   # births per second
dr = 1 / 13  # deaths per second
im = 1 / 35  # immigrants per second
# deaths reduce the population, so they are subtracted
gr = br - dr + im
growth = gr * 60 * 60 * 24 * 365 * years_int
new_pop = 307357870 + growth
print("New population after", years_int, "years:", int(new_pop))
|
[
"jonb18@ru.is"
] |
jonb18@ru.is
|
3b9fe333e7d4065f42d1f796e206238245df2998
|
99d7a6448a15e7770e3b6f3859da043300097136
|
/src/database/orms/api.py
|
e8c05566f0edc99644329043e8244fbdd6556648
|
[] |
no_license
|
softtrainee/arlab
|
125c5943f83b37bc7431ae985ac7b936e08a8fe4
|
b691b6be8214dcb56921c55daed4d009b0b62027
|
refs/heads/master
| 2020-12-31T07:54:48.447800 | 2013-05-06T02:49:12 | 2013-05-06T02:49:12 | 53,566,313 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 866 |
py
|
#===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from bakeout_orm import *
from device_scan_orm import *
from power_map_orm import *
from power_orm import *
from video_orm import *
|
[
"jirhiker@localhost"
] |
jirhiker@localhost
|
47f754df0fc9ba8f12d44c0b23da5e69d42e1e61
|
44b43d39b31b3eba1cf65cab7c47f72ff68134d1
|
/chapter8/ex38.py
|
e2ffdbc116eb750caa4acffc0e06ee55c032e948
|
[] |
no_license
|
BenoitStef/LearnPython3
|
5ab722b36d01b026ce4bb7ef92240f8ee87fcd50
|
243ebc15985009657b0d1f4e168553f4d9302607
|
refs/heads/master
| 2020-04-24T08:01:14.905149 | 2019-02-26T12:39:08 | 2019-02-26T12:39:08 | 171,817,475 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,157 |
py
|
#-------------------------------------------------------------------------------
# Doing Things to Lists - Learn Python 3 the hard way - page 168
# Author : Benoit Stef
# Date : 25.02.2019
#-------------------------------------------------------------------------------
tenThings = "Apples Oranges Crows Telephone Light Sugar"
#print(tenThings)
print("wait there are not 10 things in that list. Let's fix that.")
# split() is a very powerful tool: it splits a string on spaces into a list
stuff = tenThings.split()
print(stuff)
moreStuff = ["Day", "Night", "Song", "Frisbee",
"Corn", "Banana", "Girl", "Boy"]
while len(stuff) !=10:
    # pop takes the last element of the list and shrinks the list
nextOne = moreStuff.pop()
print("Adding: ", nextOne)
    # append the item popped from the other list onto this one
stuff.append(nextOne)
print("There are {} items now".format(len(stuff)))
print("There we go: ", stuff)
print("Let's do some things with stuff.")
print(stuff[1])
print(stuff[-1]) #whoa! Fancy
print(stuff.pop())
print(' '.join(stuff)) #what? cool!
print('#'.join(stuff[3:6])) #super stellar!
|
[
"benoit.stef@gmx.ch"
] |
benoit.stef@gmx.ch
|
0d625289c5ac0ab9c7e787207ace977e54c123fa
|
38e2dd851bf4e2d846d87859293036f02ddbaef4
|
/pyap/pyap/gunicorn.conf.py
|
e679e9f3a1dd430890b98c245ef1371d9e651e3d
|
[] |
no_license
|
marychev/pyap_v100
|
c154207b7e86d221f50955c9b4872430a7ebbdd5
|
44deb588e07a047f23c21430d149a14b729f9a88
|
refs/heads/master
| 2020-03-14T04:09:14.615123 | 2018-07-15T04:48:43 | 2018-07-15T04:48:43 | 131,435,669 | 5 | 1 | null | 2019-12-03T15:25:51 | 2018-04-28T18:49:29 |
HTML
|
UTF-8
|
Python
| false | false | 53 |
py
|
bind = '127.0.0.1:3100'
workers = 3
user = "nobody"
|
[
"pyapmail@gmail.com"
] |
pyapmail@gmail.com
|
481c19172d01ff3c0f182687e3fb6e049cc46d47
|
0fb169cc1518c63b1e69ff87b8b518beff471361
|
/src/c4/cmany/build_item.py
|
21430c37524eee7f70fc5c0f47641a23dd7f8316
|
[
"MIT"
] |
permissive
|
AvdN/cmany
|
c2befcf8f1b4c9cf53a158f069bf4cf8fb6afea2
|
9369c22e17cfdfc00d8184b7cfce0d9f91980c75
|
refs/heads/master
| 2021-01-24T06:42:36.832093 | 2017-05-12T00:27:50 | 2017-05-12T00:27:50 | 93,314,174 | 0 | 0 | null | 2017-06-04T13:09:19 | 2017-06-04T13:09:19 | null |
UTF-8
|
Python
| false | false | 9,119 |
py
|
from collections import OrderedDict as odict
import re
from . import util
from .named_item import NamedItem
from .build_flags import BuildFlags
from .combination_rules import CombinationRules
# -----------------------------------------------------------------------------
class BuildItem(NamedItem):
"""A base class for build items."""
@staticmethod
def create(map_of_class_name_to_tuple_of_class_and_specs):
items = BuildItemCollection()
for cls_name, (cls, spec_list) in map_of_class_name_to_tuple_of_class_and_specs.items():
if isinstance(spec_list, str):
spec_list = util.splitesc_quoted(spec_list, ',')
for s in spec_list:
items.add_build_item(cls(s))
items.resolve_references()
return items
def is_trivial(self):
if self.name != self.default_str():
return False
if not self.flags.empty():
return False
return True
@staticmethod
def trivial_item(items):
return len(items) == 1 and items[0].is_trivial()
@staticmethod
def no_flags_in_collection(items):
for i in items:
if not i.flags.empty():
return False
return True
def __init__(self, spec):
self.full_specs = spec
self.flag_specs = []
self.refs = []
self.combination_rules = CombinationRules([])
self._resolved_references = False
spec = util.unquote(spec)
spl = spec.split(':')
if len(spl) == 1:
name = spec
self.flags = BuildFlags(name)
super().__init__(name)
return
name = spl[0]
rest = spl[1]
# super().__init__(name) # DON'T!!! will overwrite
self.name = name
self.flags = BuildFlags(name)
spl = util.splitesc_quoted(rest, ' ')
curr = ""
for s in spl:
if s[0] != '@':
curr += " " + s
else:
self.refs.append(s[1:])
if curr:
self.flag_specs.append(curr)
curr = ""
self.flag_specs.append(s)
if curr:
self.flag_specs.append(curr)
def resolve_references(self, item_collection):
if self._resolved_references:
return
for s_ in self.flag_specs:
s = s_.lstrip()
if s[0] == '@':
refname = s[1:]
r = item_collection.lookup_build_item(refname, self.__class__)
if self.name in r.refs:
msg = "circular references found in {} definitions: '{}'x'{}'"
raise Exception(msg.format(self.__class__.__name__, self.name, r.name))
if not r._resolved_references:
r.resolve_references(item_collection)
self.flags.append_flags(r.flags, append_to_name=False)
else:
import argparse
from . import args as c4args
parser = argparse.ArgumentParser()
c4args.add_bundle_flags(parser)
ss = util.splitesc_quoted(s, ' ')
args = parser.parse_args(ss)
tmp = BuildFlags('', **vars(args))
self.flags.append_flags(tmp, append_to_name=False)
cr = []
if hasattr(args, 'combination_rules'):
cr = getattr(args, 'combination_rules')
self.combination_rules = CombinationRules(cr)
self._resolved_references = True
@staticmethod
def parse_args(v_):
"""parse comma-separated build item specs from the command line.
An individual build item spec can have any of the following forms:
* name
* 'name:'
* "name:"
* 'name: <flag_specs...>'
* "name: <flag_specs...>"
So for example, any of these could be valid input to this function:
* foo,bar
* foo,'bar: -X "-a" "-b" (etc)'
* foo,"bar: -X '-a' '-b' (etc)"
* 'foo: -DTHIS_IS_FOO -X "-a"','bar: -X "-a" "-b" (etc)'
* 'foo: -DTHIS_IS_FOO -X "-a"',bar
* etc
In some cases the shell (or argparse? or what?) removes quotes, so we
have to deal with that too.
"""
#util.lognotice("parse_args 0: input=____{}____".format(v_))
# remove start and end quotes if there are any
v = v_
if util.is_quoted(v_):
v = util.unquote(v_)
# print("parse_args 1: unquoted=____{}____".format(v))
if util.has_interior_quotes(v):
# this is the simple case: we assume everything is duly delimited
vli = util.splitesc_quoted(v, ',')
# print("parse_args 2: vli=__{}__".format(vli))
else:
# in the absence of interior quotes, parsing is more complicated.
# Does the string have ':'?
if v.find(':') == -1:
# no ':' was found; a simple split will nicely do
vli = v.split(',')
# print("parse_args 3.1: vli=__{}__".format(vli))
else:
# uh oh. we have ':' in the string, but no quotes in it. This
# means we have to do it the hard way. There's probably a
# less hard way, but for now this is short enough.
# print("parse_args 3.2: parsing manually...")
vli = []
withc = False
b = 0
lastcomma = -1
for i, c in enumerate(v):
if c == ',':
if not withc:
vli.append(v[b:i])
b = i + 1
# print("parse_args 3.2.1: ','@ i={}: v[b:i]={} vli={}".format(i, v[b:i], vli))
lastcomma = i
elif c == ':':
if not withc:
withc = True
else:
vli.append(v[b:(lastcomma + 1)])
b = lastcomma + 1
# print("parse_args 3.2.2: ':'@ i={}: v[b:i]={} vli={}".format(i, v[b:i], vli))
rest = v[b:]
if rest:
vli.append(rest)
# print("parse_args 3.2.3: rest={} vli={}".format(rest, vli))
# print("parse_args 4: vli=", vli)
# unquote split elements
vli = [util.unquote(v).strip(',') for v in vli]
# util.logdone("parse_args 4: input=____{}____ output=__{}__".format(v_, vli))
return vli
def save_config(self, yml_node):
if not self.flags.empty():
self.flags.save_config(yml_node)
def load_config(self, yml_node):
self.flags.load_config(yml_node)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
class BuildItemCollection(odict):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.item_names = odict()
def add_build_item(self, item):
# convert the class name to snake case and append s for plural
# eg Variant-->variants and BuildType --> build_types
# http://stackoverflow.com/questions/1175208/elegant-python-function-to-convert-camelcase-to-snake-case
cls = item.__class__
cls_name = cls.__name__
cls_name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', cls_name)
cls_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', cls_name)
cls_name = cls_name.lower() + 's'
# add the item to the list of the class name
if not self.get(cls_name):
self[cls_name] = []
self[cls_name].append(item)
        # store the collection (class) name so all collections can be iterated later
if not hasattr(self, 'collections'):
setattr(self, 'collections', [])
        if cls_name not in self.collections:
self.collections.append(cls_name)
# add the object to a map to accelerate lookups by item name
if not self.item_names.get(item.name):
self.item_names[item.name] = []
self.item_names[item.name].append(item)
def lookup_build_item(self, item_name, item_cls=None):
items = self.item_names[item_name]
# first look for items with the same class and same name
for i in items:
if i.name == item_name and i.__class__ == item_cls:
return i
# now look for just same name
for i in items:
if i.name == item_name:
return i
# at least one must be found
msg = "item not found: {} (class={})".format(item_name,
item_cls.__name__)
raise Exception(msg)
def resolve_references(self):
for c in self.collections:
for item in self[c]:
item.resolve_references(self)
|
[
"dev@jpmag.me"
] |
dev@jpmag.me
|
e84d438951700c2db6d33da9d697f4a43a7a4aab
|
1329b07939a984c252ab81dd30cebc62968db575
|
/code/traffic--day-vision.py
|
40a7cc934c34051b1e4c19112db88abbbe52b50a
|
[] |
no_license
|
avaish1409/traffic-count
|
a228a051a0d581de287e37daf709465cd9a1098c
|
06ea34fcd607e33e2d961091d8443ffc50715a68
|
refs/heads/main
| 2023-04-04T02:00:11.078145 | 2021-04-02T14:57:57 | 2021-04-02T14:57:57 | 354,013,651 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,633 |
py
|
import numpy as np
import cv2
import pandas as pd
from google.colab import drive
drive.mount('/content/gdrive')
def check(x, y, eq):
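    # Returns True if the point (x, y) lies on the same side of the line
    # a*x + b*y + c = 0 (eq == [a, b, c]) as the origin; used to tell when a
    # tracked centroid has crossed the counting line.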
res = eq[0]*x + eq[1]*y + eq[2]
org = eq[2]
if (org>0) ^ (res>0):
return False
return True
def getEq(p1, p2):
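    # Build [a, b, c] for the line a*x + b*y + c = 0 through p1 and p2
    # (slope-intercept form rearranged).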
m = (p1[1]-p2[1])/(p1[0]-p2[0])
    c = (m*p1[0])-p1[1]  # c = m*x - y, taking both coordinates from the same point
return [-1*m, 1, c]
def fun(path, linexpos = 0, lineypos = 225, linexpos2 = 900, lineypos2 = 250):
cap = cv2.VideoCapture(path)
frames_count, fps, w, h = cap.get(cv2.CAP_PROP_FRAME_COUNT), cap.get(cv2.CAP_PROP_FPS), cap.get(
cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
height = int(h)
width = int(w)
print('Input Video summary: ', frames_count, fps, width, height)
df = pd.DataFrame(index=range(int(frames_count))) # dataframe of input video frames
df.index.name = "Frames"
# display variables
framenumber = 0
carscrossedup = 0
carscrosseddown = 0
carids = []
caridscrossed = []
totalcars = 0
# processing - create background subtractor
fgbg = cv2.createBackgroundSubtractorMOG2()
# output video
ret, frame = cap.read()
ratio = .5 # resize ratio
image = cv2.resize(frame, (0, 0), None, ratio, ratio)
width2, height2, channels = image.shape
video = cv2.VideoWriter('tcount3.avi', cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps, (height2, width2), 1)
# check night or day
is_light = np.mean(image) > 117
# equation of line
eq = getEq((linexpos, lineypos+25), (linexpos2, lineypos2+25))
print('Main task initiated')
while True:
ret, frame = cap.read()
# check if input
if ret:
image = cv2.resize(frame, (0, 0), None, ratio, ratio) # resize image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) # converts image to gray
fgmask = fgbg.apply(gray) # uses the background subtraction
# applies different thresholds to fgmask to try and isolate cars
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)) # kernel to apply to the morphology
closing = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
dilation = cv2.dilate(opening, kernel)
if is_light:
retvalbin, bins = cv2.threshold(dilation, 220, 255, cv2.THRESH_BINARY) # removes the shadows
else:
retvalbin, bins = cv2.threshold(dilation, 220, 255, cv2.THRESH_BINARY_INV) # removes the car lights
# creating contour
contours, hierarchy = cv2.findContours(bins.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# use convex hull to create polygon around contours
hull = [cv2.convexHull(c) for c in contours]
cv2.drawContours(image, hull, -1, (0, 255, 0), 3)
# line
# lineypos = 225
cv2.line(image, (linexpos, lineypos), (linexpos2, lineypos2), (255, 0, 0), 5)
# lineypos2 = 250
cv2.line(image, (linexpos, lineypos+25), (linexpos2, lineypos2+25), (0, 255, 0), 5)
# min area of object
minarea = 100
# max area of object
maxarea = 50000
# vectors for the x and y locations of contour centroids in current frame
cxx = np.zeros(len(contours))
cyy = np.zeros(len(contours))
for i in range(len(contours)):
if hierarchy[0, i, 3] == -1: # using hierarchy to only count parent contours (contours not within others)
area = cv2.contourArea(contours[i]) # area of contour
if minarea < area < maxarea: # area threshold for contour
# calculating centroids of contours
cnt = contours[i]
M = cv2.moments(cnt)
cx = int(M['m10'] / M['m00'])
cy = int(M['m01'] / M['m00'])
if cy > lineypos: # filters out contours that are above line (y starts at top)
# gets bounding points of contour to create rectangle
# x,y is top left corner and w,h is width and height
x, y, w, h = cv2.boundingRect(cnt)
# creates a rectangle around contour
cv2.rectangle(image, (x, y), (x + w, y + h), (255, 0, 0), 2)
# Prints centroid text in order to double check later on
cv2.putText(image, str(cx) + "," + str(cy), (cx + 10, cy + 10), cv2.FONT_HERSHEY_SIMPLEX,
.3, (0, 0, 255), 1)
cv2.drawMarker(image, (cx, cy), (0, 0, 255), cv2.MARKER_STAR, markerSize=5, thickness=1,
line_type=cv2.LINE_AA)
# adds centroids that passed previous criteria to centroid list
cxx[i] = cx
cyy[i] = cy
# eliminates zero entries (centroids that were not added)
cxx = cxx[cxx != 0]
cyy = cyy[cyy != 0]
# empty list to later check which centroid indices were added to dataframe
minx_index2 = []
miny_index2 = []
# maximum allowable radius for current frame centroid to be considered the same centroid from previous frame
maxrad = 25
# The section below keeps track of the centroids and assigns them to old carids or new carids
if len(cxx): # if there are centroids in the specified area
if not carids: # if carids is empty
for i in range(len(cxx)): # loops through all centroids
carids.append(i) # adds a car id to the empty list carids
df[str(carids[i])] = "" # adds a column to the dataframe corresponding to a carid
# assigns the centroid values to the current frame (row) and carid (column)
df.at[int(framenumber), str(carids[i])] = [cxx[i], cyy[i]]
totalcars = carids[i] + 1 # adds one count to total cars
else: # if there are already car ids
dx = np.zeros((len(cxx), len(carids))) # new arrays to calculate deltas
dy = np.zeros((len(cyy), len(carids))) # new arrays to calculate deltas
for i in range(len(cxx)): # loops through all centroids
for j in range(len(carids)): # loops through all recorded car ids
# acquires centroid from previous frame for specific carid
oldcxcy = df.iloc[int(framenumber - 1)][str(carids[j])]
# acquires current frame centroid that doesn't necessarily line up with previous frame centroid
curcxcy = np.array([cxx[i], cyy[i]])
if not oldcxcy: # checks if old centroid is empty in case car leaves screen and new car shows
continue # continue to next carid
else: # calculate centroid deltas to compare to current frame position later
dx[i, j] = oldcxcy[0] - curcxcy[0]
dy[i, j] = oldcxcy[1] - curcxcy[1]
for j in range(len(carids)): # loops through all current car ids
sumsum = np.abs(dx[:, j]) + np.abs(dy[:, j]) # sums the deltas wrt to car ids
# finds which index carid had the min difference and this is true index
correctindextrue = np.argmin(np.abs(sumsum))
minx_index = correctindextrue
miny_index = correctindextrue
# acquires delta values of the minimum deltas in order to check if it is within radius later on
mindx = dx[minx_index, j]
mindy = dy[miny_index, j]
if mindx == 0 and mindy == 0 and np.all(dx[:, j] == 0) and np.all(dy[:, j] == 0):
# checks if minimum value is 0 and checks if all deltas are zero since this is empty set
# delta could be zero if centroid didn't move
continue # continue to next carid
else:
# if delta values are less than maximum radius then add that centroid to that specific carid
if np.abs(mindx) < maxrad and np.abs(mindy) < maxrad:
# adds centroid to corresponding previously existing carid
df.at[int(framenumber), str(carids[j])] = [cxx[minx_index], cyy[miny_index]]
minx_index2.append(minx_index) # appends all the indices that were added to previous carids
miny_index2.append(miny_index)
for i in range(len(cxx)): # loops through all centroids
                        # if centroid is not in the minindex list then another car needs to be added
                        if i not in minx_index2 and i not in miny_index2:
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
elif curcxcy[0] and not oldcxcy and not minx_index2 and not miny_index2:
# checks if current centroid exists but previous centroid does not
# new car to be added in case minx_index2 is empty
df[str(totalcars)] = "" # create another column with total cars
totalcars = totalcars + 1 # adds another total car the count
t = totalcars - 1 # t is a placeholder to total cars
carids.append(t) # append to list of car ids
df.at[int(framenumber), str(t)] = [cxx[i], cyy[i]] # add centroid to the new car id
# The section below labels the centroids on screen
currentcars = 0 # current cars on screen
currentcarsindex = [] # current cars on screen carid index
for i in range(len(carids)): # loops through all carids
if df.at[int(framenumber), str(carids[i])] != '':
# checks the current frame to see which car ids are active
# by checking in centroid exists on current frame for certain car id
currentcars = currentcars + 1 # adds another to current cars on screen
currentcarsindex.append(i) # adds car ids to current cars on screen
for i in range(currentcars): # loops through all current car ids on screen
# grabs centroid of certain carid for current frame
curcent = df.iloc[int(framenumber)][str(carids[currentcarsindex[i]])]
# grabs centroid of certain carid for previous frame
oldcent = df.iloc[int(framenumber - 1)][str(carids[currentcarsindex[i]])]
if curcent: # if there is a current centroid
# On-screen text for current centroid
# cv2.putText(image, "Centroid" + str(curcent[0]) + "," + str(curcent[1]),
# (int(curcent[0]), int(curcent[1])), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
cv2.putText(image, "ID:" + str(carids[currentcarsindex[i]]), (int(curcent[0]), int(curcent[1] - 15)),
cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 255, 255), 2)
cv2.drawMarker(image, (int(curcent[0]), int(curcent[1])), (0, 0, 255), cv2.MARKER_STAR, markerSize=5,
thickness=1, line_type=cv2.LINE_AA)
if oldcent: # checks if old centroid exists
xstart = oldcent[0] - maxrad
ystart = oldcent[1] - maxrad
xwidth = oldcent[0] + maxrad
yheight = oldcent[1] + maxrad
cv2.rectangle(image, (int(xstart), int(ystart)), (int(xwidth), int(yheight)), (0, 125, 0), 1)
# checks if old centroid is on or below line and curcent is on or above line
# if oldcent[1] >= lineypos2 and curcent[1] <= lineypos2 and carids[
# currentcarsindex[i]] not in caridscrossed:
if check(oldcent[0], oldcent[1], eq) and (not check(curcent[0], curcent[1], eq)) and carids[
currentcarsindex[i]] not in caridscrossed:
carscrossedup += 1
cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 255), 5)
caridscrossed.append(
currentcarsindex[i]) # adds car id to list of count cars to prevent double counting
# to count cars and that car hasn't been counted yet
# elif oldcent[1] <= lineypos2 and curcent[1] >= lineypos2 and carids[
# currentcarsindex[i]] not in caridscrossed:
elif (not check(oldcent[0], oldcent[1], eq)) and (check(curcent[0], curcent[1], eq)) and carids[
currentcarsindex[i]] not in caridscrossed:
carscrosseddown += 1
cv2.line(image, (0, lineypos2), (width, lineypos2), (0, 0, 125), 5)
caridscrossed.append(currentcarsindex[i])
# Top left hand corner on-screen text
cv2.rectangle(image, (0, 0), (250, 100), (255, 0, 0), -1) # background rectangle for on-screen text
cv2.putText(image, "Cars in Area: " + str(currentcars), (0, 15), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0), 1)
cv2.putText(image, "Cars Crossed Up: " + str(carscrossedup), (0, 30), cv2.FONT_HERSHEY_SIMPLEX, .5, (0, 170, 0),
1)
cv2.putText(image, "Cars Crossed Down: " + str(carscrosseddown), (0, 45), cv2.FONT_HERSHEY_SIMPLEX, .5,
(0, 170, 0), 1)
framenumber += 1
else:
break
print('Task done.. prepared to terminate')
cap.release()
return
|
[
"noreply@github.com"
] |
avaish1409.noreply@github.com
|
f6a887de38c1172a802ca0e3bb9ff6761cc07b06
|
01372a9d6545747b5f526022e7bf91396bb103bc
|
/RepeatAnalysisTools/sampleReads.py
|
78cee05a631527a668fe7ba0fadea5fd250c1d15
|
[] |
no_license
|
PacificBiosciences/apps-scripts
|
9ebe137bc54e822717b64ac38a9e30cadfa80a22
|
3ab389677ffd6a050bd050586322854691d8592f
|
refs/heads/master
| 2022-05-04T12:52:52.714477 | 2022-04-19T19:02:46 | 2022-04-19T19:02:46 | 81,975,047 | 21 | 12 | null | 2018-07-26T18:01:12 | 2017-02-14T18:15:33 |
Python
|
UTF-8
|
Python
| false | false | 5,217 |
py
|
import pysam,sys
import numpy as np
from collections import Counter
from resources.extract import extractRegion,fqRec,rc
MINCLUSTERSIZE=5
def main(parser):
args = parser.parse_args()
if args.inBAM and args.inFastq:
raise SampleReads_Exception('Only one input, either -b or -q')
if args.inBAM:
bam = pysam.AlignmentFile(args.inBAM,check_sq=False)
names = getReadNamesBam(bam,HP=args.haplotag)
if args.region:
if not args.reference:
raise SampleReads_Exception('Must pass reference for region extraction')
recGen = extractRegion(args.inBAM,
args.reference,
region=args.region,
flanksize=args.flanksize,
revcomp=args.revcomp)
else:
recGen = recIterBam(bam,revcomp=args.revcomp)
elif args.inFastq:
names = getReadNamesFq(args.inFastq)
recGen = recIterFq(args.inFastq,revcomp=args.revcomp)
else:
raise SampleReads_Exception('Must have input, either -b or -q')
if not len(names):
raise SampleReads_Exception('No reads returned')
if len(names)<MINCLUSTERSIZE:
raise SampleReads_Exception('Fewer than %i reads returned' % MINCLUSTERSIZE)
np.random.seed(args.seed)
size = args.nReads if args.nReads else len(names)
selected = Counter(np.random.choice(names,size=size,replace=args.replace))
nrecs = 0
with (open(args.out,'w') if args.out else sys.stdout) as oFile:
for name,seq,qual in recGen:
cname = clipReadName(name)
if cname in selected:
rec = fqRec(cname,seq,qual)
nrecs += 1
for _ in range(selected[cname]):
oFile.write(rec)
if nrecs == size:
break
return None
def recIterBam(bam,revcomp=False):
for rec in bam:
if rec.flag & 0x900:
continue
seq = rec.query_sequence
qual = ''.join([chr(q+33) for q in rec.query_qualities])
if revcomp:
seq = rc(seq)
qual = qual[::-1]
yield rec.query_name,seq,qual
def recIterFq(fastq,revcomp=False):
trs = rc if revcomp else (lambda x:x)
trq = (lambda v:v[::-1]) if revcomp else (lambda x:x)
for rec in pysam.FastxFile(fastq):
yield rec.name,trs(rec.sequence),trq(rec.quality)
def getReadNamesBam(bam,HP=None):
crit = (lambda rec: True) if HP is None else (lambda rec: rec.get_tag('HP')==HP)
try:
return sorted(set(rec.query_name for rec in bam if crit(rec)))
except KeyError:
raise SampleReads_Exception('No HP tag in BAM')
finally:
bam.reset()
def getReadNamesFq(fastq):
return [clipReadName(rec.name) for rec in pysam.FastxFile(fastq)]
def clipReadName(name,nFields=3):
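    # Keep the first nFields '/'-separated fields of a read name, e.g. (assumed
    # PacBio-style name) 'movie/zmw/qstart_qend/extra' -> 'movie/zmw/qstart_qend'.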
return '/'.join(name.split('/')[:nFields])
class SampleReads_Exception(Exception):
pass
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(prog='sampleBam.py', description='export a random sample of reads in fastq format')
parser.add_argument('-b','--inBAM', dest='inBAM', type=str, default=None,
help='BAM containing reads to sample. Default None')
parser.add_argument('-q','--inFastq', dest='inFastq', type=str, default=None,
help='fastq containing reads to sample. No region or HP filtering, use all reads. Default None')
parser.add_argument('-n','--nReads', dest='nReads', type=int, default=0,
help='Reads to sample. To resample for bootstrapping, use default. Default 0 (all reads)')
parser.add_argument('--reg', dest='region', type=str, default=None,
help='Target region to extract, format \'[chr]:[start]-[stop]\'. Example \'4:3076604-3076660\'. Default None.')
parser.add_argument('--ref', dest='reference', type=str, default=None,
help='Reference fasta used for mapping BAM if extracting region. Must have .fai index. Default None')
parser.add_argument('-f','--flanksize', dest='flanksize', type=int, default=100,
help='Size of flanking sequence mapped for extracting repeat region. Default 100')
parser.add_argument('--rc', dest='revcomp', action='store_true', default=False,
help='Rev-comp extracted region. Default Reference Direction')
parser.add_argument('-H','--haplotag', dest='haplotag', type=int, default=None,
help='Sample from one HP tag value. Default None (all reads)')
parser.add_argument('-s','--seed', dest='seed', type=int, default=17,
help='Random seed. Default 17')
parser.add_argument('-o','--out', dest='out', type=str, default=None,
help='Output file. Default stdout')
parser.add_argument('-r','--replace', dest='replace', action='store_true', default=False,
help='Sample with replacement. default False')
try:
main(parser)
except SampleReads_Exception as e:
print('ERROR: %s' % e)
sys.exit(1)
|
[
"jharting@pacificbiosciences.com"
] |
jharting@pacificbiosciences.com
|
e6d3baf43e45a157c9b75111930070bf6b6888f5
|
e99e6398bbcdc5ee7ecb10ab085d8bd9642be1b9
|
/matlab/python_data_parsing/txt_mention_matlab_singleFile_crawler.py
|
dc705cefe23d8855f39551bd85b0e02bd57fc8cb
|
[
"Apache-2.0"
] |
permissive
|
researchoor/community-evolution-analysis
|
091a2a2010cd5a3375254fe473a60e7586a0d5ff
|
afc25ced825f130c1ffa8817691b12a851f784ab
|
refs/heads/master
| 2023-03-18T15:35:50.563865 | 2016-04-08T17:23:05 | 2016-04-08T17:23:05 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,867 |
py
|
#-------------------------------------------------------------------------------
# Purpose: parsing data straight from the crawler's txt files to a form:
# author1 mentioned1 unixTimestamp\n
# author1 mentioned2,... unixTimestamp\n
# creating a single file without the text content to render the
# matlab functions more efficient.
# Required libs: wxPython GUI toolkit, python-dateutil
# Author: konkonst
#
# Created: 31/05/2013
# Copyright: (c) ITI (CERTH) 2013
# Licence: <apache licence 2.0>
#-------------------------------------------------------------------------------
import glob
import wx
import dateutil.parser
import time
# User selects dataset folder
app = wx.PySimpleApp()
datasetPath = 'E:/konkonst/retriever/crawler_temp/'
dialog = wx.DirDialog(None, "Please select your dataset folder:",defaultPath=datasetPath)
if dialog.ShowModal() == wx.ID_OK:
dataset_path= dialog.GetPath()
dialog.Destroy()
#User selects target folder
targetPath = 'E:/konkonst/retriever/crawler_temp/'
dialog = wx.DirDialog(None, "Please select your target folder:",defaultPath=targetPath)
if dialog.ShowModal() == wx.ID_OK:
target_path= dialog.GetPath()
dialog.Destroy()
###Parsing commences###
my_txt=open(target_path+"/authors_mentions_time.txt","w")
for filename in sorted(glob.glob(dataset_path+"/testnet.txt.*"),reverse=True):
print(filename)
with open(filename,'r') as f:
for line in f:
read_line = line.strip()
splitLine=read_line.split("\t")
            dt=dateutil.parser.parse(splitLine[2],fuzzy=True)
mytime=int(time.mktime(dt.timetuple()))
for mentions in splitLine[1].split(","):
my_txt.write(splitLine[0]+"\t"+mentions+"\t"+str(mytime)+"\n") #author singlemention time
my_txt.close()
|
[
"dinos66@yahoo.com"
] |
dinos66@yahoo.com
|
7c8a65f96ec1a7cc32f5f3e36173da0d9484771c
|
86510b47b768d80127adcbd53b06fdff58fd95a4
|
/python/problem_007.py
|
e76b196ef403cf9e93d791f86684f7016143f1c5
|
[] |
no_license
|
Kimbsy/project-euler
|
d018ad759ae599147e11431f818c9bfd3fc82f73
|
e1eda2779b6499a6d33a848eacc5e1c15405bf70
|
refs/heads/master
| 2021-08-27T16:22:19.167892 | 2021-08-16T17:09:08 | 2021-08-16T17:09:08 | 50,948,043 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 432 |
py
|
import math
"""By listing the first six prime numbers: 2, 3, 5, 7, 11, and 13, we can see
that the 6th prime is 13.
What is the 10 001st prime number?
"""
def is_prime(num):
for i in range(2, int(math.sqrt(num)) + 1):
        if num % i == 0:
return False
return True
num = 2
count = 1
max_count = 10001
while count < max_count:
num = num + 1
if is_prime(num):
count = count + 1
print(num)
|
[
"lordkimber@gmail.com"
] |
lordkimber@gmail.com
|
b5f3d7ecb4bf3d04b8fa9c694304947ef979480b
|
b7e726b03ed9607380185c5ecf476f725a70beae
|
/廖雪峰教程代码笔记/网络编程/tcpHttpClient.py
|
b68e771f90e393852c35a95f37a2f612456cb6f7
|
[] |
no_license
|
3101010/mycode
|
7c322463e92577a22a90973a18eed0cb114781ad
|
1b9451b5d1147c37908606a25771188742284b4f
|
refs/heads/master
| 2021-06-18T16:50:11.071725 | 2021-01-27T09:04:36 | 2021-01-27T09:04:36 | 147,353,514 | 0 | 0 | null | 2018-09-04T13:48:50 | 2018-09-04T13:48:50 | null |
UTF-8
|
Python
| false | false | 509 |
py
|
import socket
target_host = "wiki.viewcn.cn"
target_port = 80
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((target_host, target_port))
s.send(b'GET / HTTP/1.1\r\nHost: wiki.viewcn.cn\r\nConnection: Close\r\n\r\n')
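# 'Connection: Close' tells the server to close the socket after responding,
# so the recv() loop below can treat an empty read (b'') as end of response.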
#res = s.recv(4096)
buffer = []
while True:
d = s.recv(1024)
if d:
buffer.append(d)
else:
break
data = b''.join(buffer)
s.close()
print(data)
header, html = data.split(b'\r\n\r\n',1)
with open('viewcn.html', 'wb') as f:
f.write(html)
|
[
"463266963@qq.com"
] |
463266963@qq.com
|
7db42526f129711e2a485c24693f03700306dda0
|
643bd1f87e132ac0327b74cbcc1f3c44c0949c5b
|
/source/profiles/migrations/0001_initial.py
|
b646cd056ade0e8d30769178a086436baf21477b
|
[] |
no_license
|
nsu-loop/nsu-loop-application
|
5f6320c273635c495b0d616e7dd71616aeb3d444
|
6739b25634d6558098c16e7cb65ab92f19091786
|
refs/heads/master
| 2023-05-01T20:43:48.124817 | 2021-05-09T09:10:10 | 2021-05-09T09:10:10 | 348,506,380 | 0 | 0 | null | 2021-05-08T19:18:13 | 2021-03-16T22:12:01 |
Python
|
UTF-8
|
Python
| false | false | 1,863 |
py
|
# Generated by Django 3.2.1 on 2021-05-05 07:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(blank=True, max_length=200)),
('last_name', models.CharField(blank=True, max_length=200)),
('bio', models.TextField(default='no bio...', max_length=300)),
('email', models.EmailField(blank=True, max_length=200)),
('phone', models.CharField(blank=True, max_length=100, null=True)),
('major', models.CharField(blank=True, max_length=200, null=True)),
('cgpa', models.FloatField(blank=True, null=True)),
('grad_year', models.IntegerField(blank=True, null=True)),
('country', models.CharField(blank=True, max_length=200)),
('avatar', models.ImageField(default='avatar.png', upload_to='avatars/')),
('skills', models.JSONField(blank=True, null=True)),
('slug', models.SlugField(blank=True, unique=True)),
('updated', models.DateTimeField(auto_now=True)),
('created', models.DateTimeField(auto_now_add=True)),
('friends', models.ManyToManyField(blank=True, related_name='friends', to=settings.AUTH_USER_MODEL)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"fatemarai.05@gmail.com"
] |
fatemarai.05@gmail.com
|
3c02a6977dc9c8e271efadbdfcd51e8293c8eb3f
|
9ea4a493abc954220b57e291a589cc42fbec92a4
|
/dolittle-apps/blocks/sources/timestamp_seconds_source.py
|
b8925b3bdbe22d0dea18f9b207258de1fe40ae17
|
[] |
no_license
|
lab11/dolittle
|
7327151dc9ed98b587de06a6087333859f64e795
|
f88efb1e2f9c4f50a0b09e1056be92304a6a3513
|
refs/heads/master
| 2021-03-12T23:54:39.439343 | 2015-07-15T05:50:01 | 2015-07-15T05:50:01 | 31,397,519 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 670 |
py
|
from pyblocks.source import PollingSource
import time
class TimestampSecondsSource(PollingSource):
def __init__(self):
super(TimestampSecondsSource, self).__init__()
self.start_polling()
def poll(self):
one_second = 1
timestamp = int(time.time())
msg = {"type": "timestamp_seconds", "timestamp": timestamp}
self.send(msg)
return one_second
if __name__ == "__main__":
block = TimestampSecondsSource()
block.client.loop_forever()
"""
from dolittle pkg root:
python -m src.lib.sources.timestamp_seconds_source -name 'Timestamp in seconds' -out time/timestamps/seconds
"""
|
[
"meghan.leah.clark@gmail.com"
] |
meghan.leah.clark@gmail.com
|
149aed53ef04fe6d76ede1ef8340dd96c015d78c
|
e9cdf644f02c5f90e5af4ebcdfbd49e5739b379e
|
/lists/urls.py
|
fe220099563a5549be4bb60720a1f07720c68cff
|
[] |
no_license
|
jaeyholic/airbnb-clone
|
14e661012f3650a7c1486e43bbcb314eb0ac1ba1
|
68c1815e2b62bbf70dfe5a4a580d970015eaccc2
|
refs/heads/master
| 2022-12-13T08:11:04.759907 | 2020-02-12T02:22:28 | 2020-02-12T02:22:28 | 236,361,148 | 0 | 0 | null | 2022-12-10T17:29:25 | 2020-01-26T19:04:10 |
Python
|
UTF-8
|
Python
| false | false | 143 |
py
|
from django.urls import path
from . import views
app_name = "trips"
urlpatterns = [
path("", views.ListView.as_view(), name="trips"),
]
|
[
"gabsco208309@hotmail.com"
] |
gabsco208309@hotmail.com
|
af339e6e8cfe428ea337e68bf6d8e1158b929ef8
|
907133d5859dad3c8e0ab40d127f225f8b126625
|
/WebscrapingUsingJson.py
|
77a7f9ef5ae413745caac1b8ec815c268334087c
|
[] |
no_license
|
jatinverma12/web-scraping
|
8cde778892f4c5693df6f91313373d105bd62571
|
9804f898e6d6b1750120fa99e1781d4a3446df55
|
refs/heads/master
| 2020-07-05T15:06:32.508737 | 2019-10-19T03:25:09 | 2019-10-19T03:25:09 | 202,680,712 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,004 |
py
|
a=dict()
a={
'images':[],
'News':[],
'Important Points':'',
'Blog':[]
}
import requests
import json
from bs4 import BeautifulSoup as bs
url='https://iiitd.ac.in/'
data=requests.get(url)
soup=bs(data.text,'html.parser')
for i in soup.find_all('img'):
    if i.get('src')[0:5]!='https':
        a['images'].append(url+i.get('src'))
    else:
        a['images'].append(i.get('src'))
#print(a['images'])
news=requests.get(url+'research/rsnews')
news1=bs(news.text,'html.parser')
#print(news1.prettify())
for i in news1.find_all('p'):
a['News'].append(i.getText())
#print(a['News'])
link=soup.select('div#block-block-8.block-block')
for i in link:
a['Important Points']=i.getText()
#print(a['Important Points'])
post=requests.get("https://blog.iiitd.ac.in/")
blog=bs(post.text,'html.parser')
for i in blog.find_all('div',class_='post-container'):
a['Blog'].append(i.a.get('href'))
#print(a['Blog'])
with open('webscraping.json','w') as f:
json.dump(a,f,indent=2)
|
[
"noreply@github.com"
] |
jatinverma12.noreply@github.com
|
941e87b25810cb421d89087a72ae996e2536e894
|
e03c6b1f7b7bf895a24774d92d172ba487df1043
|
/Coco/Otros/python advance/clustering-jararquico.py
|
f1b93c982a7e7cb0f95c3387a2aec6e8425a8278
|
[] |
no_license
|
CocoUrbina/Python
|
a3cec6b7d5f3387c5c9d72bf6ddafd7eb5994eb3
|
f5ad88715eb06cc56911aafb42a00e609699b88f
|
refs/heads/main
| 2023-03-05T22:19:52.446674 | 2021-02-18T02:36:44 | 2021-02-18T02:36:44 | 329,175,156 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,363 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 15 14:08:02 2018
@author: oldemarrodriguez
"""
# The "mglearn" package must be installed with -> pip install mglearn
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from math import pi
# Import the dendrogram function and the ward clustering function from SciPy
from scipy.cluster.hierarchy import dendrogram, ward, linkage, fcluster
from scipy.spatial.distance import pdist
# Example 1. Student data
os.chdir("/Users/oldemarrodriguez/Google Drive/MDCurso/Datos")
print(os.getcwd())
datos = pd.read_csv('EjemploEstudiantes.csv',delimiter=';',decimal=",",index_col=0)
print(datos)
datos.head()
datos.shape
# Agglomerative clustering:
# Declare an instance of the ward linkage
ward_res = ward(datos)
# Plot the dendrogram
plt.figure(figsize=(13,10))
dendrogram(ward_res,labels= datos.index.tolist())
# Add cuts at 2 and 3 clusters
ax = plt.gca()
limites = ax.get_xbound()
ax.plot(limites, [7.25, 7.25], '--', c='k')
ax.plot(limites, [4, 4], '--', c='k')
ax.text(limites[1], 7.25, ' two clusters', va='center', fontdict={'size': 15})
ax.text(limites[1], 4, ' three clusters', va='center', fontdict={'size': 15})
plt.xlabel("Order on the X axis")
plt.ylabel("Distance (agglomeration level)")
# Interpretation
def centroide(num_cluster, datos, clusters):
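    # Mean of the rows assigned to num_cluster, returned as a one-row DataFrame.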
ind = clusters == num_cluster
return(pd.DataFrame(datos[ind].mean()).T)
grupos = fcluster(linkage(pdist(datos), method='ward'),3,criterion='maxclust')
centros = np.array(pd.concat([centroide(1,datos,grupos),centroide(2,datos,grupos),centroide(3,datos,grupos)]))
# RADAR PLOT for interpretation
# ===========================
centros_trans = centros.T
centros_trans
df = pd.DataFrame()
for i in range(datos.shape[1]):
df = pd.concat([df,pd.DataFrame({datos.columns[i]:centros_trans[i,:].tolist()})],axis = 1)
df = pd.concat([df,pd.DataFrame({'grupo': ['Cluster-1','Cluster-2','Cluster-3']})],axis = 1)
df
# Variables and number of variables
variables=list(df)[0:5]
variables
N = len(variables)
N
# Angle of each axis
angulos = [n / float(N) * 2 * pi for n in range(N)]
angulos+= angulos[:1]
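# N axes evenly spaced around the circle; the first angle is repeated so each
# cluster polygon closes back on its starting point.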
# Initialize the radar chart
ax = plt.subplot(111, polar=True)
# Put the first axis at the top
ax.set_theta_offset(pi / 2)
ax.set_theta_direction(-1)
# Draw one axis per variable + the labels
plt.xticks(angulos[:-1], variables)
# Draw the Y-axis labels
ax.set_rlabel_position(0)
plt.yticks([1,2,3,4,5,6,7,8,9,10], ["1","2","3","4","5","6","7","8","9","10"], color="grey", size=7)
plt.ylim(0,10)
# Plot each cluster (group) = one line of data
# Cluster 1
valores=df.loc[0].drop('grupo').values.flatten().tolist()
valores += valores[:1]
ax.plot(angulos, valores, linewidth=1, linestyle='solid', label="Cluster-1")
ax.fill(angulos, valores, 'b', alpha=0.1)
# Cluster 2
valores=df.loc[1].drop('grupo').values.flatten().tolist()
valores += valores[:1]
ax.plot(angulos, valores, linewidth=1, linestyle='solid', label="Cluster-2")
ax.fill(angulos, valores, 'r', alpha=0.1)
# Cluster 3
valores=df.loc[2].drop('grupo').values.flatten().tolist()
valores += valores[:1]
ax.plot(angulos, valores, linewidth=1, linestyle='solid', label="Cluster-3")
ax.fill(angulos, valores, 'b', alpha=0.1)
# Add the legend
plt.legend(loc='upper right', bbox_to_anchor=(0.1, 0.1))
|
[
"Pmontenegro@coopenae.fi.cr"
] |
Pmontenegro@coopenae.fi.cr
|
baf65b2761f55fdc4212eb85a9a1eed2b72cbecc
|
1e4203cd6a1d015300d447553c4bcddf40adfcdd
|
/bin/pilfile.py
|
705b41c2cc9c50c78989351a392b36ccf6a1d0ff
|
[] |
no_license
|
timpandrews/test3
|
65ac973cccc223052fec5b4c5bf835eefbb128f2
|
6073f0e728b53520309864431581523a3cc39746
|
refs/heads/master
| 2020-07-03T16:42:10.522550 | 2016-11-20T01:23:16 | 2016-11-20T01:23:16 | 74,245,316 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,703 |
py
|
#!/Users/timandrews/Desktop/apps/trydjango19/bin/python
#
# The Python Imaging Library.
# $Id$
#
# a utility to identify image files
#
# this script identifies image files, extracting size and
# pixel mode information for known file formats. Note that
# you don't need the PIL C extension to use this module.
#
# History:
# 0.0 1995-09-01 fl Created
# 0.1 1996-05-18 fl Modified options, added debugging mode
# 0.2 1996-12-29 fl Added verify mode
# 0.3 1999-06-05 fl Don't mess up on class exceptions (1.5.2 and later)
# 0.4 2003-09-30 fl Expand wildcards on Windows; robustness tweaks
#
from __future__ import print_function
import getopt
import glob
import logging
import sys
from PIL import Image
if len(sys.argv) == 1:
print("PIL File 0.4/2003-09-30 -- identify image files")
print("Usage: pilfile [option] files...")
print("Options:")
print(" -f list supported file formats")
print(" -i show associated info and tile data")
print(" -v verify file headers")
print(" -q quiet, don't warn for unidentified/missing/broken files")
sys.exit(1)
try:
opt, args = getopt.getopt(sys.argv[1:], "fqivD")
except getopt.error as v:
print(v)
sys.exit(1)
verbose = quiet = verify = 0
logging_level = "WARNING"
for o, a in opt:
if o == "-f":
Image.init()
id = sorted(Image.ID)
print("Supported formats:")
for i in id:
print(i, end=' ')
sys.exit(1)
elif o == "-i":
verbose = 1
elif o == "-q":
quiet = 1
elif o == "-v":
verify = 1
elif o == "-D":
logging_level = "DEBUG"
logging.basicConfig(level=logging_level)
def globfix(files):
# expand wildcards where necessary
if sys.platform == "win32":
out = []
for file in files:
if glob.has_magic(file):
out.extend(glob.glob(file))
else:
out.append(file)
return out
return files
for file in globfix(args):
try:
im = Image.open(file)
print("%s:" % file, im.format, "%dx%d" % im.size, im.mode, end=' ')
if verbose:
print(im.info, im.tile, end=' ')
print()
if verify:
try:
im.verify()
except:
if not quiet:
print("failed to verify image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
except IOError as v:
if not quiet:
print(file, "failed:", v)
except:
import traceback
if not quiet:
print(file, "failed:", "unexpected error")
traceback.print_exc(file=sys.stdout)
|
[
"tim.andrews@imagineifinc.com"
] |
tim.andrews@imagineifinc.com
|
abdaed23fc7fefa47495aa05bbadf19e44d67629
|
4c53a637c2559cae808bcba8e97c51e8717e527f
|
/replay_memory.py
|
fcc2bd74e02283c3b5b932847805c61ca5e31b3d
|
[] |
no_license
|
jswon/envRobot
|
bf07dceec5f820a122ca18ad6768a07311147f17
|
a156c24ac056770b32cd67ca185cf922b94c1200
|
refs/heads/master
| 2020-12-03T09:29:56.981880 | 2017-10-26T08:32:27 | 2017-10-26T08:32:27 | 95,625,606 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,079 |
py
|
import collections
import event_log
import numpy as np
import sys
import tensorflow as tf
import time
import util
#Batch = collections.namedtuple("Batch", "state_1 action reward terminal_mask state_2 internal_state target_obj_hot")
Batch = collections.namedtuple("Batch", "state_1 action reward terminal_mask state_2")
class ReplayMemory(object):
def __init__(self, buffer_size, state_shape, action_dim, opts, load_factor=1.5):
assert load_factor >= 1.5, "load_factor has to be at least 1.5"
self.buffer_size = buffer_size
self.state_shape = state_shape
self.insert = 0
self.full = False
# the elements of the replay memory. each event represents a row in the following
# five matrices.
self.state_1_idx = np.empty(buffer_size, dtype=np.int32)
self.action = np.empty((buffer_size, action_dim), dtype=np.float32)
self.reward = np.empty((buffer_size, 1), dtype=np.float32)
self.terminal_mask = np.empty((buffer_size, 1), dtype=np.float32)
self.state_2_idx = np.empty(buffer_size, dtype=np.int32)
if opts.use_full_internal_state:
internal_state_dim = opts.internal_state_dim
else:
internal_state_dim = 9
# states themselves, since they can either be state_1 or state_2 in an event
# are stored in a separate matrix. it is sized fractionally larger than the replay
# memory since a rollout of length n contains n+1 states.
self.state_buffer_size = int(buffer_size*load_factor)
        if opts.use_raw_pixels:
            shape = [self.state_buffer_size] + list(state_shape)
        else:
            shape = [self.state_buffer_size] + [12]
#shape = [self.state_buffer_size]+list((3,2,7))
self.state = np.empty(shape, dtype=np.float16)
# keep track of free slots in state buffer
self.state_free_slots = list(range(self.state_buffer_size))
# some stats
self.stats = collections.Counter()
def reset_from_event_log(self, log_file):
elr = event_log.EventLogReader(log_file)
num_episodes = 0
num_events = 0
start = time.time()
for episode in elr.entries():
initial_state = None
action_reward_state_sequence = []
for event_id, event in enumerate(episode.event):
if event_id == 0:
assert len(event.action) == 0
assert not event.HasField("reward")
initial_state = event_log.read_state_from_event(event)
else:
action_reward_state_sequence.append((event.action, event.reward,
event_log.read_state_from_event(event)))
num_events += 1
num_episodes += 1
self.add_episode(initial_state, action_reward_state_sequence)
if self.full:
break
        print("reset_from_event_log \"%s\" num_episodes=%d num_events=%d took %s sec" % (log_file, num_episodes, num_events, time.time()-start), file=sys.stderr)
def add_episode(self, initial_state, action_reward_state_sequence):
self.stats['>add_episode'] += 1
assert len(action_reward_state_sequence) > 0
state_1_idx = self.state_free_slots.pop(0)
self.state[state_1_idx] = initial_state
for n, (action, reward, state_2) in enumerate(action_reward_state_sequence):
terminal = n == len(action_reward_state_sequence)-1
state_2_idx = self._add(state_1_idx, action, reward, terminal, state_2)
state_1_idx = state_2_idx
def _add(self, s1_idx, a, r, t, s2):
# print(">add s1_idx=%s, a=%s, r=%s, t=%s" % (s1_idx, a, r, t)
self.stats['>add'] += 1
assert s1_idx >= 0, s1_idx
assert s1_idx < self.state_buffer_size, s1_idx
assert s1_idx not in self.state_free_slots, s1_idx
if self.full:
# are are about to overwrite an existing entry.
# we always free the state_1 slot we are about to clobber...
self.state_free_slots.append(self.state_1_idx[self.insert])
# print("full; so free slot", self.state_1_idx[self.insert]
# and we free the state_2 slot also if the slot is a terminal event
# (since that implies no other event uses this state_2 as a state_1)
# self.stats['cache_evicted_s1'] += 1
if self.terminal_mask[self.insert] == 0:
self.state_free_slots.append(self.state_2_idx[self.insert])
# print("also, since terminal, free", self.state_2_idx[self.insert]
self.stats['cache_evicted_s2'] += 1
# add s1, a, r
self.state_1_idx[self.insert] = s1_idx
self.action[self.insert] = a
self.reward[self.insert] = r
# if terminal we set terminal mask to 0.0 representing the masking of the righthand
# side of the bellman equation
self.terminal_mask[self.insert] = 0.0 if t else 1.0
# state_2 is fully provided so we need to prepare a new slot for it
s2_idx = self.state_free_slots.pop(0)
self.state_2_idx[self.insert] = s2_idx
self.state[s2_idx] = s2
# move insert ptr forward
self.insert += 1
if self.insert >= self.buffer_size:
self.insert = 0
self.full = True
# print("<add s1_idx=%s, a=%s, r=%s, t=%s s2_idx=%s (free %s)" \
# % (s1_idx, a, r, t, s2_idx,
# util.collapsed_successive_ranges(self.state_free_slots))
return s2_idx
def size(self):
return self.buffer_size if self.full else self.insert
def random_indexes(self, n=1):
if self.full:
return np.random.randint(0, self.buffer_size, n)
elif self.insert == 0: # empty
return []
else:
return np.random.randint(0, self.insert, n)
def batch(self, batch_size=None):
self.stats['>batch'] += 1
idxs = self.random_indexes(batch_size)
return Batch(np.copy(self.state[self.state_1_idx[idxs]]),
np.copy(self.action[idxs]),
np.copy(self.reward[idxs]),
np.copy(self.terminal_mask[idxs]),
np.copy(self.state[self.state_2_idx[idxs]]))
def dump(self):
print(">>>> dump")
print("insert", self.insert)
print("full?", self.full)
print("state free slots", util.collapsed_successive_ranges(self.state_free_slots))
if self.insert==0 and not self.full:
print("EMPTY!")
else:
idxs = range(self.buffer_size if self.full else self.insert)
for idx in idxs:
print("idx", idx,)
# print("state_1_idx", self.state_1_idx[idx],)
# print("state_1", self.state[self.state_1_idx[idx]])
# print("action", self.action[idx],)
# print("reward", self.reward[idx],)
# print("terminal_mask", self.terminal_mask[idx],)
# print("state_2_idx", self.state_2_idx[idx])
# print("state_2", self.state[self.state_2_idx[idx]])
print("<<<< dump")
def current_stats(self):
current_stats = dict(self.stats)
current_stats["free_slots"] = len(self.state_free_slots)
return current_stats
if __name__ == "__main__":
# LATE NIGHT SUPER HACK SOAK TEST. I WILL PAY FOR THIS HACK LATER !!!!
rm = ReplayMemory(buffer_size=43, state_shape=(2,3), action_dim=2)
def s(i): # state for insert i
i = (i * 10) % 199
return [[i+1,0,0],[0,0,0]]
def ars(i): # action, reward, state_2 for insert i
return ((i,0), i, s(i))
def FAILDOG(b, i, d): # dump batch and rm in case of assertion
print("FAILDOG", i, d)
print(b)
rm.dump()
assert False
def check_batch_valid(b): # check batch is valid by consistency of how we build elements
for i in range(3):
r = int(b.reward[i][0])
if b.state_1[i][0][0] != (((r-1)*10)%199)+1: FAILDOG(b, i, "s1")
if b.action[i][0] != r: FAILDOG(b, i, "r")
if b.terminal_mask[i] != (0 if r in terminals else 1): FAILDOG(b, i, "r")
if b.state_2[i][0][0] != ((r*10)%199)+1: FAILDOG(b, i, "s2")
terminals = set()
i = 0
import random
while True:
initial_state = s(i)
action_reward_state_sequence = []
episode_len = int(3 + (random.random() * 5))
for _ in range(episode_len):
i += 1
action_reward_state_sequence.append(ars(i))
rm.add_episode(initial_state, action_reward_state_sequence)
terminals.add(i)
print(rm.stats)
for _ in range(7): check_batch_valid(rm.batch(13))
i += 1
|
[
"mn2888@naver.com"
] |
mn2888@naver.com
|
06556d487799ba9f6f307e576a5be1076b0b4ca9
|
b9dee9aa256cf2e01dc4e82d2b7e7a9e47ded535
|
/Scrapy/查询参数.py
|
ad00fba30eedde8d2e3180d570c43234fc624004
|
[] |
no_license
|
dragon7577/pachong
|
a18bf5c91defe7389afd64640982c86bac1133b8
|
21b861a1ab7a966d1f558057522efe85bbcf547e
|
refs/heads/master
| 2020-04-24T01:22:15.433938 | 2019-02-20T04:36:11 | 2019-02-20T04:36:11 | 171,596,334 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 399 |
py
|
import requests
baseurl = 'https://sz.lianjia.com/ershoufang/s?'
headers = {'User-Agent':"Mozilla/5.0"}
key = input("搜索内容:")
pn = input("输入页数:")
pn = (int(pn) - 1)*10
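# Search-engine style paging (assumed): page n maps to result offset (n-1)*10;
# note the base URL points at lianjia.com while wd/pn are Baidu-style params.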
#wd = key&pn=10
params = {
'wd':key,
'pn':pn,
}
#No need to build the URL by hand or URL-encode it; requests encodes the params
res = requests.get(baseurl,params=params,headers=headers)
res.encoding='utf-8'
html = res.text
print(html)
|
[
"sz_huangdong@163.com"
] |
sz_huangdong@163.com
|
5627439b650a6e9da7032486158133713462bcc0
|
d4ac6dd00ee5d46f6bc3cc3c48b88059cbfd5a91
|
/situs/jualtiket/urls.py
|
a0514fa0ae4b54ad65a67fb103fc36fa60680b86
|
[
"MIT"
] |
permissive
|
torikwer/django-penj-tiket
|
26b3e0fdd91ac8ef00dc0d44c9641e9750b7ac9f
|
846ef20d74700f798922f2364df7736b5e41bb82
|
refs/heads/master
| 2020-03-18T04:08:25.788282 | 2018-05-21T13:25:09 | 2018-05-21T13:25:09 | 134,271,700 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 143 |
py
|
from django.urls import path
from . import views
app_name = 'jualtiket'
urlpatterns = [
path('', views.index, name='index'),
]
|
[
"39488116+torikwer@users.noreply.github.com"
] |
39488116+torikwer@users.noreply.github.com
|
c128385b1deff90bf9d4409fc051233c3c3d3a6b
|
51690aef9866cad3d2f8272d44778e29d8dfe101
|
/ingreso.py
|
50477829a0327421ec94c044ca00500487e8407c
|
[] |
no_license
|
mramosv/test-repositorio
|
f1eceaadcf8b787e4195a12295e28dfc99bebc95
|
585792f13ca4ac207e71c2bc78be1a1a28dac358
|
refs/heads/master
| 2023-03-19T14:34:14.772058 | 2021-03-16T19:21:45 | 2021-03-16T19:21:45 | 348,466,911 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 32 |
py
|
a='hola'
print(a)
print(type(a))
|
[
"transistorbipe@gmail.com"
] |
transistorbipe@gmail.com
|
39f9e0d4ddc8672e42e0ab21ce311784b8c69f04
|
3337b552cea857aa49540fd7e2118cf82c3f4ac8
|
/kerckhoff/userprofiles/serializers.py
|
3184c4c0b2971d3ccd40d7d2e2f1ded3131ffa3b
|
[] |
no_license
|
dailybruin/kerckhoff-server
|
176864117067b4ff1c9bbf0239d06572b6dbf5fe
|
21f30e4e0d79843bb67ec244e00e9c27b46f7a7a
|
refs/heads/master
| 2023-04-05T13:56:26.488757 | 2022-11-02T03:58:59 | 2022-11-02T03:58:59 | 168,264,207 | 3 | 0 | null | 2022-11-02T03:59:00 | 2019-01-30T02:17:32 |
Python
|
UTF-8
|
Python
| false | false | 390 |
py
|
from rest_framework import serializers
from .models import UserProfile
class UserProfileSerializer(serializers.ModelSerializer):
class Meta:
model = UserProfile
fields = (
"user",
"title",
"profile_img",
"description",
"linkedin_url",
"github_url",
)
read_only_fields = ("user",)
|
[
"rkhajuria99@gmail.com"
] |
rkhajuria99@gmail.com
|
a227644154cecd4b3a8035f581856edb34d41638
|
7e002be6a1ba145984fe3f827c7a4cda8a1f11eb
|
/swea/d1/2071.py
|
3998fc327eb62f99c3a5121853e262d7a7a15a1d
|
[] |
no_license
|
ok2qw66/TIL
|
ed17712de266291e93b37c5bc700b0fe222656c3
|
f0e623f0ff231edec1088a96d87d26644c7cfb0b
|
refs/heads/master
| 2023-06-22T20:23:21.311599 | 2021-07-22T07:08:24 | 2021-07-22T07:08:24 | 290,894,578 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 226 |
py
|
T = int(input())
# Multiple test cases are given, so handle each one.
for test_case in range(1, T + 1):
total = sum(list(map(int,input().split())))
print(f'#{test_case} {round(total/10)}')
|
[
"ok2qw66@gmail.com"
] |
ok2qw66@gmail.com
|
3b2c8a39c47fb25067878ad79635a6e28b0ae266
|
f6e03bc2747e8d1ca686b25e7f34a429886ba3f3
|
/machinelearning/cbct/build_cbct_learner.py
|
073853c6c8166f3e77a0a6c519c82cc5d099f2dc
|
[
"MIT"
] |
permissive
|
randlet/pylinac
|
2b3913d7d549b985a074ddcf291d018cfb1dd5d2
|
df5dd913f429536180d998012b4f5cef8d443f88
|
refs/heads/master
| 2021-06-11T08:50:36.472577 | 2019-06-03T14:23:17 | 2019-06-03T14:23:17 | 151,657,740 | 1 | 0 |
MIT
| 2018-10-05T01:40:17 | 2018-10-05T01:40:16 | null |
UTF-8
|
Python
| false | false | 276 |
py
|
import os.path as osp
from machinelearning.tools import train
path = osp.join(osp.dirname(__file__), 'data', 'CatPhan 600')
parameters = {
'kernel': ['linear'],
'C': [1, 0.1, 5, 10, 50],
}
train(path, train_size=0.95, parameters=parameters, clf_name='catphan600')
|
[
"jkerns100@gmail.com"
] |
jkerns100@gmail.com
|
3a5949afe7182ebdf3349ed8afc3b0e5c4ad6a12
|
5a6c9636af5d0cb93f31bdaff1e77da91d3cf84e
|
/src/youtube.py
|
3051d2cb11bb0e95d4886ff1d089a523e9986b9b
|
[] |
no_license
|
wanggonging/carrot
|
da75ace12696b29f588a57182c7da8f4886c3cf4
|
984821322006c2c3c7a5b7918fa3e0cc40839de8
|
refs/heads/master
| 2020-04-04T22:05:16.591796 | 2018-11-17T05:12:20 | 2018-11-17T05:12:20 | 156,309,927 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,722 |
py
|
from bs4 import BeautifulSoup
import json
import os
import pprint
import time
import urllib2
import unittest
def refresh_channel_index_with_html(index, html, channel_max):
soup = BeautifulSoup(html, "lxml")
now = int(time.time())
i = 0
for h3 in soup.find_all("h3", class_="yt-lockup-title"):
href=unicode(h3.a['href'])
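        # '/watch?v=' (9 chars) + an 11-character video id == 20 chars total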
if (len(href) == 20) and (href[0:9] == "/watch?v="):
key=href[9:20]
if not key in index:
title=h3.a.get_text()
published=now-i
index[key] = {'key':key, 'published':published, 'title':title}
i = i+1
if i >= channel_max:
break
def refresh_channel_index_with_apikey(index, apikey, channel_id, channel_max, channel_current_count):
token=''
now = int(time.time())
i = 0
while i < channel_max:
url = 'https://www.googleapis.com/youtube/v3/search?key='+apikey+'&channelId='+channel_id+'&part=snippet,id&order=date&maxResults=50&pageToken='+token
items = json.load(urllib2.urlopen(url))
token=items.pop('nextPageToken', None)
total=items['pageInfo']['totalResults']
for item in items['items']:
if 'videoId' in item['id']:
key = item['id']['videoId']
if not key in index:
title = item['snippet']['title']
publishedAt = item['snippet']['publishedAt']
published=now-i
index[key] = {'key':key, 'published':published, 'publishedAt':publishedAt, 'title':title}
i = i+1
if i>= channel_max:
break
# Load only the first page if this channel has enough items
if channel_current_count >= total or channel_current_count >= channel_max:
break
# Last page?
if token == None:
break
def refresh_channel_index(index, channel_id, channel_max):
url = "https://www.youtube.com/channel/"+channel_id
html = urllib2.urlopen(url).read()
refresh_channel_index_with_html(index, html, channel_max)
class TestYoutube(unittest.TestCase):
def test_refresh_channel_index(self):
if os.path.exists("html.tmp"):
print("Reusing cached html file ...")
with open("html.tmp", "rb") as f:
html = f.read()
else:
url = "https://www.youtube.com/channel/UCZ0oKRSK284apF3AxAw0ahg" # Xulin's channel
print("Downloading " + url + "...")
html = urllib2.urlopen(url).read()
# Uncomment these if you need to fix the code, but don't want to download the url every time
#tmp_file = open("html.tmp","w")
#tmp_file.write(html)
#tmp_file.close()
#print("Saved to cache file")
index = {}
refresh_channel_index_with_html(index, html, 1)
pprint.pprint(index)
assert len(index) == 1
assert index['_vZnN0EaRps'] != None
def test_refresh_channel_index_with_apikey(self):
apikey='AIzaSyCyaIc6wpatDoeuPVsET_2_-yh5arU27NA'
channel_id='UCa6ERCDt3GzkvLye32ar89w'
index={}
refresh_channel_index_with_apikey(index, apikey, channel_id, 99999, 99999)
assert(len(index)==50)
index={}
refresh_channel_index_with_apikey(index, apikey, channel_id, 99999, 10)
i = 0
for item in sorted(index.values(), key=lambda x: x['published'], reverse=True):
print(str(i) + ': '+item['key']+' '+item['title'])
i+=1
assert(len(index)>100)
assert('Subpk2MwYKk' in index)
if __name__ == "__main__":
unittest.main()
|
[
"wanggonging@gmail.com"
] |
wanggonging@gmail.com
|
a05d271bf8219c189d34db4e5ed42131762c5c7b
|
29585537e2e96c169ae83cd660070ba3af0a43a9
|
/tests/market/migrations/0007_generalmanager_headshot.py
|
574c287c3d589885c5abeb61a95e9675418a9419
|
[
"Apache-2.0"
] |
permissive
|
ballke-dev/django-admin-confirm
|
4c400e0d6cb3799e7d9901731db99b4a579ec06e
|
21f5a37c5ecf1fee30f95d8a2ce01207916a22f8
|
refs/heads/main
| 2023-06-23T03:54:50.326670 | 2021-07-22T17:04:13 | 2021-07-22T17:04:13 | 386,659,834 | 0 | 0 |
NOASSERTION
| 2021-07-16T14:11:48 | 2021-07-16T14:11:47 | null |
UTF-8
|
Python
| false | false | 462 |
py
|
# Generated by Django 3.1.6 on 2021-02-24 01:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("market", "0006_auto_20210222_0312"),
]
operations = [
migrations.AddField(
model_name="generalmanager",
name="headshot",
field=models.ImageField(
blank=True, null=True, upload_to="tmp/gm/headshots"
),
),
]
|
[
"noreply@github.com"
] |
ballke-dev.noreply@github.com
|
81406ddda68974435969e35b5983d1e811dcc6d2
|
27f070b80170c6bf42c609300091c75f5c55dccf
|
/EE183DB/DataGathering/Gather.py
|
99f9a9fc738152ad70ee0399413e5c62890d6b36
|
[] |
no_license
|
MishkaMN/Cornhuskers
|
11344d4a30038ab16057e8e6612971ba94b70597
|
9c3b6d1d311add53fb6fd43e8f74d72f701db913
|
refs/heads/master
| 2020-06-16T04:46:58.140678 | 2019-05-28T22:36:05 | 2019-05-28T22:36:05 | 195,482,990 | 0 | 0 | null | 2019-07-06T01:05:19 | 2019-07-06T01:05:19 | null |
UTF-8
|
Python
| false | false | 9,048 |
py
|
from ws4py.client.threadedclient import WebSocketClient
import time, requests
import math
import numpy as np
import cv2
import cv2.aruco as aruco
import datetime
import csv
current_milli_time = lambda: int(round(time.time() * 1000))
#esp8266host = "ws://192.168.0.104:81/"
esp8266host = "ws://192.168.50.133:81/"
command = ""
class DummyClient(WebSocketClient):
def __init__(self, host):
super(DummyClient, self).__init__(host)
#self.est_state = np.array([0, Kfilter.W/2, Kfilter.L/2]);
#self.P = np.eye(3)
#self.z_init = np.array([0,0,0])
#self.command = np.array([0,0,0])
#self.z_final = np.array([0,0,0])
def opened(self):
print("Socket Opened")
def closed(self, code, reason=None):
print("Socket Closed", code, reason);
def received_message(self, msg):
parts = str(msg).split(",")
if(parts[0] == "Last"):
frontSense = (float(parts[1])-62.4)/.937
sideSense = (float(parts[2])-41.7)/.972
theta = float(parts[3])
self.z_final = [theta, frontSense, sideSense]
#print(self.z_final)
""" START FILTERING """
#print("State:")
#print(self.est_state[0]*180.0/math.pi, self.est_state[1], self.est_state[2])
pwmL, pwmR, dt = command.split(" ")
self.est_state, self.P = Kfilter.aPrioriUpdate(self.est_state, float(dt)/1000.0, self.P, float(pwmR), float(pwmL))
#print(self.est_state)
self.est_state, self.P = Kfilter.aPosterioriUpdate(self.P, self.z_final, self.est_state, float(dt)/1000.0)
print("Filtered State")
print(self.est_state[0]*180.0/math.pi, self.est_state[1], self.est_state[2])
#print(self.P)
def dir_to_cmd(command):
tmp = command.split()
dirs = ['f', 'r', 'b', 'l','a', 'w', 's', 'd' ]
if tmp[0] in dirs:
if tmp[0] == 'f' or tmp[0] == 'w':
cmd = '180 0 ' + tmp[1]
elif tmp[0] == 'b' or tmp[0] == 's':
cmd = '0 180 ' + tmp[1]
elif tmp[0] == 'r' or tmp[0] == 'd':
cmd = '180 180 '+ tmp[1]
elif tmp[0] == 'l' or tmp[0] == 'a':
cmd = '0 0 '+ tmp[1]
else:
cmd = command
return cmd
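# e.g. dir_to_cmd('f 500') -> '180 0 500' (both wheels forward for 500 ms per
# the mapping above); non-directional strings such as '90 90' pass through unchanged.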
#helper function: find center of aruco tag from corners
def getCenter(corners, i, j):
center = (int((corners[i][j][0][0] + corners[i][j][1][0] + corners[i][j][2][0] + corners[i][j][3][0])/4), int((corners[i][j][0][1] + corners[i][j][1][1] + corners[i][j][2][1] + corners[i][j][3][1])/4))
return center
#helper function: find center and direction vector from aruco tag
def getPose(corners, pMatrix):
center = np.array(getCenter(corners, 4, 0), dtype='float32')
topCenter = np.array( ((corners[4][0][0][0] + corners[4][0][1][0])/2, (corners[4][0][0][1] + corners[4][0][1][1])/2) , dtype='float32')
pts = np.array([np.array([center, topCenter], dtype='float32')])
newPts = cv2.perspectiveTransform(pts, pMatrix)
center = newPts[0][0]
topCenter = newPts[0][1]
vec = topCenter - center
return center,topCenter,vec
if __name__ == '__main__':
data = []
try:
ws = DummyClient(esp8266host)
ws.connect()
cap = cv2.VideoCapture(1)
envLength = 1219
envWidth = 914
l_inputs = [83, 90, 180]
r_inputs = [101, 90, 85]
l_stop, r_stop = [90, 90]
command_times = range(100, 1500 + 100, 100)
commands = []
l_len = len(l_inputs)
r_len = len(r_inputs)
for l_idx in range(0, l_len):
for r_idx in range(0, r_len):
for t in command_times:
commands.append((l_inputs[l_idx], r_inputs[r_idx], t, True))
commands.append((l_stop, r_stop, 500, False))
commands.append((l_inputs[l_len - l_idx - 1], r_inputs[r_len - r_idx - 1], t, False))
commands.append((l_stop, r_stop, 500, False))
print("Starting...")
flag = False
x = envWidth / 2
y = envLength / 2
theta = 0
l_input = r_input = 90
record = False
command_stop_time = 0
start_time = current_milli_time()
trial_num = 0
filename = time.strftime("%Y%m%d_%H%M%S") + '.csv'
with open(filename, 'w') as csvfile:
writer = csv.writer(csvfile)
while(commands):
ret, frame = cap.read()
#detect aruco tags and find corners
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
if (not ids is None) and (len(ids) == 5):
sortedCorners = [x for _,x in sorted(zip(ids,corners))]
#find centers of environment tags
topLeft = getCenter(sortedCorners, 0, 0)
topRight = getCenter(sortedCorners, 1, 0)
bottomLeft = getCenter(sortedCorners, 2, 0)
bottomRight = getCenter(sortedCorners, 3, 0)
#Perspective transform to correct for angle of camera
pts1 = np.float32([topLeft, topRight, bottomLeft, bottomRight])
pts2 = np.float32([[0,0],[envWidth,0],[0,envLength],[envWidth,envLength]])
M = cv2.getPerspectiveTransform(pts1,pts2)
#perform pose estimates
center, topCenter, vec = getPose(sortedCorners,M)
center = (center[0], envLength - center[1])
angle = np.arctan2(-1*vec[1], vec[0])
x,y = center
theta = angle*180/np.pi
print(x, y, theta)
#warp frames
frame = cv2.warpPerspective(frame,M,(envWidth,envLength))
# Identify red obstacles
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([0, 200, 100])
upper_red1 = np.array([10, 255, 255])
mask1 = cv2.inRange(hsv, lower_red1, upper_red1)
lower_red2 = np.array([160, 100, 100])
upper_red2 = np.array([179, 255, 255])
mask2 = cv2.inRange(hsv, lower_red2, upper_red2)
mask = cv2.addWeighted(mask1, 1.0, mask2, 1.0, 0.0);
isolated = cv2.bitwise_and(frame, frame, mask= mask)
#cv2.imshow("mask", isolated)
_, threshold = cv2.threshold(isolated, 80, 255, cv2.THRESH_BINARY)
imgray = cv2.cvtColor(threshold, cv2.COLOR_BGR2GRAY);
contours, hierarchy = cv2.findContours(imgray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# Find the index of the largest contour
areas = np.array([cv2.contourArea(c) for c in contours])
cnts = [contours[i] for i in np.where(areas > 10)[0]]
for cnt in cnts:
bx,by,bw,bh = cv2.boundingRect(cnt)
cv2.rectangle(frame,(bx,by),(bx+bw,by+bh),(0,255,0),2)
print( (bx+bw/2, envLength - (by+bh/2) ))
cv2.line(frame, (int(center[0]), int(envLength - center[1])), (int(topCenter[0]), int(topCenter[1])), (0,255,0), 3)
cv2.imshow('frame',frame)
current_time = (current_milli_time() - start_time) / 1000 # Time in seconds
current_data = np.array([trial_num, current_time, np.array([l_input, r_input]), np.array([x, y, theta])])
#data.append(current_data)
if record:
writer.writerow([trial_num, current_time, l_input, r_input, x, y, theta])
if current_milli_time() >= command_stop_time:
# Get next command
l_input, r_input, duration, record = commands.pop(0)
trial_num += 1
# Drive motors
command = str(l_input) + ' ' + str(r_input)
ws.send(command)
command_stop_time = current_milli_time() + duration
print('Input ' + str(l_input) + ' ' + str(r_input) + ' for ' + str(duration) + ' ms')
if cv2.waitKey(1) & 0xFF == ord('q'):
break
print('All trials complete')
print('Total inputs: ', trial_num + 1)
except KeyboardInterrupt:
ws.send('90 90')
finally:
ws.send('90 90')
ws.close()
cap.release()
cv2.destroyAllWindows()
# filename = time.strftime("%Y%m%d_%H%M%S")
#data = np.array(data)
#np.save(filename, data)
|
[
"brendantanaka@gmail.com"
] |
brendantanaka@gmail.com
|
559f11931879d3e33276860e06f2a57030381949
|
313ada5e6b8474c6d4ded075c88cbea782847974
|
/Network Data/termcharsendrecv.py
|
c427cda04880be47c7c79b02292cbc09d24dc450
|
[] |
no_license
|
rizalbayyu/Pemjar
|
4aa2398138ff69979ec839c2c2aac917b33f7751
|
625822e21900251231a8840f6e7e8704dc58af75
|
refs/heads/master
| 2021-05-22T14:17:35.635596 | 2020-04-08T14:31:45 | 2020-04-08T14:31:45 | 252,959,643 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 596 |
py
|
def sendalldata(conn, message):
    # Append the termination characters to the end of the message
    message = message + "\r\n"
    # Send the message
    conn.send(message.encode('ascii'))
def recvalldata(conn):
    data = ""
    # Iterate to read the data
    while True:
        # Collect a chunk
        buffer = conn.recv(20)
        data = data + buffer.decode('ascii')
        # Check the accumulated data, not just the last chunk, so a
        # "\r\n" split across two recv() calls is still detected
        if "\r\n" in data:
            # Strip the termination characters
            return data.replace("\r\n", "")
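# Minimal round-trip sketch over a local socket pair (names here are
# illustrative, not part of the assignment):
#   import socket
#   a, b = socket.socketpair()
#   sendalldata(a, "halo")
#   assert recvalldata(b) == "halo"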
|
[
"noreply@github.com"
] |
rizalbayyu.noreply@github.com
|
6a2be014eb9649c77461f9d7117a20e1f10fb3d6
|
0502750293383c6dae2aaf4013717d9c83f52c62
|
/exercism/python/archive/circular-buffer/circular_buffer.py
|
c143a3cdcdf9880436d13286be272c674f6461d5
|
[] |
no_license
|
sebito91/challenges
|
fcfb680e7fc1abfa9fea9cd5f108c42795da4679
|
b4f2d3b7f8b7c78f02b67d67d4bcb7fad2b7e284
|
refs/heads/master
| 2023-07-08T15:43:42.850679 | 2023-06-26T19:38:51 | 2023-06-26T19:38:51 | 117,160,720 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,199 |
py
|
""" Mdolue to implement a circular-buffer """
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
class BufferFullException(Exception):
""" define execption when buffer is full """
def __init__(self, message=None):
if not message:
message = "buffer is full"
super(BufferFullException, self).__init__(message)
class BufferEmptyException(Exception):
""" define exception when buffer is empty """
def __init__(self, message=None):
if not message:
message = "buffer is empty"
super(BufferEmptyException, self).__init__(message)
class CircularBuffer(object):
""" definition of the back CircularBuffer class """
def __init__(self, datasize):
if datasize <= 0:
raise ValueError("improper size for CircularBuffer: {}".format(datasize))
self.buffer = [None] * datasize
self.capacity = datasize
self.current = (0, 0)
def get_elem(self, index):
""" helper function to increment counters """
temp = self.current[0]
if index == 0:
self.current = ((self.current[0] + 1) % (self.capacity), self.current[1])
else:
temp = self.current[1]
self.current = (self.current[0], (self.current[1] + 1) % (self.capacity))
return temp
def read(self):
""" read function as part of CircularBuffer """
if len(self.buffer) < 1 or all(each is None for each in self.buffer):
raise BufferEmptyException("tried reading from empty buffer")
idx = self.get_elem(0)
data = self.buffer[idx]
self.buffer[idx] = None
return data
def write(self, data):
""" write function as part of CircularBuffer """
if self.current[0] == self.current[1] and self.buffer[self.current[0]]:
raise BufferFullException("cannot add {} to full buffer".format(data))
self.buffer[self.get_elem(1)] = data
def overwrite(self, data):
""" overwrite the oldest data first """
self.buffer[self.get_elem(0)] = data
def clear(self):
""" clear out the buffer """
self.buffer = [None] * self.capacity
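# Minimal usage sketch (not part of the exercism test suite):
#   buf = CircularBuffer(2)
#   buf.write(1)
#   buf.write(2)
#   assert buf.read() == 1   # oldest element comes out first
#   buf.write(3)             # the slot freed by the read is reused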
|
[
"sebito91@gmail.com"
] |
sebito91@gmail.com
|
4342c672a5e50d7cdadb37ed1b7ea31de4fee43a
|
39e8196888f370b1921a9047231b4bf0a387d382
|
/sqlite.py
|
4986c621f2f6bf2a3b4be1b1a3a9d6d21eb232c2
|
[] |
no_license
|
sumoonx/apollo_server
|
f5f4fb1fae3ea19f5036329bc52f1dbb889ed8ef
|
447bb386b3f330d178b8ca9511fa27bc044d3ea8
|
refs/heads/master
| 2021-01-12T15:09:00.970858 | 2017-03-04T06:38:50 | 2017-03-04T06:38:50 | 69,352,535 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,945 |
py
|
#!/usr/bin/env python3
# coding=utf-8
import sqlite3
import os
DB_FILE_NAME = os.path.abspath('./.data.db')
LED_TABLE = 'led_info'
def get_conn(path):
conn = sqlite3.connect(path)
if os.path.exists(path) and os.path.isfile(path):
return conn
else:
conn = None
return sqlite3.connect(':memory:')  # ':memory' (no trailing colon) would create a file literally named ":memory"
def get_cursor(conn):
if conn is not None:
return conn.cursor()
else:
return get_conn('').cursor()
def create_led_table():
conn = get_conn(DB_FILE_NAME)
conn.execute('CREATE TABLE IF NOT EXISTS ' + \
LED_TABLE + ' (uid integer primary key, \
zid integer, \
x float, \
y float, \
z float, \
type integer, \
P0 float)')
def close_all(conn, cu):
try:
if cu is not None:
cu.close()
finally:
if conn is not None:
conn.close()
def get_led_single(uid):
conn = get_conn(DB_FILE_NAME)
cu = get_cursor(conn)
cu.execute('SELECT * FROM ' + LED_TABLE + ' WHERE uid=' + str(uid))
info = cu.fetchone()
close_all(conn, cu)
return info
def get_led_info():
conn = get_conn(DB_FILE_NAME)
cu = get_cursor(conn)
cu.execute('SELECT * FROM ' + LED_TABLE)
infos = cu.fetchall()
close_all(conn, cu)
return infos
def insert_led(led_info):
conn = get_conn(DB_FILE_NAME)
conn.execute('INSERT INTO ' + LED_TABLE + ' values (?,?,?,?,?,?,?)', led_info)
conn.commit()
conn.close()
def delete_led(uid):
conn = get_conn(DB_FILE_NAME)
conn.execute('DELETE FROM ' + LED_TABLE + ' WHERE uid=' + str(uid))
conn.commit()
conn.close()
def update_led(uid, col, value):
conn = get_conn(DB_FILE_NAME)
conn.execute('UPDATE ' + LED_TABLE + ' SET ' + col + '=' + str(value) + ' WHERE uid=' + str(uid))
conn.commit()
conn.close()
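# The helpers above build SQL by string concatenation; a safer variant uses
# parameterized queries, e.g. (sketch, same schema):
#   conn.execute('UPDATE ' + LED_TABLE + ' SET P0=? WHERE uid=?', (value, uid))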
|
[
"694726071@qq.com"
] |
694726071@qq.com
|
faa3797b36e9de4a619f95888e624d958787486c
|
b2862c8f21a4c7ce033fc801c003e3d7295cbedb
|
/cnn1/images_test.py
|
1eff5cace8ccd6d3a071205e5e637cd4f9546395
|
[] |
no_license
|
kasprowski/tutorial2019
|
1e160704293165413d10f5208532acec1ecc844b
|
df365fefcc5826c4546bdb3b7c6d8eec51970c80
|
refs/heads/master
| 2020-05-27T15:17:58.391912 | 2019-06-13T13:48:04 | 2019-06-13T13:48:04 | 188,676,973 | 5 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,680 |
py
|
'''
Deep Learning in the Eye Tracking World tutorial source file
https://www.github.com/kasprowski/tutorial2019
Test of classification models using two types of generated images
Uses: data.py to prepare data, models.py to retrieve models
@author: pawel@kasprowski.pl
'''
import cv2
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, cohen_kappa_score  # public path; sklearn.metrics.classification was removed in newer releases
from sklearn.model_selection import train_test_split
from data import prepare_samples_areas, prepare_samples_direction, prepare_samples_colors
import matplotlib.pyplot as plt
from models import cnn_network, flat_network, tree
def printResults(testLabels,testResults):
print(confusion_matrix(testLabels.argmax(axis=1), testResults.argmax(axis=1)))
print(classification_report(testLabels.argmax(axis=1), testResults.argmax(axis=1)))
print("Cohen's Kappa: {}".format(cohen_kappa_score(testLabels.argmax(axis=1), testResults.argmax(axis=1))))
return accuracy_score(testLabels.argmax(axis=1), testResults.argmax(axis=1))
###################################################
def plot_accuracy(values):
hfig = cv2.imread('h.jpg')
vfig = cv2.imread('v.jpg')
fig, axs = plt.subplots(2, 3, figsize=(5, 5))
axs[0,0].imshow(hfig)
axs[1,0].imshow(vfig)
axs[0,1].remove()
axs[1,1].remove()
axs[0,2].remove()
axs[1,2].remove()
gs = axs[0,1].get_gridspec()
labels = ["Random Forest","Flat NN","CNN"]
p = fig.add_subplot(gs[:, 1:])
p.bar(labels, values)
p.set_xlabel('Model')
p.set_ylabel('Accuracy')
p.set_xticklabels(labels)
p.set_title('Accuracy for given image types')
fig.tight_layout()
plt.show()
def main():
#samples,labels = prepare_samples_areas()
samples,labels = prepare_samples_direction()
#samples,labels = prepare_samples_colors()
(trainSamples, testSamples, trainLabels, testLabels) = train_test_split(samples, labels, test_size=0.25, random_state=42)
print("TREE")
testResults = tree(trainSamples,trainLabels,testSamples)
accTree = printResults(testLabels, testResults)
print("MLP - FLAT")
testResults = flat_network(trainSamples,trainLabels,testSamples)
accFlat = printResults(testLabels, testResults)
print("CNN")
testResults = cnn_network(trainSamples,trainLabels,testSamples)
accCnn = printResults(testLabels, testResults)
print("Accuracy TREE: {}".format(accTree))
print("Accuracy FLAT: {}".format(accFlat))
print("Accuracy CNN: {}".format(accCnn))
plot_accuracy((accTree,accFlat,accCnn))
if __name__ == "__main__":
main()
|
[
"C:\\mail"
] |
C:\mail
|
903e84bf3d1e5cb2b9700e079cd27e3eac2458d0
|
aeb8f81143a939cdb036be5ba58f417a4695d203
|
/mysite/settings.py
|
e6eea19ffd3aa6a653336f466bc6c275439704b2
|
[] |
no_license
|
lenar95/blog2
|
8380ae06c26ac0ccf007a3919474725f9f6ebb7c
|
2c4c1bb499b612b0cb675eb49c0af266f325980a
|
refs/heads/master
| 2021-05-15T19:06:02.333496 | 2017-10-21T09:47:40 | 2017-10-21T09:47:40 | 107,756,612 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,670 |
py
|
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^0a_)t%!2d#0xyyw_l8pvuz%e0!$2e320(cc!0)1ymqkayhf^z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'lenr.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.vk',
]
SITE_ID = 1
LOGIN_REDIRECT_URL = '/'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Ulyanovsk'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
|
[
"lenar95@mail.ru"
] |
lenar95@mail.ru
|
7d75049d861bf39599fc288e9b7ed4aaa10e238e
|
074d2ccd687def4623e051762cc3a37c939b8b08
|
/smtp_by_user/models/mail_mail.py
|
244c1a8731cfba4a8e8178497f6bf5194340fa9d
|
[] |
no_license
|
infotodoo/tempTGS
|
efc069311f9a18e41c1a440e52db865b06054510
|
5d9abb74f708271767fe312ed19338cdcc3c825c
|
refs/heads/master
| 2022-12-05T10:27:08.699620 | 2020-08-21T23:32:23 | 2020-08-21T23:32:23 | 286,842,332 | 0 | 1 | null | 2020-08-24T22:08:33 | 2020-08-11T20:32:35 |
Python
|
UTF-8
|
Python
| false | false | 2,980 |
py
|
import base64
import datetime
import logging
import psycopg2
import smtplib
import threading
import re
from collections import defaultdict
from odoo import _, api, fields, models
from odoo import tools
from odoo.addons.base.models.ir_mail_server import MailDeliveryException
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class MailMail(models.Model):
_inherit = 'mail.mail'
def send(self, auto_commit=False, raise_exception=False):
""" Sends the selected emails immediately, ignoring their current
state (mails that have already been sent should not be passed
unless they should actually be re-sent).
Emails successfully delivered are marked as 'sent', and those
that fail to be deliver are marked as 'exception', and the
corresponding error mail is output in the server logs.
:param bool auto_commit: whether to force a commit of the mail status
after sending each mail (meant only for scheduler processing);
should never be True during normal transactions (default: False)
:param bool raise_exception: whether to raise an exception if the
email sending process has failed
:return: True
"""
for server_id, batch_ids in self._split_by_server():
smtp_session = None
try:
outgoing_obj = self.env['ir.mail_server'].search([('user_id','=',self.create_uid.id)],limit=1)
server_id = outgoing_obj.id
smtp_session = self.env['ir.mail_server'].connect(mail_server_id=server_id)
_logger.error('server_id-------------------------------------------------------')
_logger.error(server_id)
_logger.error('create_uid-------------------------------------------------------')
_logger.error(self.create_uid)
except Exception as exc:
if raise_exception:
# To be consistent and backward compatible with mail_mail.send() raised
# exceptions, it is encapsulated into an Odoo MailDeliveryException
raise MailDeliveryException(_('Unable to connect to SMTP Server'), exc)
else:
batch = self.browse(batch_ids)
batch.write({'state': 'exception', 'failure_reason': exc})
batch._postprocess_sent_message(success_pids=[], failure_type="SMTP")
else:
self.browse(batch_ids)._send(
auto_commit=auto_commit,
raise_exception=raise_exception,
smtp_session=smtp_session)
_logger.info(
'Sent batch %s emails via mail server ID #%s',
len(batch_ids), server_id)
finally:
if smtp_session:
smtp_session.quit()
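# Note: this override resolves the SMTP server from the mail's create_uid so
# each user sends through their own configured outgoing server; the server_id
# yielded by _split_by_server() is deliberately discarded.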
|
[
"ob@todoo.co"
] |
ob@todoo.co
|
774c3bbb0f01ff4b1e8b1a61f09146a3638a2ccd
|
748d0c5a45ff74cb916fb0c859b4e71111f5cc5a
|
/kk.py
|
2ed6cc1c30f20151fff1a3ea6b41df51690ee8d7
|
[] |
no_license
|
Xavier-Xiaoru-Liu/CrldrPrune
|
c808c6e86aea3f2726b1c4c5198ff505ae705bc3
|
914672327cc8ddba328c8e8f822a1450524bbdf3
|
refs/heads/master
| 2023-04-12T12:51:35.136620 | 2020-01-03T06:04:24 | 2020-01-03T06:04:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 288 |
py
|
import tensorflow as tf
import numpy as np
def low_precision(tensor):
low = tf.cast(tensor, tf.bfloat16)
return low
a = tf.Variable(-0.2, dtype=tf.float32)  # pass dtype by keyword; tf.Variable's second positional arg is `trainable`
a = low_precision(a)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print(sess.run(a))
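# For reference: bfloat16 keeps float32's 8 exponent bits but only 7 mantissa
# bits, so -0.2 prints as a nearby value such as -0.200195 rather than -0.2.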
|
[
"lxr_orz@126.com"
] |
lxr_orz@126.com
|
d6c6f07c20d59e2954fa76d05a86559f3dc82759
|
a31c54cb9b27e315567ed865e07cb720fc1e5c8e
|
/revenge/techniques/native_timeless_tracer/timeless_trace_item.py
|
f94c99a65e749528e119e21889e9b5142e3c5bcd
|
[] |
no_license
|
bannsec/revenge
|
212bc15e09f7d864c837a1829b3dc96410e369d3
|
2073b8fad76ff2ba21a5114be54e959297aa0cf9
|
refs/heads/master
| 2021-06-25T12:26:02.609076 | 2020-05-29T15:46:45 | 2020-05-29T15:46:45 | 188,461,358 | 51 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,456 |
py
|
import logging
logger = logging.getLogger(__name__)
from ... import common
class NativeTimelessTraceItem(object):
def __init__(self, process, context=None, depth=None, previous=None):
"""Class describing a single step of NativeTimelessTracing
Args:
process (revenge.Process): Process object
context (dict): Dictionary describing this step's context
depth (int): Current call depth
previous (NativeTimelessTraceItem, optional): Previous timeless
trace item to use for differential generation
"""
self._process = process
self._previous = previous
self.context = context
self.depth = depth
def __repr__(self):
attrs = ["NativeTimelessTraceItem"]
attrs.append(str(self.context.pc.next.thing))
return "<{}>".format(' '.join(attrs))
@classmethod
@common.validate_argument_types(snapshot=dict)
def from_snapshot(klass, process, snapshot, previous=None):
"""Creates a NativeTimelessTraceItem from a snapshot returned by timeless_snapshot()
Args:
process (revenge.Process): Process object
snapshot (dict): Timeless snapshot dictionary
previous (NativeTimelessTraceItem, optional): Previous timeless
trace item to use for differential generation
"""
if "is_timeless_snapshot" not in snapshot or not snapshot["is_timeless_snapshot"]:
raise RevengeInvalidArgumentType("from_snapshot does not appear to be timeless_snapshot dictionary.")
context = snapshot["context"]
depth = snapshot["depth"]
return klass(process, context=context, depth=depth, previous=previous)
@property
def instruction(self):
"""Returns the assembly instruction object for this item."""
return self.context.pc.next.thing
@property
def context(self):
return self.__context
@context.setter
@common.validate_argument_types(context=(dict, type(None)))
def context(self, context):
diff = self._previous.context if self._previous is not None else None
# TODO: This is an assumption...
if isinstance(context, dict):
self.__context = CPUContext(self._process, diff=diff, **context)
elif context is None:
self.__context = None
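# Imported at the bottom rather than the top, most likely to avoid a
# circular import between this module and revenge's exceptions/cpu modules.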
from ...exceptions import *
from ...cpu import CPUContext
|
[
"whootandahalf@gmail.com"
] |
whootandahalf@gmail.com
|
c66f07924ab319cc89659afb9e82d85e9c9500f8
|
03f4056ffaa745a2bd0742050494cbdab97858de
|
/ImportaObjMat.py
|
2a5e898476bdb7e0dc5615b89d41f4c7bbc22430
|
[] |
no_license
|
biomedicalengineering/OrtogOnBlender
|
d1dd44852fc39328982720a3d957ac2f6393dc2a
|
fa15e053bfe3c8ef3ab5cdaf50680ecf72a3a1db
|
refs/heads/master
| 2020-12-19T09:04:55.840967 | 2018-07-30T13:39:30 | 2018-07-30T13:39:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,433 |
py
|
import bpy
import platform
def ImportaMaterial(impMaterial, SelObj):
context = bpy.context
obj = context.active_object
scn = context.scene
if platform.system() == "Linux" or platform.system() == "Darwin":
dirScript = bpy.utils.user_resource('SCRIPTS')
blendfile = dirScript+"addons/OrtogOnBlender-master/objetos.blend"
section = "\\Material\\"
object = impMaterial
if platform.system() == "Windows":
dirScript = 'C:/OrtogOnBlender/Blender/2.78/scripts/'
blendfile = dirScript+"addons/OrtogOnBlender-master/objetos.blend"
section = "\\Material\\"
object = impMaterial
filepath = blendfile + section + object
directory = blendfile + section
filename = object
bpy.ops.wm.append(
filepath=filepath,
filename=filename,
directory=directory)
bpy.context.scene.render.engine = 'CYCLES' # Switch the render engine type
# Material import step
# objects = bpy.data.objects
material_01 = bpy.data.materials[impMaterial] # Material passed in to this def
obj = bpy.data.objects[SelObj] # Object passed in to this def
bpy.ops.object.select_all(action='DESELECT')
obj.select = True
bpy.context.scene.objects.active = obj
bpy.context.object.show_transparent = True
bpy.ops.object.material_slot_add()
obj.data.materials[0] = material_01
mensagem = "Beleza!"
return mensagem
def ImportaLampXRay():
context = bpy.context
obj = context.active_object
scn = context.scene
if platform.system() == "Linux" or platform.system() == "Darwin":
dirScript = bpy.utils.user_resource('SCRIPTS')
blendfile = dirScript+"addons/OrtogOnBlender-master/objetos.blend"
section = "\\Object\\"
object = 'Lamp_X_Ray'
if platform.system() == "Windows":
dirScript = 'C:/OrtogOnBlender/Blender/2.78/scripts/'
blendfile = dirScript+"addons/OrtogOnBlender-master/objetos.blend"
section = "\\Object\\"
object = 'Lamp_X_Ray'
filepath = blendfile + section + object
directory = blendfile + section
filename = object
bpy.ops.wm.append(
filepath=filepath,
filename=filename,
directory=directory)
bpy.ops.object.select_all(action='DESELECT')
#impMaterial = 'SCATTER_bone'
#SelObj = 'Cube'
#ImportaMaterial(impMaterial, SelObj)
def ImportaCefalo():
context = bpy.context
obj = context.active_object
scn = context.scene
if platform.system() == "Linux" or platform.system() == "Darwin":
dirScript = bpy.utils.user_resource('SCRIPTS')
blendfile = dirScript+"addons/OrtogOnBlender-master/objetos.blend"
section = "\\Group\\"
object = '2D_cephalometry_all'
if platform.system() == "Windows":
dirScript = 'C:/OrtogOnBlender/Blender/2.78/scripts/'
blendfile = dirScript+"addons/OrtogOnBlender-master/objetos.blend"
section = "\\Group\\"
object = '2D_cephalometry_all'
filepath = blendfile + section + object
directory = blendfile + section
filename = object
bpy.ops.wm.append(
filepath=filepath,
filename=filename,
directory=directory)
'''
# Delete the leftover objects
bpy.ops.object.select_all(action='DESELECT')
apagar = [bpy.data.objects['PT_2nd_Mol.001'], bpy.data.objects['PT_1st_Mol.001'], bpy.data.objects['PT_Incisor_low_low.001'], bpy.data.objects['PT_Incisor_low_up.001'], bpy.data.objects['PT_Incisor_up_up.001'], bpy.data.objects['PT_Incisor_up_low.001'], bpy.data.objects['PT_Go.001'], bpy.data.objects['PT_Me.001'], bpy.data.objects['PT_N.001'], bpy.data.objects['PT_A.001'], bpy.data.objects['PT_B.001'], bpy.data.objects['PT_Ls.001'], bpy.data.objects['PT_Pg.001'], bpy.data.objects['PT_Po.001'], bpy.data.objects['PT_Or.001'], bpy.data.objects['PT_S.001']]
a = 0
NumObj = len(apagar)
while a < NumObj:
b = apagar[a].select = True
print(b)
print(a)
a += 1
bpy.context.scene.objects.active = apagar[0]
bpy.ops.object.delete(use_global=False)
'''
def ImportaCamXray():
context = bpy.context
obj = context.active_object
scn = context.scene
if platform.system() == "Linux" or platform.system() == "Darwin":
dirScript = bpy.utils.user_resource('SCRIPTS')
blendfile = dirScript+"addons/OrtogOnBlender-master/objetos.blend"
section = "\\Group\\"
object = 'Cameras_X_Ray'
if platform.system() == "Windows":
dirScript = 'C:/OrtogOnBlender/Blender/2.78/scripts/'
blendfile = dirScript+"addons/OrtogOnBlender-master/objetos.blend"
section = "\\Group\\"
object = 'Cameras_X_Ray'
filepath = blendfile + section + object
directory = blendfile + section
filename = object
bpy.ops.wm.append(
filepath=filepath,
filename=filename,
directory=directory)
def SplitAreaTrabalho():
context = bpy.context
# obj = context.active_object
# scn = context.scene
start_areas = context.screen.areas[:]
bpy.ops.screen.area_split(direction='VERTICAL', factor=0.3)
for area in context.screen.areas:
if area not in start_areas:
area.type = 'VIEW_3D'
return {'FINISHED'}
|
[
"cogitas3d@gmail.com"
] |
cogitas3d@gmail.com
|
c26be84c6627469cad7ae4c3c66e773878b9f22d
|
21fa19fbc7e32b1af7f913114a084a5eced5b2a3
|
/tests/integration/mycode/mypkg/__init__.py
|
c783ff5fc17fcd5ab7ed61667bf60de592c08f32
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
humilis/humilis-stream-gtw
|
2b574cb483463a74328eded280c176fa906ff23b
|
f763d30369297173a0917373f906e1fb0051426a
|
refs/heads/master
| 2021-12-03T13:07:28.010779 | 2021-11-13T19:20:39 | 2021-11-13T19:20:39 | 135,179,379 | 0 | 0 |
MIT
| 2021-11-13T19:20:39 | 2018-05-28T15:32:22 |
Jinja
|
UTF-8
|
Python
| false | false | 630 |
py
|
"""A dummy module for testing purposes."""
import base64
import json
import os
import time
def transform(event, context):
"""Sample Firehose transform."""
output = []
print(json.dumps(event, indent=4))
if "body" in event:
# We are calling this from the REST API
event = event["body"]
for record in event["records"]:
payload = base64.b64decode(record["data"])
output_record = {
"recordId": record["recordId"],
"result": "Ok",
"data": base64.b64encode(payload)
}
output.append(output_record)
return {"records": output}
|
[
"german@innovativetravel.eu"
] |
german@innovativetravel.eu
|
d9aa217c167416aa77add1ba68dc8f3405a49870
|
84fcf87bc93f33ea615ec95ab0dfcba27d47b054
|
/facedancer/USBVendor.py
|
c3ab52b8a577ae5fb7d2bb44ffac87fff177d32f
|
[
"BSD-3-Clause"
] |
permissive
|
xairy/Facedancer
|
871d9dcdca42e52b94e6fa0fdeec74d55f94da99
|
2aca8e6aa5e7a8a71d51c550f4ed3b6fe75cb092
|
refs/heads/master
| 2023-05-11T12:13:20.250843 | 2023-04-03T08:41:15 | 2023-04-03T08:41:15 | 243,107,193 | 1 | 0 |
BSD-3-Clause
| 2020-02-25T21:34:08 | 2020-02-25T21:34:07 | null |
UTF-8
|
Python
| false | false | 591 |
py
|
# USBVendor.py
#
# Contains class definition for USBVendor, intended as a base class (in the OO
# sense) for implementing device vendors.
class USBVendor:
name = "generic USB device vendor"
# maps bRequest to handler function
request_handlers = { }
def __init__(self, verbose=0):
self.device = None
self.verbose = verbose
self.setup_request_handlers()
def set_device(self, device):
self.device = device
def setup_request_handlers(self):
"""To be overridden for subclasses to modify self.request_handlers"""
pass
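# Sketch of a concrete vendor subclass (names are illustrative):
#   class USBAcmeVendor(USBVendor):
#       name = "Acme USB vendor"
#       def setup_request_handlers(self):
#           self.request_handlers = {0x01: self.handle_reset_request}
#       def handle_reset_request(self, req):
#           ...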
|
[
"temkink@ainfosec.com"
] |
temkink@ainfosec.com
|
fee4d2124eac9fad1e5d0bc58e3c6649164ab65c
|
e245035c7bff120d5f7a8a26412f14d11a77a46f
|
/huggingface_transformer_src/src/transformers/commands/convert.py
|
2ca5a57ca36d0a5c150959f2f1b7daaec455c17a
|
[
"Apache-2.0"
] |
permissive
|
fuxuelinwudi/R-Drop
|
82f27e623f319065b75b9e2b7ebe285c2aa0582b
|
88bba6386f2edf7aa45ae6795103dbf4be085e99
|
refs/heads/main
| 2023-06-06T14:28:55.773778 | 2021-06-27T05:39:27 | 2021-06-27T05:39:27 | 381,313,887 | 2 | 0 |
MIT
| 2021-06-29T09:43:58 | 2021-06-29T09:43:58 | null |
UTF-8
|
Python
| false | false | 7,555 |
py
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
"""
Factory function used to convert a TF 1.0 model checkpoint into a PyTorch checkpoint.
Returns: ConvertCommand
"""
return ConvertCommand(
args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
)
IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
@staticmethod
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformer-cli
Args:
parser: Root parser to register command-specific arguments
"""
train_parser = parser.add_parser(
"convert",
help="CLI tool to run convert model from original "
"author checkpoints to Transformers PyTorch checkpoints.",
)
train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
train_parser.add_argument(
"--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
)
train_parser.add_argument(
"--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
)
train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
train_parser.add_argument(
"--finetuning_task_name",
type=str,
default=None,
help="Optional fine-tuning task name if the TF model was a finetuned model.",
)
train_parser.set_defaults(func=convert_command_factory)
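# Example invocation of the subcommand registered above (paths are placeholders):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin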
def __init__(
self,
model_type: str,
tf_checkpoint: str,
pytorch_dump_output: str,
config: str,
finetuning_task_name: str,
*args
):
self._logger = logging.get_logger("transformers-cli/converting")
self._logger.info(f"Loading model {model_type}")
self._model_type = model_type
self._tf_checkpoint = tf_checkpoint
self._pytorch_dump_output = pytorch_dump_output
self._config = config
self._finetuning_task_name = finetuning_task_name
def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
if "ckpt" in self._tf_checkpoint.lower():
TF_CHECKPOINT = self._tf_checkpoint
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = self._tf_checkpoint
TF_CHECKPOINT = ""
convert_transfo_xl_checkpoint_to_pytorch(
TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
)
elif self._model_type == "gpt2":
try:
from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(IMPORT_ERROR_MESSAGE)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_pytorch_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
else:
raise ValueError(
"--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
)
|
[
"dropreg@163.com"
] |
dropreg@163.com
|
fa87a0d787feef651f7e11abd4c4b98770306065
|
50952e470423e334115ff36d1c3041298fc254a7
|
/doppelganger/gan/util.py
|
3d38581652c0cdbceae079511e716dfd01099e6b
|
[] |
no_license
|
elf11/synthetic-data-service
|
a3ee78e83ac5e7be2d6aadf805594001be43d989
|
e55559eb2c5fd321bc112082fb7e4690c140614a
|
refs/heads/master
| 2023-01-29T12:15:26.477250 | 2020-09-30T16:44:36 | 2020-09-30T16:44:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,281 |
py
|
from output import OutputType, Output, Normalization
import numpy as np
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
def renormalize_per_sample(data_feature, data_attribute, data_feature_outputs,
data_attribute_outputs, gen_flags,
num_real_attribute):
"""
-gets correct dimension for generated data attribute (num_ori_attributes)
from num_ori_attributes + num_created_attributes
-renormalizes data_feature and replaces part of the time-series that has ended with zeros
Args:
data_feature: generated data features (num_samples, seq_len, num_features)
data_attribute: generated data attributes (num_samples, num_ori_attributes + num_created_attributes)
data_feature_outputs: list that describes generated feature output type, dimension, normalization
data_attribute_outputs: list that describes generated attribute output type, dimension, normalization
gen_flags: generation flag for generated data
num_real_attribute: number of original attributes
Returns:
data_feature: generated data feature taking into account gen flags
data_attribute: generated data attribute of the original dimension
"""
attr_dim = 0
for i in range(num_real_attribute):
attr_dim += data_attribute_outputs[i].dim
attr_dim_cp = attr_dim
fea_dim = 0
for output in data_feature_outputs:
# for each continuous feature
if output.type_ == OutputType.CONTINUOUS:
for _ in range(output.dim):
max_plus_min_d_2 = data_attribute[:, attr_dim]
max_minus_min_d_2 = data_attribute[:, attr_dim + 1]
attr_dim += 2
max_ = max_plus_min_d_2 + max_minus_min_d_2
min_ = max_plus_min_d_2 - max_minus_min_d_2
max_ = np.expand_dims(max_, axis=1)
min_ = np.expand_dims(min_, axis=1)
if output.normalization == Normalization.MINUSONE_ONE:
data_feature[:, :, fea_dim] = \
(data_feature[:, :, fea_dim] + 1.0) / 2.0
data_feature[:, :, fea_dim] = \
data_feature[:, :, fea_dim] * (max_ - min_) + min_
fea_dim += 1
#if feature is discrete, add feature dim
else:
fea_dim += output.dim
tmp_gen_flags = np.expand_dims(gen_flags, axis=2) # (num_sample, seq_len, 1)
data_feature = data_feature * tmp_gen_flags # (num_sample, seq_len, num_features)
# get back only the original attributes
data_attribute = data_attribute[:, 0: attr_dim_cp] # (num_sample, 1)
return data_feature, data_attribute
def normalize_per_sample(data_feature, data_attribute, data_feature_outputs,
data_attribute_outputs):
"""
-adds 2 extra attributes ((max +- min)/2) for each feature for each sample
to the original attribute
-normalizes data feature
Args:
data_feature: original data feature
data_attribute: original data attribute
data_feature_outputs: list that describes original feature output type, dimension, normalization
data_attribute_outputs: list that describes original attribute output type, dimension, normalization
Returns:
data_feature: normalized data feature
data_attribute: original data attribute + newly created attributes
data_attribute_outputs: list that describes original + created attribute output type, dimension, normalization
real_attribute_mask: boolean list specifying if attributes are orginal or newly created
"""
# assume all samples have maximum length
# get max and min for each feature for each sample
data_feature_min = np.nanmin(data_feature, axis=1) # (total_sample, num_features)
data_feature_max = np.nanmax(data_feature, axis=1)
additional_attribute = []
additional_attribute_outputs = []
dim = 0
for output in data_feature_outputs:
#for each feature, we create 2 extra attributes with the min & max
if output.type_ == OutputType.CONTINUOUS:
for _ in range(output.dim):
max_ = data_feature_max[:, dim] # (total_sample, )
min_ = data_feature_min[:, dim]
additional_attribute.append((max_ + min_) / 2.0)
additional_attribute.append((max_ - min_) / 2.0)
additional_attribute_outputs.append(Output(
type_=OutputType.CONTINUOUS,
dim=1,
normalization=output.normalization,
is_gen_flag=False))
additional_attribute_outputs.append(Output(
type_=OutputType.CONTINUOUS,
dim=1,
normalization=Normalization.ZERO_ONE,
is_gen_flag=False))
max_ = np.expand_dims(max_, axis=1)
min_ = np.expand_dims(min_, axis=1)
data_feature[:, :, dim] = \
(data_feature[:, :, dim] - min_) / (max_ - min_ + 1e-7)
if output.normalization == Normalization.MINUSONE_ONE:
data_feature[:, :, dim] = \
data_feature[:, :, dim] * 2.0 - 1.0
dim += 1
else:
dim += output.dim
# create a mask for original attribute and attributed we just created
real_attribute_mask = ([True] * len(data_attribute_outputs) +
[False] * len(additional_attribute_outputs))
additional_attribute = np.stack(additional_attribute, axis=1) # (num_sample, num_continuous_features * 2)
data_attribute = np.concatenate(
[data_attribute, additional_attribute], axis=1) #(num_sample, num_continuous_feature * 2 + num_ori_attribute)
data_attribute_outputs.extend(additional_attribute_outputs)
return data_feature, data_attribute, data_attribute_outputs, \
real_attribute_mask
def add_gen_flag(data_feature, data_gen_flag, data_feature_outputs,
sample_len):
"""
-adds generation flags to the end of original data features
-adds an additional output to the data_feature_outputs list
Args:
data_feature: original data feature
data_gen_flag: original data gen flag
data_feature_outputs: list that describes original feature output type, dimension, normalization
sample_len: max sequence length of time series
Returns:
data_feature: original data feature + gen flag
data_feature_outputs: original data_feature_output + output type, dimension and normalization of gen flag
"""
for output in data_feature_outputs:
if output.is_gen_flag:
raise Exception("is_gen_flag should be False for all"
"feature_outputs")
if (data_feature.shape[2] !=
np.sum([t.dim for t in data_feature_outputs])):
raise Exception("feature dimension does not match feature_outputs")
if len(data_gen_flag.shape) != 2:
raise Exception("data_gen_flag should be 2 dimension")
num_sample, length = data_gen_flag.shape
data_gen_flag = np.expand_dims(data_gen_flag, 2) # (num_sample, seq_len, 1)
data_feature_outputs.append(Output(
type_=OutputType.DISCRETE,
dim=2,
is_gen_flag=True))
shift_gen_flag = np.concatenate(
[data_gen_flag[:, 1:, :],
np.zeros((data_gen_flag.shape[0], 1, 1))],
axis=1) # (num_samples, seq_len, 1)
if length % sample_len != 0:
raise Exception("length must be a multiple of sample_len")
data_gen_flag_t = np.reshape(
data_gen_flag,
[num_sample, int(length / sample_len), sample_len]) # (num_sample, 1, seq_len)
data_gen_flag_t = np.sum(data_gen_flag_t, 2)
data_gen_flag_t = data_gen_flag_t > 0.5
data_gen_flag_t = np.repeat(data_gen_flag_t, sample_len, axis=1)
data_gen_flag_t = np.expand_dims(data_gen_flag_t, 2)
# add the gen_flag and inverse of gen_flag to data_feature
data_feature = np.concatenate(
[data_feature,
shift_gen_flag,
(1 - shift_gen_flag) * data_gen_flag_t],
axis=2) # (num_sample, seq_len, num_features + 2)
return data_feature, data_feature_outputs
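# Typical round trip (sketch): normalize_per_sample() before training stores
# each feature's (max+min)/2 and (max-min)/2 as extra per-sample attributes,
# and renormalize_per_sample() applies them to map generated features back to
# the original scale before the gen-flag mask zeroes out finished timesteps.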
|
[
"epcl2@cam.ac.uk"
] |
epcl2@cam.ac.uk
|
d9bbf890f6da0c78331c9d95119381dfc53eec1b
|
a56105311f5fd29ccb064f54bfc024402a3c71e4
|
/active_minimum_margin_degr.py
|
08ce34e85ce8f80821581c33e1e2d5cef966f4cf
|
[] |
no_license
|
tsvvladimir/article_test
|
f4bc310b58e029d0472d2f5ef34fbfe3259c53d4
|
9629dc5b2d3a9d6d12765f7847c22f859f1a841e
|
refs/heads/master
| 2021-05-31T12:37:35.787925 | 2016-04-04T13:57:32 | 2016-04-04T13:57:32 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,248 |
py
|
#clusterize in 20 clusters and choose cluster center for init learning
from diploma_lib import *
from prepare_data import *
import prepare_data
from sklearn.cluster import AgglomerativeClustering
from collections import OrderedDict
from collections import defaultdict
from sklearn.metrics import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances
def active_minimum_margin_degr(foldname):
twenty_train_data = getattr(prepare_data, foldname + '_train_data')
twenty_train_target = getattr(prepare_data, foldname + '_train_target')
twenty_test_data = getattr(prepare_data, foldname + '_test_data')
twenty_test_target = getattr(prepare_data, foldname + '_test_target')
#baseline active learning solution
alpha = 20 #initial training set
betha = int(len(twenty_train_data) / alpha) - 2 #number of iterations
gamma = 20 #sampling volume
#create bad train set
'''
tfidf_transformer = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer())
])
unlabeled_matrix = tfidf_transformer.fit_transform(twenty_train_data)
res = pairwise_distances(unlabeled_matrix)
#print res.shape
#print res
print 'dict start'
d = {}
for i in range(0, 11314):
for j in range(i, 11314):
d[(i, j)] = res[i][j]
#print d
print 'sort start'
sort = sorted(d.items(), key=operator.itemgetter(1))
for i in range(0, 10):
print sort[i], twenty_train_target[i], twenty_train_target[j]
exit()
'''
samples = []
samples = [8792, 8792,1778, 1778, 2488, 2488, 7951, 7951, 9445, 9445, 6837, 10132, 313, 313, 5480, 5480, 1455, 1455, 5487, 5487]
'''
k = 0
for i, cl in enumerate(twenty_train_target):
if cl < 3:
samples.append(i)
k += 1
if k > (alpha - 1):
break
'''
labeled_train_data = []
labeled_train_target = []
#unlabeled_train_data = []
#unlabeled_train_target = []
labeled_train_data, labeled_train_target, unlabeled_train_data, unlabeled_train_target = diploma_range_sampling(labeled_train_data, labeled_train_target, twenty_train_data, twenty_train_target, samples)
baseline_active_clf = Pipeline([
('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', LinearSVC())
])
baseline_active_clf.fit(labeled_train_data, labeled_train_target)
predicted = baseline_active_clf.predict(twenty_test_data)
score = f1_score(twenty_test_target, predicted, average='macro')
#print 'active cluster svm margin solution'
scores = baseline_active_clf.decision_function(unlabeled_train_data)
#prob = np.divide(1, np.add(1, np.exp(np.multiply(np.array(scores), -1))))
diploma_res_print(foldname, len(labeled_train_data), score)
for t in range(1, betha):
#to do use labeled dataset to train sigmoid
#f1 for labeled set
#pred_lab = baseline_active_clf.predict(labeled_train_data)
#print 'f1 score for labeled:', f1_score(labeled_train_target, pred_lab, average='macro')
#count p1 p2 p3 p4
'''
def count_p(arr):
p1 = arr.min()
p4 = arr.max()
sorted_arr = sorted(arr)
a1 = [i for i in sorted_arr if i < 0]
a2 = [i for i in sorted_arr if i > 0]
p2 = -100500
p3 = +100500
if len(a1) > 0:
p2 = max(a1)
if len(a2) > 0:
p3 = min(a2)
return [p1, p2, p3, p4]
#prom_arr = []
norm_scores = LA.norm(scores)
n_scores = np.divide(scores, norm_scores)
'''
'''
plus_norm = 0
min_norm = 0
for line in scores:
for elem in line:
if (elem > 0):
plus_norm += elem ** 2
else:
min_norm += elem ** 2
plus_norm = math.sqrt(plus_norm)
min_norm = math.sqrt(min_norm)
n_scores = np.array(scores)
for i in range(0, len(n_scores)):
for j in range(0, len(n_scores[i])):
if (n_scores[i][j] > 0):
n_scores[i][j] = n_scores[i][j] / plus_norm
else:
n_scores[i][j] = n_scores[i][j] / min_norm
'''
'''
#print n_scores
prom_arr = []
for lin in range(0, len(n_scores)):
prom_arr.append(count_p(n_scores[lin]))
t_prom_arr = np.transpose(np.array(prom_arr))
#print t_prom_arr
#p1 = np.amin(t_prom_arr[0])
#p2 = np.amax(t_prom_arr[1])
#p3 = np.amin(t_prom_arr[2])
#p4 = np.amax(t_prom_arr[3])
#print 'p1:', p1, 'p2:', p2, 'p3:', p3, 'p4:', p4
'''
#prob = np.divide(1, np.add(1, np.exp(np.multiply(np.array(n_scores), -1))))
#print 'norm matrix min proba:', np.amin(prob), 'norm matrix max proba:', np.amax(prob)
doc_score = {}
for i in range(0, len(unlabeled_train_data)):
last_elems = (sorted(scores[i]))[-2:]
doc_score[i] = np.abs(last_elems[0] - last_elems[1])
sorted_doc_score = sorted(doc_score.items(), key=operator.itemgetter(1))
#print 'sorted doc score minimum active cluster svm margin', sorted_doc_score[0]
sample_numbers = []
for i in range(0, gamma):
sample_numbers = sample_numbers + [sorted_doc_score[i][0]]
labeled_train_data, labeled_train_target, unlabeled_train_data, unlabeled_train_target = diploma_range_sampling(labeled_train_data, labeled_train_target, unlabeled_train_data, unlabeled_train_target, sample_numbers)
baseline_active_clf.fit(labeled_train_data, labeled_train_target)
predicted = baseline_active_clf.predict(twenty_test_data)
score = f1_score(twenty_test_target, predicted, average='macro')
scores = baseline_active_clf.decision_function(unlabeled_train_data)
#prob = np.divide(1, np.add(1, np.exp(np.multiply(np.array(scores), -1))))
#print 'min proba:', np.amin(prob), 'max proba:', np.amax(prob)
diploma_res_print(foldname, len(labeled_train_data), score)
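# The sampling rule above is classic minimum-margin uncertainty sampling:
# for every unlabeled document, take the two largest one-vs-rest decision
# scores and query the documents where their gap is smallest.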
|
[
"tsvvladimir95@gmail.com"
] |
tsvvladimir95@gmail.com
|
48bf76c7aaa37bdc18b3be06c5d1885484e52765
|
3f729f51e0ffd84926b545e91cc2cda17501406f
|
/plots.py
|
be18a258e2bafc9819f8252192c24ca2b7ee7dba
|
[] |
no_license
|
ummigachi/Machine-Learning
|
d23a0b08043fc8a79dc1ca2f8a146264a17feb45
|
8098d703ae21c026fa027df17adb4df407176a53
|
refs/heads/master
| 2020-08-20T16:48:50.680096 | 2019-10-18T21:47:37 | 2019-10-18T21:47:37 | 216,045,447 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,366 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 4 15:16:36 2019
@author: user1
"""
#import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
dataset= pd.read_csv('hour.csv')
#dataset.plot() # plots all columns against index
dataset.plot(kind='scatter', y = 'cnt',x='temp',)
#dataset.set(xlim=(xmin, xmax), ylim=(ymin, ymax))
sns.regplot(dataset['temp'],dataset['cnt'])
plt.xlabel('Temperature')
plt.ylabel('Usage Count')
plt.title('Scatter plot - Usage Count/temperature',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='atemp',) # scatter plot)
sns.regplot(dataset['atemp'],dataset['cnt'])
plt.xlabel('Adjusted Temperature')
plt.ylabel('Usage Count')
plt.title('Scatter plot - Usage Count/Adjusted temperature',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='weathersit',) # scatter plot
sns.regplot(dataset['weathersit'],dataset['cnt'])
plt.xlabel('Weather Situation')
plt.ylabel('Usage Count')
plt.title('Scatter plot -Usage Count/Weather Situation',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='windspeed',) # scatter plot
sns.regplot(dataset['windspeed'],dataset['cnt'])
plt.xlabel('Wind Speed')
plt.ylabel('Usage Count')
plt.title('Scatter plot - Usage Count/Windspeed',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='casual',) # scatter plot
sns.regplot(dataset['casual'],dataset['cnt'])
plt.title('Scatter plot - Usage Count/Casual Users',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='registered',) # scatter plot
sns.regplot(dataset['registered'],dataset['cnt'])
plt.title('Scatter plot -Usage Count/registered Users',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='workingday',) # scatter plot
sns.regplot(dataset['workingday'],dataset['cnt'])
plt.title('Scatter plot -Usage Count/Work day',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='holiday',) # scatter plot
sns.regplot(dataset['holiday'],dataset['cnt'])
plt.title('Scatter plot -Usage Count/holiday',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='yr',) # scatter plot
sns.regplot(dataset['yr'],dataset['cnt'])
plt.title('Scatter plot -Usage Count/Year',fontsize=16)
dataset.plot(kind='scatter', y = 'cnt',x='hum',) # scatter plot
sns.regplot(dataset['hum'],dataset['cnt'])
plt.title('Scatter plot -Usage Count/Humidity',fontsize=16)
|
[
"noreply@github.com"
] |
ummigachi.noreply@github.com
|
4cfbed53820b59e3787cf17000e182a46eb8f405
|
3d11335f92c0b62563b5601cdc8a91bf8235a877
|
/Scientific_Expedition/open_labyrinth.py
|
61df7c4b8c680fe1062755117bab79f0ed4e8e08
|
[] |
no_license
|
matyh/Checkio
|
46b993530045a572bf67ca123c014dab7d8ff018
|
9a138eb0b5a259fcebe25ef6141c712100b22f39
|
refs/heads/master
| 2020-11-30T21:27:53.818469 | 2020-07-27T12:41:18 | 2020-07-27T12:41:18 | 230,484,256 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,973 |
py
|
#!/usr/bin/env checkio --domain=py run open-labyrinth
# "So, the Labyrinth is a piece of cake, is it? Well, let's see how you deal with this little slice..."
#
# Sarah: "You don't by any chance know the way through this labyrinth, do you?"
# The Worm: "Who, me? No, I'm just a worm. Say, come inside, and meet the missus"
#
# "Labyrinth" (1986)
#
# The labyrinth has no walls, but bushes surround the path on each side. If a player moves into a bush, they lose. The labyrinth is presented as a matrix (a list of lists): 1 is a bush and 0 is part of the path. The labyrinth's size is 12 x 12 and the outer cells are also bushes. Players start at cell (1,1). The exit is at cell (10,10). You need to find a route through the labyrinth. Players can move in only four directions--South (down [1,0]), North (up [-1,0]), East (right [0,1]), West (left [0, -1]). The route is described as a string consisting of different characters: "S"=South, "N"=North, "E"=East, and "W"=West.
#
# Input:A labyrinth map as a list of lists with 1 and 0.
#
# Output:The route as a string that contains "W", "E", "N" and "S".
#
# Precondition:Outer cells are pits.
# len(labyrinth) == 12
# all(len(row) == 12 for row in labyrinth)
#
#
#
# END_DESC
def checkio(labyrinth):
position = [1, 1]
finish = [10, 10]
route = ''
def lab(position, finish, route, labyrinth):
move = {"S": (1, 0), "N": (-1, 0), "W": (0, -1), "E": (0, 1)}
# check if the position is the end
if position == finish:
return route
# make list of available moves
moves = ["S", "N", "W", "E"]
# helper list to remove opposite move
movesrev = ["N", "S", "E", "W"]
try:
next_move = route[-1]
# delete opposite move so we don't go backwards
del moves[movesrev.index(next_move)]
except IndexError:
pass
x, y = position
for direction in moves:
dx, dy = move.get(direction)
            # check if the move leads onto the path (not into a bush)
            if labyrinth[x + dx][y + dy] != 1:
                # if so, mark the previous position as visited and move to the next position
labyrinth[x][y] = 1
result = lab([x + dx, y + dy], finish, route + direction, labyrinth)
if result:
return result
return lab(position, finish, route, labyrinth)
if __name__ == '__main__':
    # This code is used only for self-checking and is not necessary for auto-testing
def check_route(func, labyrinth):
MOVE = {"S": (1, 0), "N": (-1, 0), "W": (0, -1), "E": (0, 1)}
# copy maze
route = func([row[:] for row in labyrinth])
pos = (1, 1)
goal = (10, 10)
for i, d in enumerate(route):
move = MOVE.get(d, None)
if not move:
print("Wrong symbol in route")
return False
pos = pos[0] + move[0], pos[1] + move[1]
if pos == goal:
return True
if labyrinth[pos[0]][pos[1]] == 1:
print("Player in the pit")
return False
print("Player did not reach exit")
return False
# These assert are using only for self-testing as examples.
assert check_route(checkio, [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1],
[1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1],
[1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "First maze"
assert check_route(checkio, [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "Empty maze"
assert check_route(checkio, [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "Up and down maze"
assert check_route(checkio, [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "Dotted maze"
assert check_route(checkio, [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1],
[1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "Need left maze"
assert check_route(checkio, [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1],
[1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]), "The big dead end."
print("The local tests are done.")
|
[
"martin.hora11gmail.com"
] |
martin.hora11gmail.com
|
e5c0e18570d6495874c69743b5770989a9bd61d0
|
878cb6c2b435522bd7f0f2d48bd3f8657efccf53
|
/elimination.py
|
f9ebd4954ce7b4c3585fee0426166a54bb4d8d75
|
[
"MIT"
] |
permissive
|
joshr17/markov-aggregation
|
7f5f89daf30d50822b9cca9a29f2981a3f594f14
|
9e0b3182c0f6f9f340b70adca13374548e50f512
|
refs/heads/master
| 2020-03-06T17:32:26.161816 | 2018-03-28T19:39:53 | 2018-03-28T19:39:53 | 126,991,628 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 13,227 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 14:36:17 2018
@author: joshua
This file contains the functions required to perform elimination and to use
it to calculate the stationary distribution of a Markov process or the mean
first passage times from every node in a set A to its complement A^c.
"""
import logging
import numpy as np
import scipy as sp
from scipy.linalg import eig
from sklearn.preprocessing import normalize
LOGGER = logging.getLogger('markagg')
def rand_stoch_matrix(n, den):
""" Returns a scipy sparse random stochastic matrix of shape nxn and
density den.
"""
while True:
# Create uniformly distributed random matrix
matrix = sp.sparse.rand(n, n, density=den, format='lil')
if np.any(matrix.sum(axis=1) < 0.001):
pass
            # try again - rows cannot be zero - loop until it works
else:
matrix = normalize(matrix, norm='l1', axis=1)
break
return matrix
def rand_trans_times(n):
""" Returns a scipy sparse random matrix of shape nx1 and density den.
"""
transTimes = sp.sparse.rand(n, 1, density=1, format='lil')
return transTimes
def augment_matrix(P, T):
""" Returns the augmented matrix [I-P, T]. This object encapsulates all the
required information about a semi-Markov process.
"""
n = P.shape[0]
#identity matrix as a sparse matrix
identity = sp.sparse.eye(n, n, format='lil')
#this matrix is the matrix [I-P T]
augmented_matrix = sp.sparse.lil_matrix((n, n+1))
augmented_matrix[:, 0:n] = identity - P
augmented_matrix[:, n:n+1] = T
return augmented_matrix.tocsc()
def get_P(M):
""" Returns P when given [I-P, T].
"""
n = M.shape[0]
identity = sp.sparse.eye(n, n, format='lil')
return(identity - M[:, range(n)])
def get_T(M):
""" Returns T when given [I-P, T].
"""
n = M.shape[0]
return(M[:, n])
""" Each of the following 7 functions returns a submatrix of a given matrix M.
"""
def block_11(M, i):
return M[0:i, 0:i]
def block_12(M, i):
(_, m) = M.shape
return M[0:i, i:m-1]
def block_12var(M, i, c):
(_, m) = M.shape
return M[0:i, i:m-c-1]
def block_13(M, i):
(_, m) = M.shape
return M[0:i, m-1:m]
def block_21(M, i):
(n, _) = M.shape
return M[i:n, 0:i]
def block_22(M, i):
(n, m) = M.shape
return M[i:n, i:m-1]
def block_23(M, i):
(n, m) = M.shape
return M[i:n, m-1:m]
def nodes_into_A(P, A):
""" Returns a subset of [1,n] of nodes which are connected to edges leading
into the set A of nodes as judged by the directed adjacency matrix P.
"""
nodes = [node for node in range(0, P.shape[0]) if np.any(P[node, A])]
return nodes
def nodes_out_of_A(P, A):
""" Returns a subset of [1,n] of nodes which are connected to edges leading
out of the set A of nodes as judged by the directed adjacency matrix P.
"""
nodes = [node for node in range(0, P.shape[0]) if np.any(P[A, node])]
return nodes
class LUdecomp:
""" A class used to compute the updated transition probabilities and
mean waiting times after elimination via block LU decomposition as
described on page 10 of:
S. MacKay, R & D. Robinson, J. (2018). Aggregation of Markov flows I:
theory. Philosophical Transactions of The Royal Society A Mathematical
Physical and Engineering Sciences. 376. 20170232. 10.1098/rsta.2017.0232.
Attributes:
aug_matrix: a sparse augmented matrix of the type formed by the
function augment_matrix.
        nx: An integer denoting the number of nodes to be eliminated.
"""
def __init__(self, aug_matrix, nx):
""" Initialises LUdecomp class.
"""
self.aug_matrix = aug_matrix
self.nx = nx
def LYX(self):
""" Returns the matrix L_{YX} from paper.
"""
B11 = block_11(self.aug_matrix, self.nx).tocsc()
LOGGER.debug('B11 = ')
LOGGER.debug(B11)
B21 = block_21(self.aug_matrix, self.nx).tocsc()
LOGGER.debug('B21 = ')
LOGGER.debug(B21)
trans_B11 = sp.sparse.csc_matrix.transpose(B11).tocsc()
LOGGER.debug(type(trans_B11))
minus_trans_B21 = sp.sparse.csc_matrix.transpose(-B21).tocsc()
LOGGER.debug(type(minus_trans_B21))
trans_LYX = sp.sparse.linalg.spsolve(trans_B11, minus_trans_B21)
ny = self.aug_matrix.shape[0] - self.nx
if trans_LYX.shape != (self.nx, ny):
trans_LYX = trans_LYX.reshape(self.nx, ny)
trans_LYX = sp.sparse.csc_matrix(trans_LYX)
LYX = sp.sparse.csc_matrix.transpose(trans_LYX)
return LYX #this is a csr sparse matrix
def new_PYY(self, LYX):
""" Returns the updated transition probabilities for the remaining
nodes.
"""
B22 = block_22(self.aug_matrix, self.nx)
cB22 = sp.sparse.csc_matrix(B22)
B12 = block_12(self.aug_matrix, self.nx)
cB12 = sp.sparse.csc_matrix(-B12)
new_PYY = sp.sparse.eye(*B22.shape, format='csc') - cB22 + LYX*cB12
return new_PYY.tocsc()
def new_TY(self, LYX):
""" Returns the updated mean waiting times for the remaining nodes.
"""
B13 = block_13(self.aug_matrix, self.nx)
cB13 = sp.sparse.csc_matrix(B13)
B23 = block_23(self.aug_matrix, self.nx)
cB23 = sp.sparse.csc_matrix(B23)
new_TY = cB23 + LYX*cB13
return new_TY.tocsc()
@staticmethod
def new_aug_matrix(new_PYY, new_TY):
""" Returns the augmented for the eliminated Markov process.
"""
return augment_matrix(new_PYY, new_TY).tocsc()
def L(self, LYX):
""" Returns the L matrix from the block LU decomposition form.
"""
ny = self.aug_matrix.shape[0] - self.nx
L21 = -LYX
L = sp.sparse.bmat([[sp.sparse.eye(self.nx, self.nx), None], [L21, sp.sparse.eye(ny, ny)]])
return L
def U(self, new_PYY, new_TY):
""" Returns the U matrix from the block LU decomposition form.
"""
U11 = block_11(self.aug_matrix, self.nx)
U12 = block_12(self.aug_matrix, self.nx)
U13 = block_13(self.aug_matrix, self.nx)
U22 = sp.sparse.eye(*new_PYY.shape) -new_PYY
U23 = new_TY
U = sp.sparse.bmat([[U11, U12, U13], [None, U22, U23]])
return U
def get_rhox(LYX, rhoy):
"""Returns the (unnormalised) stationary probabilities for the nodes in
the eliminated set X, given the (unnormalised) stationary probabilities for
nodes in Y = X^c.
"""
rhoyT = rhoy.transpose()
rhox = rhoyT*LYX
crhox = sp.sparse.csr_matrix(rhox.transpose())
return crhox
def get_PiFromRho(rho, T):
""" Returns the actual stationary distribution by reweighting rho according
to T.
"""
pi = np.multiply(rho, T)
return pi
def elimination_pi(P, T):
""" Returns the stationary distribution of the Markov process (P,T) by
    eliminating a single node at a time in the order specified by the index
of the transition matrix P. Refer to paper for theory.
Args:
P (scipy.sparse.lil_matrix): Matrix of transition probabilities.
T (scipy.sparse.lil_matrix): Vector of mean waiting times.
Returns:
numpy.ndarray: The stationary distribution of the Markov process.
"""
M = augment_matrix(P, T)
N = M.shape[0]
currentAugMatrix = M.tocsc()
seq_LYX = []
for i in range(0, N-1):
decomp = LUdecomp(currentAugMatrix, 1)
new_LYX = decomp.LYX()
new_PYY = decomp.new_PYY(new_LYX)
new_TY = decomp.new_TY(new_LYX)
currentAugMatrix = decomp.new_aug_matrix(new_PYY, new_TY)
if i == N-2:
finalAugMatrix = decomp.new_aug_matrix(new_PYY, new_TY)
rhoy = sp.sparse.csr_matrix([1/finalAugMatrix[0, 1]])
seq_LYX.append(new_LYX)
for i in reversed(range(0, N-1)):
current_LYX = seq_LYX[i]
rhox = get_rhox(current_LYX, rhoy)
rhoy = sp.sparse.bmat([[rhox], [rhoy]])
rho_final = rhoy
rho_final_arr = rho_final.toarray()
T_arr = T.toarray()
final = np.multiply(rho_final_arr, T_arr)
return final
def general_elimination_pi(P, T, order_to_eliminate):
""" Returns the stationary distribution of the Markov process (P,T) by
eliminating an arbitrary given number of nodes at a time in the order
    specified by the index of the transition matrix P. Refer to paper for
theory.
Args:
P (scipy.sparse.lil_matrix): Matrix of transition probabilities.
T (scipy.sparse.lil_matrix): Vector of mean waiting times.
order_to_eliminate (list of integers): Specifies how many nodes to
eliminate at each iteration, order_to_eliminate[0] number being
eliminated in the first iteration. The sum of the numbers should
amount to no more than N = P.shape[0].
Returns:
numpy.matrixlib.defmatrix.matrix: The stationary distribution of the
Markov process.
"""
M = augment_matrix(P, T)
iterations = len(order_to_eliminate)
currentAugMatrix = M.tocsc()
seq_LYX = []
for i in range(0, iterations-1):
decomp = LUdecomp(currentAugMatrix, order_to_eliminate[i])
new_LYX = decomp.LYX()
new_PYY = decomp.new_PYY(new_LYX)
new_TY = decomp.new_TY(new_LYX)
if i == iterations-2:
final_P = new_PYY.todense()
LOGGER.debug(final_P.shape)
[S, U] = eig(final_P.transpose())
stationary = np.matrix(U[:, np.where(np.abs(S - 1.) < 1e-8)[0][0]].flat)
new_TY_dense = new_TY.todense().transpose()
unnormalised_dist = np.multiply(stationary, new_TY_dense).transpose()
rhoy = np.matrix(stationary)/(sum(unnormalised_dist))
rhoy = rhoy.transpose()
currentAugMatrix = augment_matrix(new_PYY, new_TY)
seq_LYX.append(new_LYX)
for i in reversed(range(0, iterations-1)):
current_LYX = seq_LYX[i]
rhox = get_rhox(current_LYX, rhoy)
rhoy = sp.sparse.bmat([[rhox], [rhoy]])
rho_final = rhoy
rho_final_arr = rho_final.toarray()
T_arr = T.toarray()
final = np.multiply(rho_final_arr, T_arr)
return np.asmatrix(final)
def calc_stationary_dist(P, T):
""" Returns the stationary distribution of a Markov processes computed
using scipy.linalg, an interface for the LAPACK and BLAS libraries. Used
for comparison and testing of elimination method.
Args:
P (scipy.sparse.lil_matrix): Matrix of transition probabilities.
T (scipy.sparse.lil_matrix): Vector of mean waiting times.
Returns:
numpy.ndarray: The stationary distribution of the Markov process.
"""
P = P.toarray()
P = np.asmatrix(P)
[S, U] = eig(P.T)
stationary = np.array(U[:, np.where(np.abs(S - 1.) < 1e-8)[0][0]].flat)
stationary = stationary / np.sum(stationary)
T_arr = T.transpose().toarray()
unnormalised_dist = np.multiply(stationary, T_arr).transpose()
return unnormalised_dist/sum(unnormalised_dist)
def calc_TAB(P, T, c):
""" Returns the MFPT from A = V\B to B where B is a prespecified subest
of the nodes. It is assumed that a permutation preprocesing step has been
performed so that B corresponds to the indexes 0,1,..., c-1 of P. Nodes
are eliminated one at a time. Refer to paper for theory.
Args:
P (scipy.sparse.lil_matrix): Matrix of transition probabilities.
T (scipy.sparse.lil_matrix): Vector of mean waiting times.
        c (int): Specifies B as the set 0,1,...,c-1.
Returns:
scipy.sparse.coo.coo_matrix: A vector of length |A| of the mean time
taken to reach B from each element of A.
"""
M = augment_matrix(P, T)
N = M.shape[1]
seq_M = []
seq_I_PXX = []
seq_TX = []
seq_PXC = []
new_M = M
for i in range(0, N-c-1):
seq_M.append(new_M)
seq_I_PXX.append(block_11(new_M, 1))
seq_TX.append(block_13(new_M, 1))
seq_PXC.append(-block_12var(new_M, 1, c))
seq_I_PXX[i] = sp.sparse.csr_matrix(seq_I_PXX[i])
seq_TX[i] = sp.sparse.csr_matrix(seq_TX[i])
seq_PXC[i] = sp.sparse.csr_matrix(seq_PXC[i])
decomp = LUdecomp(new_M, 1)
new_LYX = decomp.LYX()
new_PYY = decomp.new_PYY(new_LYX)
new_TY = decomp.new_TY(new_LYX)
new_M = decomp.new_aug_matrix(new_PYY, new_TY)
TB = sp.sparse.csr_matrix([sp.sparse.linalg.spsolve(seq_I_PXX[N-c-2], seq_TX[N-c-2])])
for i in reversed(range(0, N-c-2)):
TXB = sp.sparse.linalg.spsolve(seq_I_PXX[i], seq_TX[i] + seq_PXC[i]*TB)
TB = sp.sparse.vstack([TXB, TB])
return TB
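# --- Illustrative usage sketch (hypothetical, not part of the original module) ---
# A minimal check, assuming the helpers above behave as documented: build a
# small random semi-Markov process and compare the elimination-based
# stationary distribution against the direct eigenvector computation.
# Both results are normalised before comparison. Defined but not called.
def _example_compare(n=6, den=0.8):
    P = rand_stoch_matrix(n, den).tolil()
    T = rand_trans_times(n)
    pi_elim = elimination_pi(P, T)
    pi_eig = np.asarray(calc_stationary_dist(P, T))
    pi_elim = pi_elim / pi_elim.sum()
    pi_eig = pi_eig / pi_eig.sum()
    # True when the two methods agree up to normalisation
    return np.allclose(pi_elim.ravel(), pi_eig.ravel())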
|
[
"noreply@github.com"
] |
joshr17.noreply@github.com
|
ee5f8ba3eaf4192de32aab293b57156dedae8772
|
b037e1c315c9b4e93ae082e6765fa3872a5e4fee
|
/p_12/social/migrations/0001_initial.py
|
046abbc64c917203f9d04614670b8c33904a7de2
|
[] |
no_license
|
cbaldwin20/project_12
|
7976bec11afefb5330a405232945f7bd6bdb39bf
|
4e268e99e7ce9e7ba88d37b3b37e6addcbdd3114
|
refs/heads/master
| 2020-03-17T10:36:56.190411 | 2018-06-12T09:03:33 | 2018-06-12T09:03:33 | 133,518,515 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,868 |
py
|
# Generated by Django 2.0.5 on 2018-06-11 04:15
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=255, unique=True)),
('active', models.BooleanField(default=True)),
('staff', models.BooleanField(default=False)),
('admin', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Application',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('accepted', models.BooleanField(default=False)),
('rejected', models.BooleanField(default=False)),
('applied_date', models.DateField(auto_now_add=True)),
                ('person_applying', models.ForeignKey(on_delete=models.CASCADE, related_name='applications', to=settings.AUTH_USER_MODEL)),  # on_delete must be a callable such as models.CASCADE, not True
],
),
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(max_length=255)),
('time_of_notification', models.DateField(auto_now_add=True)),
('already_seen', models.BooleanField(default=False)),
                ('person_notifying', models.ForeignKey(on_delete=models.CASCADE, related_name='notifications', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='OutsideProject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('project_name', models.CharField(max_length=255)),
('url', models.URLField()),
                ('creator', models.ForeignKey(on_delete=models.CASCADE, related_name='outsideproject_owner', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Position',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('position_name', models.CharField(max_length=255)),
('position_description', models.TextField()),
('hours_per_week', models.IntegerField(default=0)),
                ('position_filled_user', models.ForeignKey(blank=True, null=True, on_delete=models.CASCADE, related_name='my_position_for_project', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('description', models.TextField()),
('image', models.ImageField(blank=True, default='', upload_to='C:\\Users\\cbaldwin\\Documents\\teamTreehouseProjects\\Python\\project_12\\p_12\\media')),
('url_slug', models.SlugField(unique=True)),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('project_name', models.CharField(max_length=255)),
('description', models.TextField()),
('project_timeline', models.TextField()),
('application_requirements', models.TextField()),
('url_slug', models.SlugField(unique=True)),
                ('creator', models.ForeignKey(on_delete=models.CASCADE, related_name='project_owner', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Skill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='profile',
name='projects',
field=models.ManyToManyField(related_name='user_projects', to='social.Project'),
),
migrations.AddField(
model_name='profile',
name='skills',
field=models.ManyToManyField(related_name='user_skills', to='social.Skill'),
),
migrations.AddField(
model_name='profile',
name='user',
            field=models.OneToOneField(on_delete=models.CASCADE, related_name='profile_user', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='position',
name='project',
            field=models.ForeignKey(on_delete=models.CASCADE, related_name='project_positions', to='social.Project'),
),
migrations.AddField(
model_name='application',
name='position',
            field=models.ForeignKey(on_delete=models.CASCADE, related_name='position_applications', to='social.Position'),
),
migrations.AlterUniqueTogether(
name='application',
unique_together={('position', 'person_applying')},
),
]
|
[
"cbaldwin20@yandex.com"
] |
cbaldwin20@yandex.com
|
aa5c4a98512495974fb7608726a4f591fddd94e6
|
8e39a4f4ae1e8e88d3b2d731059689ad5b201a56
|
/dev-util/itstool/itstool-2.0.2.py
|
98413b632277dc442d4cbfa589f28591e237e38c
|
[] |
no_license
|
wdysln/new
|
d5f5193f81a1827769085932ab7327bb10ef648e
|
b643824b26148e71859a1afe4518fe05a79d333c
|
refs/heads/master
| 2020-05-31T00:12:05.114056 | 2016-01-04T11:38:40 | 2016-01-04T11:38:40 | 37,287,357 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 219 |
py
|
metadata = """
summary @ XML to PO and back again
homepage @ http://itstool.org/
license @ GPL3
src_url @ http://files.itstool.org/$name/$fullname.tar.bz2
arch @ ~x86_64
"""
depends = """
build @ dev-libs/libxml2
"""
|
[
"zirkovandersen@gmail.com"
] |
zirkovandersen@gmail.com
|
90631f42779c41c73a822a47c4134829fd8877c7
|
46c8a1028d85f3da4101aec17f62af3f8811a580
|
/Ally_Financial.py
|
1fc5112994f0c081d93de4c9ae1c7bfac4daa906
|
[] |
no_license
|
trssssrt/Interview_Assignments
|
632e426e0dd4d83cf7f13afc06bd6d986b422425
|
dec95f5d73c802d1f5cacd6b6bcf11b7db4df98b
|
refs/heads/master
| 2020-06-23T22:32:42.596627 | 2020-06-15T21:39:55 | 2020-06-15T21:39:55 | 198,773,203 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,660 |
py
|
"""
Run this file from the command line with the following arguments.
Sample commands:
python3 Ally_Financial.py -g loc
python3 Ally_Financial.py -g pass -n
python3 Ally_Financial.py -g people
________
Here is an article about Ally’s new CIO (https://www.wsj.com/articles/new-ally-financial-cio-prioritizes-ai-11579615200)
Below is the python screening task that the director of Artificial Intelligence gave me to screen candidates. The task is somewhat open ended and there is room for interpretation on how to complete it. Please don’t overthink it. Read the prompt and then complete it in the most effective way that you find possible. Let me know if you have any questions!
Details
There is an API (http://api.open-notify.org/) that provides information on the International Space Station. Documentation is provided via the website, along with sample request/response.
Task
Implement a Python script that will accept the following command line arguments, along with any required information, and print the expected results
loc
print the current location of the ISS
Example: “The ISS current location at {time} is {LAT, LONG}”
pass
print the passing details of the ISS for a given location
Example: “The ISS will be overhead {LAT, LONG} at {time} for {duration}”
people
for each craft print the details of those people that are currently in space
"""
import requests
import json
class AllyFinancial_Screening():
def __init__(self):
self.base_url = 'http://api.open-notify.org/'
self.simple_values = {
'loc': 'iss-now.json',
'pass': 'iss-pass.json',
'people': 'astros.json'
}
self.default_coordinates = {
'lon': str(45.0),
'lat': str(-12.3)
}
def getResponse(self, _key, _input = ''):
req = requests.get(self.base_url + self.simple_values[_key] + _input)
return req.json()
def printResponse(self, args):
for key in args:
if args[key] == 'pass':
pass_inputs = ['lat', 'lon', 'alt', 'n']
_input = '?'
for pI in pass_inputs:
if ('-' + pI) in args:
if '=' in _input:
_input += '&'
_input += pI + '='
try:
_input += args['-' + pI]
except:
_input += self.default_coordinates[pI]
if _input == '?':
_input += 'lat' + '=' + self.default_coordinates['lat']
_input += '&' +'lon' + '=' + self.default_coordinates['lon']
elif 'lon' in _input and 'lat' not in _input:
_input += '&' + 'lat' + '=' + self.default_coordinates['lat']
elif 'lon' not in _input and 'lat' in _input:
_input += '&' + 'lon' + '=' + self.default_coordinates['lon']
resp = self.getResponse(args[key], _input)
# Print Results
print(f"The ISS will be overhead ({resp['request']['latitude']}, {resp['request']['longitude']}) at {resp['request']['datetime']} and makes {resp['request']['passes']} at an altitude of {resp['request']['altitude']} meters:")
print("Risetime\t\t\tDuration (seconds)")
print("_"*60) #Silly Formatting
for _ in resp['response']:
print(f"{_['risetime']}\t\t\t{_['duration']}")
elif args[key] == 'loc':
resp = self.getResponse(args[key])
print(f"The ISS current location at {str(resp['timestamp'])} is ({str(resp['iss_position']['latitude'])}, {str(resp['iss_position']['longitude'])})")
elif args[key] == 'people':
resp = self.getResponse(args[key])
print('craft\t\t\tname')
print('_'*40) #Silly Formatting
for person in resp['people']:
print(f"{person['craft']}\t\t\t{person['name']}")
def getopts(argv):
"""Collect command-line options in a dictionary"""
opts = {} # Empty dictionary to store key-value pairs.
while argv: # While there are arguments left to parse...
if argv[0][0] == '-': # Found a "-name value" pair.
if argv[0] in opts: # Check if "-name" already exists in opts
if not isinstance(opts[argv[0]], list): # If not a list, turn into a list
opts[argv[0]] = [opts[argv[0]]]
opts[argv[0]].append(argv[1])
else:
opts[argv[0]] = argv[1] # Add key and value to the dictionary.
# opts[argv[0]] = argv[1]
argv = argv[1:] # Reduce the argument list by copying it starting from index 1.
    if ('-g' not in opts) == ('-get' not in opts):  # require exactly one of "-g" / "-get"
return False
return opts
if __name__ == '__main__':
from sys import argv
try:
myargs = getopts(argv)
if isinstance(myargs, bool):
print('Error: Command Line Input MUST have either "-g" or "-get"')
else:
ally_financial_screening = AllyFinancial_Screening()
ally_financial_screening.printResponse(myargs)
except IndexError as error:
# Output expected IndexErrors.
print('Data not provided after "-" input sequence')
print(f'IndexError: {error}')
except Exception as exception:
# Output unexpected Exceptions.
print(f'Exception: {exception}')
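# --- Illustrative helper (hypothetical, not part of the original script) ---
# Minimal direct call to the Open Notify "iss-now" endpoint, showing the
# JSON shape that printResponse() consumes for the 'loc' case. Defined but
# never called by the script itself; assumes network access.
def _example_iss_location():
    resp = requests.get('http://api.open-notify.org/iss-now.json').json()
    # resp ~ {'timestamp': ..., 'iss_position': {'latitude': '...', 'longitude': '...'}}
    return resp['iss_position']['latitude'], resp['iss_position']['longitude']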
|
[
"trss@byu.edu"
] |
trss@byu.edu
|
9bb91005f84d4d67416db899318bf4ddb657920e
|
463febc26f9f6e09d51206c87c7450476b1dfa7c
|
/0x0C-nqueens/0-nqueens.py
|
b48a68afd83f68b7c5b517b2fcb380f754708017
|
[] |
no_license
|
Nahi-Terefe/holbertonschool-interview
|
77a5fd0e668cabaa2f986ded265996061fcbc9f8
|
e4842430f346d5b18e407ac468ba225aaeaae9d8
|
refs/heads/master
| 2023-02-17T13:31:31.389980 | 2021-01-12T00:24:42 | 2021-01-12T00:24:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,734 |
py
|
#!/usr/bin/python3
""" Solving n queens """
import sys
# error handling for argv[1]
if __name__ == "__main__":
if len(sys.argv) == 1 or len(sys.argv) > 2:
print("Usage: nqueens N")
sys.exit(1)
N = sys.argv[1]
try:
N_int = int(N)
except ValueError:
print("N must be a number")
sys.exit(1)
if N_int < 4:
print("N must be at least 4")
sys.exit(1)
# n queens methods
coords = []
def isSafe(coords, row, col):
""" Checks if queen can be placed in coord of board.
Returns True if can, else False
"""
rows = []
cols = []
diag_r = []
diag_l = []
for square in coords:
rows.append(square[0])
cols.append(square[1])
diag_r.append(square[0] + square[1])
diag_l.append(square[1] - square[0])
if row in rows or col in cols:
return False
if row + col in diag_r or col - row in diag_l:
return False
return True
def solveNqueens(coords, col, safe_queens=[]):
""" Creates array of queen positions
Returns array
"""
for x in range(N_int):
if isSafe(coords, x, col):
coords.append([x, col])
if col == N_int - 1:
safe_queens.append(coords.copy())
del coords[-1]
else:
solveNqueens(coords, col + 1)
if len(coords):
del coords[-1]
return safe_queens
    # run the solver, starting the recursion from column 0
coords = solveNqueens(coords, 0)
# prints coords of squares for safe queens
for squares in coords:
print(squares)
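# --- Illustrative check (hypothetical, not part of the original script) ---
# Each printed solution is a list of [row, col] pairs; isSafe() above rejects
# shared rows, columns, and diagonals, and the script relies on the shared
# mutable default `safe_queens=[]` to accumulate solutions across recursive
# calls. The helper below independently verifies one solution; it is defined
# but not called.
def _check_solution(squares):
    """Minimal sketch: verify that no two queens attack each other."""
    for i in range(len(squares)):
        for j in range(i + 1, len(squares)):
            r1, c1 = squares[i]
            r2, c2 = squares[j]
            if r1 == r2 or c1 == c2 or abs(r1 - r2) == abs(c1 - c2):
                return False
    return True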
|
[
"977@holbertonschool.com"
] |
977@holbertonschool.com
|
1c21ccd9a3a24e6884b6bb3a62c4d4c2e83cc4e7
|
e84cbd06421127f08046b393d9eaa87293745629
|
/predictor.py
|
04bbf4cf9a10af9ec729aa7138bb0105cb331210
|
[] |
no_license
|
royaljava/dsrn
|
b8bbfe2335b896b50613a43f81d5320b40325ae4
|
db21d57dfab57de3608f0372e749c6488b6b305d
|
refs/heads/master
| 2020-04-18T11:53:12.813682 | 2018-11-09T12:56:27 | 2018-11-09T13:01:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,644 |
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from google.protobuf import text_format  # standard protobuf import ("google3" is Google-internal)
from lingvo.core import inference_graph_pb2
from lingvo.core import py_utils
import os
import skimage
import skimage.io
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('checkpoint', '',
"""Path to model checkpoint.""")
tf.app.flags.DEFINE_string('inference_graph', '',
"""Path to model inference_graph def.""")
tf.app.flags.DEFINE_string('image_path', '',
"""Path to input image in a format supported by tf.image.decode_image.""")
tf.app.flags.DEFINE_string('output_dir', '',
"""Output directory, results will be written as png files.""")
def LoadInferenceGraph(path):
inference_graph = inference_graph_pb2.InferenceGraph()
with tf.gfile.Open(path, "r") as f:
text_format.Parse(f.read(), inference_graph)
return inference_graph
class Predictor(object):
def __init__(self,
inference_graph,
checkpoint):
"""Initialize the predictor,
Args:
inferece_graph: a text file containing an inference_graph proto.
checkpoint: actual model checkpoint (without '.meta').
"""
inference_graph = LoadInferenceGraph(inference_graph)
self._checkpoint = checkpoint
self._graph = tf.Graph()
with self._graph.as_default():
self._saver = tf.train.Saver(saver_def=inference_graph.saver_def)
with tf.device("cpu:0" % "cpu"):
tf.import_graph_def(inference_graph.graph_def, name="")
self._graph.finalize()
subgraph = inference_graph.subgraphs['default']
assert 'img_str' in subgraph.feeds
assert 'hr_image' in subgraph.fetches
self._sess = tf.Session(graph=self._graph)
self._saver.restore(self._sess, self._checkpoint)
def Run(self, image_name):
"""Runs predictor
Args:
image_name: full path to the image file.
Returns:
A numpy array of the output image.
"""
img_raw_str = tf.gfile.Open(image_name, 'rb').read()
hr_output = self._sess.run('hr_image', feed_dict={'img_str': [img_raw_str]})
return hr_output[0]
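# --- Illustrative programmatic use (hypothetical paths, not part of the
# original file); defined but not called by the script.
def _example_use(graph_path, ckpt_path, image_path):
  """Minimal sketch: build a Predictor once and run it on a single image."""
  predictor = Predictor(graph_path, ckpt_path)
  return predictor.Run(image_path)  # numpy array holding the upscaled image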
def main(_):
predictor = Predictor(FLAGS.inference_graph,
FLAGS.checkpoint)
out_image = predictor.Run(FLAGS.image_path)
image_name = os.path.splitext(os.path.basename(FLAGS.image_path))[0]
  out_image_path = os.path.join(FLAGS.output_dir, image_name + '.png')  # flag is defined as output_dir above
skimage.io.imsave(out_image_path, out_image)
if __name__ == "__main__":
tf.app.run(main)
|
[
"weihan@uiuc.edu"
] |
weihan@uiuc.edu
|
dc623bf3d465a281c0a8beddbec8d2e3465b048d
|
974086c201747e1fa43022065f20268876deb38c
|
/backend/src/settings.py
|
9048599cd1665ed35bbf1b1bb771aa22870f5d2b
|
[
"Apache-2.0"
] |
permissive
|
davidnyberg/Healthify-Project
|
f6a1ab01e46b21278d160974877652230cc2ef47
|
4bdb4e7586b9b896826af45f981c77d73eca74c0
|
refs/heads/master
| 2023-03-28T18:41:57.905101 | 2020-12-13T14:20:59 | 2020-12-13T14:20:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,496 |
py
|
# -*- coding: utf-8 -*-
"""Application configuration."""
import os
from datetime import timedelta
from apispec import APISpec
from apispec.ext.marshmallow import MarshmallowPlugin
class Config(object):
"""Base configuration."""
SECRET_KEY = os.environ.get('SRC_SECRET', 'secret-key')
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
TEST_PATH = os.path.join(PROJECT_ROOT, 'tests')
BCRYPT_LOG_ROUNDS = 4
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple'
SQLALCHEMY_TRACK_MODIFICATIONS = False
JWT_AUTH_USERNAME_KEY = 'email'
JWT_AUTH_HEADER_PREFIX = 'Token'
CORS_ORIGIN_WHITELIST = [
'http://0.0.0.0:4100',
'http://localhost:4100',
'http://0.0.0.0:8000',
'http://localhost:8000',
'http://0.0.0.0:4200',
'http://localhost:4200',
'http://0.0.0.0:4000',
'http://localhost:4000',
'http://localhost:80',
'http://0.0.0.0:80',
]
JWT_HEADER_TYPE = 'Token'
APISPEC_SPEC = APISpec(
title='RESTful API - Healthify',
version='v1',
openapi_version='2.0',
info=dict(description='Swagger docs for exploring the API'),
plugins=[MarshmallowPlugin()],
)
APISPEC_SWAGGER_URL = "/api/swagger"
APISPEC_SWAGGER_UI_URL = "/swagger-ui"
EHR_USER = os.getenv('ehr_user')
EHR_USER_PASS = os.getenv('ehr_user_pass')
EHR_ADMIN = os.getenv('ehr_admin')
EHR_ADMIN_PASS = os.getenv('ehr_admin_pass')
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
PROPAGATE_EXCEPTIONS = True
# <service-name>.<namespace-name>.svc.cluster.local
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL',
'postgresql://postgresadmin:admin123@postgres.tddc88-company-2-2020.svc.cluster.local:5432/postgres')
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
CACHE_TYPE = 'simple'
JWT_ACCESS_TOKEN_EXPIRES = timedelta(10 ** 6)
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
BCRYPT_LOG_ROUNDS = 4
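# --- Illustrative sketch (hypothetical, not part of the original module) ---
# Minimal example of attaching one of the config classes above to a Flask
# app; assumes Flask is installed. `_make_app` is an assumed factory name
# and is not called anywhere in this module.
def _make_app(config_object=DevConfig):
    from flask import Flask
    app = Flask(__name__)
    app.config.from_object(config_object)
    return app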
|
[
"davfo018@student.liu.se"
] |
davfo018@student.liu.se
|
e5864a2b83c01f7034aa3be5885b5d669876f36d
|
2536dc92dff2eea7ebe7dcd3d10a6bc89ac78178
|
/venv/Lib/site-packages/crypto/SelfTest/Cipher/test_DES.py
|
62fcc20ffbee6a7ab5b8dad8638d507e6f5d20a8
|
[] |
no_license
|
AlamParihar/Cryptography-Challenges
|
1eb2028b942d5a04a9aa27286e8d61b875d96021
|
05631e31285549b8c65c54c9397e09fb9bd22561
|
refs/heads/master
| 2020-03-27T23:22:01.674910 | 2018-09-24T04:50:26 | 2018-09-24T04:50:26 | 147,312,736 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,904 |
py
|
# -*- coding: utf-8 -*-
#
# SelfTest/Cipher/DES.py: Self-test for the (Single) DES cipher
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.DES"""
from crypto.Util.py3compat import *
import unittest
# This is a list of (plaintext, ciphertext, key, description) tuples.
SP800_17_B1_KEY = '01' * 8
SP800_17_B2_PT = '00' * 8
test_data = [
# Test vectors from Appendix A of NIST SP 800-17
# "Modes of Operation Validation System (MOVS): Requirements and Procedures"
# http://csrc.nist.gov/publications/nistpubs/800-17/800-17.pdf
# Appendix A - "Sample Round Outputs for the DES"
('0000000000000000', '82dcbafbdeab6602', '10316e028c8f3b4a',
"NIST SP800-17 A"),
# Table B.1 - Variable Plaintext Known Answer Test
('8000000000000000', '95f8a5e5dd31d900', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #0'),
('4000000000000000', 'dd7f121ca5015619', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #1'),
('2000000000000000', '2e8653104f3834ea', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #2'),
('1000000000000000', '4bd388ff6cd81d4f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #3'),
('0800000000000000', '20b9e767b2fb1456', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #4'),
('0400000000000000', '55579380d77138ef', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #5'),
('0200000000000000', '6cc5defaaf04512f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #6'),
('0100000000000000', '0d9f279ba5d87260', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #7'),
('0080000000000000', 'd9031b0271bd5a0a', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #8'),
('0040000000000000', '424250b37c3dd951', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #9'),
('0020000000000000', 'b8061b7ecd9a21e5', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #10'),
('0010000000000000', 'f15d0f286b65bd28', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #11'),
('0008000000000000', 'add0cc8d6e5deba1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #12'),
('0004000000000000', 'e6d5f82752ad63d1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #13'),
('0002000000000000', 'ecbfe3bd3f591a5e', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #14'),
('0001000000000000', 'f356834379d165cd', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #15'),
('0000800000000000', '2b9f982f20037fa9', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #16'),
('0000400000000000', '889de068a16f0be6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #17'),
('0000200000000000', 'e19e275d846a1298', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #18'),
('0000100000000000', '329a8ed523d71aec', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #19'),
('0000080000000000', 'e7fce22557d23c97', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #20'),
('0000040000000000', '12a9f5817ff2d65d', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #21'),
('0000020000000000', 'a484c3ad38dc9c19', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #22'),
('0000010000000000', 'fbe00a8a1ef8ad72', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #23'),
('0000008000000000', '750d079407521363', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #24'),
('0000004000000000', '64feed9c724c2faf', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #25'),
('0000002000000000', 'f02b263b328e2b60', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #26'),
('0000001000000000', '9d64555a9a10b852', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #27'),
('0000000800000000', 'd106ff0bed5255d7', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #28'),
('0000000400000000', 'e1652c6b138c64a5', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #29'),
('0000000200000000', 'e428581186ec8f46', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #30'),
('0000000100000000', 'aeb5f5ede22d1a36', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #31'),
('0000000080000000', 'e943d7568aec0c5c', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #32'),
('0000000040000000', 'df98c8276f54b04b', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #33'),
('0000000020000000', 'b160e4680f6c696f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #34'),
('0000000010000000', 'fa0752b07d9c4ab8', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #35'),
('0000000008000000', 'ca3a2b036dbc8502', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #36'),
('0000000004000000', '5e0905517bb59bcf', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #37'),
('0000000002000000', '814eeb3b91d90726', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #38'),
('0000000001000000', '4d49db1532919c9f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #39'),
('0000000000800000', '25eb5fc3f8cf0621', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #40'),
('0000000000400000', 'ab6a20c0620d1c6f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #41'),
('0000000000200000', '79e90dbc98f92cca', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #42'),
('0000000000100000', '866ecedd8072bb0e', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #43'),
('0000000000080000', '8b54536f2f3e64a8', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #44'),
('0000000000040000', 'ea51d3975595b86b', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #45'),
('0000000000020000', 'caffc6ac4542de31', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #46'),
('0000000000010000', '8dd45a2ddf90796c', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #47'),
('0000000000008000', '1029d55e880ec2d0', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #48'),
('0000000000004000', '5d86cb23639dbea9', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #49'),
('0000000000002000', '1d1ca853ae7c0c5f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #50'),
('0000000000001000', 'ce332329248f3228', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #51'),
('0000000000000800', '8405d1abe24fb942', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #52'),
('0000000000000400', 'e643d78090ca4207', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #53'),
('0000000000000200', '48221b9937748a23', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #54'),
('0000000000000100', 'dd7c0bbd61fafd54', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #55'),
('0000000000000080', '2fbc291a570db5c4', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #56'),
('0000000000000040', 'e07c30d7e4e26e12', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #57'),
('0000000000000020', '0953e2258e8e90a1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #58'),
('0000000000000010', '5b711bc4ceebf2ee', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #59'),
('0000000000000008', 'cc083f1e6d9e85f6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #60'),
('0000000000000004', 'd2fd8867d50d2dfe', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #61'),
('0000000000000002', '06e7ea22ce92708f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #62'),
('0000000000000001', '166b40b44aba4bd6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #63'),
# Table B.2 - Variable Key Known Answer Test
(SP800_17_B2_PT, '95a8d72813daa94d', '8001010101010101',
'NIST SP800-17 B.2 #0'),
(SP800_17_B2_PT, '0eec1487dd8c26d5', '4001010101010101',
'NIST SP800-17 B.2 #1'),
(SP800_17_B2_PT, '7ad16ffb79c45926', '2001010101010101',
'NIST SP800-17 B.2 #2'),
(SP800_17_B2_PT, 'd3746294ca6a6cf3', '1001010101010101',
'NIST SP800-17 B.2 #3'),
(SP800_17_B2_PT, '809f5f873c1fd761', '0801010101010101',
'NIST SP800-17 B.2 #4'),
(SP800_17_B2_PT, 'c02faffec989d1fc', '0401010101010101',
'NIST SP800-17 B.2 #5'),
(SP800_17_B2_PT, '4615aa1d33e72f10', '0201010101010101',
'NIST SP800-17 B.2 #6'),
(SP800_17_B2_PT, '2055123350c00858', '0180010101010101',
'NIST SP800-17 B.2 #7'),
(SP800_17_B2_PT, 'df3b99d6577397c8', '0140010101010101',
'NIST SP800-17 B.2 #8'),
(SP800_17_B2_PT, '31fe17369b5288c9', '0120010101010101',
'NIST SP800-17 B.2 #9'),
(SP800_17_B2_PT, 'dfdd3cc64dae1642', '0110010101010101',
'NIST SP800-17 B.2 #10'),
(SP800_17_B2_PT, '178c83ce2b399d94', '0108010101010101',
'NIST SP800-17 B.2 #11'),
(SP800_17_B2_PT, '50f636324a9b7f80', '0104010101010101',
'NIST SP800-17 B.2 #12'),
(SP800_17_B2_PT, 'a8468ee3bc18f06d', '0102010101010101',
'NIST SP800-17 B.2 #13'),
(SP800_17_B2_PT, 'a2dc9e92fd3cde92', '0101800101010101',
'NIST SP800-17 B.2 #14'),
(SP800_17_B2_PT, 'cac09f797d031287', '0101400101010101',
'NIST SP800-17 B.2 #15'),
(SP800_17_B2_PT, '90ba680b22aeb525', '0101200101010101',
'NIST SP800-17 B.2 #16'),
(SP800_17_B2_PT, 'ce7a24f350e280b6', '0101100101010101',
'NIST SP800-17 B.2 #17'),
(SP800_17_B2_PT, '882bff0aa01a0b87', '0101080101010101',
'NIST SP800-17 B.2 #18'),
(SP800_17_B2_PT, '25610288924511c2', '0101040101010101',
'NIST SP800-17 B.2 #19'),
(SP800_17_B2_PT, 'c71516c29c75d170', '0101020101010101',
'NIST SP800-17 B.2 #20'),
(SP800_17_B2_PT, '5199c29a52c9f059', '0101018001010101',
'NIST SP800-17 B.2 #21'),
(SP800_17_B2_PT, 'c22f0a294a71f29f', '0101014001010101',
'NIST SP800-17 B.2 #22'),
(SP800_17_B2_PT, 'ee371483714c02ea', '0101012001010101',
'NIST SP800-17 B.2 #23'),
(SP800_17_B2_PT, 'a81fbd448f9e522f', '0101011001010101',
'NIST SP800-17 B.2 #24'),
(SP800_17_B2_PT, '4f644c92e192dfed', '0101010801010101',
'NIST SP800-17 B.2 #25'),
(SP800_17_B2_PT, '1afa9a66a6df92ae', '0101010401010101',
'NIST SP800-17 B.2 #26'),
(SP800_17_B2_PT, 'b3c1cc715cb879d8', '0101010201010101',
'NIST SP800-17 B.2 #27'),
(SP800_17_B2_PT, '19d032e64ab0bd8b', '0101010180010101',
'NIST SP800-17 B.2 #28'),
(SP800_17_B2_PT, '3cfaa7a7dc8720dc', '0101010140010101',
'NIST SP800-17 B.2 #29'),
(SP800_17_B2_PT, 'b7265f7f447ac6f3', '0101010120010101',
'NIST SP800-17 B.2 #30'),
(SP800_17_B2_PT, '9db73b3c0d163f54', '0101010110010101',
'NIST SP800-17 B.2 #31'),
(SP800_17_B2_PT, '8181b65babf4a975', '0101010108010101',
'NIST SP800-17 B.2 #32'),
(SP800_17_B2_PT, '93c9b64042eaa240', '0101010104010101',
'NIST SP800-17 B.2 #33'),
(SP800_17_B2_PT, '5570530829705592', '0101010102010101',
'NIST SP800-17 B.2 #34'),
(SP800_17_B2_PT, '8638809e878787a0', '0101010101800101',
'NIST SP800-17 B.2 #35'),
(SP800_17_B2_PT, '41b9a79af79ac208', '0101010101400101',
'NIST SP800-17 B.2 #36'),
(SP800_17_B2_PT, '7a9be42f2009a892', '0101010101200101',
'NIST SP800-17 B.2 #37'),
(SP800_17_B2_PT, '29038d56ba6d2745', '0101010101100101',
'NIST SP800-17 B.2 #38'),
(SP800_17_B2_PT, '5495c6abf1e5df51', '0101010101080101',
'NIST SP800-17 B.2 #39'),
(SP800_17_B2_PT, 'ae13dbd561488933', '0101010101040101',
'NIST SP800-17 B.2 #40'),
(SP800_17_B2_PT, '024d1ffa8904e389', '0101010101020101',
'NIST SP800-17 B.2 #41'),
(SP800_17_B2_PT, 'd1399712f99bf02e', '0101010101018001',
'NIST SP800-17 B.2 #42'),
(SP800_17_B2_PT, '14c1d7c1cffec79e', '0101010101014001',
'NIST SP800-17 B.2 #43'),
(SP800_17_B2_PT, '1de5279dae3bed6f', '0101010101012001',
'NIST SP800-17 B.2 #44'),
(SP800_17_B2_PT, 'e941a33f85501303', '0101010101011001',
'NIST SP800-17 B.2 #45'),
(SP800_17_B2_PT, 'da99dbbc9a03f379', '0101010101010801',
'NIST SP800-17 B.2 #46'),
(SP800_17_B2_PT, 'b7fc92f91d8e92e9', '0101010101010401',
'NIST SP800-17 B.2 #47'),
(SP800_17_B2_PT, 'ae8e5caa3ca04e85', '0101010101010201',
'NIST SP800-17 B.2 #48'),
(SP800_17_B2_PT, '9cc62df43b6eed74', '0101010101010180',
'NIST SP800-17 B.2 #49'),
(SP800_17_B2_PT, 'd863dbb5c59a91a0', '0101010101010140',
'NIST SP800-17 B.2 #50'),
(SP800_17_B2_PT, 'a1ab2190545b91d7', '0101010101010120',
'NIST SP800-17 B.2 #51'),
(SP800_17_B2_PT, '0875041e64c570f7', '0101010101010110',
'NIST SP800-17 B.2 #52'),
(SP800_17_B2_PT, '5a594528bebef1cc', '0101010101010108',
'NIST SP800-17 B.2 #53'),
(SP800_17_B2_PT, 'fcdb3291de21f0c0', '0101010101010104',
'NIST SP800-17 B.2 #54'),
(SP800_17_B2_PT, '869efd7f9f265a09', '0101010101010102',
'NIST SP800-17 B.2 #55'),
]
class RonRivestTest(unittest.TestCase):
""" Ronald L. Rivest's DES test, see
http://people.csail.mit.edu/rivest/Destest.txt
ABSTRACT
--------
We present a simple way to test the correctness of a DES implementation:
Use the recurrence relation:
X0 = 9474B8E8C73BCA7D (hexadecimal)
X(i+1) = IF (i is even) THEN E(Xi,Xi) ELSE D(Xi,Xi)
to compute a sequence of 64-bit values: X0, X1, X2, ..., X16. Here
E(X,K) denotes the DES encryption of X using key K, and D(X,K) denotes
the DES decryption of X using key K. If you obtain
X16 = 1B1A2DDB4C642438
your implementation does not have any of the 36,568 possible single-fault
errors described herein.
"""
def runTest(self):
from crypto.Cipher import DES
from binascii import b2a_hex
X = []
X[0:] = [b('\x94\x74\xB8\xE8\xC7\x3B\xCA\x7D')]
for i in range(16):
            c = DES.new(X[i], DES.MODE_ECB)
            if not (i & 1):  # (i & 1) is 1 for odd i, so this branch is the even case
                X[i+1:] = [c.encrypt(X[i])]  # even i: encrypt
            else:
                X[i+1:] = [c.decrypt(X[i])]  # odd i: decrypt
self.assertEqual(b2a_hex(X[16]),
b2a_hex(b('\x1B\x1A\x2D\xDB\x4C\x64\x24\x38')))
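# --- Illustrative sketch (not part of the original test file) ---
# The Rivest recurrence from the docstring above, written iteratively with
# the same vendored `crypto` package; `_rivest_chain` is a hypothetical
# helper name and is not called by the test suite.
def _rivest_chain():
    from crypto.Cipher import DES
    x = bytes.fromhex('9474B8E8C73BCA7D')  # X0
    for i in range(16):
        cipher = DES.new(x, DES.MODE_ECB)  # key is Xi itself
        x = cipher.encrypt(x) if i % 2 == 0 else cipher.decrypt(x)
    return x.hex().upper()  # expected value: '1B1A2DDB4C642438'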
def get_tests(config={}):
from crypto.Cipher import DES
from .common import make_block_tests
return make_block_tests(DES, "DES", test_data) + [RonRivestTest()]
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
[
"alamjyot1@gmail.com"
] |
alamjyot1@gmail.com
|
b7bfcbd8582ad1efbfb7e5ce9ad844ace6fcf2cf
|
2cdb3871d73b4294b779c81f09e3273d0728e390
|
/src/azure-cli/azure/cli/command_modules/network/aaz/latest/network/application_gateway/waf_policy/custom_rule/_list.py
|
fcf113fd8845b31b348d69defabcbf94b62b429f
|
[
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] |
permissive
|
jiasli/azure-cli
|
5a834f799478ec1aa577b1cbcb1f85c8e0075859
|
314f72a847cead06adf6f4589ace0ac8c701732a
|
refs/heads/dev
| 2023-08-16T16:46:49.031504 | 2023-05-30T06:24:42 | 2023-05-30T06:24:42 | 199,984,491 | 2 | 1 |
MIT
| 2022-08-01T06:28:45 | 2019-08-01T05:41:34 |
Python
|
UTF-8
|
Python
| false | false | 174,786 |
py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network application-gateway waf-policy custom-rule list",
)
class List(AAZCommand):
"""List application gateway WAF policy custom rules.
:example: List application gateway WAF policy custom rules.
az network application-gateway waf-policy custom-rule list --policy-name MyPolicy --resource-group MyResourceGroup
"""
_aaz_info = {
"version": "2022-09-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/applicationgatewaywebapplicationfirewallpolicies/{}", "2022-09-01", "properties.customRules"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self.SubresourceSelector(ctx=self.ctx, name="subresource")
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.policy_name = AAZStrArg(
options=["--policy-name"],
help="Name of the application gateway WAF policy.",
required=True,
fmt=AAZStrArgFormat(
max_length=128,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.WebApplicationFirewallPoliciesGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.selectors.subresource.required(), client_flatten=True)
return result
class SubresourceSelector(AAZJsonSelector):
def _get(self):
result = self.ctx.vars.instance
return result.properties.customRules
def _set(self, value):
result = self.ctx.vars.instance
result.properties.customRules = value
return
class WebApplicationFirewallPoliciesGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"policyName", self.ctx.args.policy_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-09-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_ListHelper._build_schema_web_application_firewall_policy_read(cls._schema_on_200)
return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
_schema_application_gateway_backend_address_pool_read = None
@classmethod
def _build_schema_application_gateway_backend_address_pool_read(cls, _schema):
if cls._schema_application_gateway_backend_address_pool_read is not None:
_schema.etag = cls._schema_application_gateway_backend_address_pool_read.etag
_schema.id = cls._schema_application_gateway_backend_address_pool_read.id
_schema.name = cls._schema_application_gateway_backend_address_pool_read.name
_schema.properties = cls._schema_application_gateway_backend_address_pool_read.properties
_schema.type = cls._schema_application_gateway_backend_address_pool_read.type
return
cls._schema_application_gateway_backend_address_pool_read = _schema_application_gateway_backend_address_pool_read = AAZObjectType()
application_gateway_backend_address_pool_read = _schema_application_gateway_backend_address_pool_read
application_gateway_backend_address_pool_read.etag = AAZStrType(
flags={"read_only": True},
)
application_gateway_backend_address_pool_read.id = AAZStrType()
application_gateway_backend_address_pool_read.name = AAZStrType()
application_gateway_backend_address_pool_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
application_gateway_backend_address_pool_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_application_gateway_backend_address_pool_read.properties
properties.backend_addresses = AAZListType(
serialized_name="backendAddresses",
)
properties.backend_ip_configurations = AAZListType(
serialized_name="backendIPConfigurations",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
backend_addresses = _schema_application_gateway_backend_address_pool_read.properties.backend_addresses
backend_addresses.Element = AAZObjectType()
_element = _schema_application_gateway_backend_address_pool_read.properties.backend_addresses.Element
_element.fqdn = AAZStrType()
_element.ip_address = AAZStrType(
serialized_name="ipAddress",
)
backend_ip_configurations = _schema_application_gateway_backend_address_pool_read.properties.backend_ip_configurations
backend_ip_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_ip_configuration_read(backend_ip_configurations.Element)
_schema.etag = cls._schema_application_gateway_backend_address_pool_read.etag
_schema.id = cls._schema_application_gateway_backend_address_pool_read.id
_schema.name = cls._schema_application_gateway_backend_address_pool_read.name
_schema.properties = cls._schema_application_gateway_backend_address_pool_read.properties
_schema.type = cls._schema_application_gateway_backend_address_pool_read.type
_schema_application_gateway_custom_error_read = None
@classmethod
def _build_schema_application_gateway_custom_error_read(cls, _schema):
if cls._schema_application_gateway_custom_error_read is not None:
_schema.custom_error_page_url = cls._schema_application_gateway_custom_error_read.custom_error_page_url
_schema.status_code = cls._schema_application_gateway_custom_error_read.status_code
return
cls._schema_application_gateway_custom_error_read = _schema_application_gateway_custom_error_read = AAZObjectType()
application_gateway_custom_error_read = _schema_application_gateway_custom_error_read
application_gateway_custom_error_read.custom_error_page_url = AAZStrType(
serialized_name="customErrorPageUrl",
)
application_gateway_custom_error_read.status_code = AAZStrType(
serialized_name="statusCode",
)
_schema.custom_error_page_url = cls._schema_application_gateway_custom_error_read.custom_error_page_url
_schema.status_code = cls._schema_application_gateway_custom_error_read.status_code
_schema_application_gateway_header_configuration_read = None
@classmethod
def _build_schema_application_gateway_header_configuration_read(cls, _schema):
if cls._schema_application_gateway_header_configuration_read is not None:
_schema.header_name = cls._schema_application_gateway_header_configuration_read.header_name
_schema.header_value = cls._schema_application_gateway_header_configuration_read.header_value
return
cls._schema_application_gateway_header_configuration_read = _schema_application_gateway_header_configuration_read = AAZObjectType()
application_gateway_header_configuration_read = _schema_application_gateway_header_configuration_read
application_gateway_header_configuration_read.header_name = AAZStrType(
serialized_name="headerName",
)
application_gateway_header_configuration_read.header_value = AAZStrType(
serialized_name="headerValue",
)
_schema.header_name = cls._schema_application_gateway_header_configuration_read.header_name
_schema.header_value = cls._schema_application_gateway_header_configuration_read.header_value
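
    # Shared schema for ApplicationGatewayIPConfiguration: the subnet the
    # gateway is deployed into, plus a read-only provisioningState.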
_schema_application_gateway_ip_configuration_read = None
@classmethod
def _build_schema_application_gateway_ip_configuration_read(cls, _schema):
if cls._schema_application_gateway_ip_configuration_read is not None:
_schema.etag = cls._schema_application_gateway_ip_configuration_read.etag
_schema.id = cls._schema_application_gateway_ip_configuration_read.id
_schema.name = cls._schema_application_gateway_ip_configuration_read.name
_schema.properties = cls._schema_application_gateway_ip_configuration_read.properties
_schema.type = cls._schema_application_gateway_ip_configuration_read.type
return
cls._schema_application_gateway_ip_configuration_read = _schema_application_gateway_ip_configuration_read = AAZObjectType()
application_gateway_ip_configuration_read = _schema_application_gateway_ip_configuration_read
application_gateway_ip_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
application_gateway_ip_configuration_read.id = AAZStrType()
application_gateway_ip_configuration_read.name = AAZStrType()
application_gateway_ip_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
application_gateway_ip_configuration_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_application_gateway_ip_configuration_read.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_sub_resource_read(properties.subnet)
_schema.etag = cls._schema_application_gateway_ip_configuration_read.etag
_schema.id = cls._schema_application_gateway_ip_configuration_read.id
_schema.name = cls._schema_application_gateway_ip_configuration_read.name
_schema.properties = cls._schema_application_gateway_ip_configuration_read.properties
_schema.type = cls._schema_application_gateway_ip_configuration_read.type
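
    # Shared schema for ApplicationGatewaySslPolicy: cipher suites,
    # disabled and minimum SSL protocol versions, and a predefined policy
    # name/type.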
_schema_application_gateway_ssl_policy_read = None
@classmethod
def _build_schema_application_gateway_ssl_policy_read(cls, _schema):
if cls._schema_application_gateway_ssl_policy_read is not None:
_schema.cipher_suites = cls._schema_application_gateway_ssl_policy_read.cipher_suites
_schema.disabled_ssl_protocols = cls._schema_application_gateway_ssl_policy_read.disabled_ssl_protocols
_schema.min_protocol_version = cls._schema_application_gateway_ssl_policy_read.min_protocol_version
_schema.policy_name = cls._schema_application_gateway_ssl_policy_read.policy_name
_schema.policy_type = cls._schema_application_gateway_ssl_policy_read.policy_type
return
cls._schema_application_gateway_ssl_policy_read = _schema_application_gateway_ssl_policy_read = AAZObjectType()
application_gateway_ssl_policy_read = _schema_application_gateway_ssl_policy_read
application_gateway_ssl_policy_read.cipher_suites = AAZListType(
serialized_name="cipherSuites",
)
application_gateway_ssl_policy_read.disabled_ssl_protocols = AAZListType(
serialized_name="disabledSslProtocols",
)
application_gateway_ssl_policy_read.min_protocol_version = AAZStrType(
serialized_name="minProtocolVersion",
)
application_gateway_ssl_policy_read.policy_name = AAZStrType(
serialized_name="policyName",
)
application_gateway_ssl_policy_read.policy_type = AAZStrType(
serialized_name="policyType",
)
cipher_suites = _schema_application_gateway_ssl_policy_read.cipher_suites
cipher_suites.Element = AAZStrType()
disabled_ssl_protocols = _schema_application_gateway_ssl_policy_read.disabled_ssl_protocols
disabled_ssl_protocols.Element = AAZStrType()
_schema.cipher_suites = cls._schema_application_gateway_ssl_policy_read.cipher_suites
_schema.disabled_ssl_protocols = cls._schema_application_gateway_ssl_policy_read.disabled_ssl_protocols
_schema.min_protocol_version = cls._schema_application_gateway_ssl_policy_read.min_protocol_version
_schema.policy_name = cls._schema_application_gateway_ssl_policy_read.policy_name
_schema.policy_type = cls._schema_application_gateway_ssl_policy_read.policy_type
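
    # Shared schema for the ApplicationSecurityGroup tracked resource:
    # location and tags are writable; name, resourceGuid and
    # provisioningState are read-only.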
_schema_application_security_group_read = None
@classmethod
def _build_schema_application_security_group_read(cls, _schema):
if cls._schema_application_security_group_read is not None:
_schema.etag = cls._schema_application_security_group_read.etag
_schema.id = cls._schema_application_security_group_read.id
_schema.location = cls._schema_application_security_group_read.location
_schema.name = cls._schema_application_security_group_read.name
_schema.properties = cls._schema_application_security_group_read.properties
_schema.tags = cls._schema_application_security_group_read.tags
_schema.type = cls._schema_application_security_group_read.type
return
cls._schema_application_security_group_read = _schema_application_security_group_read = AAZObjectType()
application_security_group_read = _schema_application_security_group_read
application_security_group_read.etag = AAZStrType(
flags={"read_only": True},
)
application_security_group_read.id = AAZStrType()
application_security_group_read.location = AAZStrType()
application_security_group_read.name = AAZStrType(
flags={"read_only": True},
)
application_security_group_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
application_security_group_read.tags = AAZDictType()
application_security_group_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_application_security_group_read.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
tags = _schema_application_security_group_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_application_security_group_read.etag
_schema.id = cls._schema_application_security_group_read.id
_schema.location = cls._schema_application_security_group_read.location
_schema.name = cls._schema_application_security_group_read.name
_schema.properties = cls._schema_application_security_group_read.properties
_schema.tags = cls._schema_application_security_group_read.tags
_schema.type = cls._schema_application_security_group_read.type
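
    # Shared schema for ExtendedLocation: a plain name/type pair used to
    # place resources in extended locations such as edge zones.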
_schema_extended_location_read = None
@classmethod
def _build_schema_extended_location_read(cls, _schema):
if cls._schema_extended_location_read is not None:
_schema.name = cls._schema_extended_location_read.name
_schema.type = cls._schema_extended_location_read.type
return
cls._schema_extended_location_read = _schema_extended_location_read = AAZObjectType()
extended_location_read = _schema_extended_location_read
extended_location_read.name = AAZStrType()
extended_location_read.type = AAZStrType()
_schema.name = cls._schema_extended_location_read.name
_schema.type = cls._schema_extended_location_read.type
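
    # Shared schema for a load balancer FrontendIPConfiguration: private
    # and public IP settings and zones, plus read-only back-references to
    # the inbound NAT pools/rules and load balancing/outbound rules that
    # use it.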
_schema_frontend_ip_configuration_read = None
@classmethod
def _build_schema_frontend_ip_configuration_read(cls, _schema):
if cls._schema_frontend_ip_configuration_read is not None:
_schema.etag = cls._schema_frontend_ip_configuration_read.etag
_schema.id = cls._schema_frontend_ip_configuration_read.id
_schema.name = cls._schema_frontend_ip_configuration_read.name
_schema.properties = cls._schema_frontend_ip_configuration_read.properties
_schema.type = cls._schema_frontend_ip_configuration_read.type
_schema.zones = cls._schema_frontend_ip_configuration_read.zones
return
cls._schema_frontend_ip_configuration_read = _schema_frontend_ip_configuration_read = AAZObjectType()
frontend_ip_configuration_read = _schema_frontend_ip_configuration_read
frontend_ip_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
frontend_ip_configuration_read.id = AAZStrType()
frontend_ip_configuration_read.name = AAZStrType()
frontend_ip_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
frontend_ip_configuration_read.type = AAZStrType(
flags={"read_only": True},
)
frontend_ip_configuration_read.zones = AAZListType()
properties = _schema_frontend_ip_configuration_read.properties
properties.gateway_load_balancer = AAZObjectType(
serialized_name="gatewayLoadBalancer",
)
cls._build_schema_sub_resource_read(properties.gateway_load_balancer)
properties.inbound_nat_pools = AAZListType(
serialized_name="inboundNatPools",
flags={"read_only": True},
)
properties.inbound_nat_rules = AAZListType(
serialized_name="inboundNatRules",
flags={"read_only": True},
)
properties.load_balancing_rules = AAZListType(
serialized_name="loadBalancingRules",
flags={"read_only": True},
)
properties.outbound_rules = AAZListType(
serialized_name="outboundRules",
flags={"read_only": True},
)
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_address_version = AAZStrType(
serialized_name="privateIPAddressVersion",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address = AAZObjectType(
serialized_name="publicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.public_ip_address)
properties.public_ip_prefix = AAZObjectType(
serialized_name="publicIPPrefix",
)
cls._build_schema_sub_resource_read(properties.public_ip_prefix)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
inbound_nat_pools = _schema_frontend_ip_configuration_read.properties.inbound_nat_pools
inbound_nat_pools.Element = AAZObjectType()
cls._build_schema_sub_resource_read(inbound_nat_pools.Element)
inbound_nat_rules = _schema_frontend_ip_configuration_read.properties.inbound_nat_rules
inbound_nat_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(inbound_nat_rules.Element)
load_balancing_rules = _schema_frontend_ip_configuration_read.properties.load_balancing_rules
load_balancing_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(load_balancing_rules.Element)
outbound_rules = _schema_frontend_ip_configuration_read.properties.outbound_rules
outbound_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(outbound_rules.Element)
zones = _schema_frontend_ip_configuration_read.zones
zones.Element = AAZStrType()
_schema.etag = cls._schema_frontend_ip_configuration_read.etag
_schema.id = cls._schema_frontend_ip_configuration_read.id
_schema.name = cls._schema_frontend_ip_configuration_read.name
_schema.properties = cls._schema_frontend_ip_configuration_read.properties
_schema.type = cls._schema_frontend_ip_configuration_read.type
_schema.zones = cls._schema_frontend_ip_configuration_read.zones
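
    # Shared schema for the generic IPConfiguration type (referenced from
    # public IP addresses below): private IP settings plus public IP
    # address and subnet references.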
_schema_ip_configuration_read = None
@classmethod
def _build_schema_ip_configuration_read(cls, _schema):
if cls._schema_ip_configuration_read is not None:
_schema.etag = cls._schema_ip_configuration_read.etag
_schema.id = cls._schema_ip_configuration_read.id
_schema.name = cls._schema_ip_configuration_read.name
_schema.properties = cls._schema_ip_configuration_read.properties
return
cls._schema_ip_configuration_read = _schema_ip_configuration_read = AAZObjectType()
ip_configuration_read = _schema_ip_configuration_read
ip_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
ip_configuration_read.id = AAZStrType()
ip_configuration_read.name = AAZStrType()
ip_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = _schema_ip_configuration_read.properties
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address = AAZObjectType(
serialized_name="publicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.public_ip_address)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
_schema.etag = cls._schema_ip_configuration_read.etag
_schema.id = cls._schema_ip_configuration_read.id
_schema.name = cls._schema_ip_configuration_read.name
_schema.properties = cls._schema_ip_configuration_read.properties
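
    # Shared schema for NetworkInterfaceIPConfiguration. This type is
    # mutually recursive with the application gateway backend address pool
    # schema above and with the inbound NAT rule elements below, whose
    # backendIPConfiguration points back to this very schema; the
    # cache-before-build pattern keeps that cycle finite.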
_schema_network_interface_ip_configuration_read = None
@classmethod
def _build_schema_network_interface_ip_configuration_read(cls, _schema):
if cls._schema_network_interface_ip_configuration_read is not None:
_schema.etag = cls._schema_network_interface_ip_configuration_read.etag
_schema.id = cls._schema_network_interface_ip_configuration_read.id
_schema.name = cls._schema_network_interface_ip_configuration_read.name
_schema.properties = cls._schema_network_interface_ip_configuration_read.properties
_schema.type = cls._schema_network_interface_ip_configuration_read.type
return
cls._schema_network_interface_ip_configuration_read = _schema_network_interface_ip_configuration_read = AAZObjectType()
network_interface_ip_configuration_read = _schema_network_interface_ip_configuration_read
network_interface_ip_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
network_interface_ip_configuration_read.id = AAZStrType()
network_interface_ip_configuration_read.name = AAZStrType()
network_interface_ip_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_interface_ip_configuration_read.type = AAZStrType()
properties = _schema_network_interface_ip_configuration_read.properties
properties.application_gateway_backend_address_pools = AAZListType(
serialized_name="applicationGatewayBackendAddressPools",
)
properties.application_security_groups = AAZListType(
serialized_name="applicationSecurityGroups",
)
properties.gateway_load_balancer = AAZObjectType(
serialized_name="gatewayLoadBalancer",
)
cls._build_schema_sub_resource_read(properties.gateway_load_balancer)
properties.load_balancer_backend_address_pools = AAZListType(
serialized_name="loadBalancerBackendAddressPools",
)
properties.load_balancer_inbound_nat_rules = AAZListType(
serialized_name="loadBalancerInboundNatRules",
)
properties.primary = AAZBoolType()
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_address_version = AAZStrType(
serialized_name="privateIPAddressVersion",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.private_link_connection_properties = AAZObjectType(
serialized_name="privateLinkConnectionProperties",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address = AAZObjectType(
serialized_name="publicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.public_ip_address)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
properties.virtual_network_taps = AAZListType(
serialized_name="virtualNetworkTaps",
)
application_gateway_backend_address_pools = _schema_network_interface_ip_configuration_read.properties.application_gateway_backend_address_pools
application_gateway_backend_address_pools.Element = AAZObjectType()
cls._build_schema_application_gateway_backend_address_pool_read(application_gateway_backend_address_pools.Element)
application_security_groups = _schema_network_interface_ip_configuration_read.properties.application_security_groups
application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(application_security_groups.Element)
load_balancer_backend_address_pools = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools
load_balancer_backend_address_pools.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties
properties.backend_ip_configurations = AAZListType(
serialized_name="backendIPConfigurations",
flags={"read_only": True},
)
properties.drain_period_in_seconds = AAZIntType(
serialized_name="drainPeriodInSeconds",
)
properties.inbound_nat_rules = AAZListType(
serialized_name="inboundNatRules",
flags={"read_only": True},
)
properties.load_balancer_backend_addresses = AAZListType(
serialized_name="loadBalancerBackendAddresses",
)
properties.load_balancing_rules = AAZListType(
serialized_name="loadBalancingRules",
flags={"read_only": True},
)
properties.location = AAZStrType()
properties.outbound_rule = AAZObjectType(
serialized_name="outboundRule",
)
cls._build_schema_sub_resource_read(properties.outbound_rule)
properties.outbound_rules = AAZListType(
serialized_name="outboundRules",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.tunnel_interfaces = AAZListType(
serialized_name="tunnelInterfaces",
)
properties.virtual_network = AAZObjectType(
serialized_name="virtualNetwork",
)
cls._build_schema_sub_resource_read(properties.virtual_network)
backend_ip_configurations = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.backend_ip_configurations
backend_ip_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_ip_configuration_read(backend_ip_configurations.Element)
inbound_nat_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.inbound_nat_rules
inbound_nat_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(inbound_nat_rules.Element)
load_balancer_backend_addresses = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses
load_balancer_backend_addresses.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties
properties.admin_state = AAZStrType(
serialized_name="adminState",
)
properties.inbound_nat_rules_port_mapping = AAZListType(
serialized_name="inboundNatRulesPortMapping",
flags={"read_only": True},
)
properties.ip_address = AAZStrType(
serialized_name="ipAddress",
)
properties.load_balancer_frontend_ip_configuration = AAZObjectType(
serialized_name="loadBalancerFrontendIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.load_balancer_frontend_ip_configuration)
properties.network_interface_ip_configuration = AAZObjectType(
serialized_name="networkInterfaceIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.network_interface_ip_configuration)
properties.subnet = AAZObjectType()
cls._build_schema_sub_resource_read(properties.subnet)
properties.virtual_network = AAZObjectType(
serialized_name="virtualNetwork",
)
cls._build_schema_sub_resource_read(properties.virtual_network)
inbound_nat_rules_port_mapping = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties.inbound_nat_rules_port_mapping
inbound_nat_rules_port_mapping.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancer_backend_addresses.Element.properties.inbound_nat_rules_port_mapping.Element
_element.backend_port = AAZIntType(
serialized_name="backendPort",
)
_element.frontend_port = AAZIntType(
serialized_name="frontendPort",
)
_element.inbound_nat_rule_name = AAZStrType(
serialized_name="inboundNatRuleName",
)
load_balancing_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.load_balancing_rules
load_balancing_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(load_balancing_rules.Element)
outbound_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.outbound_rules
outbound_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(outbound_rules.Element)
tunnel_interfaces = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.tunnel_interfaces
tunnel_interfaces.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_backend_address_pools.Element.properties.tunnel_interfaces.Element
_element.identifier = AAZIntType()
_element.port = AAZIntType()
_element.protocol = AAZStrType()
_element.type = AAZStrType()
load_balancer_inbound_nat_rules = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules
load_balancer_inbound_nat_rules.Element = AAZObjectType()
_element = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_ip_configuration_read.properties.load_balancer_inbound_nat_rules.Element.properties
properties.backend_address_pool = AAZObjectType(
serialized_name="backendAddressPool",
)
cls._build_schema_sub_resource_read(properties.backend_address_pool)
properties.backend_ip_configuration = AAZObjectType(
serialized_name="backendIPConfiguration",
)
cls._build_schema_network_interface_ip_configuration_read(properties.backend_ip_configuration)
properties.backend_port = AAZIntType(
serialized_name="backendPort",
)
properties.enable_floating_ip = AAZBoolType(
serialized_name="enableFloatingIP",
)
properties.enable_tcp_reset = AAZBoolType(
serialized_name="enableTcpReset",
)
properties.frontend_ip_configuration = AAZObjectType(
serialized_name="frontendIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.frontend_ip_configuration)
properties.frontend_port = AAZIntType(
serialized_name="frontendPort",
)
properties.frontend_port_range_end = AAZIntType(
serialized_name="frontendPortRangeEnd",
)
properties.frontend_port_range_start = AAZIntType(
serialized_name="frontendPortRangeStart",
)
properties.idle_timeout_in_minutes = AAZIntType(
serialized_name="idleTimeoutInMinutes",
)
properties.protocol = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
private_link_connection_properties = _schema_network_interface_ip_configuration_read.properties.private_link_connection_properties
private_link_connection_properties.fqdns = AAZListType(
flags={"read_only": True},
)
private_link_connection_properties.group_id = AAZStrType(
serialized_name="groupId",
flags={"read_only": True},
)
private_link_connection_properties.required_member_name = AAZStrType(
serialized_name="requiredMemberName",
flags={"read_only": True},
)
fqdns = _schema_network_interface_ip_configuration_read.properties.private_link_connection_properties.fqdns
fqdns.Element = AAZStrType()
virtual_network_taps = _schema_network_interface_ip_configuration_read.properties.virtual_network_taps
virtual_network_taps.Element = AAZObjectType()
cls._build_schema_virtual_network_tap_read(virtual_network_taps.Element)
_schema.etag = cls._schema_network_interface_ip_configuration_read.etag
_schema.id = cls._schema_network_interface_ip_configuration_read.id
_schema.name = cls._schema_network_interface_ip_configuration_read.name
_schema.properties = cls._schema_network_interface_ip_configuration_read.properties
_schema.type = cls._schema_network_interface_ip_configuration_read.type
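
    # Shared schema for NetworkInterfaceTapConfiguration: a reference to
    # the VirtualNetworkTap that mirrors this NIC's traffic.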
_schema_network_interface_tap_configuration_read = None
@classmethod
def _build_schema_network_interface_tap_configuration_read(cls, _schema):
if cls._schema_network_interface_tap_configuration_read is not None:
_schema.etag = cls._schema_network_interface_tap_configuration_read.etag
_schema.id = cls._schema_network_interface_tap_configuration_read.id
_schema.name = cls._schema_network_interface_tap_configuration_read.name
_schema.properties = cls._schema_network_interface_tap_configuration_read.properties
_schema.type = cls._schema_network_interface_tap_configuration_read.type
return
cls._schema_network_interface_tap_configuration_read = _schema_network_interface_tap_configuration_read = AAZObjectType()
network_interface_tap_configuration_read = _schema_network_interface_tap_configuration_read
network_interface_tap_configuration_read.etag = AAZStrType(
flags={"read_only": True},
)
network_interface_tap_configuration_read.id = AAZStrType()
network_interface_tap_configuration_read.name = AAZStrType()
network_interface_tap_configuration_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_interface_tap_configuration_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_tap_configuration_read.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.virtual_network_tap = AAZObjectType(
serialized_name="virtualNetworkTap",
)
cls._build_schema_virtual_network_tap_read(properties.virtual_network_tap)
_schema.etag = cls._schema_network_interface_tap_configuration_read.etag
_schema.id = cls._schema_network_interface_tap_configuration_read.id
_schema.name = cls._schema_network_interface_tap_configuration_read.name
_schema.properties = cls._schema_network_interface_tap_configuration_read.properties
_schema.type = cls._schema_network_interface_tap_configuration_read.type
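
    # Shared schema for the full NetworkInterface resource: DNS settings,
    # IP configurations, tap configurations and the embedded
    # PrivateLinkService. Note the recursion: the elements of
    # privateLinkService.networkInterfaces are themselves NetworkInterface
    # schemas.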
_schema_network_interface_read = None
@classmethod
def _build_schema_network_interface_read(cls, _schema):
if cls._schema_network_interface_read is not None:
_schema.etag = cls._schema_network_interface_read.etag
_schema.extended_location = cls._schema_network_interface_read.extended_location
_schema.id = cls._schema_network_interface_read.id
_schema.location = cls._schema_network_interface_read.location
_schema.name = cls._schema_network_interface_read.name
_schema.properties = cls._schema_network_interface_read.properties
_schema.tags = cls._schema_network_interface_read.tags
_schema.type = cls._schema_network_interface_read.type
return
cls._schema_network_interface_read = _schema_network_interface_read = AAZObjectType()
network_interface_read = _schema_network_interface_read
network_interface_read.etag = AAZStrType(
flags={"read_only": True},
)
network_interface_read.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
cls._build_schema_extended_location_read(network_interface_read.extended_location)
network_interface_read.id = AAZStrType()
network_interface_read.location = AAZStrType()
network_interface_read.name = AAZStrType(
flags={"read_only": True},
)
network_interface_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_interface_read.tags = AAZDictType()
network_interface_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_read.properties
properties.auxiliary_mode = AAZStrType(
serialized_name="auxiliaryMode",
)
properties.disable_tcp_state_tracking = AAZBoolType(
serialized_name="disableTcpStateTracking",
)
properties.dns_settings = AAZObjectType(
serialized_name="dnsSettings",
)
properties.dscp_configuration = AAZObjectType(
serialized_name="dscpConfiguration",
)
cls._build_schema_sub_resource_read(properties.dscp_configuration)
properties.enable_accelerated_networking = AAZBoolType(
serialized_name="enableAcceleratedNetworking",
)
properties.enable_ip_forwarding = AAZBoolType(
serialized_name="enableIPForwarding",
)
properties.hosted_workloads = AAZListType(
serialized_name="hostedWorkloads",
flags={"read_only": True},
)
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
)
properties.mac_address = AAZStrType(
serialized_name="macAddress",
flags={"read_only": True},
)
properties.migration_phase = AAZStrType(
serialized_name="migrationPhase",
)
properties.network_security_group = AAZObjectType(
serialized_name="networkSecurityGroup",
)
cls._build_schema_network_security_group_read(properties.network_security_group)
properties.nic_type = AAZStrType(
serialized_name="nicType",
)
properties.primary = AAZBoolType(
flags={"read_only": True},
)
properties.private_endpoint = AAZObjectType(
serialized_name="privateEndpoint",
)
cls._build_schema_private_endpoint_read(properties.private_endpoint)
properties.private_link_service = AAZObjectType(
serialized_name="privateLinkService",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.tap_configurations = AAZListType(
serialized_name="tapConfigurations",
flags={"read_only": True},
)
properties.virtual_machine = AAZObjectType(
serialized_name="virtualMachine",
)
cls._build_schema_sub_resource_read(properties.virtual_machine)
properties.vnet_encryption_supported = AAZBoolType(
serialized_name="vnetEncryptionSupported",
flags={"read_only": True},
)
properties.workload_type = AAZStrType(
serialized_name="workloadType",
)
dns_settings = _schema_network_interface_read.properties.dns_settings
dns_settings.applied_dns_servers = AAZListType(
serialized_name="appliedDnsServers",
flags={"read_only": True},
)
dns_settings.dns_servers = AAZListType(
serialized_name="dnsServers",
)
dns_settings.internal_dns_name_label = AAZStrType(
serialized_name="internalDnsNameLabel",
)
dns_settings.internal_domain_name_suffix = AAZStrType(
serialized_name="internalDomainNameSuffix",
flags={"read_only": True},
)
dns_settings.internal_fqdn = AAZStrType(
serialized_name="internalFqdn",
flags={"read_only": True},
)
applied_dns_servers = _schema_network_interface_read.properties.dns_settings.applied_dns_servers
applied_dns_servers.Element = AAZStrType()
dns_servers = _schema_network_interface_read.properties.dns_settings.dns_servers
dns_servers.Element = AAZStrType()
hosted_workloads = _schema_network_interface_read.properties.hosted_workloads
hosted_workloads.Element = AAZStrType()
ip_configurations = _schema_network_interface_read.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_ip_configuration_read(ip_configurations.Element)
private_link_service = _schema_network_interface_read.properties.private_link_service
private_link_service.etag = AAZStrType(
flags={"read_only": True},
)
private_link_service.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
cls._build_schema_extended_location_read(private_link_service.extended_location)
private_link_service.id = AAZStrType()
private_link_service.location = AAZStrType()
private_link_service.name = AAZStrType(
flags={"read_only": True},
)
private_link_service.properties = AAZObjectType(
flags={"client_flatten": True},
)
private_link_service.tags = AAZDictType()
private_link_service.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_read.properties.private_link_service.properties
properties.alias = AAZStrType(
flags={"read_only": True},
)
properties.auto_approval = AAZObjectType(
serialized_name="autoApproval",
)
properties.enable_proxy_protocol = AAZBoolType(
serialized_name="enableProxyProtocol",
)
properties.fqdns = AAZListType()
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
)
properties.load_balancer_frontend_ip_configurations = AAZListType(
serialized_name="loadBalancerFrontendIpConfigurations",
)
properties.network_interfaces = AAZListType(
serialized_name="networkInterfaces",
flags={"read_only": True},
)
properties.private_endpoint_connections = AAZListType(
serialized_name="privateEndpointConnections",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.visibility = AAZObjectType()
auto_approval = _schema_network_interface_read.properties.private_link_service.properties.auto_approval
auto_approval.subscriptions = AAZListType()
subscriptions = _schema_network_interface_read.properties.private_link_service.properties.auto_approval.subscriptions
subscriptions.Element = AAZStrType()
fqdns = _schema_network_interface_read.properties.private_link_service.properties.fqdns
fqdns.Element = AAZStrType()
ip_configurations = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
_element = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_read.properties.private_link_service.properties.ip_configurations.Element.properties
properties.primary = AAZBoolType()
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_address_version = AAZStrType(
serialized_name="privateIPAddressVersion",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
load_balancer_frontend_ip_configurations = _schema_network_interface_read.properties.private_link_service.properties.load_balancer_frontend_ip_configurations
load_balancer_frontend_ip_configurations.Element = AAZObjectType()
cls._build_schema_frontend_ip_configuration_read(load_balancer_frontend_ip_configurations.Element)
network_interfaces = _schema_network_interface_read.properties.private_link_service.properties.network_interfaces
network_interfaces.Element = AAZObjectType()
cls._build_schema_network_interface_read(network_interfaces.Element)
private_endpoint_connections = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections
private_endpoint_connections.Element = AAZObjectType()
_element = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_interface_read.properties.private_link_service.properties.private_endpoint_connections.Element.properties
properties.link_identifier = AAZStrType(
serialized_name="linkIdentifier",
flags={"read_only": True},
)
properties.private_endpoint = AAZObjectType(
serialized_name="privateEndpoint",
)
cls._build_schema_private_endpoint_read(properties.private_endpoint)
properties.private_link_service_connection_state = AAZObjectType(
serialized_name="privateLinkServiceConnectionState",
)
cls._build_schema_private_link_service_connection_state_read(properties.private_link_service_connection_state)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
visibility = _schema_network_interface_read.properties.private_link_service.properties.visibility
visibility.subscriptions = AAZListType()
subscriptions = _schema_network_interface_read.properties.private_link_service.properties.visibility.subscriptions
subscriptions.Element = AAZStrType()
tags = _schema_network_interface_read.properties.private_link_service.tags
tags.Element = AAZStrType()
tap_configurations = _schema_network_interface_read.properties.tap_configurations
tap_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_tap_configuration_read(tap_configurations.Element)
tags = _schema_network_interface_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_network_interface_read.etag
_schema.extended_location = cls._schema_network_interface_read.extended_location
_schema.id = cls._schema_network_interface_read.id
_schema.location = cls._schema_network_interface_read.location
_schema.name = cls._schema_network_interface_read.name
_schema.properties = cls._schema_network_interface_read.properties
_schema.tags = cls._schema_network_interface_read.tags
_schema.type = cls._schema_network_interface_read.type
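
    # Shared schema for NetworkSecurityGroup: user-defined securityRules
    # are writable, while defaultSecurityRules, flowLogs (including their
    # traffic analytics configuration), networkInterfaces and subnets are
    # read-only.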
_schema_network_security_group_read = None
@classmethod
def _build_schema_network_security_group_read(cls, _schema):
if cls._schema_network_security_group_read is not None:
_schema.etag = cls._schema_network_security_group_read.etag
_schema.id = cls._schema_network_security_group_read.id
_schema.location = cls._schema_network_security_group_read.location
_schema.name = cls._schema_network_security_group_read.name
_schema.properties = cls._schema_network_security_group_read.properties
_schema.tags = cls._schema_network_security_group_read.tags
_schema.type = cls._schema_network_security_group_read.type
return
cls._schema_network_security_group_read = _schema_network_security_group_read = AAZObjectType()
network_security_group_read = _schema_network_security_group_read
network_security_group_read.etag = AAZStrType(
flags={"read_only": True},
)
network_security_group_read.id = AAZStrType()
network_security_group_read.location = AAZStrType()
network_security_group_read.name = AAZStrType(
flags={"read_only": True},
)
network_security_group_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
network_security_group_read.tags = AAZDictType()
network_security_group_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_security_group_read.properties
properties.default_security_rules = AAZListType(
serialized_name="defaultSecurityRules",
flags={"read_only": True},
)
properties.flow_logs = AAZListType(
serialized_name="flowLogs",
flags={"read_only": True},
)
properties.flush_connection = AAZBoolType(
serialized_name="flushConnection",
)
properties.network_interfaces = AAZListType(
serialized_name="networkInterfaces",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.security_rules = AAZListType(
serialized_name="securityRules",
)
properties.subnets = AAZListType(
flags={"read_only": True},
)
default_security_rules = _schema_network_security_group_read.properties.default_security_rules
default_security_rules.Element = AAZObjectType()
cls._build_schema_security_rule_read(default_security_rules.Element)
flow_logs = _schema_network_security_group_read.properties.flow_logs
flow_logs.Element = AAZObjectType()
_element = _schema_network_security_group_read.properties.flow_logs.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.location = AAZStrType()
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_network_security_group_read.properties.flow_logs.Element.properties
properties.enabled = AAZBoolType()
properties.flow_analytics_configuration = AAZObjectType(
serialized_name="flowAnalyticsConfiguration",
)
properties.format = AAZObjectType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.retention_policy = AAZObjectType(
serialized_name="retentionPolicy",
)
properties.storage_id = AAZStrType(
serialized_name="storageId",
flags={"required": True},
)
properties.target_resource_guid = AAZStrType(
serialized_name="targetResourceGuid",
flags={"read_only": True},
)
properties.target_resource_id = AAZStrType(
serialized_name="targetResourceId",
flags={"required": True},
)
flow_analytics_configuration = _schema_network_security_group_read.properties.flow_logs.Element.properties.flow_analytics_configuration
flow_analytics_configuration.network_watcher_flow_analytics_configuration = AAZObjectType(
serialized_name="networkWatcherFlowAnalyticsConfiguration",
)
network_watcher_flow_analytics_configuration = _schema_network_security_group_read.properties.flow_logs.Element.properties.flow_analytics_configuration.network_watcher_flow_analytics_configuration
network_watcher_flow_analytics_configuration.enabled = AAZBoolType()
network_watcher_flow_analytics_configuration.traffic_analytics_interval = AAZIntType(
serialized_name="trafficAnalyticsInterval",
)
network_watcher_flow_analytics_configuration.workspace_id = AAZStrType(
serialized_name="workspaceId",
)
network_watcher_flow_analytics_configuration.workspace_region = AAZStrType(
serialized_name="workspaceRegion",
)
network_watcher_flow_analytics_configuration.workspace_resource_id = AAZStrType(
serialized_name="workspaceResourceId",
)
format = _schema_network_security_group_read.properties.flow_logs.Element.properties.format
format.type = AAZStrType()
format.version = AAZIntType()
retention_policy = _schema_network_security_group_read.properties.flow_logs.Element.properties.retention_policy
retention_policy.days = AAZIntType()
retention_policy.enabled = AAZBoolType()
tags = _schema_network_security_group_read.properties.flow_logs.Element.tags
tags.Element = AAZStrType()
network_interfaces = _schema_network_security_group_read.properties.network_interfaces
network_interfaces.Element = AAZObjectType()
cls._build_schema_network_interface_read(network_interfaces.Element)
security_rules = _schema_network_security_group_read.properties.security_rules
security_rules.Element = AAZObjectType()
cls._build_schema_security_rule_read(security_rules.Element)
subnets = _schema_network_security_group_read.properties.subnets
subnets.Element = AAZObjectType()
cls._build_schema_subnet_read(subnets.Element)
tags = _schema_network_security_group_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_network_security_group_read.etag
_schema.id = cls._schema_network_security_group_read.id
_schema.location = cls._schema_network_security_group_read.location
_schema.name = cls._schema_network_security_group_read.name
_schema.properties = cls._schema_network_security_group_read.properties
_schema.tags = cls._schema_network_security_group_read.tags
_schema.type = cls._schema_network_security_group_read.type
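
    # Shared schema for PrivateEndpoint: custom DNS configs, IP
    # configurations, and the manual as well as the regular private link
    # service connections.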
_schema_private_endpoint_read = None
@classmethod
def _build_schema_private_endpoint_read(cls, _schema):
if cls._schema_private_endpoint_read is not None:
_schema.etag = cls._schema_private_endpoint_read.etag
_schema.extended_location = cls._schema_private_endpoint_read.extended_location
_schema.id = cls._schema_private_endpoint_read.id
_schema.location = cls._schema_private_endpoint_read.location
_schema.name = cls._schema_private_endpoint_read.name
_schema.properties = cls._schema_private_endpoint_read.properties
_schema.tags = cls._schema_private_endpoint_read.tags
_schema.type = cls._schema_private_endpoint_read.type
return
cls._schema_private_endpoint_read = _schema_private_endpoint_read = AAZObjectType()
private_endpoint_read = _schema_private_endpoint_read
private_endpoint_read.etag = AAZStrType(
flags={"read_only": True},
)
private_endpoint_read.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
cls._build_schema_extended_location_read(private_endpoint_read.extended_location)
private_endpoint_read.id = AAZStrType()
private_endpoint_read.location = AAZStrType()
private_endpoint_read.name = AAZStrType(
flags={"read_only": True},
)
private_endpoint_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
private_endpoint_read.tags = AAZDictType()
private_endpoint_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_private_endpoint_read.properties
properties.application_security_groups = AAZListType(
serialized_name="applicationSecurityGroups",
)
properties.custom_dns_configs = AAZListType(
serialized_name="customDnsConfigs",
)
properties.custom_network_interface_name = AAZStrType(
serialized_name="customNetworkInterfaceName",
)
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
)
properties.manual_private_link_service_connections = AAZListType(
serialized_name="manualPrivateLinkServiceConnections",
)
properties.network_interfaces = AAZListType(
serialized_name="networkInterfaces",
flags={"read_only": True},
)
properties.private_link_service_connections = AAZListType(
serialized_name="privateLinkServiceConnections",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
application_security_groups = _schema_private_endpoint_read.properties.application_security_groups
application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(application_security_groups.Element)
custom_dns_configs = _schema_private_endpoint_read.properties.custom_dns_configs
custom_dns_configs.Element = AAZObjectType()
_element = _schema_private_endpoint_read.properties.custom_dns_configs.Element
_element.fqdn = AAZStrType()
_element.ip_addresses = AAZListType(
serialized_name="ipAddresses",
)
ip_addresses = _schema_private_endpoint_read.properties.custom_dns_configs.Element.ip_addresses
ip_addresses.Element = AAZStrType()
ip_configurations = _schema_private_endpoint_read.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
_element = _schema_private_endpoint_read.properties.ip_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_private_endpoint_read.properties.ip_configurations.Element.properties
properties.group_id = AAZStrType(
serialized_name="groupId",
)
properties.member_name = AAZStrType(
serialized_name="memberName",
)
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
manual_private_link_service_connections = _schema_private_endpoint_read.properties.manual_private_link_service_connections
manual_private_link_service_connections.Element = AAZObjectType()
cls._build_schema_private_link_service_connection_read(manual_private_link_service_connections.Element)
network_interfaces = _schema_private_endpoint_read.properties.network_interfaces
network_interfaces.Element = AAZObjectType()
cls._build_schema_network_interface_read(network_interfaces.Element)
private_link_service_connections = _schema_private_endpoint_read.properties.private_link_service_connections
private_link_service_connections.Element = AAZObjectType()
cls._build_schema_private_link_service_connection_read(private_link_service_connections.Element)
tags = _schema_private_endpoint_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_private_endpoint_read.etag
_schema.extended_location = cls._schema_private_endpoint_read.extended_location
_schema.id = cls._schema_private_endpoint_read.id
_schema.location = cls._schema_private_endpoint_read.location
_schema.name = cls._schema_private_endpoint_read.name
_schema.properties = cls._schema_private_endpoint_read.properties
_schema.tags = cls._schema_private_endpoint_read.tags
_schema.type = cls._schema_private_endpoint_read.type
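
    # Shared schema for PrivateLinkServiceConnectionState: the
    # status/description/actionsRequired triple describing a connection's
    # approval state.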
_schema_private_link_service_connection_state_read = None
@classmethod
def _build_schema_private_link_service_connection_state_read(cls, _schema):
if cls._schema_private_link_service_connection_state_read is not None:
_schema.actions_required = cls._schema_private_link_service_connection_state_read.actions_required
_schema.description = cls._schema_private_link_service_connection_state_read.description
_schema.status = cls._schema_private_link_service_connection_state_read.status
return
cls._schema_private_link_service_connection_state_read = _schema_private_link_service_connection_state_read = AAZObjectType()
private_link_service_connection_state_read = _schema_private_link_service_connection_state_read
private_link_service_connection_state_read.actions_required = AAZStrType(
serialized_name="actionsRequired",
)
private_link_service_connection_state_read.description = AAZStrType()
private_link_service_connection_state_read.status = AAZStrType()
_schema.actions_required = cls._schema_private_link_service_connection_state_read.actions_required
_schema.description = cls._schema_private_link_service_connection_state_read.description
_schema.status = cls._schema_private_link_service_connection_state_read.status
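
    # Shared schema for PrivateLinkServiceConnection: target
    # privateLinkServiceId, group ids, request message and the connection
    # state defined above.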
_schema_private_link_service_connection_read = None
@classmethod
def _build_schema_private_link_service_connection_read(cls, _schema):
if cls._schema_private_link_service_connection_read is not None:
_schema.etag = cls._schema_private_link_service_connection_read.etag
_schema.id = cls._schema_private_link_service_connection_read.id
_schema.name = cls._schema_private_link_service_connection_read.name
_schema.properties = cls._schema_private_link_service_connection_read.properties
_schema.type = cls._schema_private_link_service_connection_read.type
return
cls._schema_private_link_service_connection_read = _schema_private_link_service_connection_read = AAZObjectType()
private_link_service_connection_read = _schema_private_link_service_connection_read
private_link_service_connection_read.etag = AAZStrType(
flags={"read_only": True},
)
private_link_service_connection_read.id = AAZStrType()
private_link_service_connection_read.name = AAZStrType()
private_link_service_connection_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
private_link_service_connection_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_private_link_service_connection_read.properties
properties.group_ids = AAZListType(
serialized_name="groupIds",
)
properties.private_link_service_connection_state = AAZObjectType(
serialized_name="privateLinkServiceConnectionState",
)
cls._build_schema_private_link_service_connection_state_read(properties.private_link_service_connection_state)
properties.private_link_service_id = AAZStrType(
serialized_name="privateLinkServiceId",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.request_message = AAZStrType(
serialized_name="requestMessage",
)
group_ids = _schema_private_link_service_connection_read.properties.group_ids
group_ids.Element = AAZStrType()
_schema.etag = cls._schema_private_link_service_connection_read.etag
_schema.id = cls._schema_private_link_service_connection_read.id
_schema.name = cls._schema_private_link_service_connection_read.name
_schema.properties = cls._schema_private_link_service_connection_read.properties
_schema.type = cls._schema_private_link_service_connection_read.type
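
    # Shared schema for the full PublicIPAddress resource. It is
    # self-referential (linkedPublicIPAddress and servicePublicIPAddress
    # are themselves PublicIPAddress schemas) and embeds a NAT gateway
    # sub-schema with its own SKU, tags and zones.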
_schema_public_ip_address_read = None
@classmethod
def _build_schema_public_ip_address_read(cls, _schema):
if cls._schema_public_ip_address_read is not None:
_schema.etag = cls._schema_public_ip_address_read.etag
_schema.extended_location = cls._schema_public_ip_address_read.extended_location
_schema.id = cls._schema_public_ip_address_read.id
_schema.location = cls._schema_public_ip_address_read.location
_schema.name = cls._schema_public_ip_address_read.name
_schema.properties = cls._schema_public_ip_address_read.properties
_schema.sku = cls._schema_public_ip_address_read.sku
_schema.tags = cls._schema_public_ip_address_read.tags
_schema.type = cls._schema_public_ip_address_read.type
_schema.zones = cls._schema_public_ip_address_read.zones
return
cls._schema_public_ip_address_read = _schema_public_ip_address_read = AAZObjectType()
public_ip_address_read = _schema_public_ip_address_read
public_ip_address_read.etag = AAZStrType(
flags={"read_only": True},
)
public_ip_address_read.extended_location = AAZObjectType(
serialized_name="extendedLocation",
)
cls._build_schema_extended_location_read(public_ip_address_read.extended_location)
public_ip_address_read.id = AAZStrType()
public_ip_address_read.location = AAZStrType()
public_ip_address_read.name = AAZStrType(
flags={"read_only": True},
)
public_ip_address_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
public_ip_address_read.sku = AAZObjectType()
public_ip_address_read.tags = AAZDictType()
public_ip_address_read.type = AAZStrType(
flags={"read_only": True},
)
public_ip_address_read.zones = AAZListType()
properties = _schema_public_ip_address_read.properties
properties.ddos_settings = AAZObjectType(
serialized_name="ddosSettings",
)
properties.delete_option = AAZStrType(
serialized_name="deleteOption",
)
properties.dns_settings = AAZObjectType(
serialized_name="dnsSettings",
)
properties.idle_timeout_in_minutes = AAZIntType(
serialized_name="idleTimeoutInMinutes",
)
properties.ip_address = AAZStrType(
serialized_name="ipAddress",
)
properties.ip_configuration = AAZObjectType(
serialized_name="ipConfiguration",
)
cls._build_schema_ip_configuration_read(properties.ip_configuration)
properties.ip_tags = AAZListType(
serialized_name="ipTags",
)
properties.linked_public_ip_address = AAZObjectType(
serialized_name="linkedPublicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.linked_public_ip_address)
properties.migration_phase = AAZStrType(
serialized_name="migrationPhase",
)
properties.nat_gateway = AAZObjectType(
serialized_name="natGateway",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address_version = AAZStrType(
serialized_name="publicIPAddressVersion",
)
properties.public_ip_allocation_method = AAZStrType(
serialized_name="publicIPAllocationMethod",
)
properties.public_ip_prefix = AAZObjectType(
serialized_name="publicIPPrefix",
)
cls._build_schema_sub_resource_read(properties.public_ip_prefix)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.service_public_ip_address = AAZObjectType(
serialized_name="servicePublicIPAddress",
)
cls._build_schema_public_ip_address_read(properties.service_public_ip_address)
ddos_settings = _schema_public_ip_address_read.properties.ddos_settings
ddos_settings.ddos_protection_plan = AAZObjectType(
serialized_name="ddosProtectionPlan",
)
cls._build_schema_sub_resource_read(ddos_settings.ddos_protection_plan)
ddos_settings.protection_mode = AAZStrType(
serialized_name="protectionMode",
)
dns_settings = _schema_public_ip_address_read.properties.dns_settings
dns_settings.domain_name_label = AAZStrType(
serialized_name="domainNameLabel",
)
dns_settings.fqdn = AAZStrType()
dns_settings.reverse_fqdn = AAZStrType(
serialized_name="reverseFqdn",
)
ip_tags = _schema_public_ip_address_read.properties.ip_tags
ip_tags.Element = AAZObjectType()
_element = _schema_public_ip_address_read.properties.ip_tags.Element
_element.ip_tag_type = AAZStrType(
serialized_name="ipTagType",
)
_element.tag = AAZStrType()
nat_gateway = _schema_public_ip_address_read.properties.nat_gateway
nat_gateway.etag = AAZStrType(
flags={"read_only": True},
)
nat_gateway.id = AAZStrType()
nat_gateway.location = AAZStrType()
nat_gateway.name = AAZStrType(
flags={"read_only": True},
)
nat_gateway.properties = AAZObjectType(
flags={"client_flatten": True},
)
nat_gateway.sku = AAZObjectType()
nat_gateway.tags = AAZDictType()
nat_gateway.type = AAZStrType(
flags={"read_only": True},
)
nat_gateway.zones = AAZListType()
properties = _schema_public_ip_address_read.properties.nat_gateway.properties
properties.idle_timeout_in_minutes = AAZIntType(
serialized_name="idleTimeoutInMinutes",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_addresses = AAZListType(
serialized_name="publicIpAddresses",
)
properties.public_ip_prefixes = AAZListType(
serialized_name="publicIpPrefixes",
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.subnets = AAZListType(
flags={"read_only": True},
)
public_ip_addresses = _schema_public_ip_address_read.properties.nat_gateway.properties.public_ip_addresses
public_ip_addresses.Element = AAZObjectType()
cls._build_schema_sub_resource_read(public_ip_addresses.Element)
public_ip_prefixes = _schema_public_ip_address_read.properties.nat_gateway.properties.public_ip_prefixes
public_ip_prefixes.Element = AAZObjectType()
cls._build_schema_sub_resource_read(public_ip_prefixes.Element)
subnets = _schema_public_ip_address_read.properties.nat_gateway.properties.subnets
subnets.Element = AAZObjectType()
cls._build_schema_sub_resource_read(subnets.Element)
sku = _schema_public_ip_address_read.properties.nat_gateway.sku
sku.name = AAZStrType()
tags = _schema_public_ip_address_read.properties.nat_gateway.tags
tags.Element = AAZStrType()
zones = _schema_public_ip_address_read.properties.nat_gateway.zones
zones.Element = AAZStrType()
sku = _schema_public_ip_address_read.sku
sku.name = AAZStrType()
sku.tier = AAZStrType()
tags = _schema_public_ip_address_read.tags
tags.Element = AAZStrType()
zones = _schema_public_ip_address_read.zones
zones.Element = AAZStrType()

        _schema.etag = cls._schema_public_ip_address_read.etag
_schema.extended_location = cls._schema_public_ip_address_read.extended_location
_schema.id = cls._schema_public_ip_address_read.id
_schema.location = cls._schema_public_ip_address_read.location
_schema.name = cls._schema_public_ip_address_read.name
_schema.properties = cls._schema_public_ip_address_read.properties
_schema.sku = cls._schema_public_ip_address_read.sku
_schema.tags = cls._schema_public_ip_address_read.tags
_schema.type = cls._schema_public_ip_address_read.type
_schema.zones = cls._schema_public_ip_address_read.zones

    _schema_security_rule_read = None

    @classmethod
def _build_schema_security_rule_read(cls, _schema):
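        # Build the shared read schema for a network security rule once, cache it
        # on the class, and copy its top-level fields onto the caller's _schema.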
if cls._schema_security_rule_read is not None:
_schema.etag = cls._schema_security_rule_read.etag
_schema.id = cls._schema_security_rule_read.id
_schema.name = cls._schema_security_rule_read.name
_schema.properties = cls._schema_security_rule_read.properties
_schema.type = cls._schema_security_rule_read.type
            return

        cls._schema_security_rule_read = _schema_security_rule_read = AAZObjectType()

        security_rule_read = _schema_security_rule_read
security_rule_read.etag = AAZStrType(
flags={"read_only": True},
)
security_rule_read.id = AAZStrType()
security_rule_read.name = AAZStrType()
security_rule_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
security_rule_read.type = AAZStrType()
properties = _schema_security_rule_read.properties
properties.access = AAZStrType(
flags={"required": True},
)
properties.description = AAZStrType()
properties.destination_address_prefix = AAZStrType(
serialized_name="destinationAddressPrefix",
)
properties.destination_address_prefixes = AAZListType(
serialized_name="destinationAddressPrefixes",
)
properties.destination_application_security_groups = AAZListType(
serialized_name="destinationApplicationSecurityGroups",
)
properties.destination_port_range = AAZStrType(
serialized_name="destinationPortRange",
)
properties.destination_port_ranges = AAZListType(
serialized_name="destinationPortRanges",
)
properties.direction = AAZStrType(
flags={"required": True},
)
properties.priority = AAZIntType(
flags={"required": True},
)
properties.protocol = AAZStrType(
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.source_address_prefix = AAZStrType(
serialized_name="sourceAddressPrefix",
)
properties.source_address_prefixes = AAZListType(
serialized_name="sourceAddressPrefixes",
)
properties.source_application_security_groups = AAZListType(
serialized_name="sourceApplicationSecurityGroups",
)
properties.source_port_range = AAZStrType(
serialized_name="sourcePortRange",
)
properties.source_port_ranges = AAZListType(
serialized_name="sourcePortRanges",
)
destination_address_prefixes = _schema_security_rule_read.properties.destination_address_prefixes
destination_address_prefixes.Element = AAZStrType()
destination_application_security_groups = _schema_security_rule_read.properties.destination_application_security_groups
destination_application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(destination_application_security_groups.Element)
destination_port_ranges = _schema_security_rule_read.properties.destination_port_ranges
destination_port_ranges.Element = AAZStrType()
source_address_prefixes = _schema_security_rule_read.properties.source_address_prefixes
source_address_prefixes.Element = AAZStrType()
source_application_security_groups = _schema_security_rule_read.properties.source_application_security_groups
source_application_security_groups.Element = AAZObjectType()
cls._build_schema_application_security_group_read(source_application_security_groups.Element)
source_port_ranges = _schema_security_rule_read.properties.source_port_ranges
source_port_ranges.Element = AAZStrType()

        _schema.etag = cls._schema_security_rule_read.etag
_schema.id = cls._schema_security_rule_read.id
_schema.name = cls._schema_security_rule_read.name
_schema.properties = cls._schema_security_rule_read.properties
_schema.type = cls._schema_security_rule_read.type

    _schema_sub_resource_read = None

    @classmethod
def _build_schema_sub_resource_read(cls, _schema):
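        # SubResource is the minimal ARM reference shape: a single "id" string.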
if cls._schema_sub_resource_read is not None:
_schema.id = cls._schema_sub_resource_read.id
            return

        cls._schema_sub_resource_read = _schema_sub_resource_read = AAZObjectType()

        sub_resource_read = _schema_sub_resource_read
sub_resource_read.id = AAZStrType()

        _schema.id = cls._schema_sub_resource_read.id

    _schema_subnet_read = None

    @classmethod
def _build_schema_subnet_read(cls, _schema):
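        # Subnet read schema. Recursive references (e.g. route table subnets,
        # ipConfigurationProfiles) reuse the class-level cache assigned below,
        # which is what keeps the recursion from looping forever.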
if cls._schema_subnet_read is not None:
_schema.etag = cls._schema_subnet_read.etag
_schema.id = cls._schema_subnet_read.id
_schema.name = cls._schema_subnet_read.name
_schema.properties = cls._schema_subnet_read.properties
_schema.type = cls._schema_subnet_read.type
            return

        cls._schema_subnet_read = _schema_subnet_read = AAZObjectType()

        subnet_read = _schema_subnet_read
subnet_read.etag = AAZStrType(
flags={"read_only": True},
)
subnet_read.id = AAZStrType()
subnet_read.name = AAZStrType()
subnet_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
subnet_read.type = AAZStrType()
properties = _schema_subnet_read.properties
properties.address_prefix = AAZStrType(
serialized_name="addressPrefix",
)
properties.address_prefixes = AAZListType(
serialized_name="addressPrefixes",
)
properties.application_gateway_ip_configurations = AAZListType(
serialized_name="applicationGatewayIPConfigurations",
)
properties.delegations = AAZListType()
properties.ip_allocations = AAZListType(
serialized_name="ipAllocations",
)
properties.ip_configuration_profiles = AAZListType(
serialized_name="ipConfigurationProfiles",
flags={"read_only": True},
)
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
flags={"read_only": True},
)
properties.nat_gateway = AAZObjectType(
serialized_name="natGateway",
)
cls._build_schema_sub_resource_read(properties.nat_gateway)
properties.network_security_group = AAZObjectType(
serialized_name="networkSecurityGroup",
)
cls._build_schema_network_security_group_read(properties.network_security_group)
properties.private_endpoint_network_policies = AAZStrType(
serialized_name="privateEndpointNetworkPolicies",
)
properties.private_endpoints = AAZListType(
serialized_name="privateEndpoints",
flags={"read_only": True},
)
properties.private_link_service_network_policies = AAZStrType(
serialized_name="privateLinkServiceNetworkPolicies",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.purpose = AAZStrType(
flags={"read_only": True},
)
properties.resource_navigation_links = AAZListType(
serialized_name="resourceNavigationLinks",
flags={"read_only": True},
)
properties.route_table = AAZObjectType(
serialized_name="routeTable",
)
properties.service_association_links = AAZListType(
serialized_name="serviceAssociationLinks",
flags={"read_only": True},
)
properties.service_endpoint_policies = AAZListType(
serialized_name="serviceEndpointPolicies",
)
properties.service_endpoints = AAZListType(
serialized_name="serviceEndpoints",
)
address_prefixes = _schema_subnet_read.properties.address_prefixes
address_prefixes.Element = AAZStrType()
application_gateway_ip_configurations = _schema_subnet_read.properties.application_gateway_ip_configurations
application_gateway_ip_configurations.Element = AAZObjectType()
cls._build_schema_application_gateway_ip_configuration_read(application_gateway_ip_configurations.Element)
delegations = _schema_subnet_read.properties.delegations
delegations.Element = AAZObjectType()
_element = _schema_subnet_read.properties.delegations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = _schema_subnet_read.properties.delegations.Element.properties
properties.actions = AAZListType(
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.service_name = AAZStrType(
serialized_name="serviceName",
)
actions = _schema_subnet_read.properties.delegations.Element.properties.actions
actions.Element = AAZStrType()
ip_allocations = _schema_subnet_read.properties.ip_allocations
ip_allocations.Element = AAZObjectType()
cls._build_schema_sub_resource_read(ip_allocations.Element)
ip_configuration_profiles = _schema_subnet_read.properties.ip_configuration_profiles
ip_configuration_profiles.Element = AAZObjectType()
_element = _schema_subnet_read.properties.ip_configuration_profiles.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.ip_configuration_profiles.Element.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_subnet_read(properties.subnet)
ip_configurations = _schema_subnet_read.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
cls._build_schema_ip_configuration_read(ip_configurations.Element)
private_endpoints = _schema_subnet_read.properties.private_endpoints
private_endpoints.Element = AAZObjectType()
cls._build_schema_private_endpoint_read(private_endpoints.Element)
resource_navigation_links = _schema_subnet_read.properties.resource_navigation_links
resource_navigation_links.Element = AAZObjectType()
_element = _schema_subnet_read.properties.resource_navigation_links.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.resource_navigation_links.Element.properties
properties.link = AAZStrType()
properties.linked_resource_type = AAZStrType(
serialized_name="linkedResourceType",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
route_table = _schema_subnet_read.properties.route_table
route_table.etag = AAZStrType(
flags={"read_only": True},
)
route_table.id = AAZStrType()
route_table.location = AAZStrType()
route_table.name = AAZStrType(
flags={"read_only": True},
)
route_table.properties = AAZObjectType(
flags={"client_flatten": True},
)
route_table.tags = AAZDictType()
route_table.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.route_table.properties
properties.disable_bgp_route_propagation = AAZBoolType(
serialized_name="disableBgpRoutePropagation",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.routes = AAZListType()
properties.subnets = AAZListType(
flags={"read_only": True},
)
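
        # Routes and back-referenced subnets of the subnet's attached route table.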
routes = _schema_subnet_read.properties.route_table.properties.routes
routes.Element = AAZObjectType()
_element = _schema_subnet_read.properties.route_table.properties.routes.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = _schema_subnet_read.properties.route_table.properties.routes.Element.properties
properties.address_prefix = AAZStrType(
serialized_name="addressPrefix",
)
properties.has_bgp_override = AAZBoolType(
serialized_name="hasBgpOverride",
)
properties.next_hop_ip_address = AAZStrType(
serialized_name="nextHopIpAddress",
)
properties.next_hop_type = AAZStrType(
serialized_name="nextHopType",
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
subnets = _schema_subnet_read.properties.route_table.properties.subnets
subnets.Element = AAZObjectType()
cls._build_schema_subnet_read(subnets.Element)
tags = _schema_subnet_read.properties.route_table.tags
tags.Element = AAZStrType()
service_association_links = _schema_subnet_read.properties.service_association_links
service_association_links.Element = AAZObjectType()
_element = _schema_subnet_read.properties.service_association_links.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.service_association_links.Element.properties
properties.allow_delete = AAZBoolType(
serialized_name="allowDelete",
)
properties.link = AAZStrType()
properties.linked_resource_type = AAZStrType(
serialized_name="linkedResourceType",
)
properties.locations = AAZListType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
locations = _schema_subnet_read.properties.service_association_links.Element.properties.locations
locations.Element = AAZStrType()
service_endpoint_policies = _schema_subnet_read.properties.service_endpoint_policies
service_endpoint_policies.Element = AAZObjectType()
_element = _schema_subnet_read.properties.service_endpoint_policies.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.kind = AAZStrType(
flags={"read_only": True},
)
_element.location = AAZStrType()
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_subnet_read.properties.service_endpoint_policies.Element.properties
properties.contextual_service_endpoint_policies = AAZListType(
serialized_name="contextualServiceEndpointPolicies",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.service_alias = AAZStrType(
serialized_name="serviceAlias",
)
properties.service_endpoint_policy_definitions = AAZListType(
serialized_name="serviceEndpointPolicyDefinitions",
)
properties.subnets = AAZListType(
flags={"read_only": True},
)
contextual_service_endpoint_policies = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.contextual_service_endpoint_policies
contextual_service_endpoint_policies.Element = AAZStrType()
service_endpoint_policy_definitions = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.service_endpoint_policy_definitions
service_endpoint_policy_definitions.Element = AAZObjectType()
_element = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.service_endpoint_policy_definitions.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType()
properties = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.service_endpoint_policy_definitions.Element.properties
properties.description = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.service = AAZStrType()
properties.service_resources = AAZListType(
serialized_name="serviceResources",
)
service_resources = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.service_endpoint_policy_definitions.Element.properties.service_resources
service_resources.Element = AAZStrType()
subnets = _schema_subnet_read.properties.service_endpoint_policies.Element.properties.subnets
subnets.Element = AAZObjectType()
cls._build_schema_subnet_read(subnets.Element)
tags = _schema_subnet_read.properties.service_endpoint_policies.Element.tags
tags.Element = AAZStrType()
service_endpoints = _schema_subnet_read.properties.service_endpoints
service_endpoints.Element = AAZObjectType()
_element = _schema_subnet_read.properties.service_endpoints.Element
_element.locations = AAZListType()
_element.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
_element.service = AAZStrType()
locations = _schema_subnet_read.properties.service_endpoints.Element.locations
locations.Element = AAZStrType()

        _schema.etag = cls._schema_subnet_read.etag
_schema.id = cls._schema_subnet_read.id
_schema.name = cls._schema_subnet_read.name
_schema.properties = cls._schema_subnet_read.properties
_schema.type = cls._schema_subnet_read.type

    _schema_virtual_network_tap_read = None

    @classmethod
def _build_schema_virtual_network_tap_read(cls, _schema):
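        # Virtual network TAP read schema: the tap destination (load balancer
        # frontend or NIC IP configuration, plus port) and read-only
        # back-references to the NIC tap configurations that use this TAP.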
if cls._schema_virtual_network_tap_read is not None:
_schema.etag = cls._schema_virtual_network_tap_read.etag
_schema.id = cls._schema_virtual_network_tap_read.id
_schema.location = cls._schema_virtual_network_tap_read.location
_schema.name = cls._schema_virtual_network_tap_read.name
_schema.properties = cls._schema_virtual_network_tap_read.properties
_schema.tags = cls._schema_virtual_network_tap_read.tags
_schema.type = cls._schema_virtual_network_tap_read.type
            return

        cls._schema_virtual_network_tap_read = _schema_virtual_network_tap_read = AAZObjectType()

        virtual_network_tap_read = _schema_virtual_network_tap_read
virtual_network_tap_read.etag = AAZStrType(
flags={"read_only": True},
)
virtual_network_tap_read.id = AAZStrType()
virtual_network_tap_read.location = AAZStrType()
virtual_network_tap_read.name = AAZStrType(
flags={"read_only": True},
)
virtual_network_tap_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
virtual_network_tap_read.tags = AAZDictType()
virtual_network_tap_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_virtual_network_tap_read.properties
properties.destination_load_balancer_front_end_ip_configuration = AAZObjectType(
serialized_name="destinationLoadBalancerFrontEndIPConfiguration",
)
cls._build_schema_frontend_ip_configuration_read(properties.destination_load_balancer_front_end_ip_configuration)
properties.destination_network_interface_ip_configuration = AAZObjectType(
serialized_name="destinationNetworkInterfaceIPConfiguration",
)
cls._build_schema_network_interface_ip_configuration_read(properties.destination_network_interface_ip_configuration)
properties.destination_port = AAZIntType(
serialized_name="destinationPort",
)
properties.network_interface_tap_configurations = AAZListType(
serialized_name="networkInterfaceTapConfigurations",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
network_interface_tap_configurations = _schema_virtual_network_tap_read.properties.network_interface_tap_configurations
network_interface_tap_configurations.Element = AAZObjectType()
cls._build_schema_network_interface_tap_configuration_read(network_interface_tap_configurations.Element)
tags = _schema_virtual_network_tap_read.tags
tags.Element = AAZStrType()

        _schema.etag = cls._schema_virtual_network_tap_read.etag
_schema.id = cls._schema_virtual_network_tap_read.id
_schema.location = cls._schema_virtual_network_tap_read.location
_schema.name = cls._schema_virtual_network_tap_read.name
_schema.properties = cls._schema_virtual_network_tap_read.properties
_schema.tags = cls._schema_virtual_network_tap_read.tags
_schema.type = cls._schema_virtual_network_tap_read.type

    _schema_web_application_firewall_policy_read = None

    @classmethod
def _build_schema_web_application_firewall_policy_read(cls, _schema):
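        # WAF policy read schema. Associated application gateways, HTTP listeners
        # and path-based rules are read-only back-references; custom rules and the
        # required managed rules are the configurable payload.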
if cls._schema_web_application_firewall_policy_read is not None:
_schema.etag = cls._schema_web_application_firewall_policy_read.etag
_schema.id = cls._schema_web_application_firewall_policy_read.id
_schema.location = cls._schema_web_application_firewall_policy_read.location
_schema.name = cls._schema_web_application_firewall_policy_read.name
_schema.properties = cls._schema_web_application_firewall_policy_read.properties
_schema.tags = cls._schema_web_application_firewall_policy_read.tags
_schema.type = cls._schema_web_application_firewall_policy_read.type
            return

        cls._schema_web_application_firewall_policy_read = _schema_web_application_firewall_policy_read = AAZObjectType()

        web_application_firewall_policy_read = _schema_web_application_firewall_policy_read
web_application_firewall_policy_read.etag = AAZStrType(
flags={"read_only": True},
)
web_application_firewall_policy_read.id = AAZStrType()
web_application_firewall_policy_read.location = AAZStrType()
web_application_firewall_policy_read.name = AAZStrType(
flags={"read_only": True},
)
web_application_firewall_policy_read.properties = AAZObjectType(
flags={"client_flatten": True},
)
web_application_firewall_policy_read.tags = AAZDictType()
web_application_firewall_policy_read.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties
properties.application_gateways = AAZListType(
serialized_name="applicationGateways",
flags={"read_only": True},
)
properties.custom_rules = AAZListType(
serialized_name="customRules",
)
properties.http_listeners = AAZListType(
serialized_name="httpListeners",
flags={"read_only": True},
)
properties.managed_rules = AAZObjectType(
serialized_name="managedRules",
flags={"required": True},
)
properties.path_based_rules = AAZListType(
serialized_name="pathBasedRules",
flags={"read_only": True},
)
properties.policy_settings = AAZObjectType(
serialized_name="policySettings",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.resource_state = AAZStrType(
serialized_name="resourceState",
flags={"read_only": True},
)
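
        # Application gateways referencing this policy, serialized in full.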
application_gateways = _schema_web_application_firewall_policy_read.properties.application_gateways
application_gateways.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.identity = AAZObjectType()
_element.location = AAZStrType()
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
_element.zones = AAZListType()
identity = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.identity
identity.principal_id = AAZStrType(
serialized_name="principalId",
flags={"read_only": True},
)
identity.tenant_id = AAZStrType(
serialized_name="tenantId",
flags={"read_only": True},
)
identity.type = AAZStrType()
identity.user_assigned_identities = AAZDictType(
serialized_name="userAssignedIdentities",
)
user_assigned_identities = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.identity.user_assigned_identities
user_assigned_identities.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.identity.user_assigned_identities.Element
_element.client_id = AAZStrType(
serialized_name="clientId",
flags={"read_only": True},
)
_element.principal_id = AAZStrType(
serialized_name="principalId",
flags={"read_only": True},
)
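
        # Full application gateway property bag (client-flattened onto the element).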
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties
properties.authentication_certificates = AAZListType(
serialized_name="authenticationCertificates",
)
properties.autoscale_configuration = AAZObjectType(
serialized_name="autoscaleConfiguration",
)
properties.backend_address_pools = AAZListType(
serialized_name="backendAddressPools",
)
properties.backend_http_settings_collection = AAZListType(
serialized_name="backendHttpSettingsCollection",
)
properties.backend_settings_collection = AAZListType(
serialized_name="backendSettingsCollection",
)
properties.custom_error_configurations = AAZListType(
serialized_name="customErrorConfigurations",
)
properties.enable_fips = AAZBoolType(
serialized_name="enableFips",
)
properties.enable_http2 = AAZBoolType(
serialized_name="enableHttp2",
)
properties.firewall_policy = AAZObjectType(
serialized_name="firewallPolicy",
)
cls._build_schema_sub_resource_read(properties.firewall_policy)
properties.force_firewall_policy_association = AAZBoolType(
serialized_name="forceFirewallPolicyAssociation",
)
properties.frontend_ip_configurations = AAZListType(
serialized_name="frontendIPConfigurations",
)
properties.frontend_ports = AAZListType(
serialized_name="frontendPorts",
)
properties.gateway_ip_configurations = AAZListType(
serialized_name="gatewayIPConfigurations",
)
properties.global_configuration = AAZObjectType(
serialized_name="globalConfiguration",
)
properties.http_listeners = AAZListType(
serialized_name="httpListeners",
)
properties.listeners = AAZListType()
properties.load_distribution_policies = AAZListType(
serialized_name="loadDistributionPolicies",
)
properties.operational_state = AAZStrType(
serialized_name="operationalState",
flags={"read_only": True},
)
properties.private_endpoint_connections = AAZListType(
serialized_name="privateEndpointConnections",
flags={"read_only": True},
)
properties.private_link_configurations = AAZListType(
serialized_name="privateLinkConfigurations",
)
properties.probes = AAZListType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.redirect_configurations = AAZListType(
serialized_name="redirectConfigurations",
)
properties.request_routing_rules = AAZListType(
serialized_name="requestRoutingRules",
)
properties.resource_guid = AAZStrType(
serialized_name="resourceGuid",
flags={"read_only": True},
)
properties.rewrite_rule_sets = AAZListType(
serialized_name="rewriteRuleSets",
)
properties.routing_rules = AAZListType(
serialized_name="routingRules",
)
properties.sku = AAZObjectType()
properties.ssl_certificates = AAZListType(
serialized_name="sslCertificates",
)
properties.ssl_policy = AAZObjectType(
serialized_name="sslPolicy",
)
cls._build_schema_application_gateway_ssl_policy_read(properties.ssl_policy)
properties.ssl_profiles = AAZListType(
serialized_name="sslProfiles",
)
properties.trusted_client_certificates = AAZListType(
serialized_name="trustedClientCertificates",
)
properties.trusted_root_certificates = AAZListType(
serialized_name="trustedRootCertificates",
)
properties.url_path_maps = AAZListType(
serialized_name="urlPathMaps",
)
properties.web_application_firewall_configuration = AAZObjectType(
serialized_name="webApplicationFirewallConfiguration",
)
authentication_certificates = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.authentication_certificates
authentication_certificates.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.authentication_certificates.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.authentication_certificates.Element.properties
properties.data = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
autoscale_configuration = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.autoscale_configuration
autoscale_configuration.max_capacity = AAZIntType(
serialized_name="maxCapacity",
)
autoscale_configuration.min_capacity = AAZIntType(
serialized_name="minCapacity",
flags={"required": True},
)
backend_address_pools = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_address_pools
backend_address_pools.Element = AAZObjectType()
cls._build_schema_application_gateway_backend_address_pool_read(backend_address_pools.Element)
backend_http_settings_collection = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_http_settings_collection
backend_http_settings_collection.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_http_settings_collection.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_http_settings_collection.Element.properties
properties.affinity_cookie_name = AAZStrType(
serialized_name="affinityCookieName",
)
properties.authentication_certificates = AAZListType(
serialized_name="authenticationCertificates",
)
properties.connection_draining = AAZObjectType(
serialized_name="connectionDraining",
)
properties.cookie_based_affinity = AAZStrType(
serialized_name="cookieBasedAffinity",
)
properties.host_name = AAZStrType(
serialized_name="hostName",
)
properties.path = AAZStrType()
properties.pick_host_name_from_backend_address = AAZBoolType(
serialized_name="pickHostNameFromBackendAddress",
)
properties.port = AAZIntType()
properties.probe = AAZObjectType()
cls._build_schema_sub_resource_read(properties.probe)
properties.probe_enabled = AAZBoolType(
serialized_name="probeEnabled",
)
properties.protocol = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.request_timeout = AAZIntType(
serialized_name="requestTimeout",
)
properties.trusted_root_certificates = AAZListType(
serialized_name="trustedRootCertificates",
)
authentication_certificates = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_http_settings_collection.Element.properties.authentication_certificates
authentication_certificates.Element = AAZObjectType()
cls._build_schema_sub_resource_read(authentication_certificates.Element)
connection_draining = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_http_settings_collection.Element.properties.connection_draining
connection_draining.drain_timeout_in_sec = AAZIntType(
serialized_name="drainTimeoutInSec",
flags={"required": True},
)
connection_draining.enabled = AAZBoolType(
flags={"required": True},
)
trusted_root_certificates = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_http_settings_collection.Element.properties.trusted_root_certificates
trusted_root_certificates.Element = AAZObjectType()
cls._build_schema_sub_resource_read(trusted_root_certificates.Element)
backend_settings_collection = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_settings_collection
backend_settings_collection.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_settings_collection.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_settings_collection.Element.properties
properties.host_name = AAZStrType(
serialized_name="hostName",
)
properties.pick_host_name_from_backend_address = AAZBoolType(
serialized_name="pickHostNameFromBackendAddress",
)
properties.port = AAZIntType()
properties.probe = AAZObjectType()
cls._build_schema_sub_resource_read(properties.probe)
properties.protocol = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.timeout = AAZIntType()
properties.trusted_root_certificates = AAZListType(
serialized_name="trustedRootCertificates",
)
trusted_root_certificates = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.backend_settings_collection.Element.properties.trusted_root_certificates
trusted_root_certificates.Element = AAZObjectType()
cls._build_schema_sub_resource_read(trusted_root_certificates.Element)
custom_error_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.custom_error_configurations
custom_error_configurations.Element = AAZObjectType()
cls._build_schema_application_gateway_custom_error_read(custom_error_configurations.Element)
frontend_ip_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.frontend_ip_configurations
frontend_ip_configurations.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.frontend_ip_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.frontend_ip_configurations.Element.properties
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.private_link_configuration = AAZObjectType(
serialized_name="privateLinkConfiguration",
)
cls._build_schema_sub_resource_read(properties.private_link_configuration)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_ip_address = AAZObjectType(
serialized_name="publicIPAddress",
)
cls._build_schema_sub_resource_read(properties.public_ip_address)
properties.subnet = AAZObjectType()
cls._build_schema_sub_resource_read(properties.subnet)
frontend_ports = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.frontend_ports
frontend_ports.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.frontend_ports.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.frontend_ports.Element.properties
properties.port = AAZIntType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
gateway_ip_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.gateway_ip_configurations
gateway_ip_configurations.Element = AAZObjectType()
cls._build_schema_application_gateway_ip_configuration_read(gateway_ip_configurations.Element)
global_configuration = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.global_configuration
global_configuration.enable_request_buffering = AAZBoolType(
serialized_name="enableRequestBuffering",
)
global_configuration.enable_response_buffering = AAZBoolType(
serialized_name="enableResponseBuffering",
)
http_listeners = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.http_listeners
http_listeners.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.http_listeners.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.http_listeners.Element.properties
properties.custom_error_configurations = AAZListType(
serialized_name="customErrorConfigurations",
)
properties.firewall_policy = AAZObjectType(
serialized_name="firewallPolicy",
)
cls._build_schema_sub_resource_read(properties.firewall_policy)
properties.frontend_ip_configuration = AAZObjectType(
serialized_name="frontendIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.frontend_ip_configuration)
properties.frontend_port = AAZObjectType(
serialized_name="frontendPort",
)
cls._build_schema_sub_resource_read(properties.frontend_port)
properties.host_name = AAZStrType(
serialized_name="hostName",
)
properties.host_names = AAZListType(
serialized_name="hostNames",
)
properties.protocol = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.require_server_name_indication = AAZBoolType(
serialized_name="requireServerNameIndication",
)
properties.ssl_certificate = AAZObjectType(
serialized_name="sslCertificate",
)
cls._build_schema_sub_resource_read(properties.ssl_certificate)
properties.ssl_profile = AAZObjectType(
serialized_name="sslProfile",
)
cls._build_schema_sub_resource_read(properties.ssl_profile)
custom_error_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.http_listeners.Element.properties.custom_error_configurations
custom_error_configurations.Element = AAZObjectType()
cls._build_schema_application_gateway_custom_error_read(custom_error_configurations.Element)
host_names = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.http_listeners.Element.properties.host_names
host_names.Element = AAZStrType()
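
        # TLS/TCP (layer 4) listeners, distinct from the HTTP listeners above.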
listeners = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.listeners
listeners.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.listeners.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.listeners.Element.properties
properties.frontend_ip_configuration = AAZObjectType(
serialized_name="frontendIPConfiguration",
)
cls._build_schema_sub_resource_read(properties.frontend_ip_configuration)
properties.frontend_port = AAZObjectType(
serialized_name="frontendPort",
)
cls._build_schema_sub_resource_read(properties.frontend_port)
properties.protocol = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.ssl_certificate = AAZObjectType(
serialized_name="sslCertificate",
)
cls._build_schema_sub_resource_read(properties.ssl_certificate)
properties.ssl_profile = AAZObjectType(
serialized_name="sslProfile",
)
cls._build_schema_sub_resource_read(properties.ssl_profile)
load_distribution_policies = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.load_distribution_policies
load_distribution_policies.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.load_distribution_policies.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.load_distribution_policies.Element.properties
properties.load_distribution_algorithm = AAZStrType(
serialized_name="loadDistributionAlgorithm",
)
properties.load_distribution_targets = AAZListType(
serialized_name="loadDistributionTargets",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
load_distribution_targets = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.load_distribution_policies.Element.properties.load_distribution_targets
load_distribution_targets.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.load_distribution_policies.Element.properties.load_distribution_targets.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.load_distribution_policies.Element.properties.load_distribution_targets.Element.properties
properties.backend_address_pool = AAZObjectType(
serialized_name="backendAddressPool",
)
cls._build_schema_sub_resource_read(properties.backend_address_pool)
properties.weight_per_server = AAZIntType(
serialized_name="weightPerServer",
)
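
        # Read-only private endpoint connections on the application gateway.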
private_endpoint_connections = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_endpoint_connections
private_endpoint_connections.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_endpoint_connections.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_endpoint_connections.Element.properties
properties.link_identifier = AAZStrType(
serialized_name="linkIdentifier",
flags={"read_only": True},
)
properties.private_endpoint = AAZObjectType(
serialized_name="privateEndpoint",
)
cls._build_schema_private_endpoint_read(properties.private_endpoint)
properties.private_link_service_connection_state = AAZObjectType(
serialized_name="privateLinkServiceConnectionState",
)
cls._build_schema_private_link_service_connection_state_read(properties.private_link_service_connection_state)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
private_link_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_link_configurations
private_link_configurations.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_link_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_link_configurations.Element.properties
properties.ip_configurations = AAZListType(
serialized_name="ipConfigurations",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
ip_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_link_configurations.Element.properties.ip_configurations
ip_configurations.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_link_configurations.Element.properties.ip_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.private_link_configurations.Element.properties.ip_configurations.Element.properties
properties.primary = AAZBoolType()
properties.private_ip_address = AAZStrType(
serialized_name="privateIPAddress",
)
properties.private_ip_allocation_method = AAZStrType(
serialized_name="privateIPAllocationMethod",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.subnet = AAZObjectType()
cls._build_schema_sub_resource_read(properties.subnet)
probes = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.probes
probes.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.probes.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.probes.Element.properties
properties.host = AAZStrType()
properties.interval = AAZIntType()
properties.match = AAZObjectType()
properties.min_servers = AAZIntType(
serialized_name="minServers",
)
properties.path = AAZStrType()
properties.pick_host_name_from_backend_http_settings = AAZBoolType(
serialized_name="pickHostNameFromBackendHttpSettings",
)
properties.pick_host_name_from_backend_settings = AAZBoolType(
serialized_name="pickHostNameFromBackendSettings",
)
properties.port = AAZIntType()
properties.protocol = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.timeout = AAZIntType()
properties.unhealthy_threshold = AAZIntType(
serialized_name="unhealthyThreshold",
)
match = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.probes.Element.properties.match
match.body = AAZStrType()
match.status_codes = AAZListType(
serialized_name="statusCodes",
)
status_codes = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.probes.Element.properties.match.status_codes
status_codes.Element = AAZStrType()
redirect_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.redirect_configurations
redirect_configurations.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.redirect_configurations.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.redirect_configurations.Element.properties
properties.include_path = AAZBoolType(
serialized_name="includePath",
)
properties.include_query_string = AAZBoolType(
serialized_name="includeQueryString",
)
properties.path_rules = AAZListType(
serialized_name="pathRules",
)
properties.redirect_type = AAZStrType(
serialized_name="redirectType",
)
properties.request_routing_rules = AAZListType(
serialized_name="requestRoutingRules",
)
properties.target_listener = AAZObjectType(
serialized_name="targetListener",
)
cls._build_schema_sub_resource_read(properties.target_listener)
properties.target_url = AAZStrType(
serialized_name="targetUrl",
)
properties.url_path_maps = AAZListType(
serialized_name="urlPathMaps",
)
path_rules = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.redirect_configurations.Element.properties.path_rules
path_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(path_rules.Element)
request_routing_rules = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.redirect_configurations.Element.properties.request_routing_rules
request_routing_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(request_routing_rules.Element)
url_path_maps = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.redirect_configurations.Element.properties.url_path_maps
url_path_maps.Element = AAZObjectType()
cls._build_schema_sub_resource_read(url_path_maps.Element)
request_routing_rules = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.request_routing_rules
request_routing_rules.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.request_routing_rules.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.request_routing_rules.Element.properties
properties.backend_address_pool = AAZObjectType(
serialized_name="backendAddressPool",
)
cls._build_schema_sub_resource_read(properties.backend_address_pool)
properties.backend_http_settings = AAZObjectType(
serialized_name="backendHttpSettings",
)
cls._build_schema_sub_resource_read(properties.backend_http_settings)
properties.http_listener = AAZObjectType(
serialized_name="httpListener",
)
cls._build_schema_sub_resource_read(properties.http_listener)
properties.load_distribution_policy = AAZObjectType(
serialized_name="loadDistributionPolicy",
)
cls._build_schema_sub_resource_read(properties.load_distribution_policy)
properties.priority = AAZIntType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.redirect_configuration = AAZObjectType(
serialized_name="redirectConfiguration",
)
cls._build_schema_sub_resource_read(properties.redirect_configuration)
properties.rewrite_rule_set = AAZObjectType(
serialized_name="rewriteRuleSet",
)
cls._build_schema_sub_resource_read(properties.rewrite_rule_set)
properties.rule_type = AAZStrType(
serialized_name="ruleType",
)
properties.url_path_map = AAZObjectType(
serialized_name="urlPathMap",
)
cls._build_schema_sub_resource_read(properties.url_path_map)
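
        # Rewrite rule sets: ordered rules combining header/URL rewrite actions
        # with optional match conditions.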
rewrite_rule_sets = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets
rewrite_rule_sets.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.rewrite_rules = AAZListType(
serialized_name="rewriteRules",
)
rewrite_rules = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties.rewrite_rules
rewrite_rules.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties.rewrite_rules.Element
_element.action_set = AAZObjectType(
serialized_name="actionSet",
)
_element.conditions = AAZListType()
_element.name = AAZStrType()
_element.rule_sequence = AAZIntType(
serialized_name="ruleSequence",
)
action_set = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties.rewrite_rules.Element.action_set
action_set.request_header_configurations = AAZListType(
serialized_name="requestHeaderConfigurations",
)
action_set.response_header_configurations = AAZListType(
serialized_name="responseHeaderConfigurations",
)
action_set.url_configuration = AAZObjectType(
serialized_name="urlConfiguration",
)
request_header_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties.rewrite_rules.Element.action_set.request_header_configurations
request_header_configurations.Element = AAZObjectType()
cls._build_schema_application_gateway_header_configuration_read(request_header_configurations.Element)
response_header_configurations = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties.rewrite_rules.Element.action_set.response_header_configurations
response_header_configurations.Element = AAZObjectType()
cls._build_schema_application_gateway_header_configuration_read(response_header_configurations.Element)
url_configuration = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties.rewrite_rules.Element.action_set.url_configuration
url_configuration.modified_path = AAZStrType(
serialized_name="modifiedPath",
)
url_configuration.modified_query_string = AAZStrType(
serialized_name="modifiedQueryString",
)
url_configuration.reroute = AAZBoolType()
conditions = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties.rewrite_rules.Element.conditions
conditions.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.rewrite_rule_sets.Element.properties.rewrite_rules.Element.conditions.Element
_element.ignore_case = AAZBoolType(
serialized_name="ignoreCase",
)
_element.negate = AAZBoolType()
_element.pattern = AAZStrType()
_element.variable = AAZStrType()
routing_rules = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.routing_rules
routing_rules.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.routing_rules.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.routing_rules.Element.properties
properties.backend_address_pool = AAZObjectType(
serialized_name="backendAddressPool",
)
cls._build_schema_sub_resource_read(properties.backend_address_pool)
properties.backend_settings = AAZObjectType(
serialized_name="backendSettings",
)
cls._build_schema_sub_resource_read(properties.backend_settings)
properties.listener = AAZObjectType()
cls._build_schema_sub_resource_read(properties.listener)
properties.priority = AAZIntType(
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.rule_type = AAZStrType(
serialized_name="ruleType",
)
sku = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.sku
sku.capacity = AAZIntType()
sku.name = AAZStrType()
sku.tier = AAZStrType()
ssl_certificates = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.ssl_certificates
ssl_certificates.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.ssl_certificates.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.ssl_certificates.Element.properties
properties.data = AAZStrType()
properties.key_vault_secret_id = AAZStrType(
serialized_name="keyVaultSecretId",
)
properties.password = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.public_cert_data = AAZStrType(
serialized_name="publicCertData",
flags={"read_only": True},
)
ssl_profiles = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.ssl_profiles
ssl_profiles.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.ssl_profiles.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.ssl_profiles.Element.properties
properties.client_auth_configuration = AAZObjectType(
serialized_name="clientAuthConfiguration",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.ssl_policy = AAZObjectType(
serialized_name="sslPolicy",
)
cls._build_schema_application_gateway_ssl_policy_read(properties.ssl_policy)
properties.trusted_client_certificates = AAZListType(
serialized_name="trustedClientCertificates",
)
client_auth_configuration = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.ssl_profiles.Element.properties.client_auth_configuration
client_auth_configuration.verify_client_cert_issuer_dn = AAZBoolType(
serialized_name="verifyClientCertIssuerDN",
)
client_auth_configuration.verify_client_revocation = AAZStrType(
serialized_name="verifyClientRevocation",
)
trusted_client_certificates = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.ssl_profiles.Element.properties.trusted_client_certificates
trusted_client_certificates.Element = AAZObjectType()
cls._build_schema_sub_resource_read(trusted_client_certificates.Element)
trusted_client_certificates = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.trusted_client_certificates
trusted_client_certificates.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.trusted_client_certificates.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.trusted_client_certificates.Element.properties
properties.client_cert_issuer_dn = AAZStrType(
serialized_name="clientCertIssuerDN",
flags={"read_only": True},
)
properties.data = AAZStrType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.validated_cert_data = AAZStrType(
serialized_name="validatedCertData",
flags={"read_only": True},
)
trusted_root_certificates = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.trusted_root_certificates
trusted_root_certificates.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.trusted_root_certificates.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.trusted_root_certificates.Element.properties
properties.data = AAZStrType()
properties.key_vault_secret_id = AAZStrType(
serialized_name="keyVaultSecretId",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
url_path_maps = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.url_path_maps
url_path_maps.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.url_path_maps.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.url_path_maps.Element.properties
properties.default_backend_address_pool = AAZObjectType(
serialized_name="defaultBackendAddressPool",
)
cls._build_schema_sub_resource_read(properties.default_backend_address_pool)
properties.default_backend_http_settings = AAZObjectType(
serialized_name="defaultBackendHttpSettings",
)
cls._build_schema_sub_resource_read(properties.default_backend_http_settings)
properties.default_load_distribution_policy = AAZObjectType(
serialized_name="defaultLoadDistributionPolicy",
)
cls._build_schema_sub_resource_read(properties.default_load_distribution_policy)
properties.default_redirect_configuration = AAZObjectType(
serialized_name="defaultRedirectConfiguration",
)
cls._build_schema_sub_resource_read(properties.default_redirect_configuration)
properties.default_rewrite_rule_set = AAZObjectType(
serialized_name="defaultRewriteRuleSet",
)
cls._build_schema_sub_resource_read(properties.default_rewrite_rule_set)
properties.path_rules = AAZListType(
serialized_name="pathRules",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
path_rules = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.url_path_maps.Element.properties.path_rules
path_rules.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.url_path_maps.Element.properties.path_rules.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.url_path_maps.Element.properties.path_rules.Element.properties
properties.backend_address_pool = AAZObjectType(
serialized_name="backendAddressPool",
)
cls._build_schema_sub_resource_read(properties.backend_address_pool)
properties.backend_http_settings = AAZObjectType(
serialized_name="backendHttpSettings",
)
cls._build_schema_sub_resource_read(properties.backend_http_settings)
properties.firewall_policy = AAZObjectType(
serialized_name="firewallPolicy",
)
cls._build_schema_sub_resource_read(properties.firewall_policy)
properties.load_distribution_policy = AAZObjectType(
serialized_name="loadDistributionPolicy",
)
cls._build_schema_sub_resource_read(properties.load_distribution_policy)
properties.paths = AAZListType()
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.redirect_configuration = AAZObjectType(
serialized_name="redirectConfiguration",
)
cls._build_schema_sub_resource_read(properties.redirect_configuration)
properties.rewrite_rule_set = AAZObjectType(
serialized_name="rewriteRuleSet",
)
cls._build_schema_sub_resource_read(properties.rewrite_rule_set)
paths = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.url_path_maps.Element.properties.path_rules.Element.properties.paths
paths.Element = AAZStrType()
web_application_firewall_configuration = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.web_application_firewall_configuration
web_application_firewall_configuration.disabled_rule_groups = AAZListType(
serialized_name="disabledRuleGroups",
)
web_application_firewall_configuration.enabled = AAZBoolType(
flags={"required": True},
)
web_application_firewall_configuration.exclusions = AAZListType()
web_application_firewall_configuration.file_upload_limit_in_mb = AAZIntType(
serialized_name="fileUploadLimitInMb",
)
web_application_firewall_configuration.firewall_mode = AAZStrType(
serialized_name="firewallMode",
flags={"required": True},
)
web_application_firewall_configuration.max_request_body_size = AAZIntType(
serialized_name="maxRequestBodySize",
)
web_application_firewall_configuration.max_request_body_size_in_kb = AAZIntType(
serialized_name="maxRequestBodySizeInKb",
)
web_application_firewall_configuration.request_body_check = AAZBoolType(
serialized_name="requestBodyCheck",
)
web_application_firewall_configuration.rule_set_type = AAZStrType(
serialized_name="ruleSetType",
flags={"required": True},
)
web_application_firewall_configuration.rule_set_version = AAZStrType(
serialized_name="ruleSetVersion",
flags={"required": True},
)
disabled_rule_groups = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.web_application_firewall_configuration.disabled_rule_groups
disabled_rule_groups.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.web_application_firewall_configuration.disabled_rule_groups.Element
_element.rule_group_name = AAZStrType(
serialized_name="ruleGroupName",
flags={"required": True},
)
_element.rules = AAZListType()
rules = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.web_application_firewall_configuration.disabled_rule_groups.Element.rules
rules.Element = AAZIntType()
exclusions = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.web_application_firewall_configuration.exclusions
exclusions.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.properties.web_application_firewall_configuration.exclusions.Element
_element.match_variable = AAZStrType(
serialized_name="matchVariable",
flags={"required": True},
)
_element.selector = AAZStrType(
flags={"required": True},
)
_element.selector_match_operator = AAZStrType(
serialized_name="selectorMatchOperator",
flags={"required": True},
)
tags = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.tags
tags.Element = AAZStrType()
zones = _schema_web_application_firewall_policy_read.properties.application_gateways.Element.zones
zones.Element = AAZStrType()
custom_rules = _schema_web_application_firewall_policy_read.properties.custom_rules
custom_rules.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.custom_rules.Element
_element.action = AAZStrType(
flags={"required": True},
)
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.match_conditions = AAZListType(
serialized_name="matchConditions",
flags={"required": True},
)
_element.name = AAZStrType()
_element.priority = AAZIntType(
flags={"required": True},
)
_element.rule_type = AAZStrType(
serialized_name="ruleType",
flags={"required": True},
)
_element.state = AAZStrType()
match_conditions = _schema_web_application_firewall_policy_read.properties.custom_rules.Element.match_conditions
match_conditions.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.custom_rules.Element.match_conditions.Element
_element.match_values = AAZListType(
serialized_name="matchValues",
flags={"required": True},
)
_element.match_variables = AAZListType(
serialized_name="matchVariables",
flags={"required": True},
)
_element.negation_conditon = AAZBoolType(
serialized_name="negationConditon",
)
_element.operator = AAZStrType(
flags={"required": True},
)
_element.transforms = AAZListType()
match_values = _schema_web_application_firewall_policy_read.properties.custom_rules.Element.match_conditions.Element.match_values
match_values.Element = AAZStrType()
match_variables = _schema_web_application_firewall_policy_read.properties.custom_rules.Element.match_conditions.Element.match_variables
match_variables.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.custom_rules.Element.match_conditions.Element.match_variables.Element
_element.selector = AAZStrType()
_element.variable_name = AAZStrType(
serialized_name="variableName",
flags={"required": True},
)
transforms = _schema_web_application_firewall_policy_read.properties.custom_rules.Element.match_conditions.Element.transforms
transforms.Element = AAZStrType()
http_listeners = _schema_web_application_firewall_policy_read.properties.http_listeners
http_listeners.Element = AAZObjectType()
cls._build_schema_sub_resource_read(http_listeners.Element)
managed_rules = _schema_web_application_firewall_policy_read.properties.managed_rules
managed_rules.exclusions = AAZListType()
managed_rules.managed_rule_sets = AAZListType(
serialized_name="managedRuleSets",
flags={"required": True},
)
exclusions = _schema_web_application_firewall_policy_read.properties.managed_rules.exclusions
exclusions.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.managed_rules.exclusions.Element
_element.exclusion_managed_rule_sets = AAZListType(
serialized_name="exclusionManagedRuleSets",
)
_element.match_variable = AAZStrType(
serialized_name="matchVariable",
flags={"required": True},
)
_element.selector = AAZStrType(
flags={"required": True},
)
_element.selector_match_operator = AAZStrType(
serialized_name="selectorMatchOperator",
flags={"required": True},
)
exclusion_managed_rule_sets = _schema_web_application_firewall_policy_read.properties.managed_rules.exclusions.Element.exclusion_managed_rule_sets
exclusion_managed_rule_sets.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.managed_rules.exclusions.Element.exclusion_managed_rule_sets.Element
_element.rule_groups = AAZListType(
serialized_name="ruleGroups",
)
_element.rule_set_type = AAZStrType(
serialized_name="ruleSetType",
flags={"required": True},
)
_element.rule_set_version = AAZStrType(
serialized_name="ruleSetVersion",
flags={"required": True},
)
rule_groups = _schema_web_application_firewall_policy_read.properties.managed_rules.exclusions.Element.exclusion_managed_rule_sets.Element.rule_groups
rule_groups.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.managed_rules.exclusions.Element.exclusion_managed_rule_sets.Element.rule_groups.Element
_element.rule_group_name = AAZStrType(
serialized_name="ruleGroupName",
flags={"required": True},
)
_element.rules = AAZListType()
rules = _schema_web_application_firewall_policy_read.properties.managed_rules.exclusions.Element.exclusion_managed_rule_sets.Element.rule_groups.Element.rules
rules.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.managed_rules.exclusions.Element.exclusion_managed_rule_sets.Element.rule_groups.Element.rules.Element
_element.rule_id = AAZStrType(
serialized_name="ruleId",
flags={"required": True},
)
managed_rule_sets = _schema_web_application_firewall_policy_read.properties.managed_rules.managed_rule_sets
managed_rule_sets.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.managed_rules.managed_rule_sets.Element
_element.rule_group_overrides = AAZListType(
serialized_name="ruleGroupOverrides",
)
_element.rule_set_type = AAZStrType(
serialized_name="ruleSetType",
flags={"required": True},
)
_element.rule_set_version = AAZStrType(
serialized_name="ruleSetVersion",
flags={"required": True},
)
rule_group_overrides = _schema_web_application_firewall_policy_read.properties.managed_rules.managed_rule_sets.Element.rule_group_overrides
rule_group_overrides.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.managed_rules.managed_rule_sets.Element.rule_group_overrides.Element
_element.rule_group_name = AAZStrType(
serialized_name="ruleGroupName",
flags={"required": True},
)
_element.rules = AAZListType()
rules = _schema_web_application_firewall_policy_read.properties.managed_rules.managed_rule_sets.Element.rule_group_overrides.Element.rules
rules.Element = AAZObjectType()
_element = _schema_web_application_firewall_policy_read.properties.managed_rules.managed_rule_sets.Element.rule_group_overrides.Element.rules.Element
_element.action = AAZStrType()
_element.rule_id = AAZStrType(
serialized_name="ruleId",
flags={"required": True},
)
_element.state = AAZStrType()
path_based_rules = _schema_web_application_firewall_policy_read.properties.path_based_rules
path_based_rules.Element = AAZObjectType()
cls._build_schema_sub_resource_read(path_based_rules.Element)
policy_settings = _schema_web_application_firewall_policy_read.properties.policy_settings
policy_settings.custom_block_response_body = AAZStrType(
serialized_name="customBlockResponseBody",
)
policy_settings.custom_block_response_status_code = AAZIntType(
serialized_name="customBlockResponseStatusCode",
)
policy_settings.file_upload_limit_in_mb = AAZIntType(
serialized_name="fileUploadLimitInMb",
)
policy_settings.max_request_body_size_in_kb = AAZIntType(
serialized_name="maxRequestBodySizeInKb",
)
policy_settings.mode = AAZStrType()
policy_settings.request_body_check = AAZBoolType(
serialized_name="requestBodyCheck",
)
policy_settings.state = AAZStrType()
tags = _schema_web_application_firewall_policy_read.tags
tags.Element = AAZStrType()
_schema.etag = cls._schema_web_application_firewall_policy_read.etag
_schema.id = cls._schema_web_application_firewall_policy_read.id
_schema.location = cls._schema_web_application_firewall_policy_read.location
_schema.name = cls._schema_web_application_firewall_policy_read.name
_schema.properties = cls._schema_web_application_firewall_policy_read.properties
_schema.tags = cls._schema_web_application_firewall_policy_read.tags
_schema.type = cls._schema_web_application_firewall_policy_read.type
__all__ = ["List"]
|
[
"noreply@github.com"
] |
jiasli.noreply@github.com
|
c99cf3261ef3264d9556a7b23a4752ba3d1719ea
|
95c9cfb57346a4ff45b05847c2fd740cdd60fb79
|
/examples/2-hydrotrend/run_hydrotrend.py
|
fcff63ddf946fbbaae6da021a5b914505fc530ec
|
[
"MIT"
] |
permissive
|
mdpiper/dakota-tutorial
|
1234812eaf00e97999abcdccc0a3027ed2bb1d92
|
de5177bc741a0475266011de8363ff1ad4ce5ff0
|
refs/heads/master
| 2021-01-17T22:07:44.576114 | 2015-05-25T18:54:24 | 2015-05-25T18:54:24 | 35,640,799 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,479 |
py
|
#! /usr/bin/env python
# Brokers communication between HydroTrend and Dakota through files.
# Mark Piper (mark.piper@colorado.edu)
import sys
import os
import re
import shutil
from subprocess import call
import numpy as np
def read(output_file):
"""Reads a column of text containing HydroTrend output."""
return np.loadtxt(output_file, skiprows=2)
def write(results_file, array, labels):
"""Writes a Dakota results file from an input array."""
with open(results_file, 'w') as fp:
for i in range(len(array)):
fp.write(str(array[i]) + '\t' + labels[i] + '\n')
def get_labels(params_file):
"""Extracts labels from a Dakota parameters file."""
labels = []
with open(params_file, 'r') as fp:
for line in fp:
if re.search('ASV_', line):
                labels.append(''.join(re.findall(r':(\S+)', line)))
return labels
def main():
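    # Dakota's fork interface invokes this driver as
    #   run_hydrotrend.py <parameters_file> <results_file>
    # so sys.argv[1] is the Dakota parameters file and sys.argv[2] is the
    # results file written back to Dakota at the end of main().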
# Files and directories.
start_dir = os.path.dirname(os.path.realpath(__file__))
input_dir = os.path.join(start_dir, 'HYDRO_IN')
    if not os.path.exists(input_dir):
        os.mkdir(input_dir)
    output_dir = os.path.join(start_dir, 'HYDRO_OUTPUT')
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
input_template = 'HYDRO.IN.template'
input_file = 'HYDRO.IN'
hypsometry_file = 'HYDRO0.HYPS'
output_file = 'HYDROASCII.QS'
# Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
# incorporate the parameters from Dakota into the HydroTrend input
# template, creating a new HydroTrend input file.
shutil.copy(os.path.join(start_dir, input_template), os.curdir)
call(['dprepro', sys.argv[1], input_template, input_file])
shutil.copy(input_file, input_dir)
shutil.copy(os.path.join(start_dir, hypsometry_file), input_dir)
# Call HydroTrend, using the updated input file.
call(['hydrotrend',
'--in-dir', os.path.relpath(input_dir),
'--out-dir', os.path.relpath(output_dir)])
# Calculate mean and standard deviation of a HydroTrend output time
# series for the simulation. Write the output to a Dakota results file.
shutil.copy(os.path.join(output_dir, output_file), os.curdir)
labels = get_labels(sys.argv[1])
series = read(output_file)
if series is not None:
m_series = [np.mean(series), np.std(series)]
else:
m_series = [0, 0]
write(sys.argv[2], m_series, labels)
if __name__ == '__main__':
main()
|
[
"mark.piper@colorado.edu"
] |
mark.piper@colorado.edu
|
79d7f65e8bca3455499b1630da53457cc99be1fd
|
4f213937edc0682ae56fbca353e1aab2a994f7ef
|
/python_built_in_functions/list_data.py
|
1add2cfee223b4fc43b0c0e4dd39340680635cec
|
[] |
no_license
|
adoniramj/general_training_python
|
45779550bbb66af80ce2ce23b4581945afcdad7a
|
36aa5237bb7d060d5cee186a10e02227a5013c88
|
refs/heads/master
| 2020-12-31T22:14:49.359839 | 2020-02-18T02:07:53 | 2020-02-18T02:07:53 | 239,049,715 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 63 |
py
|
cities = ['Miami', 'Orlando', 'Atlanta', 'New York', 'Seattle']
|
[
"adoniramjvargas@gmail.com"
] |
adoniramjvargas@gmail.com
|
190c5d6a103dad55c0f5c9786005190abd77b2d5
|
4c0bdc8e94bddf8406e4bfbf9620d87f72347976
|
/lunar_lander/A2C/train.py
|
4227a629bb67abc251dcbee776b3771d43b147ec
|
[] |
no_license
|
joaobose/rl-sandbox
|
ec6d2101089326e67b504d95b3d2c82192eb6d82
|
b6092e1bb8242461af16bc7f0ce78ba03487b8ea
|
refs/heads/master
| 2023-07-19T11:13:18.898407 | 2023-07-18T04:45:28 | 2023-07-18T04:45:28 | 209,424,599 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,646 |
py
|
import gymnasium as gym
import torch
from argparse import ArgumentParser
import os
import numpy as np
from parameters import *
from agent import Agent
from neural_network import Actor, Critic
resume_episode = 0
savedir = ''
resumedir = ''
def initialize(args):
global savedir, resumedir, resume_episode
# directory management
savedir = './instances'
if not os.path.exists(savedir):
os.makedirs(savedir)
savedir = './instances/{}'.format(args.save_instance)
if not os.path.exists(savedir):
os.makedirs(savedir)
os.makedirs(savedir + '/agent_model')
if args.resume is not None:
resumedir = './instances/{}'.format(args.resume)
assert os.path.exists(resumedir), 'resume directory not found'
# Define PyTorch device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Variable env contains the environment class
env = gym.envs.make(
env_name, render_mode='human' if render_environment else None)
# Define policy network
actor = Actor(actor_learning_rate, actor_lr_decay, 8, 4).to(device).float()
critic = Critic(critic_learning_rate, critic_lr_decay,
8, 1).to(device).float()
agent = Agent(actor, critic, gamma, device)
# Resume
if args.resume is not None:
checkpoint_file = os.path.join(resumedir, 'agent_model')
checkpoint_file = os.path.join(checkpoint_file, 'checkpoint.pth.tar')
assert os.path.exists(
checkpoint_file), 'Error: resume option was used but checkpoint was not found in the folder'
checkpoint = torch.load(checkpoint_file)
agent.resume(checkpoint, resumedir != savedir)
resume_episode = checkpoint['episode']
return env, agent
def train(args):
env, agent = initialize(args)
starting_episode = 1
if resume_episode != 0:
starting_episode = resume_episode + 1
for episode in range(starting_episode, num_episodes):
state, _ = env.reset()
done = False
if render_environment:
env.render()
steps = 0
rewards = []
to_avg_rewards = []
values = []
log_probs = []
episode_score = 0
while (not done):
steps += 1
if render_environment:
env.render()
action, log_prob = agent.select_action(state, env)
next_state, reward, terminated, truncated, _ = env.step(action)
done = terminated or truncated
episode_score += reward
value = agent.estimate_value(state)
print('\nValue: {}'.format(value.item()))
next_value = agent.estimate_value(next_state)
target = (reward + agent.gamma * next_value)
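            # One-step TD error used as the advantage estimate:
            # A(s, a) ~ r + gamma * V(s') - V(s)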
advantage = target - value
print('Eps: {}'.format(episode))
print('Step: {}'.format(steps))
print('In-step Adv: {}\n'.format(advantage.item()))
rewards.append(reward)
log_probs.append(log_prob)
values.append(value)
to_avg_rewards.append(reward)
            # Monte Carlo-like optimization
if steps % episode_steps == 0 or done:
agent.optimize(rewards, values, log_probs, next_value)
rewards = []
values = []
log_probs = []
state = next_state
if actor_lr_decay_active:
agent.actor.learning_rate_decay(episode)
if critic_lr_decay_active:
agent.critic.learning_rate_decay(episode)
avg_reward = np.mean(to_avg_rewards)
agent.rewards_history.append(avg_reward)
agent.episode_score_history.append(episode_score)
if save_model and episode % save_model_freq == 0:
agent.save_data(episode, savedir)
print('\nSteps: {}'.format(steps))
print('Episode: {}'.format(episode))
        print('Episode length: {}'.format(steps))
        print('average reward: {}'.format(avg_reward))
        print('episode score: {}'.format(episode_score))
print('Actor learning rate: {}'.format(
agent.actor.optimizer.param_groups[0]['lr']))
print('Critic learning rate: {}\n'.format(
agent.critic.optimizer.param_groups[0]['lr']))
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument('--resume')
parser.add_argument('--save_instance', required=True)
train(parser.parse_args())
|
[
"joaobose@gmail.com"
] |
joaobose@gmail.com
|
77285dffac7681028249c88d391e260358668299
|
39415394532266a48473a4e716bffcdaea5b3335
|
/graphs/rtgraph.py
|
efc79e34752e7d22f0bf6925672338283a9a832b
|
[] |
no_license
|
gcdeshpande/Modeling-and-Mitigation-of-XPath-Injection-Attacks-using-Neural-Networks
|
9fe2209cf10b7b8ad0233de207ad448af636e286
|
81b0517c97ade44cf9d18cb5c64cb9bfa3571b1a
|
refs/heads/master
| 2021-01-05T13:33:50.794401 | 2020-02-17T08:43:19 | 2020-02-17T08:43:19 | 241,036,602 | 1 | 0 | null | 2020-02-17T06:47:50 | 2020-02-17T06:36:03 | null |
UTF-8
|
Python
| false | false | 1,198 |
py
|
#!/usr/bin/env python
# a grouped bar plot comparing response times (error bars commented out)
import numpy as np
import matplotlib.pyplot as plt
N = 10
modData = (10.23,20.27,30.98,40.74,51.31,62.05,70.54,81.47,92.27,101.75)
#menStd = (2, 3, 4, 1, 2)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars
fig = plt.figure()
ax = fig.add_subplot(111)
rects1 = ax.bar(ind, modData, width, color='#6495ed')
nmodData = (15.31,30.20,45.74,61.32,75.61,90.78,106.34,120.45,136.17,150.87)
#womenStd = (3, 5, 2, 3, 3)
rects2 = ax.bar(ind+width, nmodData, width, color='#d3d3d3')
# add some text for labels, title and axes ticks
ax.set_ylabel('Response Time')
ax.set_xlabel('Number of Samples')
ax.set_title('Comparison of Response time')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('10', '20', '30', '40', '50','60', '70', '80', '90', '100') )
ax.set_ylim(0,190)
ax.legend( (rects1[0], rects2[0]), ('Modular Neural Networks', 'Single Neural Network') )
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x()+rect.get_width()/2., 1.02*height, '%d'%int(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
plt.show()
|
[
"noreply@github.com"
] |
gcdeshpande.noreply@github.com
|
2afb87b876777eba3345babac92efef1ee1fa736
|
0d0afd1dce972b4748ce8faccd992c019794ad9e
|
/integra/exata_personalizacao/wizard/projeto_relatorio.py
|
4618ff3fdae885ab494f0eb9f31e24029bb9adc2
|
[] |
no_license
|
danimaribeiro/odoo-erp
|
e2ca2cfe3629fbedf413e85f7c3c0453fd16941e
|
d12577bf7f5266b571cbedeb930720d653320e96
|
refs/heads/master
| 2020-01-23T21:32:16.149716 | 2016-11-05T15:35:40 | 2016-11-05T15:35:40 | 67,892,809 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,555 |
py
|
# -*- encoding: utf-8 -*-
import os
from osv import orm, fields, osv
import base64
from finan.wizard.finan_relatorio import Report
from pybrasil.data import parse_datetime, mes_passado, primeiro_dia_mes, ultimo_dia_mes, hoje, agora, formata_data
from finan.wizard.relatorio import *
from datetime import date
import csv
from pybrasil.base import DicionarioBrasil
from pybrasil.valor.decimal import Decimal as D
from pybrasil.valor import formata_valor
from pybrasil.data.grafico_gantt import tempo_tarefa
from dateutil.relativedelta import relativedelta
DIR_ATUAL = os.path.abspath(os.path.dirname(__file__))
JASPER_BASE_DIR = os.path.join(DIR_ATUAL, '../../reports/base/')
class projeto_relatorio(osv.osv_memory):
_name = 'projeto.relatorio'
_inherit = 'projeto.relatorio'
def gera_relatorio_imovel_projeto(self, cr, uid, ids, context={}):
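        # Render the 'Imoveis por Projeto' Jasper report for the wizard's
        # selected project and attach the generated file (base64-encoded)
        # back onto the wizard record.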
if not ids:
return False
id = ids[0]
rel_obj = self.browse(cr, uid, id, context=context)
rel = Report('Imoveis por Projeto', cr, uid)
rel.caminho_arquivo_jasper = os.path.join(JASPER_BASE_DIR, 'exata_relatorio_venda_projeto.jrxml')
rel.outputFormat = rel_obj.formato
rel.parametros['PROJETO_ID'] = rel_obj.project_id.id
pdf, formato = rel.execute()
dados = {
'nome': u'Imoveis_' + rel_obj.project_id.name + '.' + rel_obj.formato,
'arquivo': base64.encodestring(pdf)
}
rel_obj.write(dados)
return True
projeto_relatorio()
|
[
"danimaribeiro@gmail.com"
] |
danimaribeiro@gmail.com
|
dc0a52f88186ea5bcc3affdb647775145018ea75
|
bceb104758d1ab3054ee2a21139cf9746a089622
|
/predict.py
|
010eb1543fcf2b7c129c0ce21331232dd950f85c
|
[] |
no_license
|
fuzzball5000/bloop
|
73dfecca18fbe62f92cf5efb7828a0dfc0e74ac7
|
5abb3cd01953c755c0ecef4988b579a4e70adb25
|
refs/heads/master
| 2021-01-20T18:01:15.280926 | 2016-11-28T09:27:23 | 2016-11-28T09:27:23 | 60,776,968 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 732 |
py
|
import os
import MySQLdb
import time
import sys
from sklearn import linear_model
import csv
db_pass = os.environ['sql_pass']
epoch = int(time.time())
oneDay = epoch - 86400
try:
db = MySQLdb.connect('localhost','bloop_write',db_pass,'bloop' )
cursor = db.cursor()
except MySQLdb.Error as e:
print("DB connect error: {}".format(e))
sys.exit(1)
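# Pull one reading per day/hour bucket (most recent 100) and dump to CSV,
# presumably as input for the sklearn linear model imported above.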
cursor.execute ("select MIN(datetime),e_temp,e_hydro,m_temp,m_hydro,m_code from edwin GROUP BY DATE(datetime),HOUR(datetime) order by epoc DESC LIMIT 100")
data = cursor.fetchall ()
c = csv.writer(open("/home/centos/bloop/temp.csv","wb"))
c.writerow(['date','e_temp','e_hydro','m_temp','m_hydro','m_code'])
for i in data:
c.writerow(i)
cursor.close ()
db.close()
|
[
"gazbo83@gmail.com"
] |
gazbo83@gmail.com
|
8adbcc1ad500e6bf245502b352e00b7817bc4858
|
0130c1bcb43f9218a10ce952b431a4fe817de4a4
|
/HW3/code/mle_sim.py
|
c192ff220c4773174900b14053b7d6656c335268
|
[
"MIT"
] |
permissive
|
okuchap/SML
|
8e97937f750bd372a5d9e1faaa868030193b0b5a
|
4a301293524e21d31c8cc65cdd21cf72f89ecdc5
|
refs/heads/master
| 2020-03-19T16:08:36.031475 | 2018-06-11T04:36:46 | 2018-06-11T04:36:46 | 136,702,482 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 478 |
py
|
import numpy as np
import scipy as sp
from numpy.random import binomial
import matplotlib.pyplot as plt
import seaborn as sns
def mle_sim(N_MLE=100, N_BER=100, theta_true=0.3):
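    # The MLE of a Bernoulli(theta) success probability is the sample mean,
    # so one Binomial(N_BER, theta) draw divided by N_BER is a single
    # simulated MLE; we draw N_MLE of them in one vectorized call.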
mle_list = binomial(n=N_BER, p=theta_true, size = N_MLE)/N_BER
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
    ax.set_title(r'$N_{{MLE}} = {0}$, $N_{{BER}} = {1}$'.format(N_MLE, N_BER))
ax.set_ylabel('freq')
sns.distplot(mle_list, kde=False, rug=False, bins=25, axlabel="MLE")
None
|
[
"kyohei.okumura@gmail.com"
] |
kyohei.okumura@gmail.com
|
7bb315717af2f9fcc108ddf906448bd50a7cee06
|
fe507a348e205c5e6af9aaf2c2894e5bb5450d8e
|
/server/node_modules/serialport/build/config.gypi
|
213fa38fb88542700d00cae19c55deaa08fcb2aa
|
[
"MIT"
] |
permissive
|
stedyyulius/Tracker
|
7468c07b7beb55bad97066324e1573eae75a4792
|
1f505d28f8e950604c302756785e24e55f0db5e9
|
refs/heads/master
| 2021-07-08T14:54:22.366541 | 2017-10-06T11:04:40 | 2017-10-06T11:04:40 | 105,789,525 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,340 |
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt59l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt59l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "59",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"want_separate_host_toolset_mkpeephole": 0,
"xcode_version": "7.0",
"nodedir": "/Users/stedy/.node-gyp/8.5.0",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/stedy/Documents/trials/Tracker/server/node_modules/serialport/build/Release/serialport.node",
"module_name": "serialport",
"module_path": "/Users/stedy/Documents/trials/Tracker/server/node_modules/serialport/build/Release",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"browser": "",
"also": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Users/stedy/.nvm/versions/node/v8.5.0/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"metrics_registry": "https://registry.npmjs.org/",
"timing": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/Users/stedy/.nvm/versions/node/v8.5.0/etc/npmrc",
"prefer_online": "",
"logs_max": "10",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"heading": "npm",
"searchlimit": "20",
"proprietary_attribs": "true",
"offline": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/stedy/.npmrc",
"init_module": "/Users/stedy/.npm-init.js",
"user": "501",
"node_version": "8.5.0",
"ignore_prepublish": "",
"editor": "vi",
"auth_type": "legacy",
"save": "true",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"save_prod": "",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"scripts_prepend_node_path": "warn-only",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"prefer_offline": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/stedy/.npm",
"color": "true",
"package_lock": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/5.3.0 node/v8.5.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"send_metrics": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"init_author_name": "",
"git": "git",
"scope": "",
"unsafe_perm": "true",
"tmp": "/var/folders/94/_6s4bkr15rj3wxnzkpy608hc0000gn/T",
"onload_script": "",
"prefix": "/Users/stedy/.nvm/versions/node/v8.5.0",
"link": ""
}
}
|
[
"stedyyulius@gmail.com"
] |
stedyyulius@gmail.com
|
cda69e8ca6bb2ecb06be574f2a1f9320516d5976
|
ae74563d6ee2a0c8c8eef7b066756ed1e8abc236
|
/MyApp/Controls/HandlerFunc.py
|
8b403bb732b444132b1abc5ae5286b42ffdfafee
|
[] |
no_license
|
sunnyLbh/circus
|
b9aa9291b6c7d069284e659fb06abbcf4b79d523
|
a689c1a14c147bdbfdb55c57483d745704ef922c
|
refs/heads/master
| 2020-09-19T19:50:56.472900 | 2016-09-09T02:29:03 | 2016-09-09T02:29:03 | 67,757,911 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 973 |
py
|
# -*- coding: utf-8 -*-
"""
__author__ = 'Sunny'
__mtime__ = '9/6/2016'
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ 神兽保佑 ┣┓
┃ 永无BUG! ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import os
from MyApp.app import db
def StopCircus(path):
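    # Kill every circus PID recorded for this project path and mark the
    # corresponding ports as stopped in the shared state.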
pidList,portList = db.GetAllPID(path)
for pid in pidList:
os.system("kill -9 {}".format(pid))
for port in portList:
db.projectStatic[port]['state'] = '0'
def StartCircus(process_pid,port,path):
db.SetProjectStartOne(path)
# os.system("circusd {} &".format(path))
# db.projectStatic[port]['date'] = datetime.now()
# db.projectStatic[port]['state'] = '1'
|
[
"13631310872@163.com"
] |
13631310872@163.com
|
e4a2ec77f4dd5e2af78977cca3a41199eaaa8af2
|
6fab83cd3036861badec08a1406a84bb93e0f016
|
/robo1proj3/pr2_robot/scripts/project_template.py
|
904b5b9870f6d7aa5df7ff181f4515e8fd4b78f3
|
[] |
no_license
|
sdwlig/udacityrobo1sdw
|
420ba5ca5b6244ec46076f19c9423e9b1178f99c
|
a34529ae57368b17ed719308dfee105c919cb284
|
refs/heads/master
| 2020-05-18T10:40:22.193388 | 2019-05-01T13:29:36 | 2019-05-01T13:29:36 | 184,358,364 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 15,012 |
py
|
#!/usr/bin/env python
# Import modules
import numpy as np
import sklearn
from sklearn.preprocessing import LabelEncoder
import pickle
from sensor_stick.srv import GetNormals
from sensor_stick.features import compute_color_histograms
from sensor_stick.features import compute_normal_histograms
from visualization_msgs.msg import Marker
from sensor_stick.marker_tools import *
from sensor_stick.msg import DetectedObjectsArray
from sensor_stick.msg import DetectedObject
from sensor_stick.pcl_helper import *
import rospy
import tf
from geometry_msgs.msg import Pose
from std_msgs.msg import Float64
from std_msgs.msg import Int32
from std_msgs.msg import String
from pr2_robot.srv import *
from rospy_message_converter import message_converter
import yaml
boxes = {}
arms = {}
normals = []
crop = 50
# Helper function to get surface normals
def get_normals(cloud):
get_normals_prox = rospy.ServiceProxy('/feature_extractor/get_normals', GetNormals)
cloud = get_normals_prox(cloud).cluster
# cloud = cloud.cropImage(0, 0, crop, 0, crop);
return cloud
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
yaml_dict = {}
yaml_dict["test_scene_num"] = test_scene_num.data
yaml_dict["arm_name"] = arm_name.data
yaml_dict["object_name"] = '' + object_name.data
# yaml_dict["pick_pose"] = pick_pose
# yaml_dict["place_pose"] = place_pose
yaml_dict["pick_pose"] = message_converter.convert_ros_message_to_dictionary(pick_pose)
yaml_dict["place_pose"] = message_converter.convert_ros_message_to_dictionary(place_pose)
return yaml_dict
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
data_dict = {"yaml dict": dict_list}
print(data_dict)
with open(yaml_filename, 'w') as outfile:
yaml.dump(data_dict, outfile, default_flow_style=False)
# Callback function for your Point Cloud Subscriber
def pcl_callback(pcl_msg):
# Exercise-2 TODOs:
# TODO: Convert ROS msg to PCL data
# TODO: Statistical Outlier Filtering
# TODO: Voxel Grid Downsampling
# TODO: PassThrough Filter
# TODO: RANSAC Plane Segmentation
# TODO: Extract inliers and outliers
# TODO: Euclidean Clustering
# TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately
# TODO: Convert PCL data to ROS messages
# TODO: Publish ROS messages
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
# Grab the points for the cluster
# Compute the associated feature vector
# Make the prediction
# Publish a label into RViz
# Add the detected object to the list of detected objects.
# Publish the list of detected objects
# TODO: Convert ROS msg to PCL data
cloud = pointXYZRGB = ros_to_pcl(pcl_msg)
# cloud = cloud.cropImage(0, 0, crop, 0, crop);
# TODO: Voxel Grid Downsampling
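    # Downsample with a voxel grid (7 mm leaves) to cut the point count
    # before the heavier filtering and clustering steps.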
vox = cloud.make_voxel_grid_filter()
LEAF_SIZE = 0.007
vox.set_leaf_size(LEAF_SIZE, LEAF_SIZE, LEAF_SIZE)
cloud_filtered = vox.filter()
# TODO: PassThrough Filter
passthrough = cloud_filtered.make_passthrough_filter()
filter_axis = 'z'
passthrough.set_filter_field_name(filter_axis)
axis_min = 0.6
axis_max = 1.1
passthrough.set_filter_limits(axis_min, axis_max)
cloud_filtered = passthrough.filter()
# TODO: RANSAC Plane Segmentation
seg = cloud_filtered.make_segmenter()
seg.set_model_type(pcl.SACMODEL_PLANE)
seg.set_method_type(pcl.SAC_RANSAC)
max_distance = 0.001
seg.set_distance_threshold(max_distance)
inliers, coefficients = seg.segment()
# TODO: Extract inliers and outliers
cloud_table = cloud_filtered.extract(inliers, negative=False)
cloud_objects = cloud_filtered.extract(inliers, negative=True)
outlier_filter = cloud_objects.make_statistical_outlier_filter()
outlier_filter.set_mean_k(20)
x = 1.0
outlier_filter.set_std_dev_mul_thresh(x)
cloud_objects = outlier_filter.filter()
# TODO: Euclidean Clustering
white_cloud = xyz = XYZRGB_to_XYZ(cloud_objects)
tree = white_cloud.make_kdtree()
# Create a cluster extraction object
ec = white_cloud.make_EuclideanClusterExtraction()
# Set tolerances for distance threshold
# as well as minimum and maximum cluster size (in points)
# NOTE: These are poor choices of clustering parameters
# Your task is to experiment and find values that work for segmenting objects.
ec.set_ClusterTolerance(0.01)
ec.set_MinClusterSize(30)
ec.set_MaxClusterSize(3000)
# Search the k-d tree for clusters
ec.set_SearchMethod(tree)
# Extract indices for each of the discovered clusters
cluster_indices = ec.Extract()
# TODO: Create Cluster-Mask Point Cloud to visualize each cluster separately
#Assign a color corresponding to each segmented object in scene
cluster_color = get_color_list(len(cluster_indices))
color_cluster_point_list = []
for j, indices in enumerate(cluster_indices):
for i, indice in enumerate(indices):
color_cluster_point_list.append([white_cloud[indice][0],
white_cloud[indice][1],
white_cloud[indice][2],
rgb_to_float(cluster_color[j])])
#Create new cloud containing all clusters, each with unique color
cluster_cloud = pcl.PointCloud_PointXYZRGB()
cluster_cloud.from_list(color_cluster_point_list)
ros_cluster_cloud = pcl_to_ros(cluster_cloud)
# TODO: Convert PCL data to ROS messages
pcl_table = pcl_to_ros(cloud_table)
pcl_objects = pcl_to_ros(cloud_objects)
# TODO: Publish ROS messages
# pcl_objects_pub.publish(pcl_objects)
# pcl_table_pub.publish(pcl_table)
# pcl_cluster_pub.publish(ros_cluster_cloud)
# Exercise-3 TODOs:
# Classify the clusters! (loop through each detected cluster one at a time)
# Classify the clusters!
detected_objects_labels = []
detected_objects = []
do_idx = {}
for index, pts_list in enumerate(cluster_indices):
# Grab the points for the cluster from the extracted outliers (cloud_objects)
# Grab the points for the cluster
pcl_cluster = cloud_objects.extract(pts_list)
sample_cloud = ros_cluster = pcl_to_ros(pcl_cluster)
# Extract histogram features
# print('color hist')
chists = compute_color_histograms(sample_cloud, using_hsv=True)
normals = get_normals(sample_cloud)
nhists = []
if normals:
# print('normal hist')
nhists = compute_normal_histograms(normals)
# print('concat')
# print(len(nhists))
if len(nhists) > 0:
feature = np.concatenate((chists, nhists))
# detected_objects.append([feature, ''])
# Compute the associated feature vector
# Make the prediction
# Publish a label into RViz
# Add the detected object to the list of detected objects.
# Make the prediction, retrieve the label for the result
# and add it to detected_objects_labels list
prediction = clf.predict(scaler.transform(feature.reshape(1,-1)))
label = encoder.inverse_transform(prediction)[0]
detected_objects_labels.append(label)
# print( detected_objects_labels)
# Publish a label into RViz
label_pos = list(white_cloud[pts_list[0]])
label_pos[2] += .4
object_markers_pub.publish(make_label(label,label_pos, index))
# Add the detected object to the list of detected objects.
do = DetectedObject()
do.label = label
do.cloud = ros_cluster
detected_objects.append(do)
do_idx[label] = do
rospy.loginfo('Detected {} objects: {}'.format(len(detected_objects_labels),
detected_objects_labels))
# Publish the list of detected objects
# This is the output you'll need to complete the upcoming project!
detected_objects_pub.publish(detected_objects)
# detected_objects_list = []
# Suggested location for where to invoke your pr2_mover() function within pcl_callback()
# Could add some logic to determine whether or not your object detections are robust
# before calling pr2_mover()
try:
# print('pr2 call:', len(detected_objects_labels))
pr2_mover(detected_objects, detected_objects_labels)
except rospy.ROSInterruptException:
pass
# function to load parameters and request PickPlace service
def pr2_mover(objects, dlabels):
print(dlabels)
# TODO: Initialize variables
# TODO: Get/Read parameters
# TODO: Parse parameters into individual variable
# get parameters
dropboxinfo = rospy.get_param('/dropbox')
# print(dropboxinfo)
for drop in dropboxinfo:
print('dropbox:', drop)
boxes[drop["group"]] = drop["position"]
# green -> right
arms[drop["group"]] = drop["name"]
print('arms:', arms)
print('boxes:', boxes)
# red = boxes['red']
# green = boxes['green']
object_list_param = rospy.get_param('/object_list')
oidx = {}
# TODO: Rotate PR2 in place to capture side tables for the collision map
print('list:', object_list_param)
for i in range(0, len(object_list_param)):
object_name = object_list_param[i]['name']
object_group = object_list_param[i]['group']
print('remembering:', object_name)
oidx[object_name] = {"group": object_group}
# TODO: Loop through the pick list
    # TODO: Get the PointCloud for a given object and obtain its centroid
dict_list = []
# for i in range(0, len(object_list_param)):
# Populate various ROS messages
labels = []
centroids = [] # to be list of tuples (x, y, z)
do_centroids = {}
for o in range(0,len(objects)):
ob = objects[o]
label = dlabels[o]
print(o, label)
labels.append(label)
points_arr = ros_to_pcl(ob.cloud).to_array()
# print('points:', points_arr)
# print('np.mean:', np.mean(points_arr, axis=0)[:3])
centroid = np.mean(points_arr, axis=0)[:3]
# np.asscalar(
# print(np.mean(points_arr, axis=0)[:3])
print(centroid)
centroids.append(centroid)
do_centroids[label] = centroid
# TODO: Create 'place_pose' for the object
if label in oidx:
test_scene_num = Int32()
test_scene_num.data = 3
object_name = String()
# print('ob:', ob)
print('ob.label:', ob.label)
object_name.data = ob.label #_list_param[i]['name']
# TODO: Assign the arm to be used for pick_place
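            # The pick list assigns each object a group (red/green); the
            # /dropbox ROS parameter maps that group to an arm name and a
            # drop position, cached earlier in arms{} and boxes{}.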
group = oidx[label]["group"]
arm_name = String()
arm_name.data = arms[group]
pick_pose = Pose()
pick_pose.position.x = np.asscalar(centroid[0])
pick_pose.position.y = np.asscalar(centroid[1] + 0.01)
pick_pose.position.z = np.asscalar(0.0 + centroid[2])
pick_pose.orientation.x = np.asscalar(0.0 + centroid[0])
pick_pose.orientation.y = np.asscalar(0.0 + centroid[1] + 0.01)
pick_pose.orientation.z = np.asscalar(0.0 + centroid[2])
pick_pose.orientation.w = .1
p=boxes[group]
place_pose = Pose()
place_pose.position.x = p[0]
place_pose.position.y = p[1]
place_pose.position.z = p[2]
print('pick_pose:', pick_pose)
print('place_pose:', place_pose)
yaml_dict = make_yaml_dict(test_scene_num, arm_name, object_name,
pick_pose, place_pose)
# TODO: Create a list of dictionaries (made with make_yaml_dict())
# for later output to yaml format
dict_list.append(yaml_dict)
move_items = True
if move_items:
# Wait for 'pick_place_routine' service to come up
rospy.wait_for_service('pick_place_routine')
try:
pick_place_routine = rospy.ServiceProxy('pick_place_routine', PickPlace)
# TODO: Insert your message variables to be sent as a service request
resp = pick_place_routine(test_scene_num, object_name, arm_name,
pick_pose, place_pose)
print ("Response: ",resp.success)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
# TODO: Output your request parameters into output yaml file
yamls = dict_list
send_to_yaml('out.yaml', yamls)
if __name__ == '__main__':
# TODO: ROS node initialization
# TODO: Create Subscribers
# TODO: Create Publishers
# TODO: Load Model From disk
# TODO: Spin while node is not shutdown
# Initialize color_list
get_color_list.color_list = []
rospy.init_node('project_template')
rate = rospy.Rate(5000)
start_time = 0
while not start_time:
start_time = rospy.Time.now().to_sec()
# Create Publishers
# TODO: here you need to create two publishers
# Call them object_markers_pub and detected_objects_pub
# Have them publish to "/object_markers" and "/detected_objects" with
# Message Types "Marker" and "DetectedObjectsArray" , respectively
# print('registering publishers')
object_markers_pub = rospy.Publisher('/object_markers', Marker, queue_size=10)
detected_objects_pub = rospy.Publisher('/detected_objects', DetectedObjectsArray,
queue_size=10)
pcl_objects_pub = rospy.Publisher("/pcl_objects", PointCloud2, queue_size=1)
pcl_table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
pcl_cluster_pub = rospy.Publisher("/pcl_cluster", PointCloud2, queue_size=1)
# Load Model From disk
model = pickle.load(open('model.sav', 'rb'))
clf = model['classifier']
encoder = LabelEncoder()
encoder.classes_ = model['classes']
scaler = model['scaler']
# TODO: Create Subscribers
# pcl_sub = rospy.Subscriber("/sensor_stick/point_cloud", pc2.PointCloud2,
pcl_sub = rospy.Subscriber("/pr2/world/points", pc2.PointCloud2,
pcl_callback, queue_size=1)
# TODO: Create Publishers
# TODO: Load Model From disk
# Initialize color_list
get_color_list.color_list = []
# TODO: Spin while node is not shutdown
while not rospy.is_shutdown():
elapsed = rospy.Time.now().to_sec() - start_time
rate.sleep()
|
[
"sdw@lig.net"
] |
sdw@lig.net
|
07e7569c8db903217b70a5cd79e4b4672e203cf9
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03636/s795119653.py
|
d8a4b73447a786ef0435aa2faeea73c314446486
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 114 |
py
|
S = str(input())
l = int(len(S))
#print(l)
#print(S[0])
s1 = S[0]
s2 = S[-1]
L = int(l-2)
print(s1 + str(L) + s2)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5126300142b53af725535d97c532dcd641cf43bc
|
6051370a68d5e1afee9c82f306bc1fb078a8bf62
|
/0x03-python-data_structures/1-element_at.py
|
929ff039e33d785afcd9876171a2a821305f945b
|
[] |
no_license
|
leobyeon/holbertonschool-higher_level_programming
|
be38c699ac7d53770d0524b620f8696bf4eaa7bc
|
8d69af503d522bb709b17096a514eaab3f0bba35
|
refs/heads/master
| 2020-03-28T11:15:11.913060 | 2019-02-22T01:21:51 | 2019-02-22T01:21:51 | 148,193,071 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 141 |
py
|
#!/usr/bin/python3
def element_at(my_list, idx):
if (0 <= idx < len(my_list)):
return my_list[idx]
else:
return None
|
[
"386@holbertonschool.com"
] |
386@holbertonschool.com
|
7bc6251a10857ddde0e8bfd5af1c863c52d2d276
|
b95d271f9ecb494185d9e58d6b7009c010c934b2
|
/flaskr/__init__.py
|
3ba68cafceca3503df991da63acc089802cb018b
|
[] |
no_license
|
Nihalian/flask-tutorial
|
67db0fbd16b5b4ff3c37b5ba35812c0ed2b12cfc
|
0180247a9af1506fb7d135639eecf0cf15205fed
|
refs/heads/master
| 2023-04-29T09:10:18.732085 | 2021-05-14T09:12:16 | 2021-05-14T09:12:16 | 366,514,452 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 851 |
py
|
import os
from flask import Flask
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__,instance_relative_config=True)
app.config.from_mapping(
SECRET_KEY='dev',
DATABASE=os.path.join(app.instance_path,'flaskr.sqlite'),
)
if test_config is None:
# Load the instance config, if it exists, when not testing
app.config.from_pyfile('config.py',silent=True)
else:
# Load the test config if passed in
app.config.from_mapping(test_config)
try:
os.makedirs(app.instance_path)
except OSError:
pass
# a simple page that says hello
@app.route('/hello')
def hello():
return 'Hello, World!'
from . import db
db.init_app(app)
from . import auth
app.register_blueprint(auth.bp)
return app
|
[
"Abdulmanaf@Abduls-iMac.local"
] |
Abdulmanaf@Abduls-iMac.local
|