text stringlengths 8 6.05M |
|---|
# Generated by Django 2.0.7 on 2019-01-10 13:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``total_price`` to outsource items and widen ``price`` on outsource.

    NOTE(review): the auto-generated fields carried ``max_length=10``, which is
    a ``CharField`` option and is meaningless on ``DecimalField`` (precision is
    fully described by ``max_digits``/``decimal_places``); it was dropped here.
    """

    dependencies = [
        ('basedata', '0048_auto_20190110_1318'),
    ]

    operations = [
        migrations.AddField(
            model_name='outsource_items',
            name='total_price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, null=True, verbose_name='金额'),
        ),
        migrations.AlterField(
            model_name='outsource',
            name='price',
            field=models.DecimalField(blank=True, decimal_places=2, default=0, max_digits=10, null=True, verbose_name='外包施工单位报价'),
        ),
    ]
|
#!/bin/python3
import math
import os
import random
import re
import sys
from collections import defaultdict
# couple of brute-force approaches
'''
def riddle(arr):
ln = len(arr)
res = []
#print(arr)
for l in range(1,ln+1):
tmp = []
for i in range(ln):
if i+l > ln:
continue
#print(i,l,arr[i:i+l],min(arr[i:i+l]))
tmp.append(min(arr[i:i+l]))
res.append(max(tmp))
#print("max => ",res[-1])
return res
def riddle(arr):
ln = len(arr)
res = []
stk2 = []
print(arr)
stk1 = arr.copy()
res.append(max(arr))
itr = 0
while len(stk1) > 1:
maxv = 0
print(len(stk1))
while len(stk1) > 1:
stk2.append(min(stk1[:2]))
stk1 = stk1[1:]
if stk2[-1] > maxv:
maxv = stk2[-1]
itr += 1
if itr%1000 == 0:
print(str(itr))
stk1 = stk2
stk2 = []
#print(stk1)
#print(stk2)
res.append(maxv)
print(res)
return res
'''
'''
The logic:
- for each number, find the largest window in which the number is minimum
- create a map for all the numbers and their maximum window size in which they are minimum
- example: for input array { 2, 6, 1, 12 } the map would look like as follows:
{ 1: 4, 6: 1, 2: 2, 12: 1 }
here the maximum window size where 1 is minimum is 4 i.e. 1 is minimum among window {2, 6, 1, 12}
2 is minimum in the window {2, 6} hence window size = 2
6, 12 are minimum in the windows {6} and {12} respectively hence window size 1
- Algorithm to create the above mapping:
- - append 0 at the end of array to make the last index of the array as minimum (0 will not be part of original input array as per constraint)
- - create an empty stack
- - iterate the array from the beginning (left to right) for index, value => i, j
- - if stack is empty, push the tuple (value, index) into the stack
- - if the value is less than the value on top of the stack:
- - pop the element from stack, the tuple (val, index)
- - update the max window size for the current value in array as follows
- if previous max window size of that element is less than the i - li + 1 where is current array index and li is the
index of the item popped from stack.
example: if array is: {2, 6, 1, 12} current stack is [2, 6] current index i is 2, arr[2] = 1
so map[1] = max(map[1], 2 - 1 + 1) => here i = 2 current idx, li = 1 (index of popped value)
finally map[val] where val = popped value which is 6 and its index is 1 in this case so
map[6] = max(map[6], 2 - 1) => here i = 2 and li = 1
- finally push the value at ith index i.e. arr[i] with the index of the popped value i.e.
in the above example when the stack is [(2, 0), (6, 1)] and next item '1' comes, as mentioned above the 6 will be popped out and
after updating map for 6 and 1, again stack top (which is '2') shall be compared with 1 which is greater so 2 will be popped out and
map for 2 and 1 shall be updated accordingly and now 1 shall be pushed to stack with index of last popped item which is '2' and whose index
is 0, so the stack shall look like this: [(1,0)].
when next item '12' comes, the stack will look like [(1,0), (12,3)]
With this approach, the window in the left hand side of the number is also taken care
- Once the map is ready, invert the map i.e. key, value to value,key
- e.g. {1: 4, 6: 1, 2: 2, 12: 1} to { 4: 1, 1: 6, 2: 2, 1: 12}, the thing to note here is if there are multiple values with same window
size then take the one which has bigger number associated
- the reason of doing it is to have indexes based upon window sizes
- now start with max window size which is len(arr) and find the entry in above converted map e.g. for window size 4 the number is 1.
- append it to result array [1], now check for window size 3 and corresponding entry is not present in the map, in such cases keep the previously used value i.e. 1
so the result array will be [1,1]
Now check for window size 2 and the corresponding entry is present which is '2', append in the array: [1,1,2]
check for window size 1 and the corresponding entry is present and value against it is 12 so append in result array: [1,1,2,12]
- Now just reverse the array which is the answer i.e. [12,2,1,1]
Note: Initially I thought of using list.insert(0, ...) but one of the test cases timed out; on investigation I found that insert is far more expensive than
a plain append. I was trying to avoid reversing the array, but append-then-reverse is much faster than repeated inserts.
'''
def getMapUpdated(arr):
    """Map each value of ``arr`` to the size of the largest window in which it
    is the minimum, via a monotonic stack in O(n).

    Side effect (kept for backward compatibility with existing callers): a
    sentinel ``0`` is appended to ``arr`` in place, so ``len(arr)`` grows by
    one.  Input values are assumed positive (problem constraint), so the
    sentinel flushes the whole stack at the end.
    """
    # Stack of (value, leftmost index at which the value is still the minimum).
    stack = []
    arr.append(0)  # sentinel: forces every pending entry to be popped
    window = defaultdict(int)  # value -> widest window where it is the minimum
    for i, val in enumerate(arr):
        left = i
        while stack and stack[-1][0] >= val:
            popped, popped_left = stack.pop()
            # The current value extends leftwards over the popped entry's span.
            window[val] = max(window[val], i - popped_left + 1)
            # The popped value was minimal from popped_left through i-1.
            window[popped] = max(window[popped], i - popped_left)
            left = popped_left
        stack.append((val, left))
    # Drop the sentinel entry; pop() with default also survives empty input,
    # where ``del window[0]`` used to raise KeyError.
    window.pop(0, None)
    return window
def riddle(arr):
    """For every window size k = 1..len(arr), return the maximum over all
    k-length windows of the window minimum.

    O(n) monotonic-stack algorithm:
      1. for each element find the widest window in which it is the minimum;
      2. best[k] = largest element whose widest window is exactly k;
      3. fill the gaps right-to-left (an answer valid for window k is also
         valid for any smaller window), then reverse.

    Unlike the previous version, the input list is NOT mutated; the sentinel
    is appended to a copy.
    """
    n = len(arr)
    # Step 1: widest window per value (0 sentinel; inputs are positive).
    widest = defaultdict(int)
    stack = []  # (value, leftmost index where it is still the minimum)
    for i, val in enumerate(list(arr) + [0]):
        left = i
        while stack and stack[-1][0] >= val:
            popped, popped_left = stack.pop()
            widest[val] = max(widest[val], i - popped_left + 1)
            widest[popped] = max(widest[popped], i - popped_left)
            left = popped_left
        stack.append((val, left))
    widest.pop(0, None)  # discard the sentinel entry

    # Step 2: best value for each exact window size.
    best = defaultdict(int)
    for value, size in widest.items():
        best[size] = max(best[size], value)

    # Step 3: sizes n..1 with a running maximum, then reverse (append+reverse
    # is much faster than insert(0) — see the note above).
    res = []
    prev = 0
    for size in range(n, 0, -1):
        prev = max(best[size], prev)
        res.append(prev)
    res.reverse()
    return res
if __name__ == '__main__':
    # HackerRank-style driver: first line is the element count (the split on
    # the second line already determines the length), second line the array.
    _ = int(input())
    values = list(map(int, input().rstrip().split()))
    answer = riddle(values)
    with open("out.txt", 'w') as out_file:
        out_file.write(' '.join(map(str, answer)))
        out_file.write('\n')
# Generated by Django 2.1.4 on 2019-01-14 20:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds a many-to-many relation from
    # Besoin to Ressource in the 'clientele' app.

    dependencies = [
        ('clientele', '0015_auto_20190114_2037'),
    ]

    operations = [
        migrations.AddField(
            model_name='besoin',
            name='ressource',
            field=models.ManyToManyField(to='clientele.Ressource'),
        ),
    ]
|
import time
from datetime import datetime
from random import random
import game_sound
from Score import Score
from features.feature import Feature
from field import Field
from highscorelist import Highscorelist, Highscoreentry
from painter import RGB_Field_Painter, Led_Matrix_Painter
BLACK = [0, 0, 0]
class Snake_Main(Feature):
    """Snake game feature driven by the generic Feature event/tick loop.

    Board encoding in ``field_for_snake`` (row-major, indexed ``[y][x]``):
        0   empty cell
       -1   food
        1   snake head
       >1   snake body segment; the value is the segment's age in ticks
    Direction encoding: 0 = up, 1 = left, 2 = down, 3 = right.
    """

    def __init__(self, field_leds: Field, field_matrix: Field, rgb_field_painter: RGB_Field_Painter,
                 led_matrix_painter: Led_Matrix_Painter, highscorelist: Highscorelist = Highscorelist("Not_used")):
        # NOTE(review): the default Highscorelist("Not_used") is constructed
        # once at import time and shared by every instance that relies on the
        # default — confirm this is intended.
        super(Snake_Main, self).__init__(field_leds, field_matrix, rgb_field_painter, led_matrix_painter, highscorelist)
        self.direction = 0  # 0 = up, 1 = left, 2 = down, 3 = right
        # Only one direction change is honoured per tick so the snake cannot
        # reverse into itself via two rapid key presses within one tick.
        self.is_there_a_direction_change_in_this_tick = False
        self.food_is_on_field = False
        self.field_for_snake = []  # 2D board; see class docstring for cell values

    def event(self, eventname: str):
        """Translate an input event into a direction change.

        Direct reversals (e.g. 'move up' while heading down) are ignored, and
        only the first change per tick takes effect.
        """
        if not self.is_there_a_direction_change_in_this_tick:
            if eventname == "move up":
                if self.direction != 2:  # cannot reverse from 'down'
                    self.direction = 0
                    self.is_there_a_direction_change_in_this_tick = True
            elif eventname == "move left":
                if self.direction != 3:  # cannot reverse from 'right'
                    self.direction = 1
                    self.is_there_a_direction_change_in_this_tick = True
            elif eventname == "move down":
                if self.direction != 0:  # cannot reverse from 'up'
                    self.direction = 2
                    self.is_there_a_direction_change_in_this_tick = True
            elif eventname == "move right":
                if self.direction != 1:  # cannot reverse from 'left'
                    self.direction = 3
                    self.is_there_a_direction_change_in_this_tick = True
            elif eventname == "rotate left":
                # Cycle 0 -> 1 -> 2 -> 3 -> 0.
                self.direction += 1
                if self.direction >= 4:
                    self.direction -= 4
                self.is_there_a_direction_change_in_this_tick = True
            elif eventname == "rotate right":
                # Cycle 0 -> 3 -> 2 -> 1 -> 0.
                self.direction -= 1
                if self.direction < 0:
                    self.direction += 4
                self.is_there_a_direction_change_in_this_tick = True

    def move_snake_if_possible(self):
        """Advance the head one cell in the current direction.

        Moving onto a snake cell (or off the board) ends the game; moving onto
        food grows the snake and updates the score display; otherwise the body
        ages by one tick (which is how the tail advances).
        """
        # Target-cell classification: <= 0 means free or food (move allowed),
        # 1 means snake body or out of bounds (game over).
        if self.direction == 0:  # up
            if self.test_for_case_of_block_in_field(self.head_x, self.head_y - 1) <= 0:
                self.head_y -= 1
            elif self.test_for_case_of_block_in_field(self.head_x, self.head_y - 1) == 1:
                self.game_over = True
        elif self.direction == 1:  # left
            if self.test_for_case_of_block_in_field(self.head_x - 1, self.head_y) <= 0:
                self.head_x -= 1
            elif self.test_for_case_of_block_in_field(self.head_x - 1, self.head_y) == 1:
                self.game_over = True
        elif self.direction == 2:  # down
            if self.test_for_case_of_block_in_field(self.head_x, self.head_y + 1) <= 0:
                self.head_y += 1
            elif self.test_for_case_of_block_in_field(self.head_x, self.head_y + 1) == 1:
                self.game_over = True
        elif self.direction == 3:  # right
            if self.test_for_case_of_block_in_field(self.head_x + 1, self.head_y) <= 0:
                self.head_x += 1
            elif self.test_for_case_of_block_in_field(self.head_x + 1, self.head_y) == 1:
                self.game_over = True
        if not self.game_over:
            if self.test_for_case_of_block_in_field(self.head_x, self.head_y) == -1:  # if head eats food
                self.food_is_on_field = False
                self.lenght_of_snake += 1
                self.score.score_for_block()
                # Redraw the score on the LED matrix after it changed.
                self.field_matrix.set_all_pixels_to_black()
                self.score.draw_score_on_field(self.field_matrix)
                self.led_matrix_painter.draw(self.field_matrix)
            # Age every body segment, then stamp the new head position.
            self.turn_every_pixel_in_snakes_field_ones_up()
            self.field_for_snake[self.head_y][self.head_x] = 1
        else:
            # Game over: play the sound, persist the highscore and start
            # scrolling the final message (advanced further in tick()).
            game_sound.stop_song()
            game_sound.play_sound("game_over")
            self.highscorelist.add_entry(Highscoreentry(datetime.today(), self.playername, self.score.get_score_int()))
            self.highscorelist.save()
            self.led_matrix_painter.show_Message("Game over - Your Points: " + self.score.get_score_str(), 250)

    def turn_every_pixel_in_snakes_field_ones_up(self):
        """Age every snake segment by one tick; segments older than the snake's
        length are cleared — this is how the tail moves forward."""
        for y in range(len(self.field_for_snake)):
            for x in range(len(self.field_for_snake[0])):
                if self.field_for_snake[y][x] > 0:
                    self.field_for_snake[y][x] += 1
                    if self.field_for_snake[y][x] > self.lenght_of_snake:
                        self.field_for_snake[y][x] = 0

    def test_for_case_of_block_in_field(self, x: int, y: int) -> int:
        """Classify the cell at (x, y): 0 = free, -1 = food, 1 = snake.

        Out-of-bounds coordinates also return 1, so the border behaves like a
        solid wall.
        """
        if 0 <= x < len(self.field_for_snake[0]) and 0 <= y < len(self.field_for_snake):
            if self.field_for_snake[y][x] == 0:
                return 0
            elif self.field_for_snake[y][x] < 0:
                return -1
            else:
                return 1
        else:
            return 1

    def translate_snakes_field_into_normal_field(self):
        """Render the logical board into RGB pixels: head red, body green,
        food blue, everything else black."""
        self.field_leds.set_all_pixels_to_black()
        for y in range(self.field_leds.height):
            for x in range(self.field_leds.width):
                if self.field_for_snake[y][x] == 1:
                    self.field_leds.field[y][x] = [255, 0, 0]
                elif self.field_for_snake[y][x] > 1:
                    self.field_leds.field[y][x] = [0, 255, 0]
                elif self.field_for_snake[y][x] == -1:
                    self.field_leds.field[y][x] = [0, 0, 255]

    def test_and_print_food(self):
        """Place food on a random empty cell if none is currently on the board.

        Rejection-samples random cells until a free one is found; assumes the
        board is never completely full of snake.
        """
        if not self.food_is_on_field:
            while not self.food_is_on_field:
                self.food_x = int(random()*len(self.field_for_snake[0]))
                self.food_y = int(random()*len(self.field_for_snake))
                if self.test_for_case_of_block_in_field(self.food_x, self.food_y) == 0:
                    self.food_is_on_field = True
            self.field_for_snake[self.food_y][self.food_x] = -1

    def tick(self):
        """One game step: move, spawn food, repaint (roughly 2 FPS while
        playing); while game over, scroll the final message instead."""
        if not self.game_over:
            self.move_snake_if_possible()
            self.test_and_print_food()
            self.translate_snakes_field_into_normal_field()
            self.rgb_field_painter.draw(self.field_leds)
            self.is_there_a_direction_change_in_this_tick = False
            time.sleep(0.5)
        else:
            self.led_matrix_painter.move_Message()
            time.sleep(0.02)

    def start(self, playername: str = None):
        """Begin a new game for ``playername``, resetting all state."""
        super().start(playername)
        self.prepare_for_start()

    def stop(self):
        # Stopping the feature is modelled as an immediate game over.
        self.game_over = True

    def is_game_over(self):
        return super(Snake_Main, self).is_game_over()

    def prepare_for_start(self):
        """Reset board, snake, score and displays to their initial state."""
        self.field_leds.set_all_pixels_to_black()
        self.field_matrix.set_all_pixels_to_black()
        # Blank board sized to the LED field.
        self.field_for_snake = []
        for i in range(self.field_leds.height):
            self.field_for_snake.append([])
            for _ in range(self.field_leds.width):
                self.field_for_snake[i].append(0)
        # Initial head position and snake parameters.
        self.head_x = 5
        self.head_y = 20
        self.direction = 0
        self.lenght_of_snake = 3
        self.delay = 0.5
        self.game_over = False
        self.food_is_on_field = False
        self.food_x = 0
        self.food_y = 0
        self.is_there_a_direction_change_in_this_tick = False
        # Score starts at the initial snake length.
        self.score = Score()
        self.score.points = 3
        self.score.draw_score_on_field(self.field_matrix)
        self.rgb_field_painter.draw(self.field_leds)
        self.led_matrix_painter.draw(self.field_matrix)
|
# -*- coding: utf-8 -*-
from django.db import models
from easy_thumbnails.fields import ThumbnailerImageField
from django.db.models import Count, Max
from django.utils.translation import ugettext_lazy as _
import datetime
from django.urls import reverse
from .settings_newsapp import ENABLE_ARCHIVE, ENABLE_CATEGORIES, NEWS_CLASS, ENABLE_TAGS
from newsapp import class_for_name
if ENABLE_CATEGORIES:
    class NewCategory(models.Model):
        """Category for news entries; only defined when categories are enabled."""
        _translation_fields = ['title']  # fields handled by the translation layer
        title = models.CharField(_('title'), max_length=256)
        slug = models.SlugField(_('slug'), blank=True, unique=True)
        position = models.SmallIntegerField(_('position'), default=0)

        class Meta:
            verbose_name = _('new category')
            verbose_name_plural = _('new categories')
            ordering = ('position',)

        def __str__(self):
            return self.title

        def __unicode__(self):
            # Python 2 legacy; mirrors __str__.
            return self.title

        def get_absolute_url(self):
            return "{0}".format(reverse("new_category", kwargs={"category_url": self.slug}))
if ENABLE_TAGS:
    class Tag(models.Model):
        """Free-form tag for news entries; only defined when tags are enabled."""
        _translation_fields = ['title']  # fields handled by the translation layer
        title = models.CharField(_('title'), max_length=256)
        slug = models.SlugField(_('slug'), blank=True, unique=True)
        position = models.SmallIntegerField(_('position'), default=0)

        class Meta:
            verbose_name = _('tag')
            verbose_name_plural = _('tags')
            ordering = ('position',)

        def __str__(self):
            return self.title

        def __unicode__(self):
            # Python 2 legacy; mirrors __str__.
            return self.title

        def get_absolute_url(self):
            return "{0}".format(reverse("tag", kwargs={"tag_url": self.slug}))
# First, define the Manager subclass.
class ActiveNewsManager(models.Manager):
    """Manager restricted to active entries whose publish date is not in the future."""
    def get_queryset(self):
        # NOTE(review): datetime.now() is naive; with USE_TZ=True this mixes
        # naive and aware datetimes — confirm timezone handling.
        return super(ActiveNewsManager, self).get_queryset().filter(active=True, date_added__lte=datetime.datetime.now())
class NewAbstract(models.Model):
    """Abstract base model for a news entry.

    The concrete ``New`` model inherits either from this class or from the
    class named by the ``NEWS_CLASS`` setting (see the bottom of this module).
    """
    _translation_fields = ['title', 'content_short', 'content', 'more_text']
    title = models.CharField(_('title'), max_length=256)
    slug = models.SlugField(_('slug'), blank=True, unique=True)
    content_short = models.TextField(_('brief'))
    content = models.TextField(_('content'), blank=True)
    # The default is the callable datetime.datetime.today, evaluated per save,
    # not a fixed import-time timestamp.
    date_added = models.DateTimeField(_('date added'), default=datetime.datetime.today)
    active = models.BooleanField(_('active'), default=True)
    more_text = models.CharField(_('read more'), max_length=256, blank=True, help_text=_('read more button text'))
    image = ThumbnailerImageField(_('image'), upload_to='news', resize_source=dict(size=(1024, 1024)), blank=True, null=True)
    # Optional relations, present only when the corresponding feature flag is on.
    if ENABLE_CATEGORIES:
        new_category = models.ManyToManyField(NewCategory, verbose_name=_('new categories'))
    if ENABLE_TAGS:
        tag = models.ManyToManyField(Tag, verbose_name=_('tags'))
    objects = models.Manager()  # default, unfiltered manager
    active_objects = ActiveNewsManager()  # only active, already-published entries

    class Meta:
        verbose_name = _('new')
        verbose_name_plural = _('news')
        ordering = ('-date_added',)
        abstract = True

    def __unicode__(self):
        # Python 2 legacy; note __str__ is not defined here, unlike the
        # category/tag models above.
        return self.title

    def get_absolute_url(self):
        return "{0}{1}/".format(reverse("news_all"), self.slug)

    def get_prev(self):
        # Chronologically previous (older) active entry; with the model's
        # -date_added ordering, first() yields the closest older one.
        return New.active_objects.filter(date_added__lt=self.date_added).first()

    def get_next(self):
        # Chronologically next (newer) active entry; last() of the descending
        # queryset yields the closest newer one.
        return New.active_objects.filter(date_added__gt=self.date_added).last()

    def get_page_title(self):
        return self.title

    def get_more_text(self):
        # Fall back to a translated default label when none was configured.
        if self.more_text:
            return self.more_text
        else:
            return _('read more')

    @staticmethod
    def date_archive():
        """Return a (year, month) queryset with entry counts for the archive,
        or None when the archive feature is disabled.

        NOTE(review): relies on the deprecated ``extra()`` API and on direct
        ``query.group_by`` manipulation with SQL EXTRACT — verify behaviour on
        the target database backend.
        """
        if ENABLE_ARCHIVE:
            date_archive = New.active_objects.extra(
                select={
                    'year': "EXTRACT(year FROM date_added)",
                    'month': "EXTRACT(month from date_added)"}).values(
                'year',
                'month'
            ).order_by('-year', '-month')
            date_archive.query.group_by = ['year', 'month']
            date_archive = date_archive.annotate(date_added=Max('date_added'), cnt=Count("pk"))
            return date_archive
        return None
class New(class_for_name(NEWS_CLASS) if NEWS_CLASS else NewAbstract):
    # Concrete news model; the base class can be swapped via the NEWS_CLASS
    # setting (resolved by class_for_name), defaulting to NewAbstract.
    pass
|
import unittest
from class_methods import *
from delta import *
from dynamic import *
from indexes import *
from inheritance import *
from instance import *
from json_serialisation import *
from validation import *
if __name__ == '__main__':
    # Discover and run every TestCase brought in by the star imports above.
    unittest.main()
|
#!/usr/bin/env python3
import numpy as np
#import scipy.linalg
def vector_lengths(a):
    """Return the Euclidean length (L2 norm) of each row of the 2-D array ``a``."""
    squared_sums = (a * a).sum(axis=1)
    return np.sqrt(squared_sums)
def main():
    """Demo: print the lengths of 4 random integer vectors (entries in [1, 8))."""
    sample = np.random.randint(1, 8, (4, 5))
    print(vector_lengths(sample))


if __name__ == "__main__":
    main()
|
# Defaults merged into the project configuration when not overridden.
DEFAULT_SETTINGS = {
    'AUTOCONFIG_URL_PREFIXES': {
        # Mount the 'shorty' app at the site root.
        'shorty': '',
    },
}

# Explicit settings for this deployment.
SETTINGS = {
    'INSTALLED_APPS': [
        'django.contrib.admin',
        'nuit',
    ],
    'NUIT_GLOBAL_TITLE': 'Shorty',
    # Content-Security-Policy: allow scripts from self plus the CDN-hosted
    # clipboard.js build referenced by the templates.
    'CSP_SCRIPT_SRC': (
        "'self'",
        "https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/1.5.8/clipboard.min.js",
    ),
}
|
# -*- coding: utf-8 -*-
import tkinter as tk  # Tkinter must be imported before use

# Step 1: create the main window.
window = tk.Tk()
# Step 2: give the window a title.
window.title('My Window')
# Step 3: set the window size (width x height — lowercase 'x').
window.geometry('500x300')

# Step 4: a label that displays the current selection; ``var`` links the
# radiobuttons' value to the label text.
var = tk.StringVar()
l = tk.Label(window, bg='yellow', width=20, text='empty')
l.pack()


# Callback fired whenever a radiobutton is selected.
def print_selection():
    l.config(text='you have selected ' + var.get())


# Three radiobuttons; selecting one stores its value ('A'/'B'/'C') into
# ``var`` and triggers print_selection.
r1 = tk.Radiobutton(window, text='Option A', variable=var, value='A', command=print_selection)
r1.pack()
r2 = tk.Radiobutton(window, text='Option B', variable=var, value='B', command=print_selection)
r2.pack()
# BUGFIX: r3 was left as a bare class reference (``tk.Radiobutton``) and was
# never instantiated nor packed; the third option was therefore missing.
r3 = tk.Radiobutton(window, text='Option C', variable=var, value='C', command=print_selection)
r3.pack()

# Enter the Tk main event loop.
window.mainloop()
|
# Read integers until a 0 sentinel, then report the smallest and largest.
lisnum = []
while True:
    print("Digite 0 para parar")
    valor = int(input('Insira os dados a ser acresentados a lista:'))
    if valor == 0:
        # BUGFIX: the original never terminated (``v`` was never changed) and
        # appended the sentinel, then popped the FIRST element instead.
        break
    lisnum.append(valor)

# BUGFIX: min/max were read from the ends of an unsorted list; sorting makes
# lisnum[0] the minimum and lisnum[-1] the maximum.
lisnum.sort()
print(lisnum)
if lisnum:  # guard against the user entering 0 immediately
    print("O dado de menor valor encontrado foi", lisnum[0])
    fim = lisnum[-1]
    print("o dado de maior valor encontrado foi", fim)
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
# login code modified from https://gist.github.com/guillaumevincent/4771570
import tornado.auth
import tornado.escape
import tornado.web
import tornado.websocket
from os.path import dirname, join
from base64 import b64encode
from uuid import uuid4
from qiita_core.qiita_settings import qiita_config
from qiita_core.util import is_test_environment
from qiita_pet.handlers.base_handlers import (
MainHandler, NoPageHandler, IFrame)
from qiita_pet.handlers.auth_handlers import (
AuthCreateHandler, AuthLoginHandler, AuthLogoutHandler, AuthVerifyHandler)
from qiita_pet.handlers.user_handlers import (
ChangeForgotPasswordHandler, ForgotPasswordHandler, UserProfileHandler,
UserMessagesHander, UserJobs)
from qiita_pet.handlers.analysis_handlers import (
ListAnalysesHandler, AnalysisSummaryAJAX, SelectedSamplesHandler,
AnalysisDescriptionHandler, AnalysisGraphHandler, CreateAnalysisHandler,
AnalysisJobsHandler, ShareAnalysisAJAX)
from qiita_pet.handlers.study_handlers import (
StudyIndexHandler, StudyBaseInfoAJAX, SampleTemplateHandler,
SampleTemplateOverviewHandler, SampleTemplateColumnsHandler,
StudyEditHandler, ListStudiesHandler, ListStudiesAJAX, EBISubmitHandler,
CreateStudyAJAX, ShareStudyAJAX, StudyApprovalList, ArtifactGraphAJAX,
VAMPSHandler, Study, StudyTags, StudyGetTags,
ListCommandsHandler, ListOptionsHandler, PrepTemplateSummaryAJAX,
PrepTemplateAJAX, NewArtifactHandler, SampleAJAX, StudyDeleteAjax,
ArtifactAdminAJAX, NewPrepTemplateAjax, DataTypesMenuAJAX, StudyFilesAJAX,
ArtifactGetSamples, ArtifactGetInfo, WorkflowHandler,
WorkflowRunHandler, JobAJAX, AutocompleteHandler)
from qiita_pet.handlers.artifact_handlers import (
ArtifactSummaryAJAX, ArtifactAJAX, ArtifactSummaryHandler)
from qiita_pet.handlers.websocket_handlers import (
MessageHandler, SelectedSocketHandler, SelectSamplesHandler)
from qiita_pet.handlers.logger_handlers import LogEntryViewerHandler
from qiita_pet.handlers.upload import (
UploadFileHandler, StudyUploadFileHandler, StudyUploadViaRemote)
from qiita_pet.handlers.stats import StatsHandler
from qiita_pet.handlers.download import (
DownloadHandler, DownloadStudyBIOMSHandler, DownloadRelease,
DownloadRawData, DownloadEBISampleAccessions, DownloadEBIPrepAccessions,
DownloadUpload, DownloadPublicHandler)
from qiita_pet.handlers.prep_template import (
PrepTemplateHandler, PrepTemplateGraphHandler, PrepTemplateJobHandler)
from qiita_pet.handlers.ontology import OntologyHandler
from qiita_pet.handlers.software import SoftwareHandler
from qiita_db.handlers.processing_job import (
JobHandler, HeartbeatHandler, ActiveStepHandler, CompleteHandler,
ProcessingJobAPItestHandler)
from qiita_db.handlers.artifact import (
ArtifactHandler, ArtifactAPItestHandler, ArtifactTypeHandler)
from qiita_db.handlers.sample_information import SampleInfoDBHandler
from qiita_db.handlers.user import UserInfoDBHandler, UsersListDBHandler
from qiita_db.handlers.prep_template import (
PrepTemplateDataHandler, PrepTemplateAPItestHandler,
PrepTemplateDBHandler)
from qiita_db.handlers.oauth2 import TokenAuthHandler
from qiita_db.handlers.reference import ReferenceHandler
from qiita_db.handlers.core import ResetAPItestHandler
from qiita_db.handlers.plugin import (
PluginHandler, CommandHandler, CommandListHandler, CommandActivateHandler,
ReloadPluginAPItestHandler)
from qiita_db.handlers.analysis import APIAnalysisMetadataHandler
from qiita_db.handlers.archive import APIArchiveObservations
from qiita_db.util import get_mountpoint
from qiita_pet.handlers.rest import ENDPOINTS as REST_ENDPOINTS
from qiita_pet.handlers.qiita_redbiom import RedbiomPublicSearch
if qiita_config.portal == "QIITA":
from qiita_pet.handlers.portal import (
StudyPortalHandler, StudyPortalAJAXHandler)
# Filesystem layout for templates and static assets.
DIRNAME = dirname(__file__)
STATIC_PATH = join(DIRNAME, "static")
TEMPLATE_PATH = join(DIRNAME, "templates")  # base folder for webpages
_, RES_PATH = get_mountpoint('job')[0]
# NOTE(review): regenerated on every import/restart and apparently unused —
# the Application settings below use qiita_config.cookie_secret instead.
COOKIE_SECRET = b64encode(uuid4().bytes + uuid4().bytes)
DEBUG = qiita_config.test_environment
_vendor_js = join(STATIC_PATH, 'vendor', 'js')
class Application(tornado.web.Application):
    """Qiita's Tornado application: wires every URL pattern to its handler.

    Several handler groups are registered in a deliberate order — catch-all
    patterns must come after the more specific ones (see the inline comments
    below) and the 404 handler must be registered last.
    """

    def __init__(self):
        handlers = [
            (r"/", MainHandler),
            (r"/auth/login/", AuthLoginHandler),
            (r"/auth/logout/", AuthLogoutHandler),
            (r"/auth/create/", AuthCreateHandler),
            (r"/auth/verify/(.*)", AuthVerifyHandler),
            (r"/auth/forgot/", ForgotPasswordHandler),
            (r"/auth/reset/(.*)", ChangeForgotPasswordHandler),
            (r"/profile/", UserProfileHandler),
            (r"/user/messages/", UserMessagesHander),
            (r"/user/jobs/", UserJobs),
            (r"/static/(.*)", tornado.web.StaticFileHandler,
             {"path": STATIC_PATH}),
            # Analysis handlers
            (r"/analysis/list/", ListAnalysesHandler),
            # NOTE(review): "sumary" is misspelled but kept as-is — clients
            # may depend on the exact route.
            (r"/analysis/dflt/sumary/", AnalysisSummaryAJAX),
            (r"/analysis/create/", CreateAnalysisHandler),
            (r"/analysis/selected/", SelectedSamplesHandler),
            (r"/analysis/selected/socket/", SelectedSocketHandler),
            (r"/analysis/description/(.*)/graph/", AnalysisGraphHandler),
            (r"/analysis/description/(.*)/jobs/", AnalysisJobsHandler),
            (r"/analysis/description/(.*)/", AnalysisDescriptionHandler),
            (r"/analysis/sharing/", ShareAnalysisAJAX),
            (r"/artifact/samples/", ArtifactGetSamples),
            (r"/artifact/info/", ArtifactGetInfo),
            (r"/consumer/", MessageHandler),
            # Admin-only pages
            (r"/admin/error/", LogEntryViewerHandler),
            (r"/admin/approval/", StudyApprovalList),
            (r"/admin/artifact/", ArtifactAdminAJAX),
            (r"/admin/software/", SoftwareHandler),
            (r"/ebi_submission/(.*)", EBISubmitHandler),
            # Study handlers
            (r"/study/create/", StudyEditHandler),
            (r"/study/edit/(.*)", StudyEditHandler),
            (r"/study/list/", ListStudiesHandler),
            (r"/study/process/commands/options/", ListOptionsHandler),
            (r"/study/process/commands/", ListCommandsHandler),
            (r"/study/process/workflow/run/", WorkflowRunHandler),
            (r"/study/process/workflow/", WorkflowHandler),
            (r"/study/process/job/", JobAJAX),
            (r"/study/list/socket/", SelectSamplesHandler),
            (r"/study/list_studies/(.*)", ListStudiesAJAX),
            (r"/study/new_artifact/", NewArtifactHandler),
            (r"/study/files/", StudyFilesAJAX),
            (r"/study/sharing/", ShareStudyAJAX),
            (r"/study/sharing/autocomplete/", AutocompleteHandler),
            (r"/study/new_prep_template/", NewPrepTemplateAjax),
            (r"/study/tags/(.*)", StudyTags),
            (r"/study/get_tags/", StudyGetTags),
            (r"/study/([0-9]+)$", Study),
            # Artifact handlers
            (r"/artifact/graph/", ArtifactGraphAJAX),
            (r"/artifact/(.*)/summary/", ArtifactSummaryAJAX),
            (r"/artifact/html_summary/(.*)", ArtifactSummaryHandler,
             {"path": qiita_config.base_data_dir}),
            (r"/artifact/(.*)/", ArtifactAJAX),
            # Prep template handlers
            (r"/prep_template/", PrepTemplateHandler),
            (r"/prep_template/(.*)/graph/", PrepTemplateGraphHandler),
            (r"/prep_template/(.*)/jobs/", PrepTemplateJobHandler),
            (r"/ontology/", OntologyHandler),
            # ORDER FOR /study/description/ SUBPAGES HERE MATTERS.
            # Same reasoning as below. /study/description/(.*) should be last.
            (r"/study/description/sample_template/overview/",
             SampleTemplateOverviewHandler),
            (r"/study/description/sample_template/columns/",
             SampleTemplateColumnsHandler),
            (r"/study/description/sample_template/", SampleTemplateHandler),
            (r"/study/description/sample_summary/", SampleAJAX),
            (r"/study/description/prep_summary/", PrepTemplateSummaryAJAX),
            (r"/study/description/prep_template/", PrepTemplateAJAX),
            (r"/study/description/baseinfo/", StudyBaseInfoAJAX),
            (r"/study/description/data_type_menu/", DataTypesMenuAJAX),
            (r"/study/description/(.*)", StudyIndexHandler),
            (r"/study/delete/", StudyDeleteAjax),
            (r"/study/upload/remote/(.*)", StudyUploadViaRemote),
            (r"/study/upload/(.*)", StudyUploadFileHandler),
            (r"/upload/", UploadFileHandler),
            (r"/check_study/", CreateStudyAJAX),
            (r"/stats/", StatsHandler),
            # Download endpoints
            (r"/download/(.*)", DownloadHandler),
            (r"/download_study_bioms/(.*)", DownloadStudyBIOMSHandler),
            (r"/download_raw_data/(.*)", DownloadRawData),
            (r"/download_ebi_accessions/samples/(.*)",
             DownloadEBISampleAccessions),
            (r"/download_ebi_accessions/experiments/(.*)",
             DownloadEBIPrepAccessions),
            (r"/download_upload/(.*)", DownloadUpload),
            (r"/release/download/(.*)", DownloadRelease),
            (r"/public_download/", DownloadPublicHandler),
            (r"/vamps/(.*)", VAMPSHandler),
            (r"/redbiom/(.*)", RedbiomPublicSearch),
            (r"/iframe/", IFrame),
            # Plugin handlers - the order matters here so do not change
            # qiita_db/jobs/(.*) should go after any of the
            # qiita_db/jobs/(.*)/XXXX because otherwise it will match the
            # regular expression and the qiita_db/jobs/(.*)/XXXX will never
            # be hit.
            (r"/qiita_db/authenticate/", TokenAuthHandler),
            (r"/qiita_db/jobs/(.*)/heartbeat/", HeartbeatHandler),
            (r"/qiita_db/jobs/(.*)/step/", ActiveStepHandler),
            (r"/qiita_db/jobs/(.*)/complete/", CompleteHandler),
            (r"/qiita_db/jobs/(.*)", JobHandler),
            (r"/qiita_db/artifacts/types/", ArtifactTypeHandler),
            (r"/qiita_db/artifacts/(.*)/", ArtifactHandler),
            (r"/qiita_db/users/", UsersListDBHandler),
            (r"/qiita_db/user/(.*)/data/", UserInfoDBHandler),
            (r"/qiita_db/sample_information/(.*)/data/", SampleInfoDBHandler),
            (r"/qiita_db/prep_template/(.*)/data/", PrepTemplateDataHandler),
            (r"/qiita_db/prep_template/(.*)/", PrepTemplateDBHandler),
            (r"/qiita_db/references/(.*)/", ReferenceHandler),
            (r"/qiita_db/plugins/(.*)/(.*)/commands/(.*)/activate/",
             CommandActivateHandler),
            (r"/qiita_db/plugins/(.*)/(.*)/commands/(.*)/", CommandHandler),
            (r"/qiita_db/plugins/(.*)/(.*)/commands/", CommandListHandler),
            (r"/qiita_db/plugins/(.*)/(.*)/", PluginHandler),
            (r"/qiita_db/analysis/(.*)/metadata/", APIAnalysisMetadataHandler),
            (r"/qiita_db/archive/observations/", APIArchiveObservations)
        ]
        # rest endpoints
        handlers.extend(REST_ENDPOINTS)
        if qiita_config.portal == "QIITA":
            # Add portals editing pages only on main portal
            portals = [
                (r"/admin/portals/studies/", StudyPortalHandler),
                (r"/admin/portals/studiesAJAX/", StudyPortalAJAXHandler)
            ]
            handlers.extend(portals)
        if is_test_environment():
            # We add the endpoints for testing plugins
            test_handlers = [
                (r"/apitest/processing_job/", ProcessingJobAPItestHandler),
                (r"/apitest/reset/", ResetAPItestHandler),
                (r"/apitest/prep_template/", PrepTemplateAPItestHandler),
                (r"/apitest/artifact/", ArtifactAPItestHandler),
                (r"/apitest/reload_plugins/", ReloadPluginAPItestHandler)
            ]
            handlers.extend(test_handlers)
        # 404 PAGE MUST BE LAST IN THIS LIST!
        handlers.append((r".*", NoPageHandler))
        settings = {
            "template_path": TEMPLATE_PATH,
            "debug": DEBUG,
            "cookie_secret": qiita_config.cookie_secret,
            "login_url": "%s/auth/login/" % qiita_config.portal_dir,
        }
        tornado.web.Application.__init__(self, handlers, **settings)
|
"""
__author__ = Hagai Hargil
"""
from collections import deque, namedtuple
from typing import Iterator, Tuple, Union

import attr
import numpy as np
import pandas as pd
from attr.validators import instance_of
from numba import jit, uint8, int64
from pysight.nd_hist_generator.movie import Movie, FrameChunk
@attr.s(slots=True)
class CensorCorrection(object):
raw = attr.ib(validator=instance_of(dict))
data = attr.ib(validator=instance_of(pd.DataFrame))
movie = attr.ib(validator=instance_of(Movie))
all_laser_pulses = attr.ib()
nano_flim_list = attr.ib(init=False)
flim = attr.ib(default=False, validator=instance_of(bool))
reprate = attr.ib(default=80e6, validator=instance_of(float))
binwidth = attr.ib(default=800e-12, validator=instance_of(float))
laser_offset = attr.ib(default=3.5, validator=instance_of(float))
num_of_channels = attr.ib(default=1, validator=instance_of(int))
@property
def bins_bet_pulses(self) -> int:
return int(np.ceil(1 / (self.reprate * self.binwidth)))
@property
def offset(self):
return int(np.floor(self.laser_offset * 10 ** -9 / self.binwidth))
def run(self):
"""
Main pipeline for the censor correction part.
"""
if self.flim:
print("Starting the censor correction...")
self.create_arr_of_hists_deque()
else:
print("FLIM deactivated, no censor correction performed.")
def __gen_laser_pulses_deque(self) -> np.ndarray:
"""
If data has laser pulses - return them. Else - simulate them with an offset
"""
start_time = 0
step = self.bins_bet_pulses
volumes_in_movie = self.movie.gen_of_volumes()
if (
self.all_laser_pulses == 0 and self.flim == False
): # no 'Laser' data was recorded
for vol in volumes_in_movie:
yield np.arange(
start=start_time + self.offset,
stop=vol.end_time,
step=step,
dtype=np.uint64,
)
elif self.all_laser_pulses == 0 and self.flim == True:
pass
else:
for vol in volumes_in_movie:
yield self.all_laser_pulses[
(self.all_laser_pulses >= vol.abs_start_time - step)
& (self.all_laser_pulses <= vol.end_time + step)
] + self.offset
    def __get_bincount_deque(self):
        """Build, per volume, a dict holding the per-pulse photon histograms and
        the photons-per-pulse bincount; intended to return a deque of them.

        NOTE(review): the early ``return data_dict`` inside the loop returns a
        single dict as soon as one volume has no photons, instead of appending
        it and continuing (and instead of returning ``bincount_deque``) — this
        looks like a bug; confirm intended behaviour before relying on it.
        """
        print("Movie object created. Generating the bincount deque...")
        bincount_deque = deque()
        laser_pulses_deque = self.__gen_laser_pulses_deque()
        volumes_in_movie = self.movie.gen_of_volumes()
        for idx, vol in enumerate(volumes_in_movie):
            censored = CensoredVolume(
                df=vol.data,
                vol=vol,
                offset=self.offset,
                laser_pulses=next(laser_pulses_deque),
            )
            # dig/bincount semantics come from CensoredVolume.gen_bincount();
            # presumably dig maps each photon to a laser-pulse index — verify.
            dig, bincount = censored.gen_bincount()
            pos_idx = np.where(dig >= 0)[0]  # keep photons assigned to a real pulse
            dig = dig[pos_idx]
            pos_photons = censored.df.iloc[pos_idx, -1].values.T
            if len(pos_photons) == 0:
                # No photons in this volume: single all-zero histogram column.
                data_dict = {
                    "photon_hist": np.zeros((self.bins_bet_pulses, 1), dtype=np.uint8),
                    "bincount": bincount,
                    "num_empty_hists": bincount[0],
                }
                return data_dict
            # One histogram column per photon, binned between its laser pulse
            # and the following one.
            photon_hist = np.zeros(
                (self.bins_bet_pulses, pos_photons.shape[0]), dtype=np.uint8
            )
            for laser_idx, photon in enumerate(np.nditer(pos_photons)):
                start_time = censored.laser_pulses[dig[laser_idx]]
                try:
                    end_time = censored.laser_pulses[dig[laser_idx] + 1]
                except IndexError:  # photons out of laser pulses
                    continue
                else:
                    photon_hist[:, laser_idx] = np.histogram(
                        photon, bins=np.arange(start_time, end_time + 1, dtype="uint64")
                    )[0]
            data_dict = {
                "photon_hist": photon_hist,
                "bincount": bincount,
                "num_empty_hists": bincount[0],
            }
            # NOTE(review): laser_idx here is whatever value the loop variable
            # had after the last iteration — used only in the assert message.
            assert (
                data_dict["num_empty_hists"] >= 0
            ), "Sum of bincount: {}, number of photons: {}".format(
                sum(bincount), laser_idx
            )
            bincount_deque.append(data_dict)
        return bincount_deque
def find_temporal_structure_deque(self):
temp_struct_deque = deque()
laser_pulses_deque = self.__gen_laser_pulses_deque()
volumes_in_movie = self.movie.gen_of_volumes()
for idx, vol in enumerate(volumes_in_movie):
censored = CensoredVolume(
df=vol.data,
vol=vol,
offset=self.offset,
laser_pulses=next(laser_pulses_deque),
binwidth=self.binwidth,
reprate=self.reprate,
)
temp_struct_deque.append(censored.find_temp_structure())
return temp_struct_deque
    def __worker_arr_of_hists(self, vol):
        """
        Helper for a single volume: wrap it in a CensoredVolume and return
        its per-pixel array of histograms.
        :param vol: one volume object from the movie generator.
        :return: result of CensoredVolume.gen_arr_of_hists().
        """
        censored = CensoredVolume(
            df=vol.data,
            vol=vol,
            offset=self.offset,
            binwidth=self.binwidth,
            reprate=self.reprate,
        )
        return censored.gen_arr_of_hists()
def create_arr_of_hists_deque(self):
"""
For each volume generate a single matrix with the same size as the underlying volume,
which contains a histogram of photons in their laser pulses for each pixel.
:return: deque() that contains an array of histograms in each place
"""
self.nano_flim_list = [] # each cell contains a different data channel
for chan in range(1, self.num_of_channels + 1):
print("Starting channel number {}: ".format(chan))
volumes_in_movie = self.movie.gen_of_volumes(channel_num=chan)
self.nano_flim_list.append(
[self.__worker_arr_of_hists(vol) for vol in volumes_in_movie]
)
def create_array_of_hists_deque(self):
"""
Go through each volume in the deque and find the laser pulses for each pixel, creating a summed histogram per pixel.
:return:
"""
temp_struct_deque = deque()
laser_pulses_deque = self.__gen_laser_pulses_deque()
volumes_in_movie = self.movie.gen_of_volumes()
for idx, vol in enumerate(volumes_in_movie):
censored = CensoredVolume(
df=vol.data,
vol=vol,
offset=self.offset,
laser_pulses=next(laser_pulses_deque),
binwidth=self.binwidth,
reprate=self.reprate,
)
temp_struct_deque.append(censored.gen_array_of_hists())
return temp_struct_deque
    def __gen_labels(self, size: int, label: Union[int, float]) -> np.ndarray:
        """
        Create labels for the ML algorithm. Label value must be an integer.
        :size: Number of elements
        :return: np.ndarray
        """
        if isinstance(label, int):  # fixed power during the session
            # uint8 ones scaled by the label -> constant label vector.
            return np.ones(size, dtype=np.uint8) * label
        elif isinstance(
            label, float
        ):  # `label` contains the frequency of the triangular wave
            # NOTE(review): unimplemented branch — the method silently
            # returns None for float labels, which callers that index the
            # result will crash on. TODO: implement or raise explicitly.
            pass
def learn_histograms(
self, label: Union[int, float], power: int, folder_to_save: str
):
"""
Implement the machine learning algorithm on the data.
:param label: Label of ML algorithm.
:param power: How much power was injected to the Qubig. For saving the file.
:return: data, labels
"""
from sklearn import svm, metrics
import pathlib
# Start by generating the data and arranging it properly for the machine
bincount = self.__get_bincount_deque()
print("Bincount done. Adding all data to a single matrix.")
data = np.empty((self.bins_bet_pulses, 0))
for vol in bincount:
data = np.concatenate(
(data, vol["photon_hist"]), axis=1
) # the histograms with photons in them
data = np.concatenate(
(
data,
np.zeros(
(self.bins_bet_pulses, vol["num_empty_hists"]), dtype=np.uint8
),
),
axis=1,
) # empty hists
data = data.T
n_samples = data.shape[0]
labels = self.__gen_labels(n_samples, label)
classifier = svm.SVC(gamma=0.001)
labels[1] = 10 # toying around
print("Fitting the data...")
classifier.fit(data[: n_samples // 2], labels[: n_samples // 2])
# Predictions
expected = labels[n_samples // 2 :]
predicted = classifier.predict(data[n_samples // 2 :])
print("Number of samples is %s." % n_samples)
print(
"Classification report for classifier %s:\n%s\n"
% (classifier, metrics.classification_report(expected, predicted))
)
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))
# Save the data for future use
folder_as_path = pathlib.Path(folder_to_save)
filename = str(folder_as_path / "{}p_label_{}.npy".format(power, label))
self.__save_data(data=data, filename=filename)
return data, labels
    def __save_data(self, data: np.ndarray, filename: str):
        """
        Save the data array for future training.
        :param data: Data to be learnt.
        :param filename: Including dir
        :return: None
        """
        print("Saving to {}...".format(filename))
        # Binary .npy format; overwrites any existing file at `filename`.
        with open(filename, "wb") as f:
            np.save(f, data)
    def append_laser_line(self):
        """
        Add a final laser line to the laser signal input.

        Appends one synthetic row to ``self.raw["Laser"]``, one pulse-period
        (``bins_bet_pulses``) after the last recorded pulse, so downstream
        binning (e.g. pd.cut over pulse edges) keeps the original number of bins.
        """
        last_laser_row = pd.DataFrame(
            {
                # Extrapolate the timestamps by one inter-pulse interval.
                "abs_time": self.raw["Laser"]["abs_time"].iat[-1]
                + self.bins_bet_pulses,
                "edge": 0,
                "sweep": self.raw["Laser"]["sweep"].iat[-1],
                "time_rel_sweep": self.raw["Laser"]["time_rel_sweep"].iat[-1]
                + self.bins_bet_pulses,
            },
            # Continue the existing integer index.
            index=[self.raw["Laser"].shape[0]],
        )
        self.raw["Laser"] = pd.concat([self.raw["Laser"], last_laser_row])
    def train_dataset(self):
        """
        Using almost raw data, allocate photons to their laser pulses
        (instead of laser pulses to photons) and create all 16 bit words for the ML algorithm.
        :return: None

        NOTE(review): the trailing loop computes ``cur_pulse`` but never uses
        it and ``sorted_indices``/``hist_df`` are never consumed — this method
        looks unfinished. TODO confirm intended behavior before relying on it.
        """
        # Append a fake laser pulse to retain original number of "bins"
        self.append_laser_line()
        # Assign each photon to the laser-pulse interval it falls into,
        # labelled by the pulse's 4th column value.
        sorted_indices = pd.cut(
            self.raw["PMT1"]["abs_time"],
            bins=self.raw["Laser"]["abs_time"],
            labels=self.raw["Laser"].iloc[:-1, 3],
            include_lowest=True,
        )
        # Add time_rel_sweep as an extra index level for the xs() lookups below.
        self.raw["Laser"].set_index(
            keys="time_rel_sweep", inplace=True, append=True, drop=True
        )
        num_of_pos_bins = 22
        new_bins = np.arange(-10, num_of_pos_bins + 1)  # 32 bins
        min_time_after_sweep = 10
        # Stay clear of the end of the sweep so every window of
        # `num_of_pos_bins` bins fits.
        max_time_after_sweep = (
            self.raw["Laser"]["time_rel_sweep"].max() - num_of_pos_bins - 1
        )
        indices = np.arange(min_time_after_sweep, max_time_after_sweep)
        hist_df = pd.DataFrame([], dtype=object)
        for idx in indices:
            # All pulses at this time-within-sweep.
            cur_pulse = self.raw["Laser"].xs(
                idx, level="time_rel_sweep", drop_level=False
            )
@attr.s(slots=True)
class CensoredVolume(object):
    """
    Per-volume helper that bins photons into their originating laser pulses
    and builds per-pixel histogram statistics.

    NOTE(review): callers in this file construct this class with ``vol=`` and
    ``laser_pulses=`` keyword arguments, and the methods below read
    ``self.laser_pulses`` and ``self.movie`` — none of which are declared as
    attrs here. The attribute list appears to have drifted from its users;
    TODO confirm against the full class history.
    """

    # Photon event table for this volume.
    df = attr.ib(validator=instance_of(pd.DataFrame))
    # FrameChunk holding the pixel grid and timing edges.
    chunk = attr.ib(validator=instance_of(FrameChunk))
    # Laser-to-photon timing offset, in timestamp units.
    offset = attr.ib(validator=instance_of(int))
    # Bin width in seconds (default 800 ps).
    binwidth = attr.ib(default=800e-12)
    # Laser repetition rate in Hz (default 80 MHz).
    reprate = attr.ib(default=80e6)

    @property
    def bins_bet_pulses(self) -> int:
        """Number of time bins between two consecutive laser pulses."""
        return int(np.ceil(1 / (self.reprate * self.binwidth)))

    def gen_bincount(self) -> Tuple[np.ndarray, np.ndarray]:
        """
        Bin the photons into their relative laser pulses, and count how many photons arrived due to each pulse.
        :return: (per-photon pulse index, bincount of photons-per-pulse counts)
        """
        hist, _ = np.histogram(
            self.chunk.data.loc[:, "time_rel_frames"].values, bins=self.laser_pulses
        )
        # digitize is 1-based; shift so index 0 is the first pulse interval
        # (photons before the first pulse get -1).
        dig = (
            np.digitize(
                self.chunk.data.loc[:, "time_rel_frames"].values, bins=self.laser_pulses
            )
            - 1
        )
        return dig, np.bincount(hist)

    def find_temp_structure(self) -> np.ndarray:
        """
        Generate a summed histogram of the temporal structure of detected photons.
        """
        # One bin per time step between pulses.
        bins = np.arange(
            start=0, stop=np.ceil(1 / (self.reprate * self.binwidth)) + 1, step=1
        )
        subtracted_times, _, _ = self.sort_photons_in_pulses()
        hist, _ = np.histogram(subtracted_times, bins=bins)
        return hist

    def sort_photons_in_pulses(self):
        """
        Helper function to generate a searchsorted output of photons in laser pulses.
        :return: (photon time relative to its pulse, start time of each
                  photon's pulse, index of each photon's pulse)
        """
        pulses = self.laser_pulses
        # -1 so each photon maps to the pulse that *precedes* it.
        sorted_indices: np.ndarray = np.searchsorted(
            pulses, self.movie.data["time_rel_frames"].values
        ) - 1
        array_of_laser_starts = pulses[sorted_indices]
        subtracted_times = (
            self.movie.data["time_rel_frames"].values - array_of_laser_starts
        )
        return subtracted_times, array_of_laser_starts, sorted_indices

    def gen_array_of_hists(self) -> np.ndarray:
        """
        For a specific frame, sort photons and laser pulses inside the pixels to gain
        statistics on the distribution of the photons inside the pixels.
        :return: np.ndarray of the same size as the original image. Each pixels contains
        a histogram inside it.
        """
        BinData = namedtuple("BinData", ("hist", "pulses", "photons"))
        all_pulses = 0
        all_photons = 0
        hist, edges = self.chunk.create_hist()
        # Create a relative timestamp to the line signal for each laser pulse
        sorted_pulses = np.searchsorted(edges[0][:-1], self.laser_pulses) - 1
        pulses = pd.DataFrame(
            data=self.laser_pulses[np.where(sorted_pulses >= 0)[0]],
            columns=["time_rel_frames"],
        )
        pulses = pulses.assign(
            Lines=edges[0][:-1][sorted_pulses[np.where(sorted_pulses >= 0)[0]]]
        )
        pulses.dropna(how="any", inplace=True)
        pulses.loc[:, "Lines"] = pulses.loc[:, "Lines"].astype("uint64")
        # Pulse time within its scan line.
        pulses.loc[:, "time_rel_line"] = (
            pulses.loc[:, "time_rel_frames"] - pulses.loc[:, "Lines"]
        )
        pulses.loc[:, "Lines"] = pulses.loc[:, "Lines"].astype("category")
        pulses.set_index(keys=["Lines"], inplace=True, append=True, drop=True)
        # Allocate laser pulses and photons to their bins
        pulses.loc[:, "bins_x"] = (
            np.digitize(pulses.loc[:, "time_rel_frames"].values, bins=edges[0]) - 1
        ).astype("uint16", copy=False)
        pulses.loc[:, "bins_y"] = (
            np.digitize(pulses.loc[:, "time_rel_line"].values, bins=edges[1]) - 1
        ).astype("uint16", copy=False)
        self.chunk.data.loc[:, "bins_x"] = (
            np.digitize(self.chunk.data.loc[:, "time_rel_frames"].values, bins=edges[0])
            - 1
        ).astype("uint16", copy=False)
        self.chunk.data.loc[:, "bins_y"] = (
            np.digitize(self.chunk.data.loc[:, "time_rel_line"].values, bins=edges[1])
            - 1
        ).astype("uint16", copy=False)
        pulses.set_index(
            keys=["bins_x", "bins_y"], inplace=True, append=True, drop=True
        )
        self.chunk.data.set_index(
            keys=["bins_x", "bins_y"], inplace=True, append=True, drop=True
        )
        # Go through each bin and histogram the photons there
        image_bincount = np.zeros_like(hist, dtype=object)
        for row in range(self.chunk.x_pixels):
            row_pulses = pulses.xs(key=row, level="bins_x", drop_level=False)
            # NOTE: stripped under `python -O`; a raise would be sturdier.
            assert len(row_pulses) > 0, "Row {} contains no pulses".format(row)
            try:
                row_photons = self.chunk.data.xs(
                    key=row, level="bins_x", drop_level=False
                )
            except KeyError:  # no photons in row
                for col in range(self.chunk.y_pixels):
                    final_pulses = row_pulses.xs(
                        key=col, level="bins_y", drop_level=False
                    )
                    # Empty photon array -> all-zero histogram over the
                    # pulse edges of this pixel.
                    hist = (
                        np.histogram(
                            np.array([]),
                            bins=final_pulses.loc[:, "time_rel_line"].values,
                        )[0]
                    ).astype("uint8")
                    cur_bincount = np.bincount(hist).astype("uint64", copy=False)
                    tot_pulses = np.sum(cur_bincount)
                    tot_photons = np.average(
                        cur_bincount, weights=range(len(cur_bincount))
                    )
                    image_bincount[row, col] = BinData(
                        hist=hist, pulses=tot_pulses, photons=tot_photons
                    )
                    all_photons += tot_photons
                    all_pulses += tot_pulses
            else:
                # NOTE(review): this branch iterates self.movie.y_pixels
                # while the KeyError branch uses self.chunk.y_pixels —
                # looks inconsistent; TODO confirm which is intended.
                for col in range(self.movie.y_pixels):
                    final_pulses = row_pulses.xs(
                        key=col, level="bins_y", drop_level=False
                    )
                    assert (
                        len(final_pulses) > 0
                    ), "Column {} in row {} contains no pulses".format(col, row)
                    try:
                        final_photons = row_photons.xs(
                            key=col, level="bins_y", drop_level=False
                        )
                    except KeyError:  # no photons in col
                        hist = (
                            np.histogram(
                                np.array([]),
                                bins=final_pulses.loc[:, "time_rel_line"].values,
                            )[0]
                        ).astype("uint8", copy=False)
                    else:
                        # hist = (np.histogram(final_photons.loc[:, 'time_rel_line'].values,
                        #                      bins=final_pulses.loc[:, 'time_rel_line'].values)[0])\
                        #     .astype('uint8', copy=False)
                        hist = numba_histogram(
                            final_photons.loc[:, "time_rel_line"].values,
                            bins=final_pulses.loc[:, "time_rel_line"].values,
                        ).astype("uint8", copy=False)
                    finally:
                        if not np.all(hist >= 0):
                            print("WHAT IS GOING ON")
                        assert np.all(
                            hist >= 0
                        ), "In row {}, column {}, the histogram turned out to be negative.".format(
                            row, col
                        )
                        cur_bincount = numba_bincount(hist).astype("uint64", copy=False)
                        tot_pulses = np.sum(cur_bincount)
                        # Weighted average of photons-per-pulse counts.
                        tot_photons = np.average(
                            cur_bincount, weights=range(len(cur_bincount))
                        )
                        image_bincount[row, col] = BinData(
                            hist=hist, pulses=tot_pulses, photons=tot_photons
                        )
                        all_photons += tot_photons
                        all_pulses += tot_pulses
        return image_bincount, all_pulses, all_photons

    def gen_arr_of_hists(self) -> np.ndarray:
        """
        For each specific frame, sort the photons inside the pixels to gain
        statistics on the distribution of the photons inside the pixels.
        :return: np.ndarray of the same size as the original frame,
        with each bin containing a histogram.
        """
        # NOTE: reaches into Volume's name-mangled private method.
        edges = self.chunk._Volume__create_hist_edges()[0]
        hist_t, edges_t = self.chunk.create_hist()
        assert (
            "time_rel_pulse" in self.chunk.data.columns
        ), "No `time_rel_pulse` column in data."
        self.chunk.data.loc[:, "bins_x"] = (
            np.digitize(self.chunk.data.loc[:, "time_rel_frames"].values, bins=edges[0])
            - 1
        ).astype("uint16", copy=False)
        self.chunk.data.loc[:, "bins_y"] = (
            np.digitize(self.chunk.data.loc[:, "time_rel_line"].values, bins=edges[1])
            - 1
        ).astype("uint16", copy=False)
        self.chunk.data.set_index(
            keys=["bins_x", "bins_y"], inplace=True, append=True, drop=True
        )
        # Drop photons that digitized past the final edge (overflow bin).
        self.chunk.data.drop(len(edges[0]) - 1, level="bins_x", inplace=True)
        self.chunk.data.drop(len(edges[1]) - 1, level="bins_y", inplace=True)
        image_bincount = np.zeros(
            (len(edges[0]) - 1, len(edges[1]) - 1), dtype=object
        )  # returned variable, contains hists
        # Only rows that actually contain photons.
        active_lines = np.unique(self.chunk.data.index.get_level_values("bins_x"))
        for row in active_lines:
            print("Row number: {}".format(row))
            row_photons = self.chunk.data.xs(key=row, level="bins_x", drop_level=False)
            rel_idx = np.unique(row_photons.index.get_level_values("bins_y"))
            image_bincount[row, rel_idx] = self.__allocate_photons_to_bins(
                idx=rel_idx, photons=row_photons
            )
        return image_bincount

    def __allocate_photons_to_bins(self, idx: np.ndarray, photons: pd.DataFrame):
        """
        Generate a summed-histogram of photons for the relevant edges.
        :param idx: occupied "bins_y" values of this row.
        :param photons: photons of the current row (multi-indexed by bins_y).
        :return: object array of per-pixel uint8 histograms.
        """
        hist_storage = np.zeros_like(idx, dtype=object)
        for cur_idx, col in enumerate(idx):
            cur_photons = photons.xs(key=col, level="bins_y", drop_level=False)
            # One bin per time step between consecutive laser pulses.
            hist_storage[cur_idx] = numba_histogram(
                cur_photons.loc[:, "time_rel_pulse"].values,
                bins=np.arange(0, self.bins_bet_pulses + 1, dtype=np.uint8),
            ).astype("uint8", copy=False)
        return hist_storage
@jit((int64[:](uint8[:], uint8[:])), nopython=True, cache=True)
def numba_histogram(arr: np.array, bins) -> np.array:
    # JIT-compiled wrapper: return only the counts, dropping the edges.
    return np.histogram(arr, bins)[0]
@jit((int64[:](uint8[:])), nopython=True, cache=True)
def numba_bincount(arr: np.array) -> np.array:
    # JIT-compiled wrapper around np.bincount for uint8 input.
    return np.bincount(arr)
#
# @jit((int64[:](uint64[:], uint64[:])), nopython=True, cache=True)
# def numba_digitize(arr: np.array) -> np.array:
# pass
|
from django.db import models
class Story(models.Model):
    """A branching story; its content lives in related Page rows."""
    # Display name of the story.
    name = models.CharField(max_length=150)
    def __str__(self):
        # Shown in the admin and anywhere the model is rendered as text.
        return self.name
class Page(models.Model):
    """One page of a Story; deleted together with its story (CASCADE)."""
    story = models.ForeignKey(Story, on_delete=models.CASCADE)
    # Body text of the page.
    text = models.TextField()
    title = models.CharField(max_length=150)
    # Optional illustration (requires Pillow for ImageField).
    image = models.ImageField(null=True, blank=True)
    # Optional short explanation of how the reader got here.
    reason = models.CharField(max_length=140, null=True,blank=True)
    def __str__(self):
        return self.title
class Option(models.Model):
    """A choice shown on `page` that leads the reader to `nextpage`."""
    choice = models.CharField(max_length=150)
    # NOTE(review): related_name is the *reverse* accessor on Page, so
    # page.current_page yields the options displayed on that page and
    # page.next_page the options leading to it — confirm naming intent.
    page = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='current_page')
    nextpage = models.ForeignKey(Page, on_delete=models.CASCADE, related_name='next_page')
    def __str__(self):
        return self.choice
|
#!/usr/bin/env python
import rospy
import math
from andrewbot_msgs.msg import RobotCommand
from geometry_msgs.msg import Twist
from base_driver import BaseDriver
# Module-level ROS publisher shared by all command senders below.
robot_base_pub = rospy.Publisher('andrewbot/RobotCommand', RobotCommand, queue_size=10)
# Driver translating (linear, angular) requests into robot commands.
robot_base = BaseDriver()
# Single reusable message instance, mutated before every publish.
robot_command = RobotCommand()
def sent_robot_command(command):
    """Publish *command* on the andrewbot/RobotCommand topic."""
    # NOTE(review): name looks like a typo for "send_robot_command"; kept
    # as-is since it is this module's public interface.
    robot_command.command = command
    robot_base_pub.publish(robot_command)
def move_base(linear, angular):
    """Convert a (linear, angular) velocity request into robot commands.

    Rotation is handled first, then translation; a None from the driver
    means no command is needed for that component.
    """
    rotation_cmd = robot_base.rotate_base(angular)
    if rotation_cmd is not None:
        sent_robot_command(rotation_cmd)
    translation_cmd = robot_base.translate_base(linear)
    if translation_cmd is not None:
        sent_robot_command(translation_cmd)
def move_base_callback(base_movement):
    """Twist-message callback: forward the x-linear and z-angular speeds."""
    move_base(base_movement.linear.x, base_movement.angular.z)
if __name__ == '__main__':
    try:
        rospy.init_node('andrewbot_base_control', anonymous=True)
        rospy.Subscriber("andrewbot/BaseCommand", Twist, move_base_callback)
        rate = rospy.Rate(100)  # 100 Hz, i.e. a 10 ms period
        # Idle loop: callbacks run on ROS's own threads; we only keep alive.
        while not rospy.is_shutdown():
            rate.sleep()
    except rospy.ROSInterruptException:
        rospy.loginfo("node terminated.")
|
# Allow conans to import ConanFile from here
# to allow refactors
from conans.model.conan_file import ConanFile
from conans.model.options import Options
from conans.model.settings import Settings
from conans.client.build.cmake import CMake
from conans.client.build.meson import Meson
from conans.client.build.gcc import GCC
from conans.client.build.configure_environment import ConfigureEnvironment
from conans.client.build.autotools_environment import AutoToolsBuildEnvironment
from conans.client.build.visual_environment import VisualStudioBuildEnvironment
from conans.client.run_environment import RunEnvironment
from conans.util.files import load
# complex_search: With ORs and not filtering by not restricted settings
COMPLEX_SEARCH_CAPABILITY = "complex_search"
# Capabilities this server advertises to clients.
SERVER_CAPABILITIES = [COMPLEX_SEARCH_CAPABILITY, ]
__version__ = '0.30.0-dev'
|
import doctest
from asyncio import gather, run
from math import sqrt
from async_lru import alru_cache
@alru_cache(maxsize=256)
async def fib(digit: int) -> int:
    """
    >>> run(fib(10))
    55
    >>> run(fib(20))
    6765
    >>> run(fib(1))
    1
    """
    # Base cases first; the alru_cache memoizes the recursive calls.
    if digit == 0:
        return 0
    if digit == 1:
        return 1
    return await fib(digit - 1) + await fib(digit - 2)
async def is_prime(digit: int) -> bool:
    """
    >>> run(is_prime(70))
    False
    >>> run(is_prime(2))
    True
    """
    # Numbers below 2 are never prime.
    if digit <= 1:
        return False
    # Prime iff no divisor exists up to the square root.
    return all(digit % candidate for candidate in range(2, int(sqrt(digit)) + 1))
async def main(input_n: int) -> None:
    """
    Print the input_n-th Fibonacci number and whether input_n is prime.

    >>> run(main(10))
    10th fibo series is 55
    10 is not a prime number
    >>> run(main(20))
    20th fibo series is 6765
    20 is not a prime number
    """
    try:
        if input_n >= 0:
            ret_fib, ret_prime = await gather(fib(input_n), is_prime(input_n))
        else:
            # Fibonacci is undefined for negatives; only check primality.
            ret_fib, ret_prime = None, await is_prime(input_n)
        # Compare to None explicitly: fib(0) == 0 is falsy but perfectly
        # valid (the old truthiness test printed "Invalid input" for n=0).
        if ret_fib is not None:
            print(f"{input_n}th fibo series is", ret_fib)
        else:
            print("Invalid input")
        if ret_prime:
            print(f"{input_n} is a prime number")
        else:
            print(f"{input_n} is not a prime number")
    except ValueError:
        print("Invalid input!")
if __name__ == "__main__":
doctest.testmod(verbose=True)
try:
n = int(input("Enter a number: "))
run(main(n))
except ValueError:
print("Input must an integer!")
|
from telethon import TelegramClient, sync, events
import random
import time
import threading
import asyncio
import utils
# start client with config file
params = utils.Params()
params.start_client()
print("Client runs...")
# Resolve the configured target users: string names -> numeric ids,
# plus a parallel list of usernames for log messages.
params.convert_str_targets_to_id()
params.set_targets_usernames()
# send message to target user
async def send_next_message(target_index, message_obj):
    """Send the next configured auto-response to the target at *target_index*.

    *message_obj* is the incoming message we are responding to; only its id
    is used, for logging.
    """
    global params
    target_id = params.targets[target_index]
    target_username = params.targets_usernames[target_index]
    message_text = params.get_message_to_send(target_index)
    await params.client.send_message(target_id, message_text)
    print("[msg_id: {0:d}] Response \"{1:s}\" sent to \"{2:s}\" ({3:d})".format(
        message_obj.id, message_text, target_username, target_id))
# conditions for aborting response
def abort_response(target_index, message_obj):
    """Return True when the pending auto-response should be aborted."""
    global params
    # if the message is already manually read
    # to do later ...
    # Abort when a newer message has arrived, i.e. this one is no longer
    # the last one recorded for this target.
    if message_obj.id == params.last_message_id[target_index]:
        return False
    print("[msg_id: {0:d}] But it is not the last".format(message_obj.id))
    return True
# newly received message event
# newly received message event
@params.client.on(events.NewMessage(chats=tuple(params.targets)))
async def normal_handler(event):
    """Auto-respond to incoming media messages (non-stickers) from targets,
    after a randomized delay, skipping night hours."""
    global params
    for i in range(len(params.targets)):
        # set message metas
        cur_user_id = event.message.peer_id.user_id
        cur_msg_id = event.message.id
        # Only inbound media messages (not our own, not stickers).
        if (cur_user_id == params.targets[i] and
            event.message.out == False and
            event.message.media != None and
            event.message.sticker == None):
            # remember the last received message
            params.last_message_id[i] = cur_msg_id
            print("[msg_id: {0:d}] New meme received from \"{1:s}\" ({2:d})".format(
                cur_msg_id, params.targets_usernames[i], cur_user_id))
            # check for abort
            if abort_response(i, event.message) == True: return
            # check for night mode: wait until morning before replying
            if (params.is_night() == True):
                print("[msg_id: {0:d}] It is night. Sleeping...".format(cur_msg_id))
                while (params.is_night() == True):
                    await asyncio.sleep(60)
            # wait randomized time before sending response
            await asyncio.sleep(random.randint(
                params.response_delay[0], params.response_delay[1]))
            # check for abort again: a newer message may have arrived meanwhile
            if abort_response(i, event.message) == True: return
            # finally sending the message
            await event.message.mark_read()
            await send_next_message(i, event.message)
            break
# run client in loop; blocks until the Telegram client disconnects
params.client.run_until_disconnected()
print("Client stops.")
|
from .redis import RedisInterface
from .utils import calc_ttl, parse_rate
from typing import Optional
class RateLimit:
    """Fixed-window rate limiter backed by Redis.

    Keys are of the form ``[key][value][count/seconds]``; the counter expires
    `seconds` after its first increment.
    """

    def __init__(self, redis: RedisInterface):
        # Underlying connection pool and the encoding used for GET replies.
        self.redis = redis.pool
        self.encoding = redis._encoding

    async def _handling(
        self, rate: str, key: str, value: str, incr: bool, incrby: int = 1
    ) -> Optional[dict]:
        """Core check/increment. With incr=True the counter is advanced;
        with incr=False the current state is only inspected."""
        count, seconds = parse_rate(rate)
        redis_key = f"[{key}][{value}][{count}/{seconds}]"
        current = await self.redis.get(redis_key, encoding=self.encoding)
        if current:
            current = int(current)
            if current >= count:
                # Limit already reached — report without incrementing.
                # NOTE(review): "remaining" is count - current here, which is
                # <= 0 once limited — confirm consumers expect that.
                return {
                    "ratelimited": True,
                    "data": {
                        "limit": count,
                        "remaining": count - (current if current else 0),
                        "reset": calc_ttl(await self.redis.ttl(redis_key)),
                    },
                }
        if incr:
            # NOTE(review): `value` (the caller's identifier) is rebound to
            # the INCRBY result here — intentional but shadowing.
            value = await self.redis.incrby(redis_key, incrby)
            if value == 1:
                # First hit in this window: start the expiry clock.
                await self.redis.expire(redis_key, seconds)
        return {
            "ratelimited": False,
            "data": {
                "limit": count,
                "remaining": count - (current if current else 0),
                "reset": calc_ttl(await self.redis.ttl(redis_key)),
            },
        }

    async def set(self, rate: str, key: str, value: str, incrby: int = 1) -> None:
        """Increment the counter; the resulting state is discarded."""
        await self._handling(rate, key, value, incr=True, incrby=incrby)

    async def get(self, rate: str, key: str, value: str) -> dict:
        """Inspect the current limit state without incrementing."""
        return await self._handling(rate, key, value, incr=False)

    async def set_and_get(self, rate: str, key: str, value: str, incrby: int = 1) -> dict:
        """Increment the counter and return the resulting state."""
        return await self._handling(rate, key, value, incr=True, incrby=incrby)

    async def delete(self, rate: str, key: str, value: str) -> None:
        """Drop the window counter for this (rate, key, value) triple."""
        count, seconds = parse_rate(rate)
        redis_key = f"[{key}][{value}][{count}/{seconds}]"
        await self.redis.delete(redis_key)
|
import sys
from collections import Counter
def has_conflict(c1, c2):
"""Return True if two rows of character table conflict."""
hasConflict = True
s1 = set(i for i, c in enumerate(c1) if c == '1')
s1c = set(i for i, c in enumerate(c1) if c == '0')
s2 = set(i for i, c in enumerate(c2) if c == '1')
s2c = set(i for i, c in enumerate(c2) if c == '0')
for a in [s1, s1c]:
for b in [s2, s2c]:
if len(a.intersection(b)) == 0:
hasConflict = False
return hasConflict
if __name__ == "__main__":
'''
Given: An inconsistent character table C on at most 100 taxa.
Return: A submatrix of C′ representing a consistent character table on the same taxa and formed by deleting a single
row of C. (If multiple solutions exist, you may return any one.)
'''
character_table = sys.stdin.read().splitlines()
conflictCount = Counter()
for i in range(len(character_table)):
for j in range(i + 1, len(character_table)):
if has_conflict(character_table[i], character_table[j]):
conflictCount[i] += 1
conflictCount[j] += 1
skipIndex = max(conflictCount, key=lambda x: conflictCount[x])
for i in range(len(character_table)):
if i != skipIndex:
print(character_table[i])
|
from selene.api import * |
# Reads an N x M grid ('#' = blocked, '.' = open) and for each open cell
# counts the open cells reachable left/right (L/R) and up/down (U/D)
# before hitting a wall, then sums (L+R)*(U+D) over all cells —
# i.e. the number of (horizontal neighbour, vertical neighbour) pairs.
N, M = map(int, input().split())
S = [ list(input()) for i in range(N)]
ans = 0
U = [ [0 for _ in range(M)] for _ in range(N)]
D = [ [0 for _ in range(M)] for _ in range(N)]
L = [ [0 for _ in range(M)] for _ in range(N)]
R = [ [0 for _ in range(M)] for _ in range(N)]
for i in range(N):
    # Left-to-right sweep: open cells to the left of (i, j).
    cnt = 0
    for j in range(M):
        if S[i][j] == '#':
            cnt = 0
        else:
            L[i][j] = cnt
            cnt += 1
    # Right-to-left sweep: open cells to the right of (i, j).
    cnt = 0
    for j in range(M-1,-1,-1):
        if S[i][j] == '#':
            cnt = 0
        else:
            R[i][j] = cnt
            cnt += 1
for j in range(M):
    # Top-to-bottom sweep: open cells above (i, j).
    cnt = 0
    for i in range(N):
        if S[i][j] == '#':
            cnt = 0
        else:
            D[i][j] = cnt
            cnt += 1
    # Bottom-to-top sweep: open cells below (i, j).
    cnt = 0
    for i in range(N-1,-1,-1):
        if S[i][j] == '#':
            cnt = 0
        else:
            U[i][j] = cnt
            cnt += 1
for i in range(N):
    for j in range(M):
        ans += (L[i][j] + R[i][j])*(U[i][j]+D[i][j])
print(ans)
|
#!/usr/bin/python
#\file inheritance_two.py
#\brief inheritance from two classes
#\author Akihiko Yamaguchi, info@akihikoy.net
#\version 0.1
#\date Jan.06, 2016
'''Define a space.
min and max should be list or None. '''
class TSpaceDef(object):
  """Continuous space of dimension D bounded by Min/Max (lists or None)."""
  def __init__(self, dim=0, min=None, max=None):
    self.D = dim
    self.Min = min
    self.Max = max

  @property
  def Bounds(self):
    # An unspecified bound is reported as an empty list.
    lower = self.Min if self.Min is not None else []
    upper = self.Max if self.Max is not None else []
    return [lower, upper]
'''Define a discrete action (selection) space. '''
class TSelectDef(object):
  """Discrete action (selection) space with N choices (None = undefined)."""
  def __init__(self,num=None):
    self.N= num  # number of selectable options
'''Define a composite space.
'state': state space.
'action': (continuous) action space.
'select': discrete action space (selection).
'''
class TCompSpaceDef(TSpaceDef,TSelectDef):
  """Composite space: continuous ('state'/'action') or discrete ('select')."""
  def __init__(self,type=None,dim=0,min=None,max=None,num=0):
    self.Type= type
    if self.Type in ('state','action'):
      # Continuous: forward bounds, leave selection part empty (N=None).
      TSpaceDef.__init__(self,dim=dim,min=min,max=max)
      TSelectDef.__init__(self)
    elif self.Type==('select'):
      # Discrete: 1-D index space over [0, num-1] when num > 0.
      TSpaceDef.__init__(self,dim=1,min=0 if num>0 else None,max=num-1 if num>0 else None)
      TSelectDef.__init__(self,num=num)
    # NOTE(review): any other Type skips both parent __init__ calls, leaving
    # D/Min/Max/N undefined on the instance — confirm that is intended.
  #Check if self.Type is type.
  def Is(self,type):
    return self.Type==type
# Demo / smoke test (Python 2 print statements).
if __name__=='__main__':
  cspdef= TCompSpaceDef('state',2,[-1,-2],[1,2])
  print 'cspdef:',cspdef.__dict__,cspdef.D,cspdef.Is('state'),cspdef.Is('action'),cspdef.Is('select'),cspdef.Bounds
  cspdef= TCompSpaceDef('action',1,[-1.5],[1.5])
  print 'cspdef:',cspdef.__dict__,cspdef.D,cspdef.Is('state'),cspdef.Is('action'),cspdef.Is('select'),cspdef.Bounds
  cspdef= TCompSpaceDef('select',num=5)
  print 'cspdef:',cspdef.__dict__,cspdef.D,cspdef.Is('state'),cspdef.Is('action'),cspdef.Is('select'),cspdef.Bounds
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The above lines tell the shell to use python as interpreter when the
# script is called directly, and that this file uses utf-8 encoding,
# because of the country specific letter in my surname.
'''
Name: prog3
Author: Martin Bo Kristensen Grønholdt.
Version: 1.0 (30/10-2016)
Program to convert square feet to acres.
'''
def main():
    """Ask for an area in square feet and print its size in acres."""
    # Get the area in square feet
    square_feet = input('Input the total square feet? ')
    # 1 acre == 43560 square feet; convert the text input to float first.
    acres = float(square_feet) / 43560
    print('Acres: ' + str(acres))
# Run main() only when this file is invoked directly (not on import).
if __name__ == '__main__':
    main()
|
from typing import List
from .. import InvitesRepo
from ....domain.entities import Invite
class FakeInvitesRepo(InvitesRepo):
    """In-memory InvitesRepo for tests: upserts invites keyed by their id."""

    def __init__(self) -> None:
        self.__invites: List[Invite] = []

    def save(self, invite: Invite) -> Invite:
        # Replace an existing invite with the same id, otherwise append.
        for position, stored in enumerate(self.__invites):
            if stored.id == invite.id:
                self.__invites[position] = invite
                break
        else:
            self.__invites.append(invite)
        return invite
|
#!/usr/bin/env python
import numpy as np
from math import *
class Atom:
    """Atomic scattering-factor calculator built from Gaussian-fit AO data.

    NOTE(review): this file appears to target Python 2 — `map(...)` results
    are used as lists (e.g. fed to np.array / appended as rows); under
    Python 3 these would be iterators and break. Confirm before porting.
    """
    def __init__(self,element,position_file,orb_pop): #define atom based on its element and a list of its valence AOs populations
                                                      #which is loaded as a list if ints, the ordering of valence oribitals
                                                      #in this list is the same as the one listed in 'ffs_sorted.dats'
        self.element = element
        self.posFile = position_file #see data folder for examples of position files
        self.orbPop = np.array(orb_pop) #list of number of electron in each type of orbital;
                                        #orbPop[0] is the core 'population', i.e. either 0 or 1 (switch on/off)
    def getAOparams(self): #obtain Gaussian fit parameters for aspherical electron scattering of the atom's different
                           #valence orbitals
        """Load this element's Gaussian fit rows from ../data/ffs_sorted.dat."""
        with open('../data/ffs_sorted.dat','r') as fo:
            self.AOparams = []
            flines = fo.readlines()
            for line in flines:
                if line.split()[0]==self.element:
                    self.AOparams.append(map(float,line.split()[3:])) #creates an array of the form[[a params for spherical FF],[b params for spherical FF],
                                                                      #[a's for core],[b's for core],...[a's for last AO],[b's for the last AO]]
            self.AOparams = np.array(self.AOparams)
    def getXYZ(self):
        """Read the atom positions file into an (n, 3) float array."""
        with open(self.posFile,'r') as fo:
            flines = fo.readlines()
            self.XYZ = np.zeros((len(flines),3),dtype=float)
            counter = 0
            for line in flines:
                xyz = np.array(map(float,line.split()))
                self.XYZ[counter] = xyz
                counter+=1
    def getSFs(self): #calculates the atomic spherical structure
        """Sum the 8-term Gaussian expansion on a 0..1 (step 0.01) s-grid."""
        s_grid = np.array([0.01*k for k in range(101)])
        self.SFs = np.zeros(101, dtype=float)
        for k in range(8):
            self.SFs = np.add(self.AOparams[0][k]*np.exp(-1.0*self.AOparams[1][k]*np.square(s_grid)), self.SFs)
    def getSFa(self): #calculates the aspherical structure factor for atom in question
        """Build per-orbital (aspherical) form factors; layout depends on
        len(orbPop): 2 -> (core,s), 4 -> (core,s,p0,p1), 5 -> (core,s,d1,d2,d3)."""
        s_grid = np.array([0.01*k for k in range(101)])
        dim = len(self.orbPop)
        if dim == 2: #dealing with only s AO; same as calculation spherically symmetric case
            FF_core = np.zeros(101,dtype=float)
            FF_s = np.zeros(101,dtype=float)
            for k in range(8):
                FF_core = np.add(self.AOparams[2][k]*np.exp(-self.AOparams[3][k]*np.square(s_grid)), FF_core) #
                FF_s = np.add(self.AOparams[4][k]*np.exp(-1.0*self.AOparams[5][k]*np.square(s_grid)), FF_s)
            self.SFa = [FF_core,FF_s]
        elif dim == 4: #dealing with s, p0, and p1 AOs
            FF_core = np.zeros(101,dtype=float)
            FF_s = np.zeros(101,dtype=float)
            FF_p0 = np.zeros(101,dtype=float)
            ff_p1 = np.zeros(101,dtype=float)
            FF_p1 = np.zeros((101,101),dtype=float)
            for k in range(8):
                FF_core = np.add(self.AOparams[2][k]*np.exp(-1.0*self.AOparams[3][k]*np.square(s_grid)), FF_core)
                FF_s = np.add(self.AOparams[4][k]*np.exp(-1.0*self.AOparams[5][k]*np.square(s_grid)), FF_s)
                FF_p0 = np.add(self.AOparams[6][k]*np.exp(-1.0*self.AOparams[7][k]*np.square(s_grid)), FF_p0)
                ff_p1 = np.add(self.AOparams[8][k]*np.exp(-1.0*self.AOparams[9][k]*np.square(s_grid)), ff_p1)
            # Orientation average over beta in [-pi, pi].
            beta_grid = np.array([-pi+k*pi/50.0 for k in range(101)])
            a_p = (3.0/2.0)*np.square(np.sin(beta_grid))
            b_p = np.square(np.cos(beta_grid)) - (0.5)*np.square(np.sin(beta_grid))
            for k in range(101):
                FF_p1[k] = np.add(a_p*FF_p0[k],b_p*ff_p1[k])
            self.SFa = [FF_core,FF_s,FF_p0,FF_p1]
        elif dim == 5: #dealing with s, d1, d2, d3 AOs
            FF_core = np.zeros(101,dtype=float)
            FF_s = np.zeros(101,dtype=float)
            ff_d1 = np.zeros(101,dtype=float)
            ff_d2 = np.zeros(101,dtype=float)
            # NOTE(review): duplicated initialisation of ff_d2 (next line) —
            # harmless but probably a leftover; confirm nothing was meant here.
            ff_d2 = np.zeros(101,dtype=float)
            ff_d3 = np.zeros(101,dtype=float)
            FF_d1 = np.zeros((101,101),dtype=float)
            FF_d2 = np.zeros((101,101),dtype=float)
            FF_d3 = np.zeros((101,101),dtype=float)
            for k in range(8):
                FF_core = np.add(self.AOparams[2][k]*np.exp(-1.0*self.AOparams[3][k]*np.square(s_grid)), FF_core)
                FF_s = np.add(self.AOparams[4][k]*np.exp(-self.AOparams[5][k]*np.square(s_grid)), FF_s)
                ff_d1 = np.add(self.AOparams[6][k]*np.exp(-self.AOparams[7][k]*np.square(s_grid)), ff_d1)
                ff_d2 = np.add(self.AOparams[8][k]*np.exp(-self.AOparams[9][k]*np.square(s_grid)), ff_d2)
                ff_d3 = np.add(self.AOparams[10][k]*np.exp(-self.AOparams[11][k]*np.square(s_grid)), ff_d3)
            beta_grid = np.array([-pi+k*(pi/50.0) for k in range(101)])
            # Angular mixing coefficients for the three d components.
            a_d1 = (1.0/4.0) + 1.50*np.square(np.cos(beta_grid)) + (9.0/4.0)*np.power(np.cos(beta_grid),4)
            b_d1 = 3.0*(np.square(np.cos(beta_grid)) - np.power(np.cos(beta_grid),4))
            c_d1 = (3.0/4.0) - 1.50*np.square(np.cos(beta_grid)) + 0.75*np.power(np.cos(beta_grid),4)
            a_d2 = 1.50*(np.square(np.cos(beta_grid)) - np.power(np.cos(beta_grid),4))
            b_d2 = 0.50 - 1.50*np.square(np.cos(beta_grid)) + 2.0*np.power(np.cos(beta_grid),4)
            c_d2 = 0.50*(1 - np.power(np.cos(beta_grid),4))
            a_d3 = (3.0/8.0) - 0.75*np.square(np.cos(beta_grid)) + (3.0/8.0)*np.power(np.cos(beta_grid),4)
            b_d3 = 0.50*(1.0 - np.power(np.cos(beta_grid),4))
            c_d3 = (1.0/8.0) + 0.75*np.square(np.cos(beta_grid)) + (1.0/8.0)*np.power(np.cos(beta_grid),4)
            for k in range(101):
                FF_d1[k] = a_d1*ff_d1[k] + b_d1*ff_d2[k] + c_d1*ff_d3[k]
                FF_d2[k] = a_d2*ff_d1[k] + b_d2*ff_d2[k] + c_d2*ff_d3[k]
                FF_d3[k] = a_d3*ff_d1[k] + b_d3*ff_d2[k] + c_d3*ff_d3[k]
                #print FF_d1[k]
            self.SFa = [FF_core,FF_s,FF_d1,FF_d2,FF_d3]
        else:
            print("Invalid orbital population array length.\nValid lengths are 2 (core,s), 4 (core,s,p0,p1), and 5 (core,s,d1,d2,d3).\nReturning 0.\n")
            # NOTE(review): attribute name is lowercase `sFa` here while the
            # valid branches set `SFa` — looks like a typo; confirm callers.
            self.sFa = 0
|
import os, re
################## CHANGE HERE ONLY ##################
# Type of file that you want to update
# typeFile = PinSettingsPrg,
# IncItem,
# PropertyModelConfigurationXml,
# SignalConfigurationXml,
# HalPortBridgeV2Prg,
# All
# typeFile = "HalPortBridgeV2Prg"
# Family chip
family = "S32M"
# The name of Package
all_packages = [ "S32MTV-CZ_64",
"S32MTV244C_64",
"S32MTV244C_80" ]
# The path of repository
ksdk_path = "e:/C55SDK/sdk_codebase/"
# Unix standard
unix_standard = '\r\n'
######################################################
def update_and_write_data_to_file(fFile, wdata, line_ending):
    """Write every line of *wdata* to *fFile*, replacing CRLF sequences
    with the requested *line_ending*."""
    for raw_line in wdata:
        fFile.write(raw_line.replace("\r\n", line_ending))
######################################################
def pinsettings_create_data(mdata):
    """Return a copy of the PinSettings .prg lines with DigitalFilter and
    InitValue config-sequence lines inserted three lines after each
    ' %- PTxx' pin marker inside the electrical-properties section (the
    region between the two 'ElectricalFunctionalProperties()' markers).

    Expects exactly two such markers; otherwise prints a warning and
    returns an empty list.

    Bug fix vs. original: the Python-2-only ``print`` statement is now a
    parenthesized call, valid under both Python 2 and Python 3.
    """
    raw_data = []
    array_index = []
    line_index = 0
    temp_line = 0
    temp_pin = ""
    # Locate the two section markers that bound the pin descriptions.
    for line in mdata:
        if line.count("ElectricalFunctionalProperties()"):
            array_index.append(line_index)
        line_index +=1
    if len(array_index) != 2:
        print("Something seems wrong !!!")
    else:
        # Everything before the section is copied verbatim.
        for i in range(0, array_index[0]):
            raw_data.append(mdata[i])
        for i in range(array_index[0], array_index[1]):
            if mdata[i].count(" %- PT") == 1:
                temp_line = i
                temp_pin = mdata[i].replace(" %- ","").strip("\r\n")
            # Three lines below a pin marker, splice in the generated
            # config-sequence block before the original line.
            # NOTE(review): if no pin marker precedes index temp_line+3,
            # temp_pin is still "" -- confirm input always has a marker.
            if i == temp_line + 3:
                raw_data.append(" %:count=%get_item_config_sequence(" + temp_pin + "_DigitalFilter,PinMuxInit)\r\n")
                raw_data.append(" %include Common\GenInitConfigSequence.prg(PinMuxInit)\r\n")
                raw_data.append(" %:count=%get_item_config_sequence(" + temp_pin + "_InitValue,PinMuxInit)\r\n")
                raw_data.append(" %include Common\GenInitConfigSequence.prg(PinMuxInit)\r\n")
            raw_data.append(mdata[i])
        # Everything from the closing marker onwards is copied verbatim.
        for i in range(array_index[1], len(mdata)):
            raw_data.append(mdata[i])
    return raw_data
######################################################
def incIteam_create_data(mdata):
    """Return a copy of the Inc<package>.item lines with two new
    <electrical_property> declarations (DigitalFilter, InitValue) and,
    per pin, two <GrupItem> blocks appended 12 lines after each
    'Pull Select Field' entry. The pin name is remembered from the most
    recent <Symbol>PTxx</Symbol> line seen.
    """
    raw_data = []
    line_index = 0
    temp_line = 0
    for line in mdata:
        line_index += 1
        raw_data.append(line)
        # Declare the two new electrical properties right after the
        # existing SRE / OCE declarations.
        if line.count("<electrical_property name=\"Slew Rate Field\" brief_name=\"Slew Rate\" symbol_suffix=\"_SRE\" />"):
            raw_data.append(" <electrical_property name=\"Digital Filter Field\" brief_name=\"Digital Filter\" symbol_suffix=\"_DigitalFilter\" />\r\n")
        if line.count("<electrical_property name=\"Over-Current Field\" brief_name=\"Over-Current Enable\" symbol_suffix=\"_OCE\" />"):
            raw_data.append(" <electrical_property name=\"Initial Value Field\" brief_name=\"Initial Value\" symbol_suffix=\"_InitValue\" />\r\n")
        # Track the current pin symbol (e.g. PTA1) for the blocks below.
        temp_data = re.search(r'^.*<Symbol>(PT\S\d*)</Symbol>.*', line, re.M|re.I)
        if temp_data:
            temp_pin = temp_data.group(1)
        if line.count("<Name>Pull Select Field</Name>") == 1:
            temp_line = line_index
        # 12 lines after the Pull Select entry (i.e. after its whole
        # item block) emit the two new per-pin GrupItem blocks.
        # NOTE(review): '<GrupItem>' spelling matches the target schema
        # as-is -- do not "fix" it without checking the consumer.
        if (line_index == temp_line + 12) and (temp_line != 0):
            temp_line = 0
            raw_data.append(" <GrupItem>\r\n")
            raw_data.append(" <TPinFuncPropItem>\r\n")
            raw_data.append(" <Name>Initial Value Field</Name>\r\n")
            raw_data.append(" <Symbol>" + temp_pin + "_InitValue</Symbol>\r\n")
            raw_data.append(" <Hint>Initial Value Control</Hint>\r\n")
            raw_data.append(" <ItemLevel>BASIC</ItemLevel>\r\n")
            raw_data.append(" <EditLine>true</EditLine>\r\n")
            raw_data.append(" <DefaultValue><Automatic></DefaultValue>\r\n")
            raw_data.append(" <CanDelete>true</CanDelete>\r\n")
            raw_data.append(" <IconPopup>true</IconPopup>\r\n")
            raw_data.append(" <SortStyle>alpha used eicons</SortStyle>\r\n")
            raw_data.append(" <PinFunctionalPropertyId>InitValue</PinFunctionalPropertyId>\r\n")
            raw_data.append(" <ForcePinName>" + temp_pin + "</ForcePinName>\r\n")
            raw_data.append(" </TPinFuncPropItem>\r\n")
            raw_data.append(" </GrupItem>\r\n")
            raw_data.append(" <GrupItem>\r\n")
            raw_data.append(" <TPinFuncPropItem>\r\n")
            raw_data.append(" <Name>Digital Filter Field</Name>\r\n")
            raw_data.append(" <Symbol>" + temp_pin + "_DigitalFilter</Symbol>\r\n")
            raw_data.append(" <Hint>Digital Filter Control</Hint>\r\n")
            raw_data.append(" <ItemLevel>BASIC</ItemLevel>\r\n")
            raw_data.append(" <EditLine>true</EditLine>\r\n")
            raw_data.append(" <DefaultValue><Automatic></DefaultValue>\r\n")
            raw_data.append(" <CanDelete>true</CanDelete>\r\n")
            raw_data.append(" <IconPopup>true</IconPopup>\r\n")
            raw_data.append(" <SortStyle>alpha used eicons</SortStyle>\r\n")
            raw_data.append(" <PinFunctionalPropertyId>DigitalFilter</PinFunctionalPropertyId>\r\n")
            raw_data.append(" <ForcePinName>" + temp_pin + "</ForcePinName>\r\n")
            raw_data.append(" </TPinFuncPropItem>\r\n")
            raw_data.append(" </GrupItem>\r\n")
    return raw_data
######################################################
def property_create_data(mdata): # not used anymore
    """Return a copy of property_model_configuration.xml lines with a
    DigitalFilter <enum_property> block inserted before each
    ..._InitValue enum (detected by looking one line ahead).

    NOTE(review): mdata[line_index] is a look-ahead past the current
    line; if the trigger text appears on the very last line this raises
    IndexError -- confirm inputs always end with non-trigger lines.
    """
    raw_data = []
    line_index = 0
    temp_line = 0
    for line in mdata:
        line_index += 1
        raw_data.append(line)
        # Fire when the NEXT line opens an ..._InitValue enum property.
        if line.count("</enum_property>") and mdata[line_index].count("InitValue\" caption=\"Initial Value\" description=\"Initial Value\" default=\"state_0\">"):
            temp_data = re.search(r'^.*<enum_property id=\"(PORT\S)_PCR(\d*)_InitValue.*', mdata[line_index], re.M|re.I)
            temp_port = temp_data.group(1)
            temp_pin = temp_data.group(2)
            raw_data.append(" <enum_property id=\"" + temp_port + "_PCR" + temp_pin + "_DigitalFilter\" caption=\"Digital Filter\" description=\"Digital Filter\" default=\"state_0\">\r\n")
            raw_data.append(" <state id=\"state_0\" caption=\"Digital Filter is configured on the corresponding pin\" description=\"Digital Filter is only configured for digital pin\">\r\n")
            raw_data.append(" <configuration>\r\n")
            raw_data.append(" <const_assign register=\"" + temp_port + "_PCR" + temp_pin + "\" check_conflict=\"true\" bit_field_value=\"0\" />\r\n")
            raw_data.append(" </configuration>\r\n")
            raw_data.append(" </state>\r\n")
            raw_data.append(" <state id=\"state_1\" caption=\"Digital Filter is configured on the corresponding pin\" description=\"Digital Filter is only configured for digital pin\">\r\n")
            raw_data.append(" <configuration>\r\n")
            raw_data.append(" <const_assign register=\"" + temp_port + "_PCR" + temp_pin + "\" check_conflict=\"true\" bit_field_value=\"0x1\" />\r\n")
            raw_data.append(" </configuration>\r\n")
            raw_data.append(" </state>\r\n")
            raw_data.append(" </enum_property>\r\n")
    return raw_data
######################################################
def signal_xml_create_data(mdata):
    """Return a copy of signal_configuration.xml lines with the global
    InitValue/DigitalFilter functional-property declarations added, and
    per-pin <functional_property> blocks inserted before each pin's PS
    property (both detected by one-line look-ahead).

    NOTE(review): mdata[line_index] looks one line past the current one;
    a trigger on the last line would raise IndexError -- confirm inputs.
    """
    raw_data = []
    line_index = 0
    temp_line = 0
    for line in mdata:
        line_index += 1
        raw_data.append(line)
        # End of the declarations section: add the two new declarations.
        if line.count("</functional_property_declaration>") and mdata[line_index].count("</functional_properties_declarations>"):
            raw_data.append(" <functional_property_declaration id=\"InitValue\" name=\"Initial Value\" description=\"Initial Value\">\r\n")
            raw_data.append(" <state_declaration id=\"state_0\" name=\"Low\" description=\"Low\" />\r\n")
            raw_data.append(" <state_declaration id=\"state_1\" name=\"High\" description=\"High\" />\r\n")
            raw_data.append(" </functional_property_declaration>\r\n")
            raw_data.append(" <functional_property_declaration id=\"DigitalFilter\" name=\"Digital Filter\" description=\"Digital Filter\">\r\n")
            raw_data.append(" <state_declaration id=\"state_0\" name=\"Disabled\" description=\"Disabled\" />\r\n")
            raw_data.append(" <state_declaration id=\"state_1\" name=\"Enabled\" description=\"Enabled\" />\r\n")
            raw_data.append(" </functional_property_declaration>\r\n")
        # Remember the current pin's port letter and pin number.
        temp_data = re.search(r'^.*<pin name=\"PT(\S)(\d*)\" coords=\".*', line, re.M|re.I)
        if temp_data:
            temp_port = temp_data.group(1)
            temp_pin = temp_data.group(2)
        # Just before the pin's PS property, emit the two new blocks.
        if line.count("</functional_property>") and mdata[line_index].count("<functional_property id=\"PS\" default=\"state_"):
            raw_data.append(" <functional_property id=\"DigitalFilter\" default=\"state_0\">\r\n")
            raw_data.append(" <state id=\"state_0\">\r\n")
            raw_data.append(" <configuration>\r\n")
            raw_data.append(" <assign register=\"PORT" + temp_port + "_PCR" + temp_pin + "\" bit_field=\"DigitalFilter\" bit_field_mask=\"0x8\" bit_field_value=\"0\" configuration_step=\"init_PORT" + temp_port + "\"/>\r\n")
            raw_data.append(" </configuration>\r\n")
            raw_data.append(" </state>\r\n")
            raw_data.append(" <state id=\"state_1\">\r\n")
            raw_data.append(" <configuration>\r\n")
            raw_data.append(" <assign register=\"PORT" + temp_port + "_PCR" + temp_pin + "\" bit_field=\"DigitalFilter\" bit_field_mask=\"0x8\" bit_field_value=\"0x1\" configuration_step=\"init_PORT" + temp_port + "\"/>\r\n")
            raw_data.append(" </configuration>\r\n")
            raw_data.append(" </state>\r\n")
            raw_data.append(" </functional_property>\r\n")
            raw_data.append(" <functional_property id=\"InitValue\" default=\"state_0\">\r\n")
            raw_data.append(" <state id=\"state_0\">\r\n")
            raw_data.append(" <configuration>\r\n")
            raw_data.append(" <assign register=\"PORT" + temp_port + "_PCR" + temp_pin + "\" bit_field_value=\"0\" />\r\n")
            raw_data.append(" </configuration>\r\n")
            raw_data.append(" </state>\r\n")
            raw_data.append(" <state id=\"state_1\">\r\n")
            raw_data.append(" <configuration>\r\n")
            raw_data.append(" <assign register=\"PORT" + temp_port + "_PCR" + temp_pin + "\" bit_field_value=\"0x1\" />\r\n")
            raw_data.append(" </configuration>\r\n")
            raw_data.append(" </state>\r\n")
            raw_data.append(" </functional_property>\r\n")
    return raw_data
######################################################
def hal_port_create_data(mdata):
    """Return HAL_Port_Bridge_V2.prg lines with digitalFilter handling
    spliced in before the initValue block, plus its matching %undef!.
    Returns an empty list when the file already mentions digitalFilter
    (idempotence guard), so the caller can skip rewriting.

    Bug fix vs. original: the Python-2-only ``print`` statement is now a
    parenthesized call, valid under both Python 2 and Python 3.
    """
    raw_data = []
    line_index = 0
    added_digital = 0
    # Idempotence guard: do nothing for already-patched files.
    for line in mdata:
        if line.count("digitalFilter"):
            added_digital = 1
            break
    if added_digital == 1:
        print("This code is applied")
        return raw_data
    else:
        for line in mdata:
            line_index += 1
            raw_data.append(line)
            # Insert the digitalFilter block right before the
            # '%if defined(initValue)' section (one-line look-ahead).
            # NOTE(review): a trigger on the very last line would raise
            # IndexError on mdata[line_index] -- confirm inputs.
            if line.count("%endif") and mdata[line_index].count("%if defined(initValue)"):
                raw_data.append(" %define digitalFilter %get(%str_replace(instance,\"PORT\",\"PT\")%'pin'_digitalFilter, Value)\r\n")
                raw_data.append(" %if defined(digitalFilter)\r\n")
                raw_data.append(" %if %str_replace(digitalFilter, \"state_\", \"\") = '0'\r\n")
                raw_data.append(" %append PE_G_CRI_PinSettings_Strucure_writes %>5.digitalFilter = false,\r\n")
                raw_data.append(" %else\r\n")
                raw_data.append(" %append PE_G_CRI_PinSettings_Strucure_writes %>5.digitalFilter = true,\r\n")
                raw_data.append(" %endif\r\n")
                raw_data.append(" %endif\r\n")
            if line.count("%undef! clearIntFlag") and mdata[line_index].count("%undef! mux"):
                raw_data.append(" %undef! digitalFilter\r\n")
        return raw_data
######################################################
def Make_Pinsettings(package):
    """Regenerate PinSettings_<package>.prg with the DigitalFilter and
    InitValue config-sequence lines, keeping the original as *_old.prg.

    Fixes vs. original: Python-2-only ``print`` statements replaced with
    calls (valid on 2 and 3), and the input file handle -- previously
    never closed -- is now managed with ``with``.
    """
    file_path_import = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/PinSettings/Drivers/" + family + "/PinSettings_" + package + ".prg")
    file_path_export = ksdk_path + "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/PinSettings/Drivers/" + family + "/PinSettings_" + package + "_new.prg"
    directory_export = os.path.dirname(file_path_export)
    print(directory_export)
    if not os.path.exists(directory_export):
        print("File does not exist")
    else:
        with open(file_path_import, "rb") as f_in:
            file_import = f_in.readlines()
        local_data = pinsettings_create_data(file_import)
        with open(file_path_export, "wb") as file_export:
            update_and_write_data_to_file(file_export, local_data, unix_standard)
        # Keep the original as *_old, promote the new file in its place.
        os.renames(directory_export + "/PinSettings_" + package + ".prg", directory_export + "/PinSettings_" + package + "_old.prg")
        os.renames(directory_export + "/PinSettings_" + package + "_new.prg", directory_export + "/PinSettings_" + package + ".prg")
        print("Done Make_Pinsettings")
def Make_IncIteam(package):
    """Regenerate Inc<package>.item with the new per-pin property items,
    keeping the original file as *_old.item.

    Fixes vs. original: parenthesized ``print`` (Python 2/3 compatible)
    and the input file handle is closed via ``with``.
    """
    file_path_import = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/PinSettings/Inc" + package + ".item")
    file_path_export = ksdk_path + "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Beans/PinSettings/Inc" + package + "_new.item"
    directory_export = os.path.dirname(file_path_export)
    print(directory_export)
    if not os.path.exists(directory_export):
        print("File does not exist")
    else:
        with open(file_path_import, "rb") as f_in:
            file_import = f_in.readlines()
        local_data = incIteam_create_data(file_import)
        with open(file_path_export, "wb") as file_export:
            update_and_write_data_to_file(file_export, local_data, unix_standard)
        # Keep the original as *_old, promote the new file in its place.
        os.renames(directory_export + "/Inc" + package + ".item", directory_export + "/Inc" + package + "_old.item")
        os.renames(directory_export + "/Inc" + package + "_new.item", directory_export + "/Inc" + package + ".item")
        print("Done Make_IncIteam")
def Make_PropertyXml(package):
    """Regenerate property_model_configuration.xml for <package>,
    keeping the original file as *_old.xml. (Currently disabled in
    Make_All; property_create_data is marked 'not used anymore'.)

    Fixes vs. original: parenthesized ``print`` (Python 2/3 compatible)
    and the input file handle is closed via ``with``.
    """
    file_path_import = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/CPUs/" + package + "/property_model_configuration.xml")
    file_path_export = ksdk_path + "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/CPUs/" + package + "/property_model_configuration_new.xml"
    directory_export = os.path.dirname(file_path_export)
    print(directory_export)
    if not os.path.exists(directory_export):
        print("File does not exist")
    else:
        with open(file_path_import, "rb") as f_in:
            file_import = f_in.readlines()
        local_data = property_create_data(file_import)
        with open(file_path_export, "wb") as file_export:
            update_and_write_data_to_file(file_export, local_data, unix_standard)
        # Keep the original as *_old, promote the new file in its place.
        os.renames(directory_export + "/property_model_configuration.xml", directory_export + "/property_model_configuration_old.xml")
        os.renames(directory_export + "/property_model_configuration_new.xml", directory_export + "/property_model_configuration.xml")
        print("Done Make_PropertyXml")
def Make_SignalXml(package):
    """Regenerate signal_configuration.xml for <package> with the new
    functional properties, keeping the original file as *_old.xml.

    Fixes vs. original: parenthesized ``print`` (Python 2/3 compatible)
    and the input file handle is closed via ``with``.
    """
    file_path_import = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/CPUs/" + package + "/signal_configuration.xml")
    file_path_export = ksdk_path + "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/CPUs/" + package + "/signal_configuration_new.xml"
    directory_export = os.path.dirname(file_path_export)
    print(directory_export)
    if not os.path.exists(directory_export):
        print("File does not exist")
    else:
        with open(file_path_import, "rb") as f_in:
            file_import = f_in.readlines()
        local_data = signal_xml_create_data(file_import)
        with open(file_path_export, "wb") as file_export:
            update_and_write_data_to_file(file_export, local_data, unix_standard)
        # Keep the original as *_old, promote the new file in its place.
        os.renames(directory_export + "/signal_configuration.xml", directory_export + "/signal_configuration_old.xml")
        os.renames(directory_export + "/signal_configuration_new.xml", directory_export + "/signal_configuration.xml")
        print("Done Make_SignalXml")
def Make_HalPort(package):
    """Regenerate HAL_Port_Bridge_V2.prg with digitalFilter support,
    keeping the original as *_old.prg; skips files already patched.
    ``package`` is unused but kept so all Make_* share one signature.

    Fixes vs. original: parenthesized ``print`` (Python 2/3 compatible)
    and the input file handle is closed via ``with``.
    """
    file_path_import = os.path.join(ksdk_path, "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Drivers/" + family + "/HAL_Port_Bridge_V2.prg")
    directory_export = os.path.dirname(file_path_import)
    with open(file_path_import, "rb") as f_in:
        file_import = f_in.readlines()
    local_data = hal_port_create_data(file_import)
    # hal_port_create_data returns [] when the file is already patched.
    if len(local_data) != 0:
        file_path_export = ksdk_path + "tools/pex/Repositories/SDK_RELEASE_VERSION_ID_Repository/Drivers/" + family + "/HAL_Port_Bridge_V2_new.prg"
        with open(file_path_export, "wb") as file_export:
            update_and_write_data_to_file(file_export, local_data, unix_standard)
        os.renames(directory_export + "/HAL_Port_Bridge_V2.prg", directory_export + "/HAL_Port_Bridge_V2_old.prg")
        os.renames(directory_export + "/HAL_Port_Bridge_V2_new.prg", directory_export + "/HAL_Port_Bridge_V2.prg")
        print("Done Make_HalPort")
    else:
        print("This file is updated already")
def Make_All(package):
    """Run every enabled file updater for one package.
    The PropertyXml and HalPort steps are intentionally commented out.
    """
    Make_Pinsettings(package)
    Make_IncIteam(package)
    # Make_PropertyXml(package)
    Make_SignalXml(package)
    # Make_HalPort(package)
# Script entry: update the repository files for every configured package.
# (Parenthesized print keeps this runnable under Python 2 and 3.)
for pk in all_packages:
    print(">>>>>>>>>>>>> Start " + pk + " <<<<<<<<<<<<<")
    Make_All(pk)
    print(">>>>>>>>>>>>> Finish " + pk + " <<<<<<<<<<<<<")
|
__author__ = "Micah Price"
__email__ = "98mprice@gmail.com"
import config
from keras.models import Sequential, load_model
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.utils.data_utils import get_file
from keras.optimizers import RMSprop
import praw
import numpy as np
import random
import sys
import os
import io
def sample(a, temperature=1.0):
    """Sample an index from probability array *a*, re-weighted by
    *temperature* (lower values make the pick greedier)."""
    scaled = np.log(a) / temperature
    weights = np.exp(scaled)
    probs = weights / np.sum(weights)
    return np.random.choice(range(len(a)), p=probs)
# Comment corpus: build stable word<->index vocabularies (sorting makes
# the mapping deterministic across runs) and load the trained comment
# LSTM from disk.
with io.open('data/comments.txt', encoding='utf-8') as f:
    comment_text = f.read()
comment_words = set(comment_text.split())
comment_words = sorted(comment_words)
comment_word_indices = dict((c, i) for i, c in enumerate(comment_words))
comment_indices_word = dict((i, c) for i, c in enumerate(comment_words))
# Length (in words) of the seed window fed to both models.
maxlen = 30
comment_list_words=comment_text.split()
comment_model = load_model('models/comments.h5')
# Title corpus: same vocabulary construction and model load as above,
# but for the title-generation LSTM.
with io.open('data/titles.txt', encoding='utf-8') as f:
    title_text = f.read()
title_words = set(title_text.split())
title_words = sorted(title_words)
title_word_indices = dict((c, i) for i, c in enumerate(title_words))
title_indices_word = dict((i, c) for i, c in enumerate(title_words))
title_list_words=title_text.split()
title_model = load_model('models/titles.h5')
def generate_comments(length):
    """Generate up to *length* words with the comment LSTM and split the
    stream into separate comments on the '<break>' token. Words after
    the last '<break>' are discarded, matching the original behavior.

    Fixes vs. original: the local named ``str`` shadowed the builtin
    (renamed to ``current``), and the dead ``generated`` accumulator,
    which was written but never read, is removed.
    """
    start_index = random.randint(0, len(comment_list_words) - maxlen - 1)
    diversity = 0.8
    # Seed the model with a random window of real corpus words.
    sentence = comment_list_words[start_index: start_index + maxlen]
    comments = []
    current = ''
    for _ in range(length):
        # One-hot encode the current window.
        x = np.zeros((1, maxlen, len(comment_words)))
        for t, word in enumerate(sentence):
            x[0, t, comment_word_indices[word]] = 1.
        preds = comment_model.predict(x, verbose=0)[0]
        next_word = comment_indices_word[sample(preds, diversity)]
        # Slide the window forward by one word.
        del sentence[0]
        sentence.append(next_word)
        if next_word == '<break>':
            comments.append(current)
            current = ''
        else:
            current += ' '
            current += next_word
    return comments
def generate_title():
    """Generate one title (truncated to 300 chars, Reddit's title limit)
    with the title LSTM; stops at the first '<break>' token seen from
    iteration 2 onwards.

    Fixes vs. original: the local named ``str`` shadowed the builtin
    (renamed to ``title``), and the dead ``generated`` accumulator,
    which was written but never read, is removed.
    """
    start_index = random.randint(0, len(title_list_words) - maxlen - 1)
    diversity = 1.2
    # Seed the model with a random window of real corpus words.
    sentence = title_list_words[start_index: start_index + maxlen]
    title = ''
    for i in range(100):
        # One-hot encode the current window.
        x = np.zeros((1, maxlen, len(title_words)))
        for t, word in enumerate(sentence):
            x[0, t, title_word_indices[word]] = 1.
        preds = title_model.predict(x, verbose=0)[0]
        next_word = title_indices_word[sample(preds, diversity)]
        del sentence[0]
        sentence.append(next_word)
        if next_word == '<break>':
            # Very early '<break>'s (i < 2) are ignored so the title is
            # not empty; the token itself is never appended.
            if i >= 2:
                break
        else:
            title += ' '
            title += next_word
    if len(title) >= 300:
        title = title[0:300]
    return title
# Authenticated Reddit client; all credentials come from the local
# (untracked) config module.
reddit = praw.Reddit(client_id=config.client_id,
                     client_secret=config.client_secret,
                     user_agent=config.user_agent,
                     username=config.username,
                     password=config.password)
def post_comment_chain(submission, comment_chain):
    """Reply to *submission* with comment_chain[0], then post each
    following string as a nested reply to the previous comment.
    Does nothing when the first comment is empty."""
    if not comment_chain[0]:
        return
    parent = submission.reply(comment_chain[0])
    for text in comment_chain[1:]:
        parent = parent.reply(text)
def get_random_submission():
    """Return the URL of a random submission from /r/totallynotrobots."""
    random_post = reddit.subreddit('totallynotrobots').random()
    return praw.models.Submission(reddit, random_post).url
def post_to_reddit():
    """Submit one generated link post to /r/totally_humans and attach a
    random number (1-5) of generated comment chains to it."""
    print(reddit.user.me())
    submission = reddit.subreddit('totally_humans').submit(title=generate_title(), url=get_random_submission())
    comment_chain_count = random.randint(1, 5)
    print('comment_chain_count', comment_chain_count)
    for _ in range(comment_chain_count):
        length = random.randint(5, 80)
        print('length', length)
        comment_chain = generate_comments(length)
        # generate_comments may yield nothing if no '<break>' appeared.
        if len(comment_chain) > 0:
            post_comment_chain(submission, comment_chain)
post_to_reddit()
|
# Largest-remainder (Hare quota) seat allocation for 450 mandates.
# Reads "party-name votes" lines from input.txt and prints each party's
# seat count.
#
# Fixes vs. original: the result table was printed twice (copy-pasted
# loop), the input file handle was never closed, and the flat script ran
# on import; the logic now lives in testable helpers behind a
# __main__ guard. The allocation algorithm itself is unchanged.

def read_party_votes(path):
    """Parse 'name... votes' lines from *path*; blank/short lines are
    skipped. Returns (names, votes) as parallel lists."""
    names, votes = [], []
    with open(path) as in_file:
        for line in in_file:
            if len(line) < 2:  # skip blank / whitespace-only lines
                continue
            parts = line.split()
            names.append(' '.join(parts[:-1]))
            votes.append(int(parts[-1]))
    return names, votes

def allocate_mandates(votes, seats=450):
    """Largest-remainder allocation of *seats* over *votes*.
    Each party first gets floor(votes * seats / total); leftover seats go
    to the largest fractional remainders, ties broken in favor of the
    party with more votes (first such party on a full tie)."""
    total_votes = sum(votes)
    mandates, frac_part = [], []
    assigned = 0
    for v in votes:
        exact = (v * seats) / total_votes
        whole = int(exact)
        mandates.append(whole)
        frac_part.append(exact - whole)
        assigned += whole
    while assigned < seats:
        best = 0
        for j in range(1, len(frac_part)):
            if (frac_part[j] > frac_part[best]
                    or (frac_part[j] == frac_part[best] and votes[j] > votes[best])):
                best = j
        mandates[best] += 1
        assigned += 1
        frac_part[best] = 0  # a party wins at most one remainder seat
    return mandates

if __name__ == "__main__":
    names, votes = read_party_votes('input.txt')
    for name, seats_won in zip(names, allocate_mandates(votes)):
        print(name, seats_won)
|
# Loaded by ~/.pdbrc, because .pdbrc can only contain pdb commands.
# Command line history.
import atexit, os, readline
histfile = os.path.expanduser("~/.var/pdb_history")
try:
    readline.read_history_file(histfile)
except IOError:
    # First run: the history file does not exist yet -- start empty.
    pass
# Persist the in-memory history back to disk when the session ends.
atexit.register(readline.write_history_file, histfile)
del histfile
readline.set_history_length(200)
# Cleanup any variables that could otherwise clutter up the namespace.
del atexit, os, readline
|
import time as timer
import heapq
import random
import networkx as nx
import numpy as np
import math
import matplotlib.pyplot as plt
from single_agent_planner import compute_heuristics, a_star, \
get_location, get_sum_of_cost, construct_MDD_for_agent, reconstruct_MDD, updateMDD
def detect_collision(path1, path2):
    """Return the first collision between two paths as (loc_list, t),
    or None when they never conflict. A vertex collision yields [loc];
    an edge (swap) collision yields [loc1, loc2] at the swap timestep."""
    horizon = max(len(path1), len(path2))
    for step in range(horizon):
        here1 = get_location(path1, step)
        here2 = get_location(path2, step)
        # Vertex collision: both agents occupy the same cell.
        if here1 == here2:
            return ([here1], step)
        # Edge collision: the agents swap cells between step and step+1.
        if step < horizon - 1:
            if here1 == get_location(path2, step + 1) and here2 == get_location(path1, step + 1):
                return ([here1, here2], step + 1)
    return None
def detect_collisions(paths):
    """Collect the first collision of every path pair as a list of
    {'a1', 'a2', 'loc', 'timestep'} dicts (empty when collision-free)."""
    found = []
    n = len(paths)
    for a1 in range(n - 1):
        for a2 in range(a1 + 1, n):
            hit = detect_collision(paths[a1], paths[a2])
            if hit is not None:
                found.append({'a1': a1, 'a2': a2, 'loc': hit[0], 'timestep': hit[1]})
    return found
def standard_splitting(collision):
    """Translate one collision into the two standard negative constraints
    (one per agent). A vertex collision bans both agents from the cell at
    that timestep; an edge collision bans each agent's direction of
    traversal (the second agent gets the reversed edge).

    Bug fix vs. original: ``constraints`` was never initialized, so the
    first ``append`` raised NameError (compare disjoint_splitting, which
    starts with an empty list).
    """
    constraints = []
    loc = collision['loc']
    timestep = collision['timestep']
    a1 = collision['a1']
    a2 = collision['a2']
    # Vertex collision
    if len(loc) == 1:
        constraints.append({'agent': a1, 'loc': loc, 'timestep': timestep, 'positive': False})
        constraints.append({'agent': a2, 'loc': loc, 'timestep': timestep, 'positive': False})
        return constraints
    # Edge collision: agent a2 traverses the edge in the opposite order.
    if len(loc) == 2:
        reverse_loc = loc.copy()
        reverse_loc.reverse()
        constraints.append({'agent': a1, 'loc': loc, 'timestep': timestep, 'positive': False})
        constraints.append({'agent': a2, 'loc': reverse_loc, 'timestep': timestep, 'positive': False})
        return constraints
def disjoint_splitting(collision):
    """Disjoint split of one collision: a randomly chosen agent receives
    a positive (must be here) and a negative (must not) constraint on
    the contested vertex or edge."""
    loc = collision['loc']
    timestep = collision['timestep']
    # Coin flip decides which of the two agents is constrained.
    pick_first = random.randint(0, 1) == 0
    agent = collision['a1'] if pick_first else collision['a2']
    # Vertex collision
    if len(loc) == 1:
        return [
            {'agent': agent, 'loc': loc, 'timestep': timestep, 'positive': True},
            {'agent': agent, 'loc': loc, 'timestep': timestep, 'positive': False},
        ]
    # Edge collision: agent a2 traverses the edge in reverse order.
    if len(loc) == 2:
        where = loc if pick_first else list(reversed(loc))
        return [
            {'agent': agent, 'loc': where, 'timestep': timestep, 'positive': True},
            {'agent': agent, 'loc': where, 'timestep': timestep, 'positive': False},
        ]
def paths_violate_constraint(constraint, paths):
    """Return the indices of every other agent whose current path
    conflicts with a positive constraint: it occupies the reserved
    vertex, or touches/uses the reserved edge at that timestep."""
    assert constraint['positive'] is True
    violators = []
    where = constraint['loc']
    when = constraint['timestep']
    for agent, path in enumerate(paths):
        if agent == constraint['agent']:
            continue
        curr = get_location(path, when)
        prev = get_location(path, when - 1)
        if len(where) == 1:
            # Vertex constraint: another agent sits on the reserved cell.
            if where[0] == curr:
                violators.append(agent)
        else:
            # Edge constraint: the agent leaves the from-cell, enters
            # the to-cell, or traverses the same edge in reverse.
            if where[0] == prev or where[1] == curr or where == [curr, prev]:
                violators.append(agent)
    return violators
# Construct initial MDDs for all agents
def construct_MDD(my_map, num_of_agents, starts, goals, h_values, paths, constraints):
    """Build one MDD per agent, each sized to the agent's current path
    cost (len(path) - 1)."""
    return [
        construct_MDD_for_agent(my_map, agent, starts[agent], goals[agent],
                                h_values[agent], len(paths[agent]) - 1, constraints)
        for agent in range(num_of_agents)
    ]
# Compute CG heuristic
def compute_CG(MDD, num_of_agents, starts):
    """CG heuristic: size of a minimum vertex cover of the
    cardinal-conflict graph."""
    conflict_graph = construct_conflict_graph(num_of_agents, MDD, starts)
    return get_MVC(conflict_graph)
def compute_DG(MDD, num_of_agents, starts, goals):
    """DG heuristic: size of a minimum vertex cover of the dependency
    graph."""
    dependency_graph = construct_dependency_graph(num_of_agents, MDD, starts, goals)
    return get_MVC(dependency_graph)
def compute_WDG(my_map, MDD, paths, constraints, num_of_agents, starts, goals):
    """WDG heuristic: edge-weighted minimum vertex cover of the
    dependency graph, where each edge carries the extra cost the two
    conflicting agents need to become conflict-free."""
    dependency_graph = construct_dependency_graph(num_of_agents, MDD, starts, goals)
    weighted_graph = compute_weights(my_map, paths, constraints, num_of_agents, dependency_graph, starts, goals)
    return compute_EWMVC(weighted_graph)
# Compute weights for each edge in the dependecy graph
def compute_weights(my_map, paths, constraints, num_of_agents, dependency_graph, starts, goals):
    """Weight each dependency edge (i, j) with the extra cost the two
    agents need, beyond their current paths, to resolve their conflict;
    the cost comes from a 2-agent CBS sub-search."""
    G = dependency_graph.copy()
    # For every pair of agents
    for i in range(num_of_agents - 1):
        # NOTE(review): the inner range starts at i, so (i, i) is also
        # probed; harmless because a simple graph has no self-loop edge,
        # but range(i + 1, ...) was probably intended -- confirm.
        for j in range(i, num_of_agents):
            # If agent i and agent j have conflicts
            if (i, j) in G.edges:
                # Use CBS to get the cost of conflict-free paths between two agent
                constraints_ij = [constraint.copy() for constraint in constraints if constraint['agent']==i or constraint['agent']==j]
                # Re-index the two agents as 0 and 1 for the sub-solver.
                for constraint in constraints_ij:
                    if constraint['agent'] == i:
                        constraint['agent'] = 0
                    elif constraint['agent'] == j:
                        constraint['agent'] = 1
                starts_2 = [starts[i], starts[j]]
                goals_2 = [goals[i], goals[j]]
                cbs = CBSSolver(my_map, starts_2, goals_2)
                cost, root_paths, root_constraints = cbs.find_solution(disjoint = False, heuristic = 'None', weight = True, constraints = constraints_ij)
                # Extra cost over the agents' current (individual) paths.
                weight = cost - len(paths[i]) - len(paths[j]) + 2
                G.add_edge(i, j, weight = weight)
    return G
# Compute the size of EWMVC of the dependency graph
# Use branch and bound
def compute_EWMVC(dependency_graph):
    """Edge-weighted MVC of the whole graph: solve each connected
    component independently and sum the results."""
    G = dependency_graph.copy()
    components = [G.subgraph(c) for c in nx.connected_components(G)]
    return sum(compute_EWMVC_component(component) for component in components)
# Compute EWMVC for connected component
def compute_EWMVC_component(graph):
    """Branch-and-bound edge-weighted MVC for one connected component.
    Each node may take any integer value from 0 up to its smallest
    incident edge weight (more is never needed)."""
    G = graph.copy()
    nodes = nx.nodes(G)
    possible_values = {}
    for node in nodes:
        cap = min(G.edges[edge]['weight'] for edge in G.edges(node))
        possible_values[node] = list(range(cap + 1))
    return bnb(possible_values, {}, nodes, G, float('inf'))
def bnb(possible_values, value_list, nodes, G, best):
    """Recursive branch-and-bound: extend the partial assignment
    *value_list* one node at a time, skipping values that violate an
    edge-weight constraint, and return the cheapest complete assignment
    found (or *best* when nothing improves on it)."""
    if len(value_list) == len(possible_values):
        # Leaf: every node assigned -- total cost of this assignment.
        return sum(value_list.values())
    trial = value_list.copy()
    unassigned = [node for node in nodes if node not in trial]
    node = unassigned[0]
    for value in possible_values[node]:
        if isViolated(trial, G, node, value):
            continue
        trial[node] = value
        cost = bnb(possible_values, trial, nodes, G, best)
        if cost < best:
            best = cost
    return best
def isViolated(value_list, G, node, value):
    """True when assigning *value* to *node* underfunds an edge to an
    already-assigned neighbor, i.e. the two endpoint values sum to less
    than the edge's weight."""
    for other, assigned in value_list.items():
        if (other, node) in G.edges():
            if assigned + value < G.edges[other, node]['weight']:
                return True
    return False
def construct_dependency_graph(num_of_agents, MDD, starts, goals):
    """Graph with an edge between every pair of dependent agents:
    their joint MDD admits no conflict-free walk to both goals, or
    their individual MDDs share a cardinal conflict."""
    dependency_graph = nx.Graph()
    for i in range(num_of_agents - 1):
        for j in range(i + 1, num_of_agents):
            joint_MDD, max_level = merge_MDD(MDD[i], starts[i], goals[i], MDD[j], starts[j], goals[j])
            dependent = isDependent(joint_MDD, goals[i], goals[j], max_level) \
                or hasCardinal(MDD[i], starts[i], MDD[j], starts[j])
            if dependent:
                dependency_graph.add_nodes_from([i, j])
                dependency_graph.add_edge(i, j)
    return dependency_graph
# Merge two MDDs
def merge_MDD(MDD1, start1, goal1, MDD2, start2, goal2):
    """Build the joint (cross-product) MDD of two agents' MDDs.

    Returns (joint_MDD, max_level): joint_MDD maps level -> list of
    location pairs with no vertex conflict (x != y); max_level is the
    deeper MDD's last level. The shallower MDD is first padded by
    letting its agent wait at the goal so both reach the same depth.
    Construction stops early at the first empty level.
    """
    # If depths of MDD1 and MDD2 are not the same
    len1 = len(reconstruct_MDD(MDD1, start1))
    len2 = len(reconstruct_MDD(MDD2, start2))
    MDD1_copy = MDD1.copy()
    MDD2_copy = MDD2.copy()
    # Pad the shallower MDD with wait-at-goal edges.
    if len1 > len2:
        edges = []
        for i in range(len2, len1):
            edges.append(((goal2, i-1), (goal2, i)))
        MDD2_copy.add_edges_from(edges)
    elif len1 < len2:
        edges = []
        for i in range(len1, len2):
            edges.append(((goal1, i-1), (goal1, i)))
        MDD1_copy.add_edges_from(edges)
    # Merge MDDs
    joint_MDD = {0:[(start1, start2)]}
    for i in range(max(len1, len2) - 1):
        joint_MDD[i+1] = []
        # For each pair at level i
        for pair in joint_MDD[i]:
            # MDD nodes are (location, level) tuples; keep locations only.
            successor1 = [successor for successor, _ in list(MDD1_copy.successors((pair[0], i)))]
            successor2 = [successor for successor, _ in list(MDD2_copy.successors((pair[1], i)))]
            # Exclude vertex conflicts (both agents on the same cell).
            cross_product = [(x, y) for x in successor1 for y in successor2 if x != y]
            for new_pair in cross_product:
                if new_pair not in joint_MDD[i+1]:
                    joint_MDD[i+1].append(new_pair)
        # No viable pair at the next level: the agents are dependent.
        if len(joint_MDD[i+1]) == 0:
            return joint_MDD, max(len1, len2)-1
    return joint_MDD, max(len1, len2)-1
# Whether two agents are dependent
def isDependent(joint_MDD, goal1, goal2, max_level):
    """Two agents are dependent iff their joint MDD fails to reach the
    pair of goals at the final level."""
    reaches_goals = (max_level in joint_MDD
                     and (goal1, goal2) in joint_MDD[max_level])
    return not reaches_goals
# Construct confict graph according to cardinal conflict
def construct_conflict_graph(num_of_agents, MDD, starts):
    """Graph with an edge for every pair of agents whose MDDs contain a
    cardinal (unavoidable) conflict."""
    conflict_graph = nx.Graph()
    pairs = ((i, j) for i in range(num_of_agents - 1)
             for j in range(i + 1, num_of_agents))
    for i, j in pairs:
        if hasCardinal(MDD[i], starts[i], MDD[j], starts[j]):
            conflict_graph.add_nodes_from([i, j])
            conflict_graph.add_edge(i, j)
    return conflict_graph
# A conflict is cardinal
# iff the contested vertex (or edge) is the only vertex (or edge)
# at level t of the MDDs for both agents.
def hasCardinal(MDD1, start1, MDD2, start2):
    """True when the two MDDs share a cardinal conflict: the contested
    vertex (or edge) is the ONLY one at its level in both MDDs, so the
    conflict cannot be avoided."""
    levels1 = reconstruct_MDD(MDD1, start1)
    levels2 = reconstruct_MDD(MDD2, start2)
    # Only compare up to the shallower MDD's depth.
    depth = min(len(levels1), len(levels2))
    for t in range(depth):
        forced1 = len(levels1[t]) == 1
        forced2 = len(levels2[t]) == 1
        # Cardinal vertex: both agents are forced through the same cell.
        if forced1 and forced2 and levels1[t][0] == levels2[t][0]:
            return True
        # Cardinal edge: both agents are forced to swap the same two cells.
        if t < depth - 1:
            if (forced1 and forced2
                    and len(levels1[t+1]) == 1 and len(levels2[t+1]) == 1
                    and levels1[t][0] == levels2[t+1][0]
                    and levels1[t+1][0] == levels2[t][0]):
                return True
    return False
# Compute minimum vertex cover
def get_MVC(G):
    """Size of a minimum vertex cover of G, found by branch-and-bound
    (EMVC), starting from the trivial all-nodes upper bound."""
    return EMVC(G, nx.number_of_nodes(G), [])
# Recursive algorithm to find a lower bound of the size of minimum vertex cover
def EMVC(G, upperbound, C):
    """Branch-and-bound minimum vertex cover. *C* is the partial cover
    built so far; *upperbound* is the best complete cover size known.
    Prunes a branch when the partial cover plus the stronger of two
    lower bounds (clique-based, degree-based) cannot beat the bound.
    """
    # Edgeless graph: the current partial cover already covers everything.
    if nx.is_empty(G):
        return len(C)
    # Compute clique-based lower bound
    # (a clique of size k needs at least k-1 cover vertices).
    cliques = get_disjoint_cliques(G)
    ClqLB = 0
    for clique in cliques:
        ClqLB += len(clique) - 1
    # Compute degree-based lower bound
    H = G.copy()
    num_of_edges = nx.number_of_edges(G)
    nodes = []
    degrees = []
    for degree in G.degree():
        nodes.append(degree[0])
        degrees.append(degree[1])
    # NOTE(review): compute_DegLB mutates nodes/degrees/H in place, so
    # the argmax below runs on the REDUCED lists -- confirm intended.
    DegLB = compute_DegLB(H, nodes, degrees, num_of_edges)
    # Compute MVC
    if len(C) + max(DegLB, ClqLB) >= upperbound:
        return upperbound
    # Select a vertex v from V with the maximum degree
    largest_index = np.argmax(degrees)
    vertex = nodes[largest_index]
    # Get the neighbours of the vertex with the maximum degree
    neighbors = [n for n in G.neighbors(vertex)]
    # Branch A: exclude the vertex, so all its neighbors join the cover.
    A = G.copy()
    A.remove_nodes_from(neighbors)
    A.remove_node(vertex)
    # Branch B: include the vertex in the cover.
    B = G.copy()
    B.remove_node(vertex)
    c1 = EMVC(A, upperbound, C + neighbors)
    c2 = EMVC(B, min(upperbound, c1), C + [vertex])
    return min(c1, c2)
def compute_DegLB(H, nodes, degrees, num_of_edges):
    """Degree-based lower bound on MVC size: greedily pop max-degree
    vertices until their degree sum covers every edge, then add
    floor(remaining_edges / max_remaining_degree).

    NOTE(review): this intentionally mutates the caller's ``H``,
    ``nodes`` and ``degrees`` in place, exactly like the original --
    EMVC reuses the reduced lists afterwards. Deleting by index here is
    equivalent to the original's value-based remove, since np.argmax
    returns the first maximum.
    """
    removed = 0
    covered_degree = 0
    while covered_degree < num_of_edges:
        # Pop the first vertex of maximum degree.
        top = int(np.argmax(degrees))
        covered_degree += degrees[top]
        H.remove_node(nodes[top])
        del degrees[top]
        del nodes[top]
        removed += 1
    leftover_edges = nx.number_of_edges(H)
    max_degree_left = max(degrees)
    return math.floor(removed + leftover_edges / max_degree_left)
def get_disjoint_cliques(G):
    """Greedily partition G's nodes into vertex-disjoint cliques.

    Maximal cliques are considered largest-first; a clique is kept only
    if it shares no node with the cliques already chosen. Nodes left
    uncovered are partitioned by recursing on the induced subgraph.
    Returns a list of node lists covering every node of G exactly once.
    """
    disjoint_cliques = []
    existing_nodes = []
    # Get all the maximal cliques
    cliques = list(nx.find_cliques(G))
    # Sort them by their sizes
    cliques.sort(key = len, reverse = True)
    for clique in cliques:
        if len(disjoint_cliques) == 0:
            disjoint_cliques.append(clique)
            # Mark nodes in the clique as existing nodes
            existing_nodes = existing_nodes + clique
        else:
            # Keep the clique only if it overlaps none of the marked nodes
            if len(set(clique).intersection(set(existing_nodes))) == 0:
                disjoint_cliques.append(clique)
                existing_nodes = existing_nodes + clique
    if nx.number_of_nodes(G) == len(existing_nodes):
        return disjoint_cliques
    else:
        # Nodes that are not in the existing nodes
        nodes = [node for node in nx.nodes(G) if node not in existing_nodes]
        # Subgraph induced by the nodes that are not yet in any clique
        subgraph = G.subgraph(nodes)
        disjoint_cliques = disjoint_cliques + get_disjoint_cliques(subgraph)
    return disjoint_cliques
def draw_graph(G):
    """Render graph G with matplotlib: nodes, edges, node labels and edge-weight labels."""
    layout = nx.spring_layout(G)  # positions for all nodes
    # Draw the three visual layers: nodes, edges, node labels.
    nx.draw_networkx_nodes(G, layout, node_size=400)
    nx.draw_networkx_edges(G, layout, edgelist=G.edges, width=6)
    nx.draw_networkx_labels(G, layout, font_size=20, font_family='sans-serif')
    # Annotate each edge with its 'weight' attribute.
    edge_weights = nx.get_edge_attributes(G, 'weight')
    nx.draw_networkx_edge_labels(G, layout, edge_labels=edge_weights)
    plt.axis('off')
    plt.show()
class CBSSolver(object):
    """The high-level search of CBS (Conflict-Based Search)."""
    def __init__(self, my_map, starts, goals):
        """my_map - list of lists specifying obstacle positions
        starts - [(x1, y1), (x2, y2), ...] list of start locations
        goals - [(x1, y1), (x2, y2), ...] list of goal locations
        """
        self.my_map = my_map
        self.starts = starts
        self.goals = goals
        self.num_of_agents = len(goals)
        self.heuristic = 'None'
        # Search statistics.
        self.num_of_generated = 0
        self.num_of_expanded = 0
        self.CPU_time = 0
        # Cumulative seconds spent constructing / updating MDDs.
        self.construct_MDD = 0
        self.update_MDD = 0
        self.open_list = []
        self.sum_cost = 0
        # compute heuristics for the low-level search
        self.heuristics = []
        for goal in self.goals:
            self.heuristics.append(compute_heuristics(my_map, goal))
    def push_node(self, node):
        """Push a constraint-tree node onto the open list.

        Ordered by cost, then collision count, then generation order
        (the generation counter also breaks ties between equal nodes).
        """
        heapq.heappush(self.open_list, (node['cost'], len(node['collisions']), self.num_of_generated, node))
        self.num_of_generated += 1
    def pop_node(self):
        """Pop and return the best constraint-tree node from the open list."""
        # 'node_id' (the generation index) avoids shadowing the builtin id().
        _, _, node_id, node = heapq.heappop(self.open_list)
        self.num_of_expanded += 1
        return node
    def find_solution(self, disjoint=True, heuristic='None', weight=False, constraints=None):
        """Find paths for all agents from their start to their goal locations.

        disjoint    - use disjoint splitting instead of standard splitting
        heuristic   - 'None', 'CG', 'DG' or 'WDG' high-level heuristic
        weight      - if True, return (cost, paths, root constraints)
                      instead of only the paths
        constraints - optional initial constraints (never mutated)

        Raises BaseException when an agent has no initial path.
        """
        self.heuristic = heuristic
        self.start_time = timer.time()
        root = {'cost': 0,
                'constraints': [],
                'paths': [],
                'collisions': [],
                'MDD': []}
        # None sentinel avoids the shared mutable-default-argument pitfall.
        root['constraints'] = [] if constraints is None else constraints.copy()
        for i in range(self.num_of_agents):  # Find initial path for each agent
            path = a_star(self.my_map, self.starts[i], self.goals[i], self.heuristics[i],
                          i, root['constraints'])
            if path is None:
                raise BaseException('No solutions')
            root['paths'].append(path)
        root['cost'] = get_sum_of_cost(root['paths'])
        root['collisions'] = detect_collisions(root['paths'])
        # Construct the initial MDDs and record the time spent doing so.
        if heuristic != 'None':
            start_construct = timer.time()
            MDD = construct_MDD(self.my_map, self.num_of_agents, self.starts, self.goals, self.heuristics, root['paths'], [])
            self.construct_MDD += timer.time() - start_construct
            h = 0
            if heuristic == 'CG':
                # Compute CG heuristic
                h = compute_CG(MDD, self.num_of_agents, self.starts)
            elif heuristic == 'DG':
                # Compute DG heuristic
                h = compute_DG(MDD, self.num_of_agents, self.starts, self.goals)
            elif heuristic == 'WDG':
                # Compute WDG heuristic
                h = compute_WDG(self.my_map, MDD, root['paths'], root['constraints'], self.num_of_agents, self.starts, self.goals)
            root['MDD'] = MDD
            # Cache each agent's constraint-free MDD keyed by path depth so
            # MDDs of previously seen depths can be reused, not rebuilt.
            MDD_all = []
            for i in range(self.num_of_agents):
                mdd_i = {}
                mdd_i[len(root['paths'][i]) - 1] = MDD[i].copy()
                MDD_all.append(mdd_i)
        self.push_node(root)
        while len(self.open_list) > 0:
            P = self.pop_node()
            if len(P['collisions']) == 0:
                # Goal node: the paths are collision-free.
                if weight:
                    cost = get_sum_of_cost(P['paths'])
                    return cost, P['paths'], root['constraints']
                self.sum_cost = self.print_results(P)
                return P['paths']
            collision = P['collisions'][0]
            if disjoint:
                constraints = disjoint_splitting(collision)
            else:
                constraints = standard_splitting(collision)
            for constraint in constraints:
                isAdd = True
                Q = {}
                Q['constraints'] = P['constraints'] + [constraint]
                Q['paths'] = [path.copy() for path in P['paths']]
                # 'mdd' avoids shadowing the MDD variable built above.
                Q['MDD'] = [mdd.copy() for mdd in P['MDD']]
                if constraint['positive'] == False:
                    a = constraint['agent']
                    path = a_star(self.my_map, self.starts[a], self.goals[a], self.heuristics[a],
                                  a, Q['constraints'])
                    if path is not None:
                        Q['paths'][a] = path.copy()
                        if heuristic != 'None':
                            # Update MDD for agent a
                            if len(P['paths'][a]) < len(path):
                                # The path grew: agent a needs an MDD of the new depth.
                                mdd_temp = 0
                                # Reuse a cached MDD when one exists for this depth.
                                if (len(path) - 1) in MDD_all[a]:
                                    mdd_temp = MDD_all[a][len(path) - 1].copy()
                                else:
                                    # Record the time for constructing the new MDD.
                                    start_construct = timer.time()
                                    mdd_temp = construct_MDD_for_agent(self.my_map, a, self.starts[a], self.goals[a], self.heuristics[a], len(path) - 1, [])
                                    self.construct_MDD += timer.time() - start_construct
                                    MDD_all[a][len(path) - 1] = mdd_temp.copy()
                                Q['MDD'][a] = mdd_temp.copy()
                            # Record the time for updating the MDD.
                            start_update = timer.time()
                            Q['MDD'][a] = updateMDD(Q['MDD'][a], a, self.starts[a], self.goals[a], len(path) - 1, Q['constraints'])
                            self.update_MDD += timer.time() - start_update
                    else:
                        isAdd = False
                if isAdd:
                    Q['collisions'] = detect_collisions(Q['paths'])
                    h_value = 0
                    if heuristic == 'CG':
                        # Compute CG heuristic
                        h_value = compute_CG(Q['MDD'], self.num_of_agents, self.starts)
                    elif heuristic == 'DG':
                        # Compute DG heuristic
                        h_value = compute_DG(Q['MDD'], self.num_of_agents, self.starts, self.goals)
                    elif heuristic == 'WDG':
                        # Compute WDG heuristic
                        h_value = compute_WDG(self.my_map, Q['MDD'], Q['paths'], Q['constraints'], self.num_of_agents, self.starts, self.goals)
                    Q['cost'] = get_sum_of_cost(Q['paths']) + h_value
                    self.push_node(Q)
        return root['paths']
    def print_results(self, node):
        """Print timing and search statistics; return the sum of path costs."""
        print("\n Found a solution! \n")
        CPU_time = timer.time() - self.start_time
        print("Use heuristic: {}".format(self.heuristic))
        print("CPU time (s): {:.2f}".format(CPU_time))
        print("Construct MDD time (s): {:.2f}".format(self.construct_MDD))
        print("Update MDD time (s): {:.2f}".format(self.update_MDD))
        print("Run time (s): {:.2f}".format(CPU_time-self.construct_MDD - self.update_MDD))
        print("Sum of costs: {}".format(get_sum_of_cost(node['paths'])))
        print("Expanded nodes: {}".format(self.num_of_expanded))
        print("Generated nodes: {}".format(self.num_of_generated))
        return get_sum_of_cost(node['paths'])
    def output_result(self):
        """Return the run statistics as a tuple for external reporting."""
        CPU_time = timer.time() - self.start_time
        heuristic = self.heuristic
        construct_MDD_time = self.construct_MDD
        update_MDD_time = self.update_MDD
        running_time = CPU_time - construct_MDD_time - update_MDD_time
        sum_cost = self.sum_cost
        expanded_nodes = self.num_of_expanded
        generated_nodes = self.num_of_generated
        return CPU_time, heuristic, construct_MDD_time, update_MDD_time, running_time, sum_cost, expanded_nodes, generated_nodes
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer,
SklearnPandasWrapper)
from sktutor.pipeline import make_union
from sklearn.pipeline import make_pipeline
import numpy as np
import pandas as pd
import pandas.testing as tm
from random import shuffle
from sklearn.preprocessing import OrdinalEncoder
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
    """Tests for GroupByImputer: imputing missing values by most_frequent,
    mean or median, optionally grouped by one or more key columns."""
    def test_groups_most_frequent(self, missing_data):
        # Test imputing most frequent value per group.
        prep = GroupByImputer('most_frequent', 'b')
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, 1, 4, 4, 4, 7, 9, 9, 9],
                    'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
                    'e': [1, 2, 1, None, None, None, None, None, None, None],
                    'f': ['a', 'b', 'a', None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_groups_mean(self, missing_data):
        # Test imputing mean by group.
        prep = GroupByImputer('mean', 'b')
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
                    'e': [1, 2, 1.5, None, None, None, None, None, None, None],
                    'f': ['a', 'b', None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_groups_median(self, missing_data):
        # Test imputing median by group.
        prep = GroupByImputer('median', 'b')
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
                    'e': [1, 2, 1.5, None, None, None, None, None, None, None],
                    'f': ['a', 'b', None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_all_most_frequent(self, missing_data):
        # Test imputing most frequent with no group by.
        prep = GroupByImputer('most_frequent')
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
                    'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
                    'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
                    'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_all_mean(self, missing_data):
        # Test imputing mean with no group by.
        prep = GroupByImputer('mean')
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
                    'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
                    'f': ['a', 'b', None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_all_median(self, missing_data):
        # Test imputing median with no group by.
        prep = GroupByImputer('median')
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
                    'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
                    'f': ['a', 'b', None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_value_error(self, missing_data):
        # Test limiting options without a group by.
        prep = GroupByImputer('stdev')
        with pytest.raises(ValueError):
            prep.fit(missing_data)
    def test_key_error(self, missing_data):
        # Test imputing with np.nan when a new group level is introduced in
        # Transform.
        prep = GroupByImputer('mean', 'b')
        prep.fit(missing_data)
        new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '987', '987', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
                    'e': [1, 2, None, None, None, None, None, None, None,
                          None],
                    'f': ['a', 'b', None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        new_data = pd.DataFrame(new_dict)
        # set equal to the expected for test means group
        exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
                    'b': ['123', '123', '123',
                          '987', '987', '456',
                          '789', '789', '789', '789'],
                    'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
                    'e': [1, 2, 1.5, None, None, None, None, None, None, None],
                    'f': ['a', 'b', None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        expected = pd.DataFrame(exp_dict)
        result = prep.transform(new_data)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_2groups_most_frequent(self, missing_data2):
        # Test most frequent with group by with 2 columns.
        prep = GroupByImputer('most_frequent', ['b', 'c'])
        prep.fit(missing_data2)
        result = prep.transform(missing_data2)
        exp_dict = {'a': [1, 2, 1, 4, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '123', '123', '789',
                          '789', '789', '789', '789'],
                    'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
                    'd': ['a', 'a', 'a', 'e', 'e', 'f', 'f', 'h', 'j', 'j']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_2groups_mean(self, missing_data2):
        # Test mean with group by with 2 columns.
        prep = GroupByImputer('mean', ['b', 'c'])
        prep.fit(missing_data2)
        result = prep.transform(missing_data2)
        exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '123', '123', '789',
                          '789', '789', '789', '789'],
                    'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
                          'j']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_2groups_median(self, missing_data2):
        # Test median with group by with 2 columns.
        prep = GroupByImputer('median', ['b', 'c'])
        prep.fit(missing_data2)
        result = prep.transform(missing_data2)
        exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '123', '123', '789',
                          '789', '789', '789', '789'],
                    'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
                          'j']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_unordered_index(self, missing_data):
        # Test unordered index is handled properly
        new_index = list(missing_data.index)
        shuffle(new_index)
        missing_data.index = new_index
        prep = GroupByImputer('most_frequent', 'b')
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
                    'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
                    'e': [1, 2, 1, None, None, None, None, None, None, None],
                    'f': ['a', 'b', 'a', None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
@pytest.mark.usefixtures("missing_data_numeric")
class TestMissingValueFiller(object):
    """Tests for MissingValueFiller: replacing missing values with a constant."""
    def test_missing_factors(self, missing_data_factors):
        # Test filling in missing factors with a string.
        prep = MissingValueFiller('Missing')
        result = prep.fit_transform(missing_data_factors)
        exp_dict = {'c': ['a', 'Missing', 'a', 'b', 'b', 'Missing', 'c', 'a',
                          'a', 'c'],
                    'd': ['a', 'a', 'Missing', 'Missing', 'e', 'f', 'Missing',
                          'h', 'j', 'j']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_missing_numeric(self, missing_data_numeric):
        # Test filling in missing numeric data with a number.
        prep = MissingValueFiller(0)
        result = prep.fit_transform(missing_data_numeric)
        exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
                    'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
                    'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_unordered_index(self, missing_data_numeric):
        # Test unordered index is handled properly
        new_index = list(missing_data_numeric.index)
        shuffle(new_index)
        missing_data_numeric.index = new_index
        prep = MissingValueFiller(0)
        result = prep.fit_transform(missing_data_numeric)
        exp_dict = {'a': [2, 2, 0, 0, 4, 4, 7, 8, 0, 8],
                    'c': [1, 2, 0, 4, 4, 4, 7, 9, 0, 9],
                    'e': [1, 2, 0, 0, 0, 0, 0, 0, 0, 0]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestOverMissingThresholdDropper(object):
    """Tests for OverMissingThresholdDropper: dropping columns whose
    missing-value ratio exceeds a threshold."""
    def test_drop_20(self, missing_data):
        # Test dropping columns with missing over a threshold.
        prep = OverMissingThresholdDropper(.2)
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_threshold_high_value_error(self, missing_data):
        # Test throwing error with threshold set too high.
        with pytest.raises(ValueError):
            svatd = OverMissingThresholdDropper(1.5)
            svatd
    def test_threshold_low_value_error(self, missing_data):
        # Test throwing error with threshold set too low.
        with pytest.raises(ValueError):
            svatd = OverMissingThresholdDropper(-1)
            svatd
    def test_unordered_index(self, missing_data):
        # Test unordered index is handled properly
        new_index = list(missing_data.index)
        shuffle(new_index)
        missing_data.index = new_index
        prep = OverMissingThresholdDropper(.2)
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestValueReplacer(object):
    """Tests for ValueReplacer: mapping column values to replacements via
    a mapper or an inverse mapper (exactly one must be given)."""
    def test_mapper(self, full_data_factors):
        # Test replacing values with mapper.
        mapper = {'c': {'a': 'z', 'b': 'z'},
                  'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
                        'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
                        }
                  }
        prep = ValueReplacer(mapper)
        prep.fit(full_data_factors)
        result = prep.transform(full_data_factors)
        exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
                    'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_inverse_mapper(self, full_data_factors):
        # Test replacing values with inverse_mapper.
        inv_mapper = {'c': {'z': ['a', 'b']},
                      'd': {'z': ['a', 'b'],
                            'y': ['c', 'd'],
                            'x': ['e', 'f'],
                            'w': ['g', 'h', 'j']
                            }
                      }
        prep = ValueReplacer(inverse_mapper=inv_mapper)
        prep.fit(full_data_factors)
        result = prep.transform(full_data_factors)
        exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
                    'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_extra_column_value_error(self, full_data_factors):
        # Test throwing error when replacing values with a non-existant column.
        mapper = {'c': {'a': 'z', 'b': 'z'},
                  'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
                        'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
                        }
                  }
        prep = ValueReplacer(mapper)
        with pytest.raises(ValueError):
            prep.fit(full_data_factors)
    def test_2_mappers_value_error(self):
        # Test throwing error when specifying mapper and inverse_mapper.
        mapper = {'c': {'a': 'z', 'b': 'z'},
                  'e': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
                        'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
                        }
                  }
        inv_mapper = {'c': {'z': ['a', 'b']},
                      'd': {'z': ['a', 'b'],
                            'y': ['c', 'd'],
                            'x': ['e', 'f'],
                            'w': ['g', 'h', 'j']
                            }
                      }
        with pytest.raises(ValueError):
            prep = ValueReplacer(mapper=mapper, inverse_mapper=inv_mapper)
            prep
    def test_no_mappers_value_error(self):
        # Test throwing error when not specifying mapper or inverse_mapper.
        with pytest.raises(ValueError):
            prep = ValueReplacer()
            prep
    def test_unordered_index(self, full_data_factors):
        # Test unordered index is handled properly
        new_index = list(full_data_factors.index)
        shuffle(new_index)
        full_data_factors.index = new_index
        mapper = {'c': {'a': 'z', 'b': 'z'},
                  'd': {'a': 'z', 'b': 'z', 'c': 'y', 'd': 'y', 'e': 'x',
                        'f': 'x', 'g': 'w', 'h': 'w', 'j': 'w'
                        }
                  }
        prep = ValueReplacer(mapper)
        prep.fit(full_data_factors)
        result = prep.transform(full_data_factors)
        exp_dict = {'c': ['z', 'z', 'z', 'z', 'z', 'c', 'c', 'z', 'z', 'c'],
                    'd': ['z', 'z', 'y', 'y', 'x', 'x', 'w', 'w', 'w', 'w']
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data_factors")
class TestFactorLimiter(object):
    """Tests for FactorLimiter: restricting factor columns to whitelisted
    levels, mapping everything else (including missing) to a default."""
    def test_limiter(self, missing_data_factors):
        # Test limiting factor levels to specified levels with default.
        factors = {'c': {'factors': ['a', 'b'],
                         'default': 'a'
                         },
                   'd': {'factors': ['a', 'b', 'c', 'd'],
                         'default': 'd'
                         }
                   }
        prep = FactorLimiter(factors)
        prep.fit(missing_data_factors)
        result = prep.transform(missing_data_factors)
        exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
                    'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_extra_column_value_error(self, missing_data_factors):
        # Test throwing error when limiting values with a non-existant column.
        factors = {'c': {'factors': ['a', 'b'],
                         'default': 'a'
                         },
                   'e': {'factors': ['a', 'b', 'c', 'd'],
                         'default': 'd'
                         }
                   }
        fl = FactorLimiter(factors)
        with pytest.raises(ValueError):
            fl.fit(missing_data_factors)
    def test_unordered_index(self, missing_data_factors):
        # Test unordered index is handled properly
        new_index = list(missing_data_factors.index)
        shuffle(new_index)
        missing_data_factors.index = new_index
        factors = {'c': {'factors': ['a', 'b'],
                         'default': 'a'
                         },
                   'd': {'factors': ['a', 'b', 'c', 'd'],
                         'default': 'd'
                         }
                   }
        prep = FactorLimiter(factors)
        prep.fit(missing_data_factors)
        result = prep.transform(missing_data_factors)
        exp_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'a', 'a', 'a', 'a', 'a'],
                    'd': ['a', 'a', 'd', 'd', 'd', 'd', 'd', 'd', 'd', 'd']
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestSingleValueAboveThresholdDropper(object):
    """Tests for SingleValueAboveThresholdDropper: dropping columns where a
    single value's frequency exceeds a threshold (NaNs optionally counted)."""
    def test_drop_70_with_na(self, missing_data):
        # test dropping columns with over 70% single value, including NaNs.
        prep = SingleValueAboveThresholdDropper(.7, dropna=False)
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_drop_70_without_na(self, missing_data):
        # test dropping columns with over 70% single value, not including NaNs.
        prep = SingleValueAboveThresholdDropper(.7, dropna=True)
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
                          'j'],
                    'e': [1, 2, None, None, None, None, None, None, None,
                          None],
                    'f': ['a', 'b', None, None, None, None, None, None, None,
                          None],
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_threshold_high_value_error(self, missing_data):
        # Test throwing error with threshold set too high.
        with pytest.raises(ValueError):
            prep = SingleValueAboveThresholdDropper(1.5)
            prep
    def test_threshold_low_value_error(self, missing_data):
        # Test throwing error with threshold set too low.
        with pytest.raises(ValueError):
            prep = SingleValueAboveThresholdDropper(-1)
            prep
    def test_unordered_index(self, missing_data):
        # Test unordered index is handled properly
        new_index = list(missing_data.index)
        shuffle(new_index)
        missing_data.index = new_index
        prep = SingleValueAboveThresholdDropper(.7, dropna=False)
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
                    'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j']
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("single_values_data")
class TestSingleValueDropper(object):
    """Tests for SingleValueDropper: dropping columns that contain only one
    distinct value (NaN optionally counted as a value)."""
    def test_without_na(self, single_values_data):
        # Test dropping columns with single values, excluding NaNs as a value.
        prep = SingleValueDropper(dropna=True)
        prep.fit(single_values_data)
        result = prep.transform(single_values_data)
        exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'e': [1, 2, None, None, None, None, None, None, None,
                          None]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_with_na(self, single_values_data):
        # Test dropping columns with single values, including NaNs as a value.
        prep = SingleValueDropper(dropna=False)
        prep.fit(single_values_data)
        result = prep.transform(single_values_data)
        exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
                    'e': [1, 2, None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_unordered_index(self, single_values_data):
        # Test unordered index is handled properly
        new_index = list(single_values_data.index)
        shuffle(new_index)
        single_values_data.index = new_index
        prep = SingleValueDropper(dropna=False)
        prep.fit(single_values_data)
        result = prep.transform(single_values_data)
        exp_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'd': [1, 1, 1, 1, 1, 1, 1, 1, 1, None],
                    'e': [1, 2, None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnExtractor(object):
    """Tests for ColumnExtractor: keeping only the requested columns."""
    def test_extraction(self, missing_data):
        # Test extraction of columns from a DataFrame.
        prep = ColumnExtractor(['a', 'b', 'c'])
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_column_missing_error(self, missing_data):
        # Test throwing error when an extraction is requested of a missing.
        # column
        prep = ColumnExtractor(['a', 'b', 'z'])
        with pytest.raises(ValueError):
            prep.fit(missing_data)
    def test_unordered_index(self, missing_data):
        # Test unordered index is handled properly
        new_index = list(missing_data.index)
        shuffle(new_index)
        missing_data.index = new_index
        prep = ColumnExtractor(['a', 'b', 'c'])
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data")
class TestColumnDropper(object):
    """Tests for ColumnDropper: removing one or more named columns."""
    def test_drop_multiple(self, missing_data):
        # Test extraction of columns from a DataFrame
        prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_drop_single(self, missing_data):
        # Test extraction of columns from a DataFrame
        prep = ColumnDropper('d')
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
                    'e': [1, 2, None, None, None, None, None, None, None,
                          None],
                    'f': ['a', 'b', None, None, None, None, None, None, None,
                          None],
                    'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
                    'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_error(self, missing_data):
        # Test throwing error when dropping is requested of a missing column
        prep = ColumnDropper(['a', 'b', 'z'])
        with pytest.raises(ValueError):
            prep.fit(missing_data)
    def test_unordered_index(self, missing_data):
        # Test unordered index is handled properly
        new_index = list(missing_data.index)
        shuffle(new_index)
        missing_data.index = new_index
        prep = ColumnDropper(['d', 'e', 'f', 'g', 'h'])
        prep.fit(missing_data)
        result = prep.transform(missing_data)
        exp_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
                    'b': ['123', '123', '123',
                          '234', '456', '456',
                          '789', '789', '789', '789'],
                    'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
@pytest.mark.usefixtures("full_data_factors_subset")
@pytest.mark.usefixtures("missing_data_factors")
class TestDummyCreator(object):
    """Tests for DummyCreator: dummy (one-hot) encoding of factor columns.

    Fitting records the factor levels per column; transform emits one
    indicator column per recorded level (named '<col>_<level>'), filling
    zeros for levels absent from the transform data.
    """
    def test_default_dummies(self, full_data_factors):
        # Test creating dummies variables from a DataFrame
        prep = DummyCreator()
        prep.fit(full_data_factors)
        result = prep.transform(full_data_factors)
        exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
                    'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                    'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
                    'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                    'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                    'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                    'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                    'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                    'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_fit_transform(self, full_data_factors):
        # Test creating dummies variables from a DataFrame
        # Same expectation as test_default_dummies, via fit_transform().
        prep = DummyCreator()
        result = prep.fit_transform(full_data_factors)
        exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
                    'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                    'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
                    'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                    'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                    'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                    'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                    'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                    'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_drop_first_dummies(self, full_data_factors):
        # Test dropping first dummies for each column.
        # With drop_first=True the first level of each factor ('c_a',
        # 'd_a') is omitted to avoid collinearity.
        kwargs = {'drop_first': True}
        prep = DummyCreator(**kwargs)
        prep.fit(full_data_factors)
        result = prep.transform(full_data_factors)
        exp_dict = {'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                    'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
                    'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                    'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                    'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                    'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                    'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                    'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_drop_first_dummies_missing_levels(self, full_data_factors,
                                               full_data_factors_subset):
        # Test dropping first dummies for each column.
        # Transform data lacking some fitted levels still yields the full
        # set of (drop_first) dummy columns, zero-filled where absent.
        kwargs = {'drop_first': True}
        prep = DummyCreator(**kwargs)
        prep.fit(full_data_factors)
        result = prep.transform(full_data_factors_subset)
        exp_dict = {'c_b': [1, 1, 0, 0, 0, 0, 0],
                    'c_c': [0, 0, 1, 1, 0, 0, 1],
                    'd_b': [0, 0, 0, 0, 0, 0, 0],
                    'd_c': [0, 0, 0, 0, 0, 0, 0],
                    'd_d': [1, 0, 0, 0, 0, 0, 0],
                    'd_e': [0, 1, 0, 0, 0, 0, 0],
                    'd_f': [0, 0, 1, 0, 0, 0, 0],
                    'd_g': [0, 0, 0, 1, 0, 0, 0],
                    'd_h': [0, 0, 0, 0, 1, 0, 0],
                    'd_j': [0, 0, 0, 0, 0, 1, 1]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_dummy_na_false_dummies(self, missing_data_factors):
        # Test not creating dummies for NaNs.
        # Default behaviour: NaN rows get all-zero indicators, no '_nan'
        # column is produced.
        prep = DummyCreator()
        prep.fit(missing_data_factors)
        result = prep.transform(missing_data_factors)
        exp_dict = {'c_a': [1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
                    'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                    'c_c': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
                    'd_a': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                    'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                    'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_dummy_na_true_dummies(self, missing_data_factors):
        # Test creating dummies for NaNs.
        # dummy_na=True adds an explicit '<col>_nan' indicator per column.
        kwargs = {'dummy_na': True}
        prep = DummyCreator(**kwargs)
        prep.fit(missing_data_factors)
        result = prep.transform(missing_data_factors)
        exp_dict = {'c_a': [1, 0, 1, 0, 0, 0, 0, 1, 1, 0],
                    'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                    'c_c': [0, 0, 0, 0, 0, 0, 1, 0, 0, 1],
                    'c_nan': [0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
                    'd_a': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                    'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                    'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
                    'd_nan': [0, 0, 1, 1, 0, 0, 1, 0, 0, 0]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_fillin_missing_dummies(self, full_data_factors):
        # Test filling missing dummies with a transform data missing levels
        # present in the fitting data set.
        prep = DummyCreator()
        prep.fit(full_data_factors)
        new_dict = {'c': ['b', 'c'],
                    'd': ['a', 'b']
                    }
        new_data = pd.DataFrame(new_dict)
        result = prep.transform(new_data)
        exp_dict = {'c_a': [0, 0],
                    'c_b': [1, 0],
                    'c_c': [0, 1],
                    'd_a': [1, 0],
                    'd_b': [0, 1],
                    'd_c': [0, 0],
                    'd_d': [0, 0],
                    'd_e': [0, 0],
                    'd_f': [0, 0],
                    'd_g': [0, 0],
                    'd_h': [0, 0],
                    'd_j': [0, 0]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_unordered_index(self, full_data_factors):
        # Test unordered index is handled properly
        new_index = list(full_data_factors.index)
        shuffle(new_index)
        full_data_factors.index = new_index
        prep = DummyCreator()
        prep.fit(full_data_factors)
        result = prep.transform(full_data_factors)
        exp_dict = {'c_a': [1, 1, 1, 0, 0, 0, 0, 1, 1, 0],
                    'c_b': [0, 0, 0, 1, 1, 0, 0, 0, 0, 0],
                    'c_c': [0, 0, 0, 0, 0, 1, 1, 0, 0, 1],
                    'd_a': [1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_b': [0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                    'd_c': [0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
                    'd_d': [0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
                    'd_e': [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
                    'd_f': [0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
                    'd_g': [0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
                    'd_h': [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                    'd_j': [0, 0, 0, 0, 0, 0, 0, 0, 1, 1]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("full_data_factors")
class TestColumnValidator(object):
    """Tests for ColumnValidator: column presence and ordering checks."""

    def test_order(self, full_data_factors):
        """Transform reorders columns to match the order seen during fit."""
        validator = ColumnValidator()
        validator.fit(full_data_factors)
        reordered = pd.DataFrame({
            'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
            'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
        })
        transformed = validator.transform(reordered)
        expected = pd.DataFrame({
            'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
            'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
        })
        tm.assert_frame_equal(transformed, expected, check_dtype=False)

    def test_missing_columns_error(self, full_data_factors):
        """Transform data lacking a fitted column raises ValueError."""
        validator = ColumnValidator()
        validator.fit(full_data_factors)
        incomplete = pd.DataFrame({
            'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
        })
        with pytest.raises(ValueError):
            validator.transform(incomplete)

    def test_new_columns_error(self, full_data_factors):
        """Transform data with columns unseen at fit raises ValueError."""
        validator = ColumnValidator()
        validator.fit(full_data_factors)
        extra = pd.DataFrame({
            'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
            'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
            'e': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
        })
        with pytest.raises(ValueError):
            validator.transform(extra)

    def test_unordered_index(self, full_data_factors):
        """A shuffled (non-monotonic) index is preserved by transform."""
        shuffled_index = list(full_data_factors.index)
        shuffle(shuffled_index)
        full_data_factors.index = shuffled_index
        validator = ColumnValidator()
        validator.fit(full_data_factors)
        transformed = validator.transform(full_data_factors)
        expected = pd.DataFrame({
            'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
            'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j'],
        }, index=shuffled_index)
        tm.assert_frame_equal(transformed, expected, check_dtype=False)
@pytest.mark.usefixtures("text_data")
class TestTextContainsDummyExtractor(object):
    """Tests for TextContainsDummyExtractor.

    The mapper's shape is {source_column: {new_dummy_column: [pattern
    specs]}}; a dummy is 1 when ANY of its pattern specs matches the
    source text (specs carry a regex 'pattern' plus 'kwargs' presumably
    forwarded to pandas str.contains -- TODO confirm against the
    transformer implementation).
    """
    def test_mapper(self, text_data):
        # Test text contains dummy with mapper.
        mapper = {'a':
                  {'a_1':
                   [{'pattern': 'birthday', 'kwargs': {'case': False}},
                    {'pattern': 'bday', 'kwargs': {'case': False}}
                    ],
                   'a_2':
                   [{'pattern': 'b.*day', 'kwargs': {'case': False}}
                    ],
                   },
                  'b':
                  {'b_1':
                   [{'pattern': 'h.*r', 'kwargs': {'case': False}}
                    ],
                   'b_2':
                   [{'pattern': '!', 'kwargs': {'case': False}},
                    ]
                   }
                  }
        prep = TextContainsDummyExtractor(mapper)
        prep.fit(text_data)
        result = prep.transform(text_data)
        exp_dict = {'a': ['Happy Birthday!', 'It\'s your bday!'],
                    'b': ['Happy Arbor Day!', 'Happy Gilmore'],
                    'c': ['a', 'b'],
                    'a_1': [1, 1],
                    'a_2': [1, 1],
                    'b_1': [1, 1],
                    'b_2': [1, 0]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_extra_column_value_error(self, text_data):
        # Test throwing error when replacing values with a non-existant column.
        # Source column 'd' does not exist in text_data, so fit must raise.
        mapper = {'a':
                  {'a_1':
                   [{'pattern': 'birthday', 'kwargs': {'case': False}},
                    {'pattern': 'bday', 'kwargs': {'case': False}}
                    ],
                   'a_2':
                   [{'pattern': 'b.*day', 'kwargs': {'case': False}}
                    ],
                   },
                  'd':
                  {'b_1':
                   [{'pattern': 'h.*r', 'kwargs': {'case': False}}
                    ],
                   'b_2':
                   [{'pattern': '!', 'kwargs': {'case': False}},
                    ]
                   }
                  }
        prep = TextContainsDummyExtractor(mapper)
        with pytest.raises(ValueError):
            prep.fit(text_data)
    def test_unordered_index(self, text_data):
        # Test unordered index is handled properly
        new_index = list(text_data.index)
        shuffle(new_index)
        text_data.index = new_index
        mapper = {'a':
                  {'a_1':
                   [{'pattern': 'birthday', 'kwargs': {'case': False}},
                    {'pattern': 'bday', 'kwargs': {'case': False}}
                    ],
                   'a_2':
                   [{'pattern': 'b.*day', 'kwargs': {'case': False}}
                    ],
                   },
                  'b':
                  {'b_1':
                   [{'pattern': 'h.*r', 'kwargs': {'case': False}}
                    ],
                   'b_2':
                   [{'pattern': '!', 'kwargs': {'case': False}},
                    ]
                   }
                  }
        prep = TextContainsDummyExtractor(mapper)
        prep.fit(text_data)
        result = prep.transform(text_data)
        exp_dict = {'a': ['Happy Birthday!', 'It\'s your bday!'],
                    'b': ['Happy Arbor Day!', 'Happy Gilmore'],
                    'c': ['a', 'b'],
                    'a_1': [1, 1],
                    'a_2': [1, 1],
                    'b_1': [1, 1],
                    'b_2': [1, 0]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
@pytest.mark.usefixtures("boolean_data")
class TestBitwiseOperator(object):
    """Tests for BitwiseOperator: row-wise bitwise 'or'/'and' of columns.

    The mapper maps each new output column name to the list of existing
    columns that are combined to produce it.
    """
    def test_operator_value_error(self, text_data):
        # Test throwing error when using invalid operator parameter
        mapper = {'f': ['c', 'd', 'e'],
                  'g': ['a', 'b']
                  }
        with pytest.raises(ValueError):
            # Only 'or'/'and' are valid; construction with anything else
            # must raise. (Removed a leftover bare 'prep' statement that
            # was dead code.)
            BitwiseOperator('with', mapper)
    def test_or_mapper_boolean(self, boolean_data):
        # Test bitwise or applied to booleans
        mapper = {'f': ['c', 'd', 'e'],
                  'g': ['a', 'b']
                  }
        prep = BitwiseOperator('or', mapper)
        prep.fit(boolean_data)
        result = prep.transform(boolean_data)
        exp_dict = {'a': [True, True, False, False],
                    'b': [True, False, False, True],
                    'c': [False, True, True, False],
                    'd': [True, False, True, False],
                    'e': [False, True, False, True],
                    'f': [1, 1, 1, 1],
                    'g': [1, 1, 0, 1],
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_or_mapper_binary(self, boolean_data):
        # Test bitwise or applied to integers
        mapper = {'f': ['c', 'd', 'e'],
                  'g': ['a', 'b']
                  }
        prep = BitwiseOperator('or', mapper)
        prep.fit(boolean_data)
        result = prep.transform(boolean_data)
        exp_dict = {'a': [1, 1, 0, 0],
                    'b': [1, 0, 0, 1],
                    'c': [0, 1, 1, 0],
                    'd': [1, 0, 1, 0],
                    'e': [0, 1, 0, 1],
                    'f': [1, 1, 1, 1],
                    'g': [1, 1, 0, 1],
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_or_extra_column_value_error(self, text_data):
        # Test throwing error when replacing values with a non-existant column.
        mapper = {'f': ['c', 'd', 'e'],
                  'g': ['a', 'b']
                  }
        prep = BitwiseOperator('or', mapper)
        with pytest.raises(ValueError):
            prep.fit(text_data)
    def test_and_mapper_boolean(self, boolean_data):
        # Test bitwise and applied to booleans
        mapper = {'f': ['c', 'd', 'e'],
                  'g': ['a', 'b']
                  }
        prep = BitwiseOperator('and', mapper)
        prep.fit(boolean_data)
        result = prep.transform(boolean_data)
        exp_dict = {'a': [True, True, False, False],
                    'b': [True, False, False, True],
                    'c': [False, True, True, False],
                    'd': [True, False, True, False],
                    'e': [False, True, False, True],
                    'f': [0, 0, 0, 0],
                    'g': [1, 0, 0, 0]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_and_mapper_binary(self, boolean_data):
        # Test bitwise and applied to integers
        mapper = {'f': ['c', 'd', 'e'],
                  'g': ['a', 'b']
                  }
        prep = BitwiseOperator('and', mapper)
        prep.fit(boolean_data)
        result = prep.transform(boolean_data)
        exp_dict = {'a': [1, 1, 0, 0],
                    'b': [1, 0, 0, 1],
                    'c': [0, 1, 1, 0],
                    'd': [1, 0, 1, 0],
                    'e': [0, 1, 0, 1],
                    'f': [0, 0, 0, 0],
                    'g': [1, 0, 0, 0]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_and_extra_column_value_error(self, text_data):
        # Test throwing error when replacing values with a non-existant column.
        mapper = {'f': ['c', 'd', 'e'],
                  'g': ['a', 'b']
                  }
        prep = BitwiseOperator('and', mapper)
        with pytest.raises(ValueError):
            prep.fit(text_data)
    def test_unordered_index(self, boolean_data):
        # Test unordered index is handled properly
        new_index = list(boolean_data.index)
        shuffle(new_index)
        boolean_data.index = new_index
        mapper = {
            'f': ['c', 'd', 'e'],
            'g': ['a', 'b']
        }
        prep = BitwiseOperator('or', mapper)
        prep.fit(boolean_data)
        result = prep.transform(boolean_data)
        exp_dict = {'a': [True, True, False, False],
                    'b': [True, False, False, True],
                    'c': [False, True, True, False],
                    'd': [True, False, True, False],
                    'e': [False, True, False, True],
                    'f': [1, 1, 1, 1],
                    'g': [1, 1, 0, 1],
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
@pytest.mark.usefixtures("full_data_numeric")
class TestBoxCoxTransformer(object):
    """Tests for BoxCoxTransformer: Box-Cox power transform of numeric columns."""
    def test_fit_transform(self, full_data_numeric):
        """fit_transform in one call yields the Box-Cox transformed values."""
        # NOTE: renamed from 'test_fit_transfrom' to fix the typo.
        prep = BoxCoxTransformer()
        result = prep.fit_transform(full_data_numeric)
        exp_dict = {'a': [0.71695113, 0.71695113, 0.71695113,
                          1.15921005, 1.48370246, 1.48370246,
                          2.1414305, 2.30371316, 2.30371316,
                          2.30371316],
                    'c': [0., 0.8310186, 1.47159953, 2.0132148,
                          2.0132148, 2.0132148, 3.32332097, 4.0444457,
                          4.0444457, 4.0444457],
                    'e': [0., 0.89952678, 1.67649211, 2.38322965,
                          3.04195191, 3.66477648, 4.25925117,
                          4.83048775, 5.38215505, 5.91700138]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_fit_then_transform(self, full_data_numeric):
        """Separate fit() then transform() matches fit_transform()."""
        prep = BoxCoxTransformer()
        prep.fit(full_data_numeric)
        result = prep.transform(full_data_numeric)
        exp_dict = {'a': [0.71695113, 0.71695113, 0.71695113,
                          1.15921005, 1.48370246, 1.48370246,
                          2.1414305, 2.30371316, 2.30371316,
                          2.30371316],
                    'c': [0., 0.8310186, 1.47159953, 2.0132148,
                          2.0132148, 2.0132148, 3.32332097, 4.0444457,
                          4.0444457, 4.0444457],
                    'e': [0., 0.89952678, 1.67649211, 2.38322965,
                          3.04195191, 3.66477648, 4.25925117,
                          4.83048775, 5.38215505, 5.91700138]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_unordered_index(self, full_data_numeric):
        """A shuffled (non-monotonic) index is preserved by the transform."""
        new_index = list(full_data_numeric.index)
        shuffle(new_index)
        full_data_numeric.index = new_index
        prep = BoxCoxTransformer()
        result = prep.fit_transform(full_data_numeric)
        exp_dict = {'a': [0.71695113, 0.71695113, 0.71695113,
                          1.15921005, 1.48370246, 1.48370246,
                          2.1414305, 2.30371316, 2.30371316,
                          2.30371316],
                    'c': [0., 0.8310186, 1.47159953, 2.0132148,
                          2.0132148, 2.0132148, 3.32332097, 4.0444457,
                          4.0444457, 4.0444457],
                    'e': [0., 0.89952678, 1.67649211, 2.38322965,
                          3.04195191, 3.66477648, 4.25925117,
                          4.83048775, 5.38215505, 5.91700138]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
@pytest.mark.usefixtures("interaction_data")
class TestInteractionCreator(object):
    """Tests for InteractionCreator: pairwise 'col1:col2' product columns
    between every column in columns1 and every column in columns2.
    """
    def test_interactions(self, interaction_data):
        """Products of each columns1/columns2 pair appear as 'x:y' columns."""
        prep = InteractionCreator(columns1=['a', 'b'],
                                  columns2=['c', 'd', 'e'])
        result = prep.fit_transform(interaction_data)
        exp_dict = {'a': [2, 3, 4, 5],
                    'b': [1, 0, 0, 1],
                    'c': [0, 1, 1, 0],
                    'd': [1, 0, 1, 0],
                    'e': [0, 1, 0, 1],
                    'a:c': [0, 3, 4, 0],
                    'a:d': [2, 0, 4, 0],
                    'a:e': [0, 3, 0, 5],
                    'b:c': [0, 0, 0, 0],
                    'b:d': [1, 0, 0, 0],
                    'b:e': [0, 0, 0, 1]
                    }
        expected = pd.DataFrame(exp_dict)
        # Removed leftover debug print(result).
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test__extra_column_value_error(self, interaction_data):
        """Non-existent columns ('f', 'g') must raise ValueError."""
        prep = InteractionCreator(columns1=['a', 'f'],
                                  columns2=['c', 'd', 'g'])
        with pytest.raises(ValueError):
            prep.fit_transform(interaction_data)
    def test_unordered_index(self, interaction_data):
        """A shuffled (non-monotonic) index is preserved by the transform."""
        new_index = list(interaction_data.index)
        shuffle(new_index)
        interaction_data.index = new_index
        prep = InteractionCreator(columns1=['a', 'b'],
                                  columns2=['c', 'd', 'e'])
        result = prep.fit_transform(interaction_data)
        exp_dict = {'a': [2, 3, 4, 5],
                    'b': [1, 0, 0, 1],
                    'c': [0, 1, 1, 0],
                    'd': [1, 0, 1, 0],
                    'e': [0, 1, 0, 1],
                    'a:c': [0, 3, 4, 0],
                    'a:d': [2, 0, 4, 0],
                    'a:e': [0, 3, 0, 5],
                    'b:c': [0, 0, 0, 0],
                    'b:d': [1, 0, 0, 0],
                    'b:e': [0, 0, 0, 1]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        # Removed leftover debug print(result).
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
@pytest.mark.usefixtures("full_data_numeric")
class TestStandardScaler(object):
    """Tests for StandardScaler: z-score standardization of numeric columns,
    with optional column subsets, partial transforms, and inverse transforms.
    """
    def test_fit_transform(self, full_data_numeric):
        # test default functionalty
        prep = StandardScaler()
        result = prep.fit_transform(full_data_numeric)
        exp_dict = {'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
                          -0.31722063, -0.31722063, 0.87235674, 1.26888254,
                          1.26888254, 1.26888254],
                    'c': [-1.45260037, -1.10674314, -0.76088591, -0.41502868,
                          -0.41502868, -0.41502868, 0.62254302, 1.31425748,
                          1.31425748, 1.31425748],
                    'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
                          -0.17407766, 0.17407766, 0.52223297, 0.87038828,
                          1.21854359, 1.5666989]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_fit_transform_defined_columns(self, full_data_numeric):
        # test defining which columns to apply standardization to
        # Column 'c' is left untouched (raw values) when excluded.
        prep = StandardScaler(columns=['a', 'e'])
        result = prep.fit_transform(full_data_numeric)
        exp_dict = {
            'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
                  -0.31722063, -0.31722063, 0.87235674, 1.26888254,
                  1.26888254, 1.26888254],
            'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
            'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
                  -0.17407766, 0.17407766, 0.52223297, 0.87038828,
                  1.21854359, 1.5666989]
        }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_fit_then_transform(self, full_data_numeric):
        # test using fit then transform
        prep = StandardScaler()
        prep.fit(full_data_numeric)
        result = prep.transform(full_data_numeric)
        exp_dict = {'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
                          -0.31722063, -0.31722063, 0.87235674, 1.26888254,
                          1.26888254, 1.26888254],
                    'c': [-1.45260037, -1.10674314, -0.76088591, -0.41502868,
                          -0.41502868, -0.41502868, 0.62254302, 1.31425748,
                          1.31425748, 1.31425748],
                    'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
                          -0.17407766, 0.17407766, 0.52223297, 0.87038828,
                          1.21854359, 1.5666989]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_fit_then_transform_defined_columns(self, full_data_numeric):
        # test defining which columns to apply standardization to
        prep = StandardScaler(columns=['a', 'e'])
        prep.fit(full_data_numeric)
        result = prep.transform(full_data_numeric)
        exp_dict = {
            'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
                  -0.31722063, -0.31722063, 0.87235674, 1.26888254,
                  1.26888254, 1.26888254],
            'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
            'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
                  -0.17407766, 0.17407766, 0.52223297, 0.87038828,
                  1.21854359, 1.5666989]
        }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_fit_then_partial_transform(self, full_data_numeric):
        # test using fit then transform on specified columns
        # partial_cols restricts the OUTPUT to just those columns.
        prep = StandardScaler()
        prep.fit(full_data_numeric)
        result = prep.transform(X=full_data_numeric, partial_cols=['c', 'e'])
        exp_dict = {'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
                          -0.31722063, -0.31722063, 0.87235674, 1.26888254,
                          1.26888254, 1.26888254],
                    'c': [-1.45260037, -1.10674314, -0.76088591, -0.41502868,
                          -0.41502868, -0.41502868, 0.62254302, 1.31425748,
                          1.31425748, 1.31425748],
                    'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
                          -0.17407766, 0.17407766, 0.52223297, 0.87038828,
                          1.21854359, 1.5666989]
                    }
        expected = pd.DataFrame(exp_dict)
        expected = expected[['c', 'e']]
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=True)
    def test_unordered_index(self, full_data_numeric):
        # Test unordered index is handled properly
        new_index = list(full_data_numeric.index)
        shuffle(new_index)
        full_data_numeric.index = new_index
        prep = StandardScaler()
        prep.fit(full_data_numeric)
        result = prep.transform(full_data_numeric)
        exp_dict = {'a': [-1.11027222, -1.11027222, -1.11027222, -0.71374643,
                          -0.31722063, -0.31722063, 0.87235674, 1.26888254,
                          1.26888254, 1.26888254],
                    'c': [-1.45260037, -1.10674314, -0.76088591, -0.41502868,
                          -0.41502868, -0.41502868, 0.62254302, 1.31425748,
                          1.31425748, 1.31425748],
                    'e': [-1.5666989, -1.21854359, -0.87038828, -0.52223297,
                          -0.17407766, 0.17407766, 0.52223297, 0.87038828,
                          1.21854359, 1.5666989],
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False,
                              check_like=False)
    def test_inverse_transform(self, full_data_numeric):
        # test inverse_transform
        # Round-trip: fit_transform followed by inverse_transform recovers
        # the original data.
        new_index = list(full_data_numeric.index)
        shuffle(new_index)
        full_data_numeric.index = new_index
        prep = StandardScaler()
        transformed = prep.fit_transform(full_data_numeric)
        original = prep.inverse_transform(transformed)
        tm.assert_frame_equal(
            full_data_numeric,
            original,
            check_dtype=False,
            check_like=True
        )
    def test_inverse_partial_transform(self, full_data_numeric):
        # test inverse_transform
        # partial_cols restricts the inverse to just those columns.
        new_index = list(full_data_numeric.index)
        shuffle(new_index)
        full_data_numeric.index = new_index
        prep = StandardScaler()
        transformed = prep.fit_transform(full_data_numeric)
        partial_original = prep.inverse_transform(
            transformed, partial_cols=['a', 'e']
        )
        tm.assert_frame_equal(
            full_data_numeric[['a', 'e']],
            partial_original,
            check_dtype=False,
            check_like=True
        )
    def test_inverse_transform_defined_columns(self, full_data_numeric):
        # test defining which columns to apply standardization to
        # NOTE(review): fit() followed by fit_transform() re-fits; the
        # explicit fit() call is redundant.
        prep = StandardScaler(columns=['a', 'e'])
        prep.fit(full_data_numeric)
        transformed = prep.fit_transform(full_data_numeric)
        result = prep.inverse_transform(transformed)
        tm.assert_frame_equal(
            result, full_data_numeric, check_dtype=False, check_like=True
        )
@pytest.mark.usefixtures("full_data_numeric")
class TestPolynomialFeatures(object):
    """Tests for PolynomialFeatures: power and interaction columns named
    'x^k' and 'x*y', appended after the original columns.
    """
    def test_polynomial_features(self, full_data_numeric):
        # test polynomial feature creation
        # degree=3 produces all monomials of total degree <= 3.
        prep = PolynomialFeatures(degree=3)
        result = prep.fit_transform(full_data_numeric)
        exp_dict = {
            'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
            'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
            'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            'a^2': [4, 4, 4, 9, 16, 16, 49, 64, 64, 64],
            'a*c': [2, 4, 6, 12, 16, 16, 49, 72, 72, 72],
            'a*e': [2, 4, 6, 12, 20, 24, 49, 64, 72, 80],
            'c^2': [1, 4, 9, 16, 16, 16, 49, 81, 81, 81],
            'c*e': [1, 4, 9, 16, 20, 24, 49, 72, 81, 90],
            'e^2': [1, 4, 9, 16, 25, 36, 49, 64, 81, 100],
            'a^3': [8, 8, 8, 27, 64, 64, 343, 512, 512, 512],
            'a^2*c': [4, 8, 12, 36, 64, 64, 343, 576, 576, 576],
            'a^2*e': [4, 8, 12, 36, 80, 96, 343, 512, 576, 640],
            'a*c^2': [2, 8, 18, 48, 64, 64, 343, 648, 648, 648],
            'a*c*e': [2, 8, 18, 48, 80, 96, 343, 576, 648, 720],
            'a*e^2': [2, 8, 18, 48, 100, 144, 343, 512, 648, 800],
            'c^3': [1, 8, 27, 64, 64, 64, 343, 729, 729, 729],
            'c^2*e': [1, 8, 27, 64, 80, 96, 343, 648, 729, 810],
            'c*e^2': [1, 8, 27, 64, 100, 144, 343, 576, 729, 900],
            'e^3': [1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]
        }
        expected = pd.DataFrame(exp_dict)
        # Column order matters here (no check_like), so pin it explicitly.
        expected = expected[[
            'a', 'c', 'e', 'a^2', 'a*c', 'a*e',
            'c^2', 'c*e', 'e^2', 'a^3', 'a^2*c',
            'a^2*e', 'a*c^2', 'a*c*e', 'a*e^2',
            'c^3', 'c^2*e', 'c*e^2', 'e^3'
        ]]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_polynomial_features_interactions(self, full_data_numeric):
        # test polynomial feature creation
        # interaction_only=True: cross-products only, no pure powers.
        prep = PolynomialFeatures(interaction_only=True)
        result = prep.fit_transform(full_data_numeric)
        exp_dict = {
            'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
            'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
            'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            'a*c': [2, 4, 6, 12, 16, 16, 49, 72, 72, 72],
            'a*e': [2, 4, 6, 12, 20, 24, 49, 64, 72, 80],
            'c*e': [1, 4, 9, 16, 20, 24, 49, 72, 81, 90],
        }
        expected = pd.DataFrame(exp_dict)
        expected = expected[[
            'a', 'c', 'e', 'a*c', 'a*e', 'c*e'
        ]]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_unordered_index(self, full_data_numeric):
        # Test unordered index is handled properly
        new_index = list(full_data_numeric.index)
        shuffle(new_index)
        full_data_numeric.index = new_index
        # test polynomial feature creation
        prep = PolynomialFeatures(degree=3)
        result = prep.fit_transform(full_data_numeric)
        exp_dict = {
            'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
            'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
            'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            'a^2': [4, 4, 4, 9, 16, 16, 49, 64, 64, 64],
            'a*c': [2, 4, 6, 12, 16, 16, 49, 72, 72, 72],
            'a*e': [2, 4, 6, 12, 20, 24, 49, 64, 72, 80],
            'c^2': [1, 4, 9, 16, 16, 16, 49, 81, 81, 81],
            'c*e': [1, 4, 9, 16, 20, 24, 49, 72, 81, 90],
            'e^2': [1, 4, 9, 16, 25, 36, 49, 64, 81, 100],
            'a^3': [8, 8, 8, 27, 64, 64, 343, 512, 512, 512],
            'a^2*c': [4, 8, 12, 36, 64, 64, 343, 576, 576, 576],
            'a^2*e': [4, 8, 12, 36, 80, 96, 343, 512, 576, 640],
            'a*c^2': [2, 8, 18, 48, 64, 64, 343, 648, 648, 648],
            'a*c*e': [2, 8, 18, 48, 80, 96, 343, 576, 648, 720],
            'a*e^2': [2, 8, 18, 48, 100, 144, 343, 512, 648, 800],
            'c^3': [1, 8, 27, 64, 64, 64, 343, 729, 729, 729],
            'c^2*e': [1, 8, 27, 64, 80, 96, 343, 648, 729, 810],
            'c*e^2': [1, 8, 27, 64, 100, 144, 343, 576, 729, 900],
            'e^3': [1, 8, 27, 64, 125, 216, 343, 512, 729, 1000]
        }
        expected = pd.DataFrame(exp_dict, index=new_index)
        expected = expected[[
            'a', 'c', 'e', 'a^2', 'a*c', 'a*e',
            'c^2', 'c*e', 'e^2', 'a^3', 'a^2*c',
            'a^2*e', 'a*c^2', 'a*c*e', 'a*e^2',
            'c^3', 'c^2*e', 'c*e^2', 'e^3'
        ]]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
@pytest.mark.usefixtures("missing_data")
class TestContinuousFeatureBinner(object):
    """Tests for ContinuousFeatureBinner: bins one numeric column into a new
    '<field>_GRP' string column; values outside the bins (including NaN)
    become 'Other'.
    """
    def test_feature_binning(self, missing_data):
        # test standard use
        prep = ContinuousFeatureBinner(
            field='a',
            bins=[0, 3, 6, 9]
        )
        result = prep.fit_transform(missing_data)
        expected = {
            'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
            'b': ['123', '123', '123', '234', '456',
                  '456', '789', '789', '789', '789'],
            'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'e': [1, 2, None, None, None, None, None, None, None, None],
            'f': ['a', 'b', None, None, None, None, None, None, None, None],
            'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
            'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
            'a_GRP': ['(0, 3]', '(0, 3]', 'Other', 'Other',
                      '(3, 6]', '(3, 6]', '(6, 9]',
                      '(6, 9]', 'Other', '(6, 9]']
        }
        expected = pd.DataFrame(expected, index=missing_data.index)
        expected = expected[['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a_GRP']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_missing_field_error(self, missing_data):
        # test that specifying a field that doesn't exist returns error
        prep = ContinuousFeatureBinner(
            field='x',
            bins=[0, 3, 6, 9]
        )
        with pytest.raises(ValueError):
            prep.fit_transform(missing_data)
    def test_feature_binning_right(self, missing_data):
        # test standard use
        # right_inclusive=False switches to left-closed '[lo, hi)' bins;
        # the value 8 now falls outside [0, 8) and maps to 'Other'.
        prep = ContinuousFeatureBinner(
            field='a',
            bins=[0, 4, 8],
            right_inclusive=False
        )
        result = prep.fit_transform(missing_data)
        expected = {
            'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
            'b': ['123', '123', '123', '234', '456',
                  '456', '789', '789', '789', '789'],
            'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'e': [1, 2, None, None, None, None, None, None, None, None],
            'f': ['a', 'b', None, None, None, None, None, None, None, None],
            'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
            'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
            'a_GRP': ['[0, 4)', '[0, 4)', 'Other', 'Other',
                      '[4, 8)', '[4, 8)', '[4, 8)', 'Other',
                      'Other', 'Other']
        }
        expected = pd.DataFrame(expected, index=missing_data.index)
        expected = expected[['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a_GRP']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_feature_binning_range(self, missing_data):
        # test use with range()
        # bins may be any iterable of edges, e.g. range(10).
        prep = ContinuousFeatureBinner(
            field='a',
            bins=range(10)
        )
        result = prep.fit_transform(missing_data)
        expected = {
            'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
            'b': ['123', '123', '123', '234', '456',
                  '456', '789', '789', '789', '789'],
            'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'e': [1, 2, None, None, None, None, None, None, None, None],
            'f': ['a', 'b', None, None, None, None, None, None, None, None],
            'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
            'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
            'a_GRP': ['(1, 2]', '(1, 2]', 'Other', 'Other',
                      '(3, 4]', '(3, 4]', '(6, 7]',
                      '(7, 8]', 'Other', '(7, 8]']
        }
        expected = pd.DataFrame(expected, index=missing_data.index)
        expected = expected[['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a_GRP']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_feature_binning_numpy_range(self, missing_data):
        # test standard use
        # bins may also be a numpy array of edges.
        prep = ContinuousFeatureBinner(
            field='a',
            bins=np.arange(0, 10, 3)
        )
        result = prep.fit_transform(missing_data)
        expected = {
            'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
            'b': ['123', '123', '123', '234', '456',
                  '456', '789', '789', '789', '789'],
            'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'e': [1, 2, None, None, None, None, None, None, None, None],
            'f': ['a', 'b', None, None, None, None, None, None, None, None],
            'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
            'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
            'a_GRP': ['(0, 3]', '(0, 3]', 'Other', 'Other',
                      '(3, 6]', '(3, 6]', '(6, 9]',
                      '(6, 9]', 'Other', '(6, 9]']
        }
        expected = pd.DataFrame(expected, index=missing_data.index)
        expected = expected[['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a_GRP']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_unordered_index(self, missing_data):
        # Test unordered index is handled properly
        new_index = list(missing_data.index)
        shuffle(new_index)
        missing_data.index = new_index
        # test standard use
        prep = ContinuousFeatureBinner(
            field='a',
            bins=[0, 3, 6, 9]
        )
        result = prep.fit_transform(missing_data)
        expected = {
            'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
            'b': ['123', '123', '123', '234', '456',
                  '456', '789', '789', '789', '789'],
            'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'e': [1, 2, None, None, None, None, None, None, None, None],
            'f': ['a', 'b', None, None, None, None, None, None, None, None],
            'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
            'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
            'a_GRP': ['(0, 3]', '(0, 3]', 'Other', 'Other',
                      '(3, 6]', '(3, 6]', '(6, 9]',
                      '(6, 9]', 'Other', '(6, 9]']
        }
        # missing_data.index here equals new_index (assigned above).
        expected = pd.DataFrame(expected, index=missing_data.index)
        expected = expected[['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'a_GRP']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
@pytest.mark.usefixtures("missing_data")
class TestTypeExtractor(object):
    """Tests for TypeExtractor, which selects columns of one dtype family."""
    def test_type_extractor_numeric(self, missing_data):
        """'numeric' keeps only the numeric columns a, c, e."""
        # test standard use
        prep = TypeExtractor('numeric')
        result = prep.fit_transform(missing_data)
        expected = {
            'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
            'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
            'e': [1, 2, None, None, None, None, None, None, None, None],
        }
        expected = pd.DataFrame(expected, index=missing_data.index)
        expected = expected[['a', 'c', 'e']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_type_extractor_categorical(self, missing_data):
        """'categorical' keeps only the string columns b, d, f, g, h."""
        # test standard use
        prep = TypeExtractor('categorical')
        result = prep.fit_transform(missing_data)
        expected = {
            'b': ['123', '123', '123', '234', '456',
                  '456', '789', '789', '789', '789'],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'f': ['a', 'b', None, None, None, None, None, None, None, None],
            'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
            'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
        }
        expected = pd.DataFrame(expected, index=missing_data.index)
        expected = expected[['b', 'd', 'f', 'g', 'h']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_type_extractor_feature_union(self, missing_data):
        """Two extractors in a FeatureUnion reassemble the full frame
        (numeric columns first, then categorical)."""
        # test in typical use case with FeatureUnion()
        fu = make_union(
            make_pipeline(
                TypeExtractor('numeric'),
            ),
            make_pipeline(
                TypeExtractor('categorical'),
            )
        )
        result = fu.fit_transform(missing_data)
        exp_dict = {
            'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
            'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
            'e': [1, 2, None, None, None, None, None, None, None, None],
            'b': ['123', '123', '123', '234', '456',
                  '456', '789', '789', '789', '789'],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'f': ['a', 'b', None, None, None, None, None, None, None, None],
            'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
            'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None],
        }
        expected = pd.DataFrame(exp_dict)
        expected = expected[['a', 'c', 'e', 'b', 'd', 'f', 'g', 'h']]
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_type_extractor_feature_union_none(self, missing_data):
        """FeatureUnion still works when one branch matches no columns."""
        # test in typical use case with FeatureUnion() with one dtype
        # without results
        missing_data = missing_data[['a', 'c', 'e']]
        fu = make_union(
            make_pipeline(
                TypeExtractor('numeric'),
            ),
            make_pipeline(
                TypeExtractor('categorical'),
            )
        )
        result = fu.fit_transform(missing_data)
        exp_dict = {
            'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
            'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
            'e': [1, 2, None, None, None, None, None, None, None, None],
        }
        expected = pd.DataFrame(exp_dict)
        expected = expected[['a', 'c', 'e']]
        tm.assert_frame_equal(result, expected, check_dtype=False)
@pytest.mark.usefixtures("missing_data2")
class TestGenericTransformer(object):
    """Tests for GenericTransformer, which applies an arbitrary
    DataFrame -> DataFrame function (optionally with keyword params)."""
    def test_generic_transformer(self, missing_data2):
        """A plain one-argument function is applied to the frame."""
        # test simple function use
        def add_bias(df):
            df['bias'] = 1
            return df
        prep = GenericTransformer(add_bias)
        result = prep.fit_transform(missing_data2)
        data_dict = {
            'a': [1, 2, None, None, 4, 4, 7, 8, None, 8],
            'b': ['123', '123', '123', '123', '123', '789',
                  '789', '789', '789', '789'],
            'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'bias': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        }
        expected = pd.DataFrame(data_dict)
        expected = expected[['a', 'b', 'c', 'd', 'bias']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_generic_transformer_pipe_parameters(self, missing_data2):
        """Extra keyword params are forwarded to the wrapped function,
        and two transformers compose in a pipeline."""
        # test with function that takes parameters in a pipeline
        def add_bias(df):
            df['bias'] = 1
            return df
        def add_columns(df, col1, col2):
            df['new_col'] = df[col1] + df[col2]
            return df
        prep = make_pipeline(
            GenericTransformer(add_bias),
            GenericTransformer(
                function=add_columns,
                params={'col1': 'a', 'col2': 'bias'}
            )
        )
        result = prep.fit_transform(missing_data2)
        # new_col = a + bias; NaN propagates through the addition.
        data_dict = {
            'a': [1, 2, None, None, 4, 4, 7, 8, None, 8],
            'b': ['123', '123', '123', '123', '123', '789',
                  '789', '789', '789', '789'],
            'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'bias': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            'new_col': [2, 3, None, None, 5, 5, 8, 9, None, 9]
        }
        expected = pd.DataFrame(data_dict)
        expected = expected[['a', 'b', 'c', 'd', 'bias', 'new_col']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_generic_transformer_unordered(self, missing_data2):
        """Shuffling the input index must not change the result."""
        # Test unordered index is handled properly
        new_index = list(missing_data2.index)
        shuffle(new_index)
        missing_data2.index = new_index
        def add_bias(df):
            df['bias'] = 1
            return df
        def add_columns(df, col1, col2):
            df['new_col'] = df[col1] + df[col2]
            return df
        prep = make_pipeline(
            GenericTransformer(add_bias),
            GenericTransformer(
                function=add_columns,
                params={'col1': 'a', 'col2': 'bias'}
            )
        )
        result = prep.fit_transform(missing_data2)
        data_dict = {
            'a': [1, 2, None, None, 4, 4, 7, 8, None, 8],
            'b': ['123', '123', '123', '123', '123', '789',
                  '789', '789', '789', '789'],
            'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
            'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
            'bias': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
            'new_col': [2, 3, None, None, 5, 5, 8, 9, None, 9]
        }
        # Expected frame carries the same shuffled index as the input.
        expected = pd.DataFrame(data_dict, index=new_index)
        expected = expected[['a', 'b', 'c', 'd', 'bias', 'new_col']]
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
@pytest.mark.usefixtures("full_data_numeric")
class TestMissingColumnsReplacer(object):
    """Tests for MissingColumnsReplacer, which appends any columns from a
    required list that are absent in the input, filled with a constant."""
    def test_missing_transformer(self, full_data_numeric):
        """Columns b and d are absent and get appended filled with 0."""
        # test two missing colums
        cols = ['a', 'b', 'c', 'd', 'e']
        prep = MissingColumnsReplacer(cols, 0)
        result = prep.fit_transform(full_data_numeric)
        data_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
                     'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
                     'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                     'b': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                     'd': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                     }
        # Original columns first, then the newly added ones.
        expected = pd.DataFrame(data_dict).reindex(
            ['a', 'c', 'e', 'b', 'd'], axis=1)
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_missing_transformer_none_missing(self, full_data_numeric):
        """With no missing columns the frame passes through unchanged."""
        # test no missing colums
        cols = ['a', 'c', 'e']
        prep = MissingColumnsReplacer(cols, 0)
        result = prep.fit_transform(full_data_numeric)
        data_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
                     'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
                     'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
                     }
        expected = pd.DataFrame(data_dict)
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
    def test_missing_transformer_unordered(self, full_data_numeric):
        """A reversed (unordered) index must be preserved in the output."""
        # Test unordered index is handled properly
        new_index = sorted(list(full_data_numeric.index), reverse=True)
        full_data_numeric.index = new_index
        cols = ['a', 'b', 'c', 'd', 'e']
        prep = MissingColumnsReplacer(cols, 0)
        result = prep.fit_transform(full_data_numeric)
        data_dict = {'a': [2, 2, 2, 3, 4, 4, 7, 8, 8, 8],
                     'c': [1, 2, 3, 4, 4, 4, 7, 9, 9, 9],
                     'e': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
                     'b': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                     'd': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                     }
        expected = pd.DataFrame(data_dict, index=new_index).reindex(
            ['a', 'c', 'e', 'b', 'd'], axis=1)
        tm.assert_frame_equal(
            result,
            expected,
            check_dtype=False,
        )
@pytest.mark.usefixtures("full_data_factors")
class TestSklearnPandasWrapper(object):
    """Tests for SklearnPandasWrapper, which wraps an sklearn transformer
    so that it consumes and produces pandas DataFrames."""
    def test_ordinal(self, full_data_factors):
        """Wrapped OrdinalEncoder maps categories to their list positions."""
        # Test with OrdinalEncoder
        kwargs = {'categories':
                  [['c', 'b', 'a'],
                   ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j']]}
        prep = SklearnPandasWrapper(OrdinalEncoder(**kwargs))
        prep.fit(full_data_factors)
        new_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
                    'd': ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j', 'j']
                    }
        new_data = pd.DataFrame(new_dict)
        result = prep.transform(new_data)
        # 'c' uses the custom order c->0, b->1, a->2; 'd' is alphabetical.
        exp_dict = {'c': [2, 2, 2, 1, 1, 0, 0, 2, 2, 0],
                    'd': [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]
                    }
        expected = pd.DataFrame(exp_dict)
        tm.assert_frame_equal(result, expected, check_dtype=False)
    def test_missing_columns_error(self, full_data_factors):
        """Transforming data that lacks a fitted column raises ValueError."""
        # Test throwing an error when the new data is missing columns
        kwargs = {'categories':
                  [['c', 'b', 'a'],
                   ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j']]}
        prep = SklearnPandasWrapper(OrdinalEncoder(**kwargs))
        prep.fit(full_data_factors)
        new_dict = {'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c']
                    }
        new_data = pd.DataFrame(new_dict)
        with pytest.raises(ValueError):
            prep.transform(new_data)
    def test_unordered_index(self, full_data_factors):
        """A reversed index must be carried through to the output frame."""
        # Test unordered index is handled properly
        new_index = list(full_data_factors.index)[::-1]
        full_data_factors.index = new_index
        kwargs = {'categories':
                  [['c', 'b', 'a'],
                   ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'j']]}
        prep = SklearnPandasWrapper(OrdinalEncoder(**kwargs))
        prep.fit(full_data_factors)
        result = prep.transform(full_data_factors)
        exp_dict = {'c': [2, 2, 2, 1, 1, 0, 0, 2, 2, 0],
                    'd': [0, 1, 2, 3, 4, 5, 6, 7, 8, 8]
                    }
        expected = pd.DataFrame(exp_dict, index=new_index)
        tm.assert_frame_equal(result, expected, check_dtype=False)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pylab

# Demo: build and print a small mixed-type pandas Series.
#
# BUG FIX: the original passed 5 data values but only 3 index labels
# (['A', 'B', 'C']), which makes the pd.Series constructor raise
# ValueError ("Length of values does not match length of index").
# Supply one label per value so the script actually runs.
# (Semicolons removed -- they are never needed in Python.)
s = pd.Series([7, "Hesisenber", 3.14, -92393929, 'happy foo'],
              index=['A', 'B', 'C', 'D', 'E'])
print(s)
from django.shortcuts import render
from .models import Chef, Join
from manager.models import Restaurant
from django.http import HttpResponseRedirect
def render_orders(request, chef_id, res_id):
    """Render the chef's pending order queue for a given restaurant.

    Shows all Join rows for this chef that are not yet ready, oldest first.
    Raises DoesNotExist if the restaurant or chef id is unknown.
    """
    res = Restaurant.objects.get(id=res_id)
    # Scope the chef lookup to this restaurant so a chef_id belonging to a
    # different restaurant cannot be viewed here.
    chef = Chef.objects.filter(restaurant=res).get(chef_id=chef_id)
    joins = Join.objects.filter(chef=chef, ready=False).order_by('created_at')
    return render(request, 'chef-orders.html', {'chef': chef, 'res': res_id, 'joins': joins})
def dish_complete(request, join_id):
    """Mark a Join (ordered dish) as ready and free up the chef's workload.

    Decrements the chef's accumulator by the dish's preparation time, flags
    the join as ready, and redirects back to the chef's home page.
    """
    join = Join.objects.get(id=join_id)
    # NOTE(review): read-modify-write on accumulator is not atomic; under
    # concurrent requests an F('accumulator') expression would be safer --
    # confirm whether concurrent completion is possible in this app.
    join.chef.accumulator -= join.dish.time_to_do
    chef_id = str(join.chef.chef_id)
    res_id = str(join.chef.restaurant.id)
    join.chef.save()
    join.ready = True
    join.save()
    return HttpResponseRedirect('/chef/home/' + chef_id + "/" + res_id)
|
# Generated by Django 3.2.4 on 2021-07-06 14:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a nullable creation timestamp (`made_on`) to the `forms` model."""
    dependencies = [
        ('pay', '0005_rename_payment_id_forms_order_id'),
    ]
    operations = [
        migrations.AddField(
            model_name='forms',
            name='made_on',
            # null=True lets existing rows migrate without a default value;
            # auto_now_add stamps new rows at creation time.
            field=models.DateTimeField(auto_now_add=True, null=True),
        ),
    ]
|
# Generated by Django 2.2.4 on 2019-08-15 23:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change `pyproduct.cost` and `pyproduct.price` to plain IntegerField."""
    dependencies = [
        ('base', '0009_auto_20190815_2330'),
    ]
    operations = [
        migrations.AlterField(
            model_name='pyproduct',
            name='cost',
            field=models.IntegerField(),
        ),
        migrations.AlterField(
            model_name='pyproduct',
            name='price',
            field=models.IntegerField(),
        ),
    ]
|
import smtplib
from email.message import EmailMessage
from gsheet import authenticate
# USE THE AUTHENTICATION
# Authenticate against Google Sheets with a service-account credentials file.
client = authenticate("creds/creds.json")
# OPEN SHEET AND DO SOME MODIFICATIONS
sheet_url = "https://docs.google.com/spreadsheets/d/1DNKBYN3SQHPvjdy7eQ018t5HDaU-qDuzULcGqV5qJm8/edit#gid=408336874"
workbook = client.open_by_url(sheet_url)
selected_tab = workbook.worksheet("Sheet1")
# CREDENTIALS
# First line of the file is the address, second line the password.
# NOTE(review): the file handle is never closed and [:-1] assumes a trailing
# newline on the first line only -- .strip() on both would be safer; confirm
# the credential file format before changing.
creds = open("creds/aa_gmail.txt").readlines()
my_email = creds[0][:-1]
my_password = creds[1]
# RECIPIENT DETAILS
# recipient_emails = [
#     'mahmudur.rahman99@gmail.com',
#     'neelameherunnesa@gmail.com',
#     'marsdecoder@gmail.com'
# ]
starting_row = int(input("Enter the Starting Row Number: "))
finishing_row = int(input("Enter the Final Row Number: "))
# Column H of the sheet holds the recipient addresses (one per row).
collected_mails = selected_tab.get(f"H{starting_row}:H{finishing_row}")
recipient_emails = []
for mail in collected_mails:
    # Each sheet row comes back as a single-cell list; take the cell value.
    recipient_emails.append(mail[0])
# print(recipient_emails)
# MESSAGE DETAILS
def send_email_to(e_mail, smtp):
msg = EmailMessage()
msg['From'] = "Amber Abder"
msg['To'] = e_mail
msg['Subject'] = "HEAR WHAT PAST ATTENDEES HAD TO SAY ABOUT THEIR SIGNAL EXPERIENCE."
msg_body = '''
Hi,
If you’ve never attended SIGNAL live before,
then you may not know why thousands of your peers keep it on their can’t miss list each year.
So we reached out to some past attendees to find out what they love about SIGNAL,
and we’re excited to share what we learned.
'''
msg.set_content(msg_body)
# SEND MESSAGE
smtp.send_message(msg)
# Send one e-mail per recipient over a single TLS-wrapped SMTP session.
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
    smtp.login(my_email, my_password)
    for i, address in enumerate(recipient_emails):
        try:
            send_email_to(address, smtp)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit -- you could not even Ctrl-C out
            # of a stuck run. Exception keeps the best-effort behaviour
            # (log the sheet row and continue) without trapping exits.
            row_no = starting_row + i
            print(f"{row_no} missed!")
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-19 00:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow `invmarketgroup.description` up to 3000 chars and NULL."""
    dependencies = [
        ('evesde', '0014_auto_20161019_0013'),
    ]
    operations = [
        migrations.AlterField(
            model_name='invmarketgroup',
            name='description',
            field=models.CharField(max_length=3000, null=True),
        ),
    ]
|
#!/usr/bin/python3
## Generate a six-digit number
import random
# NOTE(review): dir() here just returns the module's attribute list and the
# result is discarded -- this line has no effect.
dir (random)
for i in range(6):
    print (random.randint(0,9),end = "")
## Randomly pick an even number in the range 0-100
print (random.randrange(0,101,2))
## Generate six characters where positions 1, 3 and 5 (even indices)
## are random uppercase letters and the others are the loop index
for i in range(6):
    if i%2 == 0:
        # chr(65..90) is an uppercase ASCII letter A-Z.
        print (chr(random.randint(65,90)),end = "")
    else:
        print (i,end = "")
## Generate a 4-character mix of digits and letters: one randomly chosen
## position becomes a letter, the rest print the loop index
for i in range(4):
    ran_count = random.randint(0,3)
    if i == ran_count:
        print (chr(random.randint(65,90)),end = "")
    else:
        print (i,end = "")
# Same idea, but non-letter positions print a random digit instead.
for i in range(4):
    rand_count = random.randint(0,3)
    if i == rand_count:
        print (chr(random.randint(65,90)),end = "")
    else:
        print (random.randint(0,9),end = "")
# Build a 4-character check code string instead of printing piecemeal.
check_code = ''
for i in range(4):
    rand_count = random.randint(0,3)
    if i == rand_count:
        # Replace the chosen position's value with a random letter.
        rand_count = chr(random.randint(65,90))
    check_code += str(rand_count)
print (check_code)
|
import math
import os
import random
import re
import sys
from dateutil.parser import parse
# Complete the time_delta function below.
def time_delta(t1, t2):
    """Return the absolute difference between two timestamps, in seconds,
    as a string.

    Both inputs use the fixed format 'Day dd Mon yyyy hh:mm:ss +xxxx',
    e.g. 'Sun 10 May 2015 13:54:36 -0700'. Timezone offsets are honoured
    via the %z directive, so the subtraction is done in absolute time.

    Improvement: replaces the third-party dateutil.parser.parse with the
    standard library's datetime.strptime -- the input format is fixed by
    the problem statement, so the lenient parser is unnecessary.
    """
    from datetime import datetime  # local import: keeps the file's deps unchanged
    fmt = '%a %d %b %Y %H:%M:%S %z'
    dt1 = datetime.strptime(t1, fmt)
    dt2 = datetime.strptime(t2, fmt)
    return str(abs(int((dt1 - dt2).total_seconds())))
if __name__ == '__main__':
    # HackerRank driver: read t test cases (two timestamp lines each) from
    # stdin and write one delta per line to the grader's output file.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    t = int(input())
    for t_itr in range(t):
        t1 = input()
        t2 = input()
        # t1 = "Sun 10 May 2015 17:30:00 +0530"
        # t2 = "Sun 10 May 2015 12:05:00 -0000"
        delta = time_delta(t1, t2)
        fptr.write(delta + '\n')
    fptr.close()
#! /usr/bin/python
# -*- coding:UTF-8 -*-
import string
import os, sys
import glob
from PIL import Image
# VisDrone class ids 1..10 map to these names (index = id - 1).
# NOTE(review): entry 6 reads 'trunk' but the comment below says 'truck' --
# looks like a typo in the label list; confirm against the dataset spec
# before any consumer relies on the emitted <name> tags.
Model_list=['pedestrian','person','car','van','bus','trunk','motor','bicycle','awning-tricycle','tricycle']
# pedestrian, person, car, van, bus, truck, motor, bicycle, awning-tricycle, and tricycle
# 1. Directory where the source images are stored
###src_img_dir = "/home/lxj/VisDrone/VisDrone2018-DET-train/images"
src_img_dir = "/home/wxy/mmdetection/data/VisDrone/test/images"
# 2. Directory holding the ground-truth txt annotation files
###src_txt_dir = "/home/lxj/VisDrone/VisDrone2018-DET-train/annotations"
src_txt_dir = "/home/wxy/mmdetection/data/VisDrone/test/annotations"
# Output directory for the generated Pascal-VOC style XML files.
src_xml_dir = "/home/wxy/mmdetection/data/VisDrone/test/xmls"
img_Lists = glob.glob(src_img_dir + '/*.jpg')
img_basenames = [] # e.g. 100.jpg
for item in img_Lists:
    img_basenames.append(os.path.basename(item))
img_names = [] # e.g. 100
for item in img_basenames:
    temp1, temp2 = os.path.splitext(item)
    img_names.append(temp1)
# Convert each image's VisDrone txt annotation into a Pascal-VOC XML file.
for img in img_names:
    im = Image.open((src_img_dir + '/' + img + '.jpg'))
    print(img)
    width, height = im.size
    # open the crospronding txt file
    gt = open(src_txt_dir + '/' + img + '.txt').read().splitlines()
    #gt = open(src_txt_dir + '/gt_' + img + '.txt').read().splitlines()
    # write in xml file
    #os.mknod(src_xml_dir + '/' + img + '.xml')
    xml_file = open((src_xml_dir + '/' + img + '.xml'), 'w')
    xml_file.write('<annotation>\n'
                   '')
    xml_file.write('    <folder>VOC2007</folder>\n')
    xml_file.write('    <filename>' + str(img) + '.jpg' + '</filename>\n')
    xml_file.write('    <size>\n')
    xml_file.write('        <width>' + str(width) + '</width>\n')
    xml_file.write('        <height>' + str(height) + '</height>\n')
    xml_file.write('        <depth>3</depth>\n')
    xml_file.write('    </size>\n')
    xml_file.write('    <segmented>0</segmented>\n')
    # write the region of image on xml file
    for img_each_label in gt:
        # VisDrone annotation fields are comma-separated:
        # x, y, w, h, score/valid-flag, class-id, ...
        spt = img_each_label.split(',') # if the txt is comma-separated, split on ','
        #print(spt)
        # Skip entries whose 5th field is 0 (ignored regions).
        if int(spt[4])==0:
            continue
        xml_file.write('    <object>\n')
        # class-id is 1-based, Model_list is 0-based.
        xml_file.write('        <name>' + str(Model_list[int(spt[5])-1]) + '</name>\n')
        xml_file.write('        <pose>Unspecified</pose>\n')
        xml_file.write('        <truncated>' + str(spt[7]) + '</truncated>\n')
        xml_file.write('        <difficult>0</difficult>\n')
        xml_file.write('        <bndbox>\n')
        # txt stores x, y, width, height; VOC wants corner coordinates.
        xml_file.write('            <xmin>' + str(spt[0]) + '</xmin>\n')
        xml_file.write('            <ymin>' + str(spt[1]) + '</ymin>\n')
        xml_file.write('            <xmax>' + str(int(spt[0])+int(spt[2])) + '</xmax>\n')
        xml_file.write('            <ymax>' + str(int(spt[1])+int(spt[3])) + '</ymax>\n')
        xml_file.write('        </bndbox>\n')
        xml_file.write('    </object>\n')
    xml_file.write('</annotation>')
|
# Restrict `from <package> import *` to these names -- presumably the
# package's ex2 and setting submodules; verify against the package layout.
__all__ = ["ex2", "setting"]
from sys import argv, exit
# print("I know that these are the words the user typed on the command line: ", argv)
from helpers import alphabet_position, rotate_character
def encrypt(text, rot):
    """Caesar-encrypt *text* by rotating each character *rot* positions.

    Per-character rotation (including non-letter handling) is delegated to
    helpers.rotate_character. Returns the encrypted string; the public
    signature is unchanged.
    """
    # BUG FIX: the original guard `if letter in alphaup == True` was a
    # chained comparison -- (letter in alphaup) and (alphaup == True) --
    # which is always False. Both branches appended the same value anyway,
    # so the if/else (and the unused alphaup/alphalow strings) were dead
    # code and are removed; behavior is unchanged.
    new_out = ""
    for letter in text:
        new_out = new_out + rotate_character(letter, rot)
    return new_out
def user_input_is_valid(cl_args):
    """Return True when a rotation argument is present and numeric.

    *cl_args* is an argv-style list: index 0 is the script name, index 1
    is the expected rotation amount.
    """
    if len(cl_args) == 1:
        # Only the script name was given -- no rotation argument at all.
        return False
    # isnumeric() is already the boolean we want: False for a non-numeric
    # argument, True for a usable rotation amount.
    return cl_args[1].isnumeric()
# print(encrypt(input("Text?")))
|
from queue import Queue
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
from src.d3_network.command import Command
from src.robot.hardware.command.stm_command_definition import commands_from_stm
from src.robot.robot_controller import RobotController
class TestScenarioRobotController(TestCase):
    """End-to-end scenarios for RobotController.main_loop().

    Each scenario pre-loads the network request queue and the STM response
    deque, runs the main loop, and checks the done/sent/todo structures.
    """
    def setUp(self):
        # NOTE(review): network_ctrl is configured but never passed to the
        # controller below -- presumably left over from an earlier version;
        # confirm whether it can be removed.
        network_ctrl = MagicMock()
        network_ctrl.attach_mock(Mock(return_value=None), 'wait_message')
        self.ctrl = RobotController(MagicMock(), MagicMock(), MagicMock(), MagicMock())
    def test_scenario_1_easy_going(self):
        """Three commands, each ACKed and completed successfully."""
        self.__set_up_scenario_1()
        self.ctrl.main_loop()
        self.assertEqual(3, self.ctrl._stm_done_queue.qsize())
        self.assertEqual(1, self.ctrl._stm_sent_queue.empty())
        self.assertRaises(IndexError, self.ctrl._stm_commands_todo.pop)
    def test_scenario_2_task_failure(self):
        # NOTE(review): this calls __set_up_scenario_1(), not the
        # __set_up_scenario_2() defined below (which is never used), so it
        # re-runs the all-success scenario instead of the failure one --
        # confirm and switch, adjusting the expected counts if needed.
        self.__set_up_scenario_1()
        self.ctrl.main_loop()
        self.assertEqual(3, self.ctrl._stm_done_queue.qsize())
        self.assertEqual(1, self.ctrl._stm_sent_queue.empty())
        self.assertRaises(IndexError, self.ctrl._stm_commands_todo.pop)
    def test_scenario_3_ir_signal(self):
        """Commands plus an infrared-signal request answered with raw bytes."""
        self.__set_up_scenario_3()
        self.ctrl.main_loop()
        self.assertEqual(4, self.ctrl._stm_done_queue.qsize())
        self.assertTrue(self.ctrl._stm_sent_queue.empty())
        self.assertRaises(IndexError, self.ctrl._stm_commands_todo.pop)
    def test_scenario_4_many_moves_and_ir(self):
        """A composite ACTION of three moves, then an IR request."""
        self.__set_up_scenario_4()
        self.ctrl.main_loop()
        self.assertTrue(self.ctrl._stm_sent_queue.empty())
        self.assertRaises(IndexError, self.ctrl._stm_commands_todo.pop)
        self.assertEqual(5, self.ctrl._stm_done_queue.qsize())
    def test_scenario_5_no_ack_but_success(self):
        """A task completes successfully even though its ACK never arrives."""
        self.__set_up_scenario_5()
        self.ctrl.main_loop()
        self.assertTrue(self.ctrl._stm_sent_queue.empty())
        self.assertRaises(IndexError, self.ctrl._stm_commands_todo.pop)
        self.assertEqual(3, self.ctrl._stm_done_queue.qsize())
    def test_scenario_6_no_ack_but_failed(self):
        """A task fails after a missing ACK and is retried to completion."""
        self.__set_up_scenario_6()
        self.ctrl.main_loop()
        self.assertTrue(self.ctrl._stm_sent_queue.empty())
        self.assertRaises(IndexError, self.ctrl._stm_commands_todo.pop)
        self.assertEqual(3, self.ctrl._stm_done_queue.qsize())
    def __set_up_scenario_1(self):
        """Three requests; each gets ACK then SUCCESS from the STM."""
        self.ctrl._network_request_queue = Queue()
        self.ctrl._network_request_queue.put({'command': 'move-forward', 'amplitude': 13.0})
        self.ctrl._network_request_queue.put({'command': 'move-right', 'amplitude': 10.0})
        self.ctrl._network_request_queue.put({'command': 'end-signal'})
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
    def __set_up_scenario_2(self):
        """Like scenario 1 but the second task reports UNSUCCESSFULL_TASK.

        NOTE(review): currently unused -- see test_scenario_2_task_failure.
        """
        self.ctrl._network_request_queue = Queue()
        self.ctrl._network_request_queue.put({'command': 'move-forward', 'amplitude': 13.0})
        self.ctrl._network_request_queue.put({'command': 'move-right', 'amplitude': 10.0})
        self.ctrl._network_request_queue.put({'command': 'end-signal'})
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.UNSUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
    def __set_up_scenario_3(self):
        """Two moves, an infrared request (answered with raw bytes), then end."""
        self.ctrl._network_request_queue = Queue()
        self.ctrl._network_request_queue.put({'command': Command.MOVE_FORWARD, 'amplitude': 13.0})
        self.ctrl._network_request_queue.put({'command': Command.MOVE_RIGHT, 'amplitude': 10.0})
        self.ctrl._network_request_queue.put({'command': Command.INFRARED_SIGNAL})
        self.ctrl._network_request_queue.put({'command': 'end-signal'})
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        # Raw IR payload instead of a status message.
        self.ctrl._stm_responses_deque.append(commands_from_stm.Feedback(bytearray(b'\xb0\x75\x12\xc9')))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
    def __set_up_scenario_4(self):
        """A composite ACTION of three moves, executed, then an IR request."""
        self.ctrl._network_request_queue = Queue()
        self.ctrl._network_request_queue.put({'command': Command.ACTION, 'actions': [
            {'command': Command.MOVE_BACKWARD, 'amplitude': 18},
            {'command': Command.MOVE_FORWARD, 'amplitude': 90},
            {'command': Command.MOVE_LEFT, 'amplitude': 30}]})
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        # Drain the composite action before queueing the IR request.
        self.ctrl.execute()
        self.ctrl._network_request_queue.put({'command': Command.INFRARED_SIGNAL})
        self.ctrl._network_request_queue.put({'command': 'end-signal'})
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(commands_from_stm.Feedback(bytearray(b'\xb0\x75\x12\xc9')))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
    def __set_up_scenario_5(self):
        """Second sub-task succeeds without its ACK ever arriving."""
        self.ctrl._network_request_queue = Queue()
        self.ctrl._network_request_queue.put({'command': Command.ACTION, 'actions': [
            {'command': Command.MOVE_BACKWARD, 'amplitude': 18},
            {'command': Command.MOVE_BACKWARD, 'amplitude': 18}]})
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        # Missing ACK here: SUCCESS arrives directly for the second move.
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl.execute()
        self.ctrl._network_request_queue.put({'command': 'end-signal'})
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
    def __set_up_scenario_6(self):
        """Missing ACK followed by a failure, then a successful retry."""
        self.ctrl._network_request_queue = Queue()
        self.ctrl._network_request_queue.put({'command': Command.ACTION, 'actions': [
            {'command': Command.MOVE_BACKWARD, 'amplitude': 18},
            {'command': Command.MOVE_BACKWARD, 'amplitude': 18}]})
        self.ctrl._network_request_queue.put({'command': 'end-signal'})
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        # No ACK for the second move; it reports failure instead.
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.UNSUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.TASK_RECEIVED_ACK.value))
        self.ctrl._stm_responses_deque.append(
            commands_from_stm.Feedback(commands_from_stm.Message.SUCCESSFULL_TASK.value))
|
#coding:utf-8
from django.db import models
from django.db.models import signals
from django.contrib.auth.models import User
from django.core.mail import send_mail
class Document(models.Model):
    # Body elided in this note snippet ([...] is a literal placeholder).
    [...]
class Comment(models.Model):
    [...]
def notify_admin(sender, instance, created, **kwargs):
    '''Notify the administrator that a new user has been added.

    post_save handler: fires only on creation (created=True), not updates.
    '''
    if created:
        subject = 'New user created'
        message = 'User %s was added' % instance.username
        from_addr = 'no-reply@example.com'
        recipient_list = ('admin@example.com',)
        send_mail(subject, message, from_addr, recipient_list)
# Register the handler for User saves only (sender filters the model).
signals.post_save.connect(notify_admin, sender=User)
# post_save signal 由 Django 提供,每次保存或创建模型时都会激活。connect() 方法带有两个参数:一个回调参数(notify_admin)和 sender 参数,后者指定该回调只关注 User 模型的保存事件。
# contrib.auth.signals.py defines three signals:
from django.dispatch import Signal
user_logged_in = Signal(providing_args=['request', 'user']) # login
user_login_failed = Signal(providing_args=['credentials']) # failure
user_logged_out = Signal(providing_args=['request', 'user']) # logout
def notify(sender, request,user, **kwargs):
    # NOTE(review): connecting the handler inside its own body looks like a
    # mis-indented snippet -- the connect() call presumably belongs at
    # module level; confirm against the original notes.
    signals.user_logged_in.connect(notify)
|
import u12
# Open two LabJack U12 DAQ devices, selected by serial number.
d1 = u12.U12(serialNumber=100054654)
d2 = u12.U12(serialNumber=100035035)
print('d1')
# Read and print analog input channels 0-7 of the first device.
for i in range(8):
    print(d1.eAnalogIn(i))
print('d2')
# Same for the second device.
for i in range(8):
    print(d2.eAnalogIn(i))
|
# Python 3.x
# encoding="utf-8"
# terminal based script
# Search Archive & Folder with same name in given path
# for windows OS better run with admin rights to write results (cmd)
# or simply run archfs.py file in folder (open file)
# Author: Farid Kemyakov aka Rev3n4nt
# CoAuthor: Aziz Kemyakov aka DoomStal
import sys
import re
from os import stat, listdir, path
# Add here path where to skip search:
skip = [
    r'C:/windows',
    r'C:/Program Files',
    r'C:/Program Files (x86)',
]
# add here archive extensions:
extensions = [
    '.zip',
    '.7z',
    '.rar',
    '.tar.gz', # complicated must be higher than normals
    '.tar',
    '.gz',
]
# Result will be written here:
# NOTE(review): the handle is only flushed/closed at interpreter exit --
# an explicit close (or `with`) at the end of the script would be safer.
output = open('result.txt', 'w')
# Compile the skip prefixes once; matching is case-insensitive.
skip = [ re.compile(raw, re.IGNORECASE) for raw in skip ]
# basename -> list of paths, filled by walk() below.
archives = dict()
dirs_all = dict()
# Length of the last progress line written by print_r (for overwrite).
_print_r = 0
def print_r(str):
    """Overwrite the current console line with *str* (progress display).

    Erases the previously printed text (tracked in the _print_r global)
    with backspaces/spaces, returns the cursor, then writes the new text.
    """
    # NOTE: the parameter shadows the builtin `str`; it is only used as
    # text here, but renaming it would be a behavior-neutral cleanup.
    global _print_r
    sys.stdout.write('\b'*_print_r)
    sys.stdout.write(' '*_print_r)
    sys.stdout.write('\r')
    sys.stdout.write(str)
    _print_r = len(str)
def walk(cwd, depth=0):
    """Recursively index *cwd*.

    Side effects on module globals:
      * dirs_all -- maps lowercased dir basename (extension stripped)
                    to the list of directory paths with that name
      * archives -- maps lowercased archive basename (extension stripped)
                    to the list of archive file paths with that name
    Directories matching a compiled `skip` pattern are not descended into.
    """
    #if depth > 2:
    #    return
    try:
        entries = [cwd+'/'+f for f in listdir(cwd)]
    except OSError:
        # BUG FIX: was `except WindowsError` -- that name does not exist on
        # non-Windows platforms, so a permission error there surfaced as a
        # NameError. On Windows, WindowsError is an alias of OSError, so
        # this stays backward compatible. (Also renamed the local `all`,
        # which shadowed the builtin.)
        print('no access to '+cwd+'\n')
        return
    name = path.splitext( path.basename(cwd.lower()) )[0]
    if name not in dirs_all:
        dirs_all[name] = []
    dirs_all[name].append(cwd)
    print_r(cwd)
    files = [f for f in entries if path.isfile(f)]
    for f in files:
        # First matching extension wins; '.tar.gz' is listed before '.gz'
        # in `extensions` so compound suffixes are stripped correctly.
        ext = next( (x for x in extensions if f.endswith(x)), None)
        if( ext ):
            name = path.basename(f.lower())[:-len(ext)]
            if name not in archives:
                archives[name] = []
            archives[name].append(f)
            print_r('archive found '+name+'\n')
            print_r('    '+f+'\n')
    dirs = [f for f in entries if path.isdir(f)]
    for d in dirs:
        if( any( reg.match(d) for reg in skip ) ):
            print_r('skipped '+d+'\n')
            continue
        walk(d, depth+1)
# Add here path to search:
walk('D:/Games')
# walk('D:/DEV')
print_r('searching matches\n')
# Report every name that exists both as a directory and as an archive.
for d in sorted(dirs_all):
    if d in archives:
        print('\n***** archives ********')
        output.write('\n***** archives ********\n')
        for f in archives[d]:
            st = stat(f)
            # NOTE(review): `+''+` concatenates with an empty string --
            # probably a space (' ') was intended between size and path.
            print(str(st.st_size)+''+f)
            # File writes the size in MiB; console prints raw bytes.
            output.write(str(st.st_size/1048576)+' mb\n    '+f+'\n')
        print('\n***** directories *****')
        output.write('\n***** directories *****\n')
        for f in dirs_all[d]:
            print(f)
            output.write(f+'\n')
Python2.7 安装以及安装pip
一、准备工作
1.下载Python源码包
$ wget http://python.org/ftp/python/2.7.3/Python-2.7.3.tar.bz2 --no-check-certificate
2.查看是否安装make工具
$ rpm -qa|grep make
automake-1.11.1-4.el6.noarch
make-3.81-20.el6.x86_64
### 如果没有安装make工具
yum -y install gcc automake autoconf libtool make
3.查看是否安装zlib库
$ rpm -qa|grep zlib
zlib-devel-1.2.3-29.el6.x86_64
zlib-1.2.3-29.el6.x86_64
### 如果没有安装安装zlib
yum install zlib-devel
4.检查是否安装ssl 库
$ rpm -qa|grep openssl
openssl-devel-1.0.1e-16.el6_5.x86_64
openssl-static-1.0.1e-16.el6_5.x86_64
openssl098e-0.9.8e-17.el6.centos.2.x86_64
openssl-1.0.1e-16.el6_5.x86_64
openssl-perl-1.0.1e-16.el6_5.x86_64
安装openssl
yum install openssl*
5.安装bzip2依赖库
yum install -y bzip2*
二、编译安装
$ cp Python-2.7.3.tar.bz2 /usr/src/
$ tar -jxvf Python-2.7.3.tar.bz2
$ vi Modules/Setup.dist
找到一下内容把注释去掉
#SSL=/usr/local/ssl
#_ssl _ssl.c \
# -DUSE_SSL -I$(SSL)/include -I$(SSL)/include/openssl \
# -L$(SSL)/lib -lssl -lcrypto
......
#zlib zlibmodule.c -I$(prefix)/include -L$(exec_prefix)/lib -lz
python安装了2.7之后终端无法使用退格,上下左右
yum install readline-devel
然后重新编译安装python,终端控制符可用!
$ ./configure --prefix=/usr/local/python2.7
$ make all
$ make install
$ make clean
$ make distclean
建立python2.7 软链
$ mv /usr/bin/python /usr/bin/python.bak
$ ln -s /usr/local/python2.7/bin/python2.7 /usr/bin/python2.7
$ ln -s /usr/bin/python2.7 /usr/bin/python
解决yum无法使用的问题
$ yum update
因为centos6.5 下yum默认使用的是python2.6
vim /usr/bin/yum
#!/usr/bin/python
修改为
#!/usr/bin/python2.6
三、安装python-pip工具
$ wget https://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11-py2.7.egg --no-check-certificate
$ chmod +x setuptools-0.6c11-py2.7.egg
$ sh setuptools-0.6c11-py2.7.egg
安装pip
$ wget https://pypi.python.org/packages/source/p/pip/pip-1.3.1.tar.gz --no-check-certificate
$ cp pip-1.3.1.tar.gz /usr/src/
$ tar zxvf pip-1.3.1.tar.gz
$ cd pip-1.3.1
$ python setup.py install
$ ln -s /usr/local/python2.7/bin/pip /usr/bin/pip
|
from enum import IntEnum
from struct import pack, unpack
from attr import attr, attributes
from rv.errors import ModuleOwnershipError, PatternOwnershipError
from rv.lib.validators import in_range
from rv.modules.module import Module
# The 120 playable notes cover octaves 0-9 with twelve semitones each.
# Uppercase letters are naturals, lowercase are sharps (c0 == C#0).
# Values start at 1 and follow definition order, exactly as the original
# hand-written class body did.
_NOTE_LETTERS = "C c D d E F f G g A a B".split()

NOTE = IntEnum(
    "NOTE",
    [letter + str(octave) for octave in range(10) for letter in _NOTE_LETTERS],
)
NOTE.__doc__ = """All notes available for patterns or mapping."""
ALL_NOTES = set(NOTE)
# Pattern cells share one value space: 0 is "no event", 1-120 are the notes
# (identical layout to NOTE), and 128+ are control commands.
_NOTECMD_MEMBERS = [
    (letter + str(octave), 12 * octave + index + 1)
    for octave in range(10)
    for index, letter in enumerate("C c D d E F f G g A a B".split())
]
_NOTECMD_MEMBERS += [
    ("EMPTY", 0),
    ("NOTE_OFF", 128),
    ("ALL_NOTES_OFF", 129),  # notes of all synths off
    ("CLEAN_SYNTHS", 130),  # stop and clean all synths
    ("STOP", 131),
    ("PLAY", 132),
    ("SET_PITCH", 133),
    ("PREV_TRACK", 134),
]

NOTECMD = IntEnum("NOTECMD", _NOTECMD_MEMBERS)
NOTECMD.__doc__ = """All notes and commands available for patterns."""
# The 32 DELAY_EVENT_FOR_0xNN_PCT_OF_LINE members (0x40-0x5F) follow a
# strictly regular naming pattern, so they are generated instead of being
# spelled out by hand; all other effects are listed explicitly.
_PATTERN_EFFECT_MEMBERS = [
    ("SLIDE_UP", 0x01),
    ("SLIDE_DOWN", 0x02),
    ("SLIDE_TO_NOTE", 0x03),
    ("VIBRATO", 0x04),
    ("ARPEGGIO", 0x08),
    ("SET_SAMPLE_OFFSET", 0x09),
    ("SET_SAMPLE_OFFSET_BY_PERCENTAGE", 0x07),
    ("SLIDE_VELOCITY_UP_DOWN", 0x0A),
    ("SET_PLAYING_SPEED", 0x0F),
    ("FINESLIDE_UP", 0x11),
    ("FINESLIDE_DOWN", 0x12),
    ("SET_BYPASS_SOLO_MUTE_FLAGS", 0x13),
    ("RESET_BYPASS_SOLO_MUTE_FLAGS", 0x14),
    ("CHANGE_RELATIVE_NOTE_XX_AND_FINETUNE_YY", 0x15),
    ("RETRIGGER", 0x19),
    ("CUT", 0x1C),
    ("DELAY", 0x1D),
    ("SET_BPM_TO_XXYY", 0x1F),
    ("NOTE_PROBABILITY", 0x20),
    ("NOTE_PROBABILITY_WITH_RANDOM_VELOCITY", 0x21),
    ("WRITE_RANDOM_VALUE_0_XXYY_TO_CONTROLLER", 0x22),
    ("WRITE_RANDOM_VALUE_XX_YY_TO_CONTROLLER", 0x23),
    ("NOTE_FROM_LINE_XXYY", 0x24),
    ("RANDOM_NOTE_FROM_LINE_XX_YY", 0x25),
    ("NOTE_FROM_TRACK_XXYY", 0x26),
    ("RANDOM_NOTE_FROM_TRACK_XX_YY", 0x27),
    ("NOTE_FROM_LINE_XXYY_ON_TRACK_0", 0x28),
    ("RANDOM_NOTE_FROM_LINE_XX_YY_ON_TRACK_0", 0x29),
    ("STOP_PLAYING", 0x30),
    ("JUMP_TO_LINE_XXYY", 0x31),
    ("SET_JUMP_ADDRESS_MODE", 0x32),
    ("DELETE_EVENT_ON_TRACK_XX_WITH_PROBABILITY_YY", 0x38),
    ("CYCLIC_SHIFT_TRACK_DOWN_BY_YY_LINES", 0x39),
    ("GENERATE_NEW_ITERATION_OF_YY_LINE_POLYRHYTHM_ON_TRACK_XX", 0x3A),
    ("COPY_TRACK_XX_TO_PATTERN_NAMED_YY", 0x3B),
]
_PATTERN_EFFECT_MEMBERS += [
    ("DELAY_EVENT_FOR_0x%02X_PCT_OF_LINE" % i, 0x40 + i) for i in range(0x20)
]

PatternEffect = IntEnum("PatternEffect", _PATTERN_EFFECT_MEMBERS)
PatternEffect.__doc__ = """Effects available for the EE effect column."""
@attributes(slots=True)
class Note:
    """A single note event, for use within a :py:class:`Pattern`.

    The on-disk layout is five little-endian fields: note (u8),
    velocity (u8), module (u16), ctl (u16), val (u16) -- see
    :py:attr:`raw_data`.

    Bug fixed: the controller/effect/val_xx/val_yy setters used to
    OR the new value into ``ctl``/``val`` without clearing the target
    byte first, so assigning a second value merged the two instead of
    replacing the first.  Each setter now masks out its byte before
    writing.  ``project`` now returns None for a detached note instead
    of raising AttributeError, which makes the explicit
    PatternOwnershipError in ``mod`` reachable as intended.
    """

    # Note command; NOTECMD.EMPTY means "no event in this cell".
    note = attr(converter=NOTECMD, default=NOTECMD.EMPTY)
    # Velocity; 0 means "not set" (displayed value is vel - 1, see tabular_repr).
    vel = attr(converter=int, validator=in_range(0, 129), default=0)
    # 1-based module number; 0 means "no module" (see module_index).
    module = attr(converter=int, validator=in_range(0, 0xFFFF), default=0)
    # Packed controller (high byte) and effect (low byte).
    ctl = attr(converter=int, validator=in_range(0, 0xFFFF), default=0)
    # Packed effect parameter: XX (high byte) and YY (low byte).
    val = attr(converter=int, validator=in_range(0, 0xFFFF), default=0)
    # Back-reference to the owning Pattern (None while detached).
    pattern = attr(default=None)

    def __str__(self):
        # One letter per field followed by its integer value, e.g. "n49v0c0v0".
        # (Loop variable renamed: it used to shadow the imported attrs `attr`.)
        tokens = [
            "%s%i" % (name[0], getattr(self, name))
            for name in ["note", "vel", "ctl", "val"]
            if hasattr(self, name)
        ]
        return "".join(tokens)

    @property
    def project(self):
        """Project that owns our pattern, or None while detached."""
        return self.pattern.project if self.pattern is not None else None

    @property
    def module_index(self):
        """0-based index into project.modules, or None when no module is set."""
        return None if self.module == 0 else self.module - 1

    @property
    def mod(self):
        """The referenced Module instance, or None when no module is set
        (or the index is out of range).

        Raises PatternOwnershipError when the note's pattern is not
        attached to a project.
        """
        if self.project is None:
            raise PatternOwnershipError("Pattern not owned by a project")
        if self.module_index is None:
            return None
        elif self.module_index < len(self.project.modules):
            return self.project.modules[self.module_index]

    @mod.setter
    def mod(self, new_mod: Module):
        """Point this note at *new_mod*, which must already belong to a project."""
        if new_mod.parent is None:
            raise ModuleOwnershipError("Module must be attached to a project")
        self.module = new_mod.index + 1

    @property
    def controller(self):
        """Controller number: high byte of ctl."""
        return self.ctl >> 8

    @controller.setter
    def controller(self, value):
        # Replace (not OR-merge) the high byte so repeated assignment works.
        self.ctl = (self.ctl & 0x00FF) | ((value & 0xFF) << 8)

    @property
    def effect(self):
        """Effect number: low byte of ctl (see PatternEffect)."""
        return self.ctl & 0xFF

    @effect.setter
    def effect(self, value):
        # Replace (not OR-merge) the low byte so repeated assignment works.
        self.ctl = (self.ctl & 0xFF00) | (value & 0xFF)

    @property
    def val_xx(self):
        """XX parameter: high byte of val."""
        return self.val >> 8

    @val_xx.setter
    def val_xx(self, value):
        self.val = (self.val & 0x00FF) | ((value & 0xFF) << 8)

    @property
    def val_yy(self):
        """YY parameter: low byte of val."""
        return self.val & 0xFF

    @val_yy.setter
    def val_yy(self, value):
        self.val = (self.val & 0xFF00) | (value & 0xFF)

    @property
    def raw_data(self):
        """The note serialized to its 8-byte little-endian wire format."""
        return pack("<BBHHH", self.note, self.vel, self.module, self.ctl, self.val)

    @raw_data.setter
    def raw_data(self, raw_data):
        self.note, self.vel, self.module, self.ctl, self.val = unpack(
            "<BBHHH", raw_data
        )

    def clone(self):
        """Return a copy of this note's data fields (pattern link not copied)."""
        note = self.__class__()
        for name in ["note", "vel", "module", "ctl", "val"]:
            setattr(note, name, getattr(self, name))
        return note

    def is_empty(self):
        """True when every data field except module is zero/EMPTY."""
        return not (self.note or self.vel or self.ctl or self.val)

    def tabular_repr(self, is_on=False, note_fmt="NN VV MMMM CC EE XXYY"):
        """Render the note as a fixed-width tracker cell.

        Unset fields render as spaces; EMPTY renders as ".." (or "//"
        while *is_on*).  vel and module are displayed off-by-one, i.e.
        the stored 1-based values are shown 0-based in hex.
        """
        if self.note == NOTECMD.NOTE_OFF:
            nn = "=="
        elif self.note == NOTECMD.PREV_TRACK:
            nn = "<<"
        elif self.note == NOTECMD.SET_PITCH:
            nn = "SP"
        elif self.note == NOTECMD.EMPTY:
            nn = "//" if is_on else ".."
        else:
            nn = NOTECMD(self.note).name
        vv = "  " if self.vel == 0 else "{:02X}".format(self.vel - 1)
        mmmm = "    " if self.module == 0 else "{:04X}".format(self.module - 1)
        if self.controller or self.effect:
            cc = "{:02X}".format(self.controller)
            ee = "{:02X}".format(self.effect)
        else:
            cc = "  "
            ee = "  "
        if self.val_xx or self.val_yy:
            xx = "{:02X}".format(self.val_xx)
            yy = "{:02X}".format(self.val_yy)
        else:
            xx = "  "
            yy = "  "
        return (
            note_fmt.replace("NN", nn)
            .replace("VV", vv)
            .replace("MMMM", mmmm)
            .replace("CC", cc)
            .replace("EE", ee)
            .replace("XX", xx)
            .replace("YY", yy)
        )
|
from numpy import pi
def deg2rad(d):
    """Convert an angle *d* from degrees to radians."""
    turns = d / 180
    return turns * pi
def rad2deg(r):
    """Convert an angle *r* from radians to degrees."""
    turns = r / pi
    return turns * 180
"""Simulation of queuing models with simulus."""
from __future__ import absolute_import
import sys
if sys.version_info[:2] < (2, 8):
raise ImportError("QModels requires Python 2.8 and above (%d.%d detected)." %
sys.version_info[:2])
del sys
from .rng import *
from .mm1 import *
__version__ = '1.0.1'
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Author: shoumuzyq@gmail.com
# https://shoumu.github.io
# Created on 16/2/17 23:49
def increasing_triplet(nums):
    """Return True iff nums has indices i < j < k with
    nums[i] < nums[j] < nums[k] (LeetCode 334), else False.

    Greedy O(n) time / O(1) space: `small` is the smallest value seen so
    far; `big` is the smallest value known to have something smaller
    before it.  Any value exceeding both completes an increasing triplet.

    Bug fixed: the original used the magic sentinel 9999999999, which
    silently misses triplets whose values exceed it; float('inf') is
    correct for any input.
    """
    small = float('inf')
    big = float('inf')
    for num in nums:
        if num <= small:
            small = num
        elif num <= big:
            big = num
        else:
            return True
    return False


print(increasing_triplet([1, 0, 0, 0, 0, 1000]))
|
""" Prometheus Client Metrics request handler """
from prometheus_client import multiprocess
from prometheus_client import generate_latest, CollectorRegistry, CONTENT_TYPE_LATEST
from .collect import collect_metrics
from ..web import base
class MetricsHandler(base.RequestHandler):
    """Serves the latest Prometheus metrics in text exposition format."""
    # NOTE: Unauthenticated due to internal exposure only
    def get(self):
        """ Generate the latest metrics for the Prometheus Scraper """
        # Returns a WSGI application; the framework is expected to invoke
        # the callable with (environ, start_response).
        def metrics_handler(_, start_response):
            # Run metrics collection
            collect_metrics()
            # Fulfill the request.  A fresh registry plus
            # MultiProcessCollector aggregates metrics across all worker
            # processes (multiprocess mode).
            registry = CollectorRegistry()
            multiprocess.MultiProcessCollector(registry)
            data = generate_latest(registry)
            write = start_response('200 OK', [
                ('Content-Type', CONTENT_TYPE_LATEST),
                ('Content-Length', str(len(data)))
            ])
            write(data)
            return []
        return metrics_handler
|
#!/usr/bin/env python
# coding: utf-8

# In[18]:

import pandas as pd
import numpy as np
import tushare as ts
from time import sleep
import os

# NOTE(review): hard-coded API credential committed to source -- rotate this
# token and load it from an environment variable instead.
pro = ts.pro_api('7d1f3465439683e262b5b06a8aaefa886ea48aafe2cda73c130beb97')
#df = pro.trade_cal(exchange='', start_date='20180101', end_date='20181231')
#data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
#df = pro.daily(ts_code='000001.SZ,600000.SH', start_date='20180701', end_date='20180718')
#df = pro.daily(trade_date='20180810')
"""
2020年8月后创业板开通20%涨幅,算法要改,目前是20%算作2个板来计算最高板,卖法未完善,待改进
"""
# Suppress scientific notation in printed numbers.
np.set_printoptions(suppress=True)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
# All open trading days between 2015 and 2020.
trade_day = pro.trade_cal(exchange='', start_date='20150101', end_date='20201231')
trade_day = trade_day.loc[trade_day['is_open']==1,:]
trade_days = list(trade_day['cal_date'])
def get_limit(day):
    """Return the rows of stocks whose daily pct_chg exceeded 9.9% on *day*.

    *day* is a trade-date string like '20180810'; relies on the
    module-level tushare client `pro`.
    """
    df = pro.daily(trade_date=day)
    df = df.loc[df['pct_chg']>9.9,:]
    return df
# In[19]:
def add_limit_type(series):
    """Classify a limit-up row by comparing its turnover to the previous day's.

    *series* must carry 'ts_code' and 'trade_date'.  Turnover tripling vs
    the prior day classifies the board type; the Chinese labels are part of
    the data contract and are returned unchanged.
    """
    df = pro.daily(ts_code=series['ts_code'], end_date=series['trade_date']).head(15)
    #print(df)
    # A factor of 3 was empirically found to work best.
    if df['amount'][0]>df['amount'][1]*3:
        return "分歧板"
    elif df['amount'][0]*3<df['amount'][1]:
        return "加速板"
    else:
        return "一般板"
|
from django.conf.urls import patterns
from django.conf.urls import include
from django.conf.urls import url

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# NOTE(review): patterns() and string view references ('vpn_user.views.index')
# were deprecated in Django 1.8 and removed in 1.10.  Fine for the Django
# version this project pins, but on upgrade this must become a plain list
# of url()/path() entries with imported view callables.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'jimovpn.views.home', name='home'),
    # url(r'^jimovpn/', include('jimovpn.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
    url(r'^user/', include('vpn_user.urls')),
    url(r'^$', 'vpn_user.views.index')
)
|
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture |
from random import randint
import datetime

# Fill x with 100 random integers in [0, 10000], then selection-sort it
# in place.  Fixed vs the original: the Python-2-only `print x` statement
# (now print(x), valid in both 2 and 3), the unused `counter` variable,
# and the three-statement temp-variable swap (now a tuple swap).
x = [randint(0, 10000) for _ in range(100)]

for j in range(len(x)):
    # Find the index of the minimum of the unsorted suffix x[j:].
    indexofmin = j
    for i in range(j + 1, len(x)):
        if x[i] < x[indexofmin]:
            indexofmin = i
    # Swap the minimum into position j.
    x[j], x[indexofmin] = x[indexofmin], x[j]

print(x)
#imports
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
#locators and links
LOC_ADD_TO_BASKET_BUTTON = (By.CSS_SELECTOR, '[class*="btn-add-to-basket"]')
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/"
#test
def test_existance_of_basket_button(browser):
    """Open the product page and assert the add-to-basket button is present."""
    browser.get(link)
    #to check workability of language changing
    #by default this line of code is commented so you can check button search first
    #time.sleep(30)
    # Wait up to 5 seconds for the button to appear in the DOM.
    basket_button = WebDriverWait(browser, 5).until(
        EC.presence_of_element_located(LOC_ADD_TO_BASKET_BUTTON))
    #checks whether there is a basket button on a page
    assert basket_button is not None
from memnet1 import MemNet
def main():
    """Instantiate a MemNet model and print its layer structure."""
    print("===> Building model")
    network = MemNet(1, 64, 6, 6)
    print(network)


if __name__ == "__main__":
    main()
# Generated by Django 3.2.7 on 2021-10-05 02:15
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration creating the `solicitacoes` table."""

    initial = True

    dependencies = [
        ('reclamacoes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='solicitacoes',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): this default is the *fixed* timestamp captured
                # when makemigrations ran, not "now" at insert time.  The model
                # probably intended default=django.utils.timezone.now (a
                # callable) -- confirm against the model before relying on it.
                ('data_solicitada', models.DateTimeField(blank=True, default=datetime.datetime(2021, 10, 4, 23, 15, 29, 617247), null=True)),
                ('status_concluido', models.BooleanField(blank=True, default=False, null=True)),
                ('reclamacoes', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='Reclamacoes', to='reclamacoes.reclamacoes')),
            ],
            options={
                'db_table': 'solicitacoes',
            },
        ),
    ]
|
# -*- coding: utf-8 -*-
import nysol._nysolshell_core as n_core
from nysol.mcmd.nysollib.core import NysolMOD_CORE
from nysol.mcmd.nysollib import nysolutil as nutil
class Nysol_Mcal(NysolMOD_CORE):
    """nysol mcmd wrapper for the `mcal` command."""
    # Keyword parameter lists introspected from the native module.
    _kwd ,_inkwd ,_outkwd = n_core.getparalist("mcal",3)
    def __init__(self,*args, **kw_args) :
        """Build an mcal node from positional/keyword arguments."""
        super(Nysol_Mcal,self).__init__("mcal",nutil.args2dict(args,kw_args,Nysol_Mcal._kwd))


def mcal(self,*args, **kw_args):
    """Chain an mcal step after an existing node (installed as a method below)."""
    return Nysol_Mcal(nutil.args2dict(args,kw_args,Nysol_Mcal._kwd)).addPre(self)


# Monkey-patch mcal onto the core node class so every node can chain it.
setattr(NysolMOD_CORE, "mcal", mcal)
|
import numpy as np
import matplotlib.pyplot as plt
import math
import itertools
from fnc import *
def f(x):
    """The studied function: f(x) = 2**x * (x - 1)**2 - 2."""
    squared_shift = (x - 1) ** 2
    return (2 ** x) * squared_shift - 2
def f_d1(x):
    """First derivative of f."""
    log2 = np.log(2)
    return (2 ** x) * (-1 + x) * (2 - log2 + x * log2)
def f_d2(x):
    """Second derivative of f."""
    log2 = np.log(2)
    return (2 ** x) * (
        2 - 4 * log2 - 2 * x * (-2 + log2) * log2 + (log2 ** 2) + (x ** 2) * (log2 ** 2)
    )
# Interpolation-error experiment on [-a, a]: compare Hermite interpolation
# on equispaced vs Chebyshev nodes, and against Lagrange interpolation of
# matching degree.  E_*, Hermite and cheb come from the local fnc module.
a = 5
# The function and its first two derivatives, consumed by the fnc helpers.
F = [f, f_d1, f_d2]

print(
    ' E(H_p)\t| E_der(H_p)\t| E_der2(H_p)\t| E(H_Ch)\t| E_der(H_Ch)\t| E_der2(H_Ch) |'
)
print('-' * 97)

# Dense evaluation grid and 3 equispaced interpolation nodes.
x = np.linspace(-a, a, num=200)
X = np.linspace(-a, a, num=3)
print(float("{0:.10f}".format(E_Hermite(x, X, F))), '\t| ', end='')
print(float("{0:.10f}".format(E_Hermite_derivative(x, X, F))), '\t| ', end='')
print(float("{0:.10f}".format(E_Hermite_derivative2(x, X, F))), '\t| ', end='')
# Chebyshev nodes for comparison.
X_Ch = cheb(3, a)
print(float("{0:.10f}".format(E_Hermite(x, X_Ch, F))), '\t| ', end='')
print(float("{0:.10f}".format(E_Hermite_derivative(x, X_Ch, F))), '\t| ', end='')
print(float("{0:.10f}".format(E_Hermite_derivative2(x, X_Ch, F))), '\t|')

# Plot f against both Hermite interpolants.
X = np.linspace(-a, a, num=3)
x = np.linspace(-a, a, num=200)
X_Ch = cheb(3, a)
plt.plot(x, f(x), 'r', label='f(x)')
plt.plot(x, Hermite(x, X, F), 'b', label='H_p(x)')
plt.plot(x, Hermite(x, X_Ch, F), 'g', label='H_Ch(x)')
plt.legend()
plt.show()

# 2nd der
print(' E_der(H_p)\t| E_der(L_p)\t| E_der(H_Ch)\t| E_der(L_Ch)\t|')
print('-' * 65)
N = 3
X = np.linspace(-a, a, num=N)
print(float("{0:.9f}".format(E_Hermite_derivative2(x, X, F))),
      '\t| ',
      end='')
print(float("{0:.9f}".format(E_L_derivative2(x, X, F))), '\t| ', end='')
X_Ch = cheb(N, a)
print(float("{0:.9f}".format(E_Hermite_derivative2(x, X_Ch, F))),
      '\t| ',
      end='')
print(float("{0:.9f}".format(E_L_derivative2(x, np.array(X_Ch), F))), '\t|')

# Same comparison, with Lagrange given 2N-1 nodes so both interpolants
# have the same polynomial degree.
print(
    'deg\t| E_der2(H_p)\t| E_der2(L_p)\t| E_der2(H_Ch)\t| E_der2(L_Ch)\t|')
print('-' * 73)
N = 3
print(N * 2 - 1, '\t|', end=' ')
X = np.linspace(-a, a, num=N)
print(float("{0:.10f}".format(E_Hermite_derivative2(x, X, F))), '\t| ', end='')
X = np.linspace(-a, a, num=N * 2 - 1)
print(float("{0:.10f}".format(E_L_derivative2(x, X, F))), '\t| ', end='')
X_Ch = cheb(N, a)
print(float("{0:.10f}".format(E_Hermite_derivative2(x, X_Ch, F))),
      '\t| ',
      end='')
X_Ch = cheb(N * 2 - 1, a)
print(float("{0:.10f}".format(E_L_derivative2(x, np.array(X_Ch), F))), '\t|')
"""
Task 2
Input data:
Create a function which takes as input two dicts with structure mentioned above, then computes and returns the total price of stock.
"""
# Inventory on hand, per product.
stock = {
    "banana": 6,
    "apple": 0,
    "orange": 32,
    "pear": 15,
}

# Unit price per product.
prices = {
    "banana": 4,
    "apple": 2,
    "orange": 1.5,
    "pear": 3
}

# Total value of the stock: unit price times quantity, summed over products.
res = sum(prices[name] * quantity for name, quantity in stock.items())
print(res)
|
# Generated by Django 2.2.4 on 2019-08-10 10:49
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Make cart_item.product a required FK to chandler.Product."""

    dependencies = [
        ('chandler', '0002_auto_20190810_0929'),
    ]

    operations = [
        migrations.AlterField(
            model_name='cart_item',
            name='product',
            # default=1 + preserve_default=False: the default exists only to
            # backfill pre-existing rows during this migration.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='chandler.Product'),
            preserve_default=False,
        ),
    ]
|
subject = ['audio&speech', 'nlp', 'vision', 'recommend']
# ----------------------------------------
# TODO : Concatenate all subject name.
# >>> audio&speech nlp vision recommend
all_subject_name = ''.join(name + ' ' for name in subject)
print(all_subject_name)
# ----------------------------------------
if all_subject_name == 'audio&speech nlp vision recommend ':
    print('[+] Stage1 Success!\n')
else:
    print('[-] Stage1 Fail...\n')

grade = [4.5, 3.5, 4.0, 4.0]
# ----------------------------------------
# TODO : Compute average of grade. Use len() function.
avg = sum(grade) / len(grade)
print(avg)
# ----------------------------------------
if avg == 4:
    print('[+] Stage2 Success!\n')
else:
    print('[-] Stage2 Fail...\n')

grade = [4.5, 3.5, 4.0, 4.0]
# ----------------------------------------
# TODO : Change all grade to 4.5. Use range(), len() function.
grade = [4.5] * len(grade)
print(grade)
# ----------------------------------------
if grade == [4.5, 4.5, 4.5, 4.5]:
    print('[+] Stage3 Success!\n')
else:
    print('[-] Stage3 Fail...\n')

subject = ['audio&speech', 'nlp', 'vision', 'recommend']
grade = [4.5, 3.5, 4.0, 4.0]
# ----------------------------------------
# TODO : Print like this. Use zip() function.
# >>> audio&speech : 4.5/nlp : 3.5/vision : 4.0/recommend : 4.0/
string = ''.join(name + ' : ' + str(score) + '/'
                 for name, score in zip(subject, grade))
print(string)
# ----------------------------------------
if string == 'audio&speech : 4.5/nlp : 3.5/vision : 4.0/recommend : 4.0/':
    print('[+] Stage4 Success!\n')
else:
    print('[-] Stage4 Fail...\n')

grade = {'audio&speech': 4.5, 'nlp': 3.5, 'vision': 4.0, 'recommend': 4.0}
# ----------------------------------------
# TODO : Compute average of grade. Use values(), len() function.
avg = sum(grade.values()) / len(grade)
print(avg)
# ----------------------------------------
if avg == 4:
    print('[+] Stage5 Success!\n')
else:
    print('[-] Stage5 Fail...\n')

# all_grade is grade of two students.
all_grade = [{'audio&speech': 4.5, 'nlp': 3.5, 'vision': 4.0, 'recommend': 4.0}, {'audio&speech': 2.5, 'nlp': 3.0, 'vision': 4.5, 'recommend': 4.5}]
avg_grade = {'audio&speech': 0, 'nlp': 0, 'vision': 0, 'recommend': 0}
# ----------------------------------------
# TODO : Compute average of each subject.
# {'audio&speech':3.5, 'nlp':3.25, 'vision':4.25, 'recommend':4.25}
for name in avg_grade:
    for student in all_grade:
        avg_grade[name] += student[name] / len(all_grade)
print(avg_grade)
# ----------------------------------------
if str(avg_grade) == '{\'audio&speech\': 3.5, \'nlp\': 3.25, \'vision\': 4.25, \'recommend\': 4.25}':
    print('[+] Stage6 Success!\n')
else:
    print('[-] Stage6 Fail...\n')
# Generated by Django 3.1.4 on 2021-01-12 04:28
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the Order and OrderItem models."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('products', '0003_auto_20210108_1213'),
        ('order', '0002_checkout_zipcode'),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_status', models.CharField(choices=[('PENDING', 'Pending'), ('ONWAY', 'On the Way'), ('DONE', 'Done')], max_length=50)),
                ('checkout', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='order.checkout')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='OrderItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_qty', models.PositiveIntegerField()),
                ('price', models.PositiveIntegerField()),
                ('order_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='order.order')),
                ('product_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.product')),
            ],
        ),
    ]
|
def SubArray(arr, s, n):
    """Print the 0-based, inclusive start/end indices of the first
    contiguous subarray of arr[:n] that sums to s and return 1.

    Prints 'no SubArray found:' and returns None when no such subarray
    exists.

    Bug fixed: the original compared the running sum *before* adding
    arr[j], so a single-element match at the last index (e.g. arr=[5],
    s=5) was never detected.  Adding first, then testing, covers every
    window ending at j.
    """
    for start in range(n):
        running = 0
        for end in range(start, n):
            running += arr[end]
            if running == s:
                print(start, end)
                return 1
    print('no SubArray found:')
# Interactive driver: first line is the number of test cases; each case
# supplies "n s" then the n array elements on one line.
t = int(input('number of test cases:::'))
for k in range(t):
    n, s = input("num of elements,sum::").split()
    n = int(n)
    s = int(s)
    arr = [int(i) for i in input().split()]
    SubArray(arr, s, n)
'''
suppose the sum is 33 and the array is: [1 2 10 3 20 5]
so when i pointing to 10
then j starts from 3 upto 5
when curr_sum=10+3
i pointing to 10 and j is pointing 3
next time again i pointing 10 and j pointing 20
when next time itterates j is pointing 5 bcz for loop is incrementing j value
and then if conditon satisfies..... then return
so when we have to print the sub array's index j must be decremented by 1
bcz j is pointing at 5 But the correct index is 2-4
so, i will remain same and j must be j=j-1
so, print(i,j-1)
''' |
import os
import pwd
import StringIO
import pandas as pd
from flask import Flask, render_template, make_response
import psycopg2
import psycopg2.extras
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.dates import DateFormatter
from matplotlib.figure import Figure
from sqlalchemy import create_engine
app = Flask(__name__)
def get_conn():
    """Open a psycopg2 connection via local peer authentication (empty DSN)."""
    # The raspberry pi has been set up to allow peer authentication locally, and we've created a database
    # and a role with the same name as the linux user we're running this script as. Therefore we can use an
    # empty connection string.
    # See for details: http://initd.org/psycopg/docs/module.html#psycopg2.connect
    return psycopg2.connect('')
@app.route('/')
def index():
    """Overview page: min/avg/max ping time per destination, last hour."""
    with get_conn() as conn:
        cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        destination_overview_query = """
            SELECT
                destination,
                min(pingtime),
                round(avg(pingtime), 2) AS avg,
                max(pingtime)
            FROM
                pings
            WHERE
                recorded_at > now() - INTERVAL '1 hour'
            GROUP BY
                destination;
        """
        cur.execute(destination_overview_query)
        destinations = cur.fetchall()
    return render_template('index.html', destinations=destinations)
@app.route('/graphs/<destination>')
def graph(destination):
    """PNG plot of min/avg/max ping time to *destination*, bucketed into
    10-minute intervals over the last 3 hours."""
    with get_conn() as conn:
        cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
        # generate_series yields the 10-minute bucket boundaries; pings are
        # LEFT JOINed into their bucket so empty buckets still show up.
        destination_history_query = """
            WITH intervals AS (
                SELECT
                    begin_time,
                    LEAD(begin_time)
                    OVER (
                        ORDER BY begin_time ) AS end_time
                FROM
                    generate_series(
                        now() - INTERVAL '3 hours',
                        now(),
                        INTERVAL '10 minutes'
                    ) begin_time
            )
            SELECT
                i.begin_time AT TIME ZONE 'Europe/Berlin' AS begin_time,
                i.end_time AT TIME ZONE 'Europe/Berlin' AS end_time,
                p.destination,
                count(p.pingtime),
                round(avg(p.pingtime),2) AS avg,
                max(p.pingtime),
                min(p.pingtime)
            FROM intervals i LEFT JOIN pings p
                ON p.recorded_at >= i.begin_time AND
                   p.recorded_at < i.end_time
            WHERE
                i.end_time IS NOT NULL
                AND destination = %s
            GROUP BY i.begin_time, i.end_time, p.destination
            ORDER BY i.begin_time ASC;
        """
        cur.execute(destination_history_query, (destination,))
        times = cur.fetchall()
    # One line per aggregate, all sharing the bucket start times as x.
    fig = Figure()
    ax = fig.add_subplot(111)
    begin_times = [row['begin_time'] for row in times]
    maxs = [row['max'] for row in times]
    ax.plot_date(
        x=begin_times,
        y=maxs,
        label='max',
        linestyle='solid'
    )
    avgs = [row['avg'] for row in times]
    ax.plot_date(
        x=begin_times,
        y=avgs,
        label='avg',
        linestyle='solid'
    )
    mins = [row['min'] for row in times]
    ax.plot_date(
        x=begin_times,
        y=mins,
        label='min',
        linestyle='solid'
    )
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
    ax.set_xlabel('Time')
    ax.set_ylabel('Round Trip (ms)')
    ax.set_ylim(bottom=0)
    ax.legend()
    # Output plot as PNG (rendered in memory, no temp file)
    # canvas = FigureCanvasAgg(fig)
    png_output = StringIO.StringIO()
    # canvas.print_png(png_output, transparent=True)
    fig.set_canvas(FigureCanvasAgg(fig))
    fig.savefig(png_output, transparent=True)
    response = make_response(png_output.getvalue())
    response.headers['content-type'] = 'image/png'
    return response
@app.route('/pandas/<destination>')
def pandas(destination):
    """Render the same min/avg/max graph as /graphs/<destination>, computed
    with pandas resampling instead of SQL window intervals.

    Bug fixed: the query hard-coded destination='jetbrains.com' and
    ignored the URL parameter entirely.  The destination is now bound as
    a query parameter (never interpolated into the SQL -- the URL segment
    is untrusted input).
    """
    engine = create_engine('postgres:///pi')
    with engine.connect() as conn, conn.begin():
        data = pd.read_sql_query(
            "select recorded_at, pingtime from pings "
            "where recorded_at > now() - interval '3 hours' "
            "and destination = %(dest)s;",
            conn,
            params={'dest': destination},
        )
    engine.dispose()
    df = data.set_index(pd.DatetimeIndex(data['recorded_at']))
    # We have this information in the index now, so let's drop it
    del df['recorded_at']
    # 10-minute buckets, min/mean/max of pingtime per bucket.
    result = df.resample('10T').agg(['min', 'mean', 'max'])
    fig = Figure()
    ax = fig.add_subplot(111)
    ax.plot(
        result.index,
        result['pingtime', 'max'],
        label='max',
        linestyle='solid'
    )
    ax.plot_date(
        result.index,
        result['pingtime', 'mean'],
        label='avg',
        linestyle='solid'
    )
    ax.plot_date(
        result.index,
        result['pingtime', 'min'],
        label='min',
        linestyle='solid'
    )
    ax.xaxis.set_major_formatter(DateFormatter('%H:%M'))
    ax.set_xlabel('Time')
    ax.set_ylabel('Round Trip (ms)')
    ax.set_ylim(bottom=0)
    ax.legend()
    # Output plot as PNG (rendered in memory, no temp file)
    png_output = StringIO.StringIO()
    fig.set_canvas(FigureCanvasAgg(fig))
    fig.savefig(png_output, transparent=True)
    response = make_response(png_output.getvalue())
    response.headers['content-type'] = 'image/png'
    return response
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
|
from setuptools import setup
# Packaging metadata: this distribution exists purely to pull in a broad
# ML dependency stack in one install.
setup(
    name='jb-everything',
    version='0.1.0',
    packages=['jb_everything'],
    url='https://pypi.org/project/jb-everything/',
    license='MIT',
    author='jeromebaum',
    author_email='jerome@jeromebaum.com',
    description='Large list of imports for machine learning.',
    # Compatible-release (~=) pins keep installs within each library's
    # tested minor series.
    install_requires=[
        'numpy~=1.19.4',
        'scipy~=1.5.3',
        'scikit-learn~=0.23.2',
        'pandas~=1.1.4',
        'lightgbm~=3.1.0',
        'matplotlib~=3.3.3',
        'seaborn~=0.11.0',
        'xarray~=0.16.2',
        'missingno~=0.4.2',
        'dabl~=0.1.9',
        'eli5~=0.10.1',
        'dask~=2.30.0',
        'jax~=0.2.7',
        'requests~=2.25.0',
    ],
)
|
import pysam
import sys
import pdb
import os
#this extracts reads that map to a valid cell (from a given list of valid barcodes) from a sorted and indexed .bam and writes them to a .sam file
#Author: Anika Neuschulz
# CLI arguments: input .bam, valid-barcode list, and whether the barcode
# file was produced by cellranger (Y strips the trailing "-1" suffix).
if len(sys.argv) == 4:
    ifn = sys.argv[1]
    cell_barcode_file = sys.argv[2]
    mapper = sys.argv[3]
else:
    print("Please provide me with (1st) a .bam file to extract reads from, (2nd) a file with valid barcodes and (3rd) if a barcode-file generated by cellranger is used for STAR mapped data (Y/N).")
    sys.exit()

scriptpath = os.path.abspath(sys.argv[0])
scriptdir = "/".join(scriptpath.split("/")[:-1])

#this is a set instead of a list because that is faster for membership tests
# NOTE(review): the file handle opened here is never closed explicitly.
cell_barcodes = {line.rstrip("\n") for line in open(cell_barcode_file)}

bamfile = pysam.AlignmentFile(ifn, "rb")
# Output name: replace the ".bam" suffix with "_actual_cells.sam".
ofn = ifn[:-4] + "_actual_cells.sam"
reads_out = pysam.AlignmentFile(ofn, "w", template = bamfile)

reads_written = 0
sys.stdout.write('0 reads written')

#we do this check here, so it does not run for every single read, for CR trailing -1 has to be removed
if mapper == "Y":
    for read in bamfile.fetch():
        try:
            if read.get_tag("CB")[:-2] in cell_barcodes:
                reads_out.write(read)
                reads_written+=1
                sys.stdout.write('\r' + str(reads_written) + ' reads written.')
        except KeyError:
            # read carries no CB tag (barcode could not be matched to a valid cell)
            pass
else:
    for read in bamfile.fetch():
        try:
            if read.get_tag("CB") in cell_barcodes:
                reads_out.write(read)
                reads_written+=1
                sys.stdout.write('\r' + str(reads_written) + ' reads written.')
        except KeyError:
            # read carries no CB tag (barcode could not be matched to a valid cell)
            pass

reads_out.close()
|
import sys
if __name__ == "__main__":
rl = lambda : sys.stdin.readline()
d = []
for _ in xrange(int(rl())):
input = rl().split()
n, str = int(input[0]), input[1]
d.append(str[:n-1]+str[n:])
for i, v in enumerate(d):
print i+1, v
|
class Mount:
    """A source-to-destination mount pair for a build script."""

    def __init__(self, src, dst):
        # Where the mount comes from, and where it should appear.
        self.src = src
        self.dst = dst

    def build(self, script):
        """Register this mount on *script* via its mount() hook."""
        script.mount(self.src, self.dst)
import numpy as np
import pandas as pd
# import glob
import re
import csv
from rank_bm25 import BM25Okapi
# from nltk.corpus import stopwords
# from nltk.tokenize import word_tokenize
# from sklearn.cluster import KMeans
from modules.functions import ensureUtf
from modules.functions import clean_text
from modules.functions import tokenize_cleaned_text
#Build Corpus dictionnary to process BM25 Algo
#Build Corpus dictionnary to process BM25 Algo
def build_corpus_dict(result_dict):
    """Turn the scraper result dict into a corpus dict for BM25.

    'content' holds the searchable text (cleaned value tokens plus the
    cleaned label, stripped); the remaining columns are copied through
    row by row from *result_dict*.
    """
    passthrough_keys = ('id', 'label', 'value_original',
                        'value_clean_tokens', 'value_clean', 'seed_url')
    corpus_dict = {'content': []}
    for key in passthrough_keys:
        corpus_dict[key] = []
    for row in range(len(result_dict['id'])):
        text = (' '.join(result_dict['value_clean_tokens'][row])
                + ' ' + clean_text(result_dict['label'][row]))
        corpus_dict['content'].append(text.strip())
        for key in passthrough_keys:
            corpus_dict[key].append(result_dict[key][row])
    return corpus_dict
def bm25ranker(result_dict, keywords, BM25_threshold, tolerance, nb_max_result):
    """Rank scraped documents against *keywords* with Okapi BM25.

    Builds a corpus via build_corpus_dict, scores every document, keeps
    those above *BM25_threshold*, normalizes the kept scores to sum to 1,
    and returns at most *nb_max_result* rows as a dict with 'label',
    'doc_scores_corpus_normalized', 'seed_url' and 'value_clean' lists.

    NOTE: *tolerance* is currently unused -- the get_significant_values
    filter is disabled below and every normalized score is treated as
    significant.
    """
    corpus = build_corpus_dict(result_dict)
    dict_bm25ranker = {}
    # Whitespace tokenization mirrors the 'content' format produced by
    # build_corpus_dict.
    dict_bm25ranker['tokenized_corpus'] = [doc.split(" ") for doc in corpus['content']]
    dict_bm25ranker['bm25'] = BM25Okapi(dict_bm25ranker['tokenized_corpus'])
    keywords = keywords.strip()
    dict_bm25ranker['tokenized_query'] = keywords.split(" ")
    dict_bm25ranker['doc_scores'] = dict_bm25ranker['bm25'].get_scores(dict_bm25ranker['tokenized_query'])
    dict_bm25ranker['top_n'] = dict_bm25ranker['bm25'].get_top_n(dict_bm25ranker['tokenized_query'], corpus['content'], n=100)
    dict_bm25ranker['id'] = []
    dict_bm25ranker['label'] = []
    dict_bm25ranker['value_original'] = []
    dict_bm25ranker['value_clean_tokens'] = []
    dict_bm25ranker['value_clean'] = []
    dict_bm25ranker['doc_scores_corpus'] = []
    dict_bm25ranker['sum_doc_scores_corpus'] = -1
    dict_bm25ranker['seed_url'] = []
    # Map each top-N content string back to its corpus row by text equality
    # (O(n^2)), keeping only rows whose raw score clears the threshold.
    for i in range(len(dict_bm25ranker['top_n'])):
        for j in range(len(corpus['id'])):
            if(dict_bm25ranker['top_n'][i]==corpus['content'][j]):
                if(dict_bm25ranker['doc_scores'][j] > BM25_threshold):
                    dict_bm25ranker['id'].append(corpus['id'][j])
                    dict_bm25ranker['label'].append(corpus['label'][j])
                    dict_bm25ranker['value_original'].append(corpus['value_original'][j])
                    dict_bm25ranker['value_clean_tokens'].append(corpus['value_clean_tokens'][j])
                    dict_bm25ranker['value_clean'].append(corpus['value_clean'][j])
                    dict_bm25ranker['doc_scores_corpus'].append(dict_bm25ranker['doc_scores'][j])
                    dict_bm25ranker['seed_url'].append(corpus['seed_url'][j])
    dict_bm25ranker['sum_doc_scores_corpus'] = sum(dict_bm25ranker['doc_scores_corpus'])
    # Normalize the retained scores so they sum to 1.
    dict_bm25ranker['doc_scores_corpus_normalized'] = []
    if(dict_bm25ranker['sum_doc_scores_corpus'] != -1):
        for i in range(len(dict_bm25ranker['doc_scores_corpus'])):
            dict_bm25ranker['doc_scores_corpus_normalized'].append(dict_bm25ranker['doc_scores_corpus'][i]/dict_bm25ranker['sum_doc_scores_corpus'])
    else:
        print('Error in ranker!')
    # significant_values_list = get_significant_values(dict_bm25ranker['doc_scores_corpus_normalized'], tolerance)
    # Significance filtering is disabled: every normalized score passes.
    significant_values_list = dict_bm25ranker['doc_scores_corpus_normalized']
    dict_filtered_bm25ranker = {}
    dict_filtered_bm25ranker['label'] = []
    dict_filtered_bm25ranker['doc_scores_corpus_normalized'] = []
    dict_filtered_bm25ranker['seed_url'] = []
    dict_filtered_bm25ranker['value_clean'] = []
    # Keep at most nb_max_result rows whose score appears in the
    # significant list.
    for i in range(len(dict_bm25ranker['doc_scores_corpus_normalized'])):
        if(i == (nb_max_result)):
            break
        else:
            for j in range(len(significant_values_list)):
                if(dict_bm25ranker['doc_scores_corpus_normalized'][i] == significant_values_list[j]):
                    dict_filtered_bm25ranker['label'].append(dict_bm25ranker['label'][i])
                    dict_filtered_bm25ranker['doc_scores_corpus_normalized'].append(dict_bm25ranker['doc_scores_corpus_normalized'][i])
                    dict_filtered_bm25ranker['seed_url'].append(dict_bm25ranker['seed_url'][i])
                    joiner = ' '
                    current_value_clean = joiner.join(dict_bm25ranker['value_clean_tokens'][i])
                    dict_filtered_bm25ranker['value_clean'].append(current_value_clean)
                    break
    return dict_filtered_bm25ranker
def get_significant_values(value_list, tolerance):
    """Return the leading run of values whose successive drops stay below tolerance.

    Walks value_list (assumed sorted descending) and keeps prepending the next
    value while value_list[i] - value_list[i+1] < tolerance; stops at the first
    gap >= tolerance. A single-element list is returned as-is; an empty list
    yields an empty result.
    """
    significant_value_list = []
    if len(value_list) == 1:
        # Only one candidate: it is significant by definition.
        return [value_list[0]]
    # Iterate over adjacent pairs only. The original looped over
    # range(len(value_list)) while reading value_list[i + 1], which raised
    # IndexError on the last iteration whenever every gap was under tolerance.
    for i in range(len(value_list) - 1):
        current_diff = value_list[i] - value_list[i + 1]
        if i == 0:
            if current_diff < tolerance:
                # First pair is close: both values are significant.
                significant_value_list.append(value_list[i])
                significant_value_list.append(value_list[i + 1])
            else:
                # First gap already too large: only the top value qualifies.
                significant_value_list.append(value_list[i])
                break
        else:
            if current_diff < tolerance:
                significant_value_list.append(value_list[i + 1])
            else:
                # Gap reached: the significant prefix ends here.
                break
    return significant_value_list
def gen_sentence_for_classifier(dict_bm25ranker, query):
    """Build a DataFrame of '[CLS] query [SEP] candidate' sentence pairs.

    Each ranked candidate in dict_bm25ranker becomes one row with a dummy
    label (0), its normalized BM25 score, and its seed URL.
    """
    query_clean_tokens = tokenize_cleaned_text(clean_text(query))
    query_clean = " ".join(query_clean_tokens)
    print('gen_sentence_for_classifier: query: {}'.format(query))
    print('gen_sentence_for_classifier: query_clean: {}\n'.format(query_clean))
    # Column order here fixes the DataFrame column order.
    rows = {'sentence': [], 'label': [], 'BM25_score': [], 'seed_url': []}
    scores = dict_bm25ranker['doc_scores_corpus_normalized']
    for idx, score in enumerate(scores):
        pair = '[CLS] ' + query_clean + ' [SEP] ' + dict_bm25ranker['value_clean'][idx]
        rows['sentence'].append(pair)
        rows['label'].append(0)
        rows['BM25_score'].append(score)
        rows['seed_url'].append(dict_bm25ranker['seed_url'][idx])
    return pd.DataFrame(rows)
|
import pandas as pd
import numpy as np
import tensorflow as tf
FEATURES = ["PassengerId" ,"Pclass","Name","Sex","Age","SibSp","Parch","Ticket","Fare","Cabin","Embarked"]
LABEL = "Survived"
FILE_TRAIN = "titanic_train.csv"
FILE_TEST = "titanic_test.csv"
FILE_PREDICT = "titanic_predictions.csv"
def input_fn(file_path, repeat_count=1, shuffle=False):
    """Build a batched (features, label) pipeline from a Titanic CSV file.

    Skips the header row, parses each line, optionally shuffles, repeats
    repeat_count times, and batches 32 rows at a time.
    """

    def _parse_line(line):
        # Per-column defaults double as the column dtypes; the label
        # (Survived) is column index 1 and is split out of the features.
        defaults = [[0], [0], [0], [""], ["unknown"], [0.], [0], [0], [""], [0.], [""], ["U"]]
        columns = tf.decode_csv(line, defaults)
        label = columns[1]
        del columns[1]
        return dict(zip(FEATURES, columns)), label

    dataset = tf.data.TextLineDataset(file_path).skip(1).map(_parse_line)
    if shuffle:
        dataset = dataset.shuffle(buffer_size=256)
    dataset = dataset.repeat(repeat_count).batch(32)
    features, labels = dataset.make_one_shot_iterator().get_next()
    return features, labels
def predict_input_fn():
    """Input function for prediction: feeds FILE_PREDICT rows with no labels."""
    # Use the FILE_PREDICT constant instead of duplicating the filename
    # literal, so changing the constant changes every reader.
    prediction_set = pd.read_csv(FILE_PREDICT)
    return tf.estimator.inputs.pandas_input_fn(
        x=prediction_set,
        num_epochs=1,
        shuffle=False)
def main():
    """Train, evaluate, and predict with a DNN classifier on the Titanic data."""
    # Continuous (numeric) feature columns.
    Pclass = tf.feature_column.numeric_column("Pclass")
    Age = tf.feature_column.numeric_column("Age")
    SibSp = tf.feature_column.numeric_column("SibSp")
    Parch = tf.feature_column.numeric_column("Parch")
    # Categorical columns with fixed vocabularies ("unknown"/"U" cover the
    # defaults substituted by input_fn for missing values).
    Sex = tf.feature_column.categorical_column_with_vocabulary_list(
        "Sex", ["female", "male", "unknown"])
    Embarked = tf.feature_column.categorical_column_with_vocabulary_list(
        "Embarked", ["C", "Q", "S", "U"])
    # Bucketize age so the DNN sees age ranges rather than a raw number.
    age_buckets = tf.feature_column.bucketized_column(Age,
        boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
    # Note: raw Age is only used through age_buckets; categorical columns are
    # wrapped in indicator (one-hot) columns as required by DNNClassifier.
    feature_cols = [Pclass,
        SibSp,
        Parch,
        tf.feature_column.indicator_column(Sex),
        tf.feature_column.indicator_column(Embarked),
        age_buckets]
    # Two hidden layers of 10 units; binary survival classification.
    m = tf.estimator.DNNClassifier(
        model_dir="/tmp/titanicDNN",
        feature_columns=feature_cols,
        hidden_units=[10, 10],
        n_classes=2)
    # Train for 2000 steps over 8 shuffled epochs of the training file.
    m.train(input_fn=lambda: input_fn(FILE_TRAIN, 8, True),
        steps=2000)
    # Evaluate a single pass over the held-out test file.
    evaluation_results = m.evaluate(input_fn=lambda: input_fn(FILE_TEST, 1, False))
    for key in evaluation_results:
        print("   {} was: {}".format(key, evaluation_results[key]))
    # predict_input_fn() returns the callable produced by pandas_input_fn,
    # which is what Estimator.predict expects as input_fn.
    predictions = m.predict(input_fn=predict_input_fn())
    for idx, prediction in enumerate(predictions):
        survived = prediction["class_ids"][0]
        if survived:
            print("Survived")
        else:
            print("Died")
# Script entry point.
if __name__ == "__main__":
    main()
|
# Generated by Django 2.2.12 on 2020-05-02 19:10
from django.db import migrations, models
import src.incidents.models
import uuid
class Migration(migrations.Migration):
    """Auto-generated schema migration for the incidents app.

    Drops a batch of election-era fields from Incident and Reporter, adds
    contact/locale fields to Recipient and Reporter, and switches Reporter's
    primary key to a UUID. Generated by Django 2.2.12 — do not hand-edit the
    operations; create a follow-up migration instead.
    """

    dependencies = [
        ('incidents', '0047_cannedresponse_sendcannedresponseworkflow'),
    ]

    operations = [
        # Remove obsolete election-related fields from Incident.
        migrations.RemoveField(
            model_name='incident',
            name='di_division',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='election',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='letter_date',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='police_division',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='police_station',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='polictical_party',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='polling_division',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='polling_station',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='received_date',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='severity',
        ),
        migrations.RemoveField(
            model_name='incident',
            name='ward',
        ),
        # Remove accused/affiliation fields from Reporter.
        migrations.RemoveField(
            model_name='reporter',
            name='accused_name',
        ),
        migrations.RemoveField(
            model_name='reporter',
            name='accused_political_affiliation',
        ),
        migrations.RemoveField(
            model_name='reporter',
            name='political_affiliation',
        ),
        migrations.RemoveField(
            model_name='reporter',
            name='unique_id',
        ),
        # New fields on Incident.
        migrations.AddField(
            model_name='incident',
            name='language',
            field=models.CharField(choices=[('SINHALA', 'Sinhala'), ('TAMIL', 'Tamil'), ('ENGLISH', 'English')], default=src.incidents.models.LanguageType('English'), max_length=10),
        ),
        migrations.AddField(
            model_name='incident',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        # New contact fields on Recipient.
        migrations.AddField(
            model_name='recipient',
            name='nic',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='recipient',
            name='title',
            field=models.CharField(blank=True, choices=[('MR', 'Mr'), ('MRS', 'Mrs'), ('MS', 'Ms'), ('MISS', 'Miss'), ('DR', 'Dr'), ('PROFESSOR', 'Professor')], max_length=10, null=True),
        ),
        # New locale/contact fields on Reporter.
        migrations.AddField(
            model_name='reporter',
            name='city',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='reporter',
            name='district',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='reporter',
            name='gn_division',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AddField(
            model_name='reporter',
            name='location',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='reporter',
            name='nic',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AddField(
            model_name='reporter',
            name='title',
            field=models.CharField(blank=True, choices=[('MR', 'Mr'), ('MRS', 'Mrs'), ('MS', 'Ms'), ('MISS', 'Miss'), ('DR', 'Dr'), ('PROFESSOR', 'Professor')], max_length=10, null=True),
        ),
        migrations.AddField(
            model_name='reporter',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        # Switch Reporter's primary key to a UUID.
        migrations.AlterField(
            model_name='reporter',
            name='id',
            field=models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False),
        ),
        migrations.AlterField(
            model_name='reporter',
            name='reporter_type',
            field=models.CharField(blank=True, choices=[('INDIVIDUAL', 'Individual'), ('ORGANIZATION', 'Organization')], default=src.incidents.models.ContactType('Individual'), max_length=50, null=True),
        ),
    ]
|
# -*- coding: utf-8 -*-
"""
@author: Duy Anh Philippe Pham
@date: 14/05/2021
@version: 2.00
@recommendation: Python 3.7
@revised: 11/06/21
@purpose: advanced barycentre comparison
    - Compares the barycentre with the mean subject by comparing the
      positions of their local maxima.
"""
import numpy as np
import sys
import glob
from skimage.feature import peak_local_max
import matplotlib.pylab as plt
sys.path.insert(1,'../libs')
import tools, display
# Hemisphere selector and the input/output directory layout derived from it.
hemi='L'
source1='../../barycentre/'+hemi+'/'
source2='../../data/'+hemi+'/'
variables='../../variables/'+hemi+'/'
destination='../../variables/barycentre/local_max/'+hemi
size=9  # adapt if necessary; depends on the origin of the data
# Parameters for the peak detection on the mean density map.
i=0
min_distance=1
percent=90  # percentile threshold applied to the density values
grid_size=101
if False:
    # Disabled step: accumulate the pseudo-density of every subject in
    # source2 into a single normalized mean map and save it as 'mean'.
    dataX=None
    for np_name in glob.glob(str(source2)+'*.np[yz]'):
        X=np.load(np_name)
        _,_,data=tools.estimate_pseudo_density(X,grid_size)
        if dataX is None:
            dataX=data
        else:
            dataX=dataX+data
    dataX=dataX/np.max(dataX)
    tools.save_value(dataX,'mean',destination)
if True:
    # Load the mean map, keep only values above the chosen percentile, find
    # the local maxima, save their coordinates, and plot them over the map.
    data=np.load(destination+'/mean'+'.npy')
    coord = peak_local_max((data>np.percentile(data, percent))*data, min_distance)
    tools.save_value(coord,'mean_coord',destination)
    plt.figure()
    extent = (0,grid_size-1 , 0,grid_size-1)
    plt.imshow(data,cmap=plt.cm.magma_r,origin='lower',extent=extent)
    plt.autoscale(False)
    plt.plot(coord[:, 1], coord[:, 0], 'g.')
    plt.axis('on')
    plt.xlabel('Precentral gyral crest scaled to 100')
    plt.ylabel('Postcentral gyral crest scaled to 100')
    plt.colorbar()
    plt.grid(linestyle = '--', linewidth = 0.5,alpha=0.5, which='major')
    plt.title(hemi+' hemisphere')
    tools.save_fig('mean',destination)
# -*- coding: utf-8 -*-
#
# * Copyright (c) 2009-2015. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
__author__ = "Marée Raphael <raphael.maree@ulg.ac.be>"
__contributors__ = ["Stévens Benjamin <b.stevens@ulg.ac.be>"]
__copyright__ = "Copyright 2010-2015 University of Liège, Belgium, http://www.cytomine.be/"
import cytomine
# Connection settings — placeholders that must be filled in before running.
cytomine_host = "XXX"
cytomine_public_key = "XXX"
cytomine_private_key = "XXX"
# NOTE(review): bare XXX is a placeholder identifier, not a string — this line
# raises NameError until it is replaced with a real numeric project id.
id_project = XXX
# Connection to Cytomine Core.
conn = cytomine.Cytomine(cytomine_host, cytomine_public_key, cytomine_private_key, base_path='/api/',
                         working_path='/tmp/', verbose=True)
# Register the segmentation-prediction software and declare its parameter
# template: (name, software id, type, default, required, index, set-by-server).
software = conn.add_software("Segmentation_Model_Predict", "pyxitSuggestedTermJobService", "ValidateAnnotation")
conn.add_software_parameter("cytomine_zoom_level", software.id, "Number", 0, True, 10, False)
conn.add_software_parameter("pyxit_target_width", software.id, "Number", 24, True, 20, False)
conn.add_software_parameter("pyxit_target_height", software.id, "Number", 24, True, 30, False)
conn.add_software_parameter("pyxit_colorspace", software.id, "Number", 2, True, 40, False)
conn.add_software_parameter("cytomine_tile_size", software.id, "Number", 512, True, 50, False)
conn.add_software_parameter("cytomine_tile_min_stddev", software.id, "Number", 5, True, 60, False)
conn.add_software_parameter("cytomine_tile_max_mean", software.id, "Number", 250, True, 70, False)
conn.add_software_parameter("cytomine_startx", software.id, "Number", 0, True, 80, False)
conn.add_software_parameter("cytomine_starty", software.id, "Number", 0, True, 90, False)
conn.add_software_parameter("cytomine_roi_term", software.id, "Number", 0, True, 100, False)
conn.add_software_parameter("cytomine_predict_step", software.id, "Number", 4, True, 110, False)
conn.add_software_parameter("cytomine_min_size", software.id, "Number", 1000, True, 120, False)
conn.add_software_parameter("cytomine_max_size", software.id, "Number", 10000000, True, 130, False)
conn.add_software_parameter("cytomine_union_min_length", software.id, "Number", 10, True, 140, False)
conn.add_software_parameter("cytomine_union_bufferoverlap", software.id, "Number", 5, True, 150, False)
conn.add_software_parameter("cytomine_union_area", software.id, "Number", 5000, True, 160, False)
conn.add_software_parameter("cytomine_union_min_point_for_simplify", software.id, "Number", 1000, True, 170, False)
conn.add_software_parameter("cytomine_union_min_point", software.id, "Number", 500, True, 180, False)
conn.add_software_parameter("cytomine_union_max_point", software.id, "Number", 1000, True, 190, False)
conn.add_software_parameter("cytomine_union_nb_zones_width", software.id, "Number", 5, True, 200, False)
conn.add_software_parameter("cytomine_union_nb_zones_height", software.id, "Number", 5, True, 210, False)
conn.add_software_parameter("pyxit_save_to", software.id, "String", "/tmp", False, 220, False)
# NOTE(review): the next two parameters are declared Boolean but default to
# "/tmp" — looks copy-pasted from pyxit_save_to; confirm the intended defaults.
conn.add_software_parameter("cytomine_postproc", software.id, "Boolean", "/tmp", False, 115, False)
conn.add_software_parameter("pyxit_post_classification", software.id, "Boolean", "/tmp", False, 230, False)
conn.add_software_parameter("pyxit_post_classification_save_to", software.id, "String", "/tmp", False, 240, False)
conn.add_software_parameter("cytomine_mask_internal_holes", software.id, "Boolean", "true", True, 250, False)
conn.add_software_parameter("cytomine_id_image", software.id, "Number", 0, True, 1, False)
# Attach the software to the given project.
addSoftwareProject = conn.add_software_project(id_project, software.id)
|
#!python
############################################################
##### This is a recursion example using fibonacci sequence
############################################################
def fibonacci(n):
    """fibonacci(n) returns the n-th number in the Fibonacci sequence,
    which is defined with the recurrence relation:
    fibonacci(0) = 0
    fibonacci(1) = 1
    fibonacci(n) = fibonacci(n - 1) + fibonacci(n - 2), for n > 1

    Raises:
        ValueError: if n is negative or not an integer.
    """
    # Check the type before comparing: with the original order
    # (`n < 0 or not isinstance(...)`) a non-comparable argument such as a
    # string raised TypeError from `n < 0` instead of the documented ValueError.
    if not isinstance(n, int) or n < 0:
        raise ValueError('fibonacci is undefined for n = {!r}'.format(n))
    # Implement fibonacci_recursive, _memoized, and _dynamic below, then
    # change this to call your implementation to verify it passes all tests
    # return fibonacci_recursive(n)
    # return fibonacci_memoized(n)
    return fibonacci_dynamic(n)
def fibonacci_recursive(n):
    """Naive recursive Fibonacci: exponential time, base cases 0 and 1."""
    if n > 1:
        # Recurrence: F(n) = F(n-2) + F(n-1).
        return fibonacci_recursive(n - 2) + fibonacci_recursive(n - 1)
    if n in (0, 1):
        # Base cases: F(0) = 0, F(1) = 1.
        return n
def fibonacci_memoized(n):
    """Fibonacci with memoization.

    The original version only looked up a fixed table for n <= 9 and
    recursed without caching above that, so it was still exponential.
    Here each value is computed once and cached, giving O(n) time per call.
    """
    cache = {0: 0, 1: 1}  # base cases seed the memo table

    def fib(k):
        # Compute-and-store on miss; every k is computed at most once.
        if k not in cache:
            cache[k] = fib(k - 1) + fib(k - 2)
        return cache[k]

    return fib(n)
##################################################
##### Dynamic Programming
##### Meaning: solving a problem by breaking it down into a subset of smaller
##### problems, solving those subproblems once and storing their solutions.
def fibonacci_dynamic(n):
    """Bottom-up dynamic-programming Fibonacci: O(n) time, O(1) space.

    The previous implementation recreated an empty cache dict on every call
    and recursed with no base case, so any n > 1 hit RecursionError (and the
    cache was clobbered by `dict.update`'s None return). This version builds
    the sequence iteratively from the two base cases instead.
    """
    if n < 2:
        # F(0) = 0, F(1) = 1.
        return n
    # Keep only the last two values; each step advances the pair by one.
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def main():
    """CLI entry point: print fibonacci(n) for a single numeric argument."""
    import sys
    argv = sys.argv[1:]  # drop the script name
    if len(argv) != 1:
        print('Usage: {} number'.format(sys.argv[0]))
        return
    num = int(argv[0])
    print('fibonacci({}) => {}'.format(num, fibonacci(num)))
# Script entry point.
if __name__ == '__main__':
    main()
import time

# Simple CLI bouncer: asks for the user's age and gates entry at 21.
print('-'*65)
print('Bouncer Bot:')
print()
print('Welcome to CLUB425,the most lit club in downtown ACTvF. Before you can enter, I need you to answer some questions...')
# Compare ages numerically: the original compared strings, which is
# lexicographic ('9' >= '21' is True, '100' < '21'), and its two
# non-exclusive ifs printed both outcomes for an input of exactly '21'.
x = int(input('What is your age today? '))
if x >= 21:
    print('Welcome... to the club.')
else:
    print('Filthy CHILD!')
    print('Come back some other time, idiot!')
print('-'*65)
|
import PIL
import numpy as np
import numpy as np
from PIL import Image, ImageDraw
try:
import Image
except ImportError:
from PIL import Image
def shape_to_mask(img_shape, points, shape_type=None,
                  line_width=10, point_size=5):
    """Rasterize a labelme-style shape into a boolean mask.

    Args:
        img_shape: (height, width, ...) — only the first two dims are used.
        points: sequence of (x, y) pairs defining the shape.
        shape_type: 'circle', 'rectangle', 'line', 'linestrip', 'point',
            or None/anything else for a polygon.
        line_width: stroke width for line/linestrip shapes.
        point_size: radius for point shapes.

    Returns:
        numpy bool array of shape img_shape[:2], True inside the shape.
    """
    # Function-scope import: the module never imported math, so the circle
    # branch raised NameError on first use.
    import math
    mask = np.zeros(img_shape[:2], dtype=np.uint8)
    mask = PIL.Image.fromarray(mask)
    draw = PIL.ImageDraw.Draw(mask)
    xy = [tuple(point) for point in points]
    if shape_type == 'circle':
        assert len(xy) == 2, 'Shape of shape_type=circle must have 2 points'
        # First point is the center, second lies on the circumference.
        (cx, cy), (px, py) = xy
        d = math.sqrt((cx - px) ** 2 + (cy - py) ** 2)
        draw.ellipse([cx - d, cy - d, cx + d, cy + d], outline=1, fill=1)
    elif shape_type == 'rectangle':
        assert len(xy) == 2, 'Shape of shape_type=rectangle must have 2 points'
        draw.rectangle(xy, outline=1, fill=1)
    elif shape_type == 'line':
        assert len(xy) == 2, 'Shape of shape_type=line must have 2 points'
        draw.line(xy=xy, fill=1, width=line_width)
    elif shape_type == 'linestrip':
        draw.line(xy=xy, fill=1, width=line_width)
    elif shape_type == 'point':
        assert len(xy) == 1, 'Shape of shape_type=point must have 1 points'
        cx, cy = xy[0]
        r = point_size
        draw.ellipse([cx - r, cy - r, cx + r, cy + r], outline=1, fill=1)
    else:
        # Default: treat the points as a closed polygon.
        assert len(xy) > 2, 'Polygon must have points more than 2'
        draw.polygon(xy=xy, outline=1, fill=1)
    mask = np.array(mask, dtype=bool)
    return mask
|
#!/usr/bin/env python3
import random

# Interactive dice roller: parses "ndx" (n dice with x faces each) and
# prints the total of n independent rolls.
print('please enter the Dice you want to roll in the Format ndx,')
print('where n is the count of dices, and x is the number of possible values of the dice, e.g. 2d6')
parts = input().split("d")
faces = int(parts[1])
count = int(parts[0])
total = 0
for _ in range(count):
    total += random.randint(1, faces)
print(total)
|
#!/usr/bin/env python
## Script is used to generate training and validation datasets from sph files
from .clip import audio_clip
import argparse

# CLI options controlling how many clips are sampled per speaker, how long
# each clip is, and the [low, high] time window (seconds) to sample from.
parser = argparse.ArgumentParser("generate training data")
parser.add_argument("--dir", type=str, help="input folder name",
                    default="data")
parser.add_argument("--num", type=int, help="The number of clips for\
 each speaker", default=128)
parser.add_argument("--duration", type=int, help="The duration of each \
clip", default=5)
parser.add_argument("--low", type=int, help="starting time of the audio from \
which the clip is sampled", default=0)
parser.add_argument("--high", type=int, help="ending time of the audio from \
which the clip is sampled", default=600)
parser.add_argument("--out", type=str, help="the output directory",
                    default="data/train")
# Parsed at import time so audioclips() can use the module-level args.
args = parser.parse_args()


def audioclips():
    # Delegate to the clipper with the parsed command-line arguments.
    audio_clip(args.dir, args.num, args.low, args.high, args.duration, args.out)
|
from jinja2 import Markup
from functools import partial
from orun.db import models
from orun import app
from orun import render_template
#from orun.template.loader import select_template
def render_field(model_name, field_name, **ctx):
    """Render the form-widget template for one model field.

    Looks up the field on the named model, selects a template by the field's
    internal type (falling back to the generic Field template), injects the
    field plus the render helpers into the template context, and returns the
    rendered markup. Extra keyword arguments are passed through as context.
    """
    model = app[model_name]
    field = model._meta.fields[field_name]
    # Most-specific template first; the generic Field template is the fallback.
    templates = [
        'pwa/forms/fields/%s.jinja2' % field.get_internal_type(),
        'pwa/forms/fields/Field.jinja2',
    ]
    if field.choices:
        # Choice fields get their own template and a normalized iterable of
        # (value, label) pairs in the context.
        templates.insert(0, 'pwa/forms/fields/ChoiceField.jinja2')
        choices = field.choices
        if isinstance(choices, dict):
            choices = choices.items()
        elif isinstance(choices, list):
            choices = tuple(choices)
        ctx.setdefault('choices', choices)
    # Expose the helpers so templates can recursively render nested fields.
    ctx.update({
        'field': field,
        'render_field': render_field,
        'render_col': render_col,
    })
    ctx.setdefault('prefix', '')
    if field.one_to_many:
        # One-to-many: resolve the related model's form fields, either the
        # explicit names passed in ctx or the related model's defaults.
        form_fields = ctx.get('form_fields')
        if form_fields:
            form_fields = [field.rel.model._meta.fields[f] for f in form_fields]
        else:
            form_fields = field.rel.model._meta.form_fields
        ctx['form_fields'] = form_fields
        # List (grid) fields default to the form fields when not given.
        list_fields = ctx.get('list_fields')
        if list_fields:
            list_fields = [field.remote_field.model._meta.fields[f] for f in list_fields]
        else:
            list_fields = form_fields
        ctx['list_fields'] = list_fields
    return Markup(
        render_template(templates, **ctx)
    )
def render_col(field):
    """Render a grid cell bound to record.<field>, applying a date filter.

    Returns a <td ng-bind="..."> markup snippet; date and datetime fields get
    the corresponding Angular-style formatting filter.
    """
    s = 'record.' + field.name
    # Check DateTimeField before DateField: in Django-style ORMs DateTimeField
    # subclasses DateField, so the original order sent datetime fields through
    # the date filter. (Assumes orun mirrors Django's field hierarchy — the
    # models import at the top of this file is orun's Django-derived ORM.)
    if isinstance(field, models.DateTimeField):
        s = """(%s | datetime:'short')""" % s
    elif isinstance(field, models.DateField):
        s = """(%s | date:'short')""" % s
    s = """<td ng-bind="%s"></td>""" % s
    return Markup(s)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Logic operators: truth-table assertions for AND, OR, and NOT.
"""
# AND (bitwise & on bools gives the same results as logical `and` here)
assert (True & True) == True
assert (True & False) == False
assert (False & True) == False
assert (False & False) == False
assert (True and True) == True
assert (True and False) == False
assert (False and True) == False
assert (False and False) == False
# OR
assert (True | True) == True
assert (True | False) == True
assert (False | True) == True
assert (False | False) == False
assert (True or True) == True
assert (True or False) == True
assert (False or True) == True
assert (False or False) == False
# NOT (the last two asserts duplicate the first two)
assert (not True) == False
assert (not False) == True
assert (not True) == False
assert (not False) == True
|
import re
import json
import logging
import pymysql
from scrapy import Spider, Request
from instagram_crawler.items import InstagramProfileItems
from instagram_crawler.user_cache import UserCache
logger = logging.getLogger(__name__)
class Instagram(Spider):
    """Scrapy spider that crawls Instagram profile pages into profile items.

    Usernames come either from a MySQL table (method='mysql') or from the
    redis-backed UserCache; each profile page's window._sharedData JSON is
    parsed into an InstagramProfileItems.
    """
    BASE_URL = "http://www.instagram.com/"
    start_urls = []
    name = 'Instagram'

    def __init__(self, method='mysql', *a, **kw):
        super(Instagram, self).__init__(*a, **kw)
        # 'mysql' reads usernames from influencers_manual; anything else
        # falls back to the cached followings in redis.
        self.method = method

    def parse(self, response):
        return Instagram.parse_item(response)

    @classmethod
    def parse_item(cls, response):
        """Extract profile stats from the page's window._sharedData blob."""
        javascript = "".join(response.xpath('//script[contains(text(), "sharedData")]/text()').extract())
        json_data = json.loads("".join(re.findall(r'window._sharedData = (.*);', javascript)))
        item = InstagramProfileItems()
        data = get_extracted(json_data["entry_data"]["ProfilePage"], 0)['user']
        item['username'] = data['username']
        item['user_id'] = int(data['id'])
        item['following'] = data['follows']['count']
        item['followers'] = data['followed_by']['count']
        item['profile_picture'] = data['profile_pic_url']
        # TODO - resolve unicode problem (hebrew)
        #item['full_name'] = data['full_name']
        item['is_private'] = data['is_private']
        media = data['media']
        item['posts'] = media['count']
        item['avg_comments'] = cls.calc_average('comments', media, len(media['nodes']))
        item['avg_likes'] = cls.calc_average('likes', media, len(media['nodes']))
        item['is_from_israel'] = cls.is_from_israel(media)
        return item

    @classmethod
    def calc_average(cls, action, media, count):
        """Average the given counter ('likes'/'comments') over recent posts."""
        # sum() replaces reduce(map(...)): `reduce` is not a builtin in
        # Python 3 (it lives in functools), so the original raised NameError.
        return sum(post[action]['count'] for post in media['nodes']) / count

    @classmethod
    def is_from_israel(cls, media):
        """Heuristic: True if a post caption contains a Hebrew letter
        (U+0590–U+05EA). Note each caption overwrites the flag, so only the
        first Hebrew-containing caption short-circuits the loop."""
        is_from_israel = False
        for node in media['nodes']:
            if 'caption' in node:
                is_from_israel = any(u"\u0590" <= c <= u"\u05EA" for c in node['caption'])
                if is_from_israel:
                    break
        return is_from_israel

    def start_requests(self):
        """Yield profile-page requests from MySQL or from the user cache."""
        if self.method == 'mysql':
            try:
                with open('/home/ec2-user/mysqlcreds', 'r') as f:
                    passwd = f.readline().rstrip()
                conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd=passwd, db='influencers')
                table = 'influencers_manual'
                curr = conn.cursor()
                curr.execute("SELECT username FROM {}".format(table))
                res = curr.fetchall()
                for username in res:
                    # NOTE(review): make_requests_from_url is deprecated in
                    # newer Scrapy releases; kept for compatibility here.
                    yield self.make_requests_from_url(self.BASE_URL + username[0])
            except Exception as e:
                logger.error("Could not get influencers from influencers_manual db")
                logger.exception(e)
        else:
            # Generate a new request for each cached following.
            try:
                all_following = UserCache.get_all_parsed_user_following()
                for username in all_following:
                    if username:
                        yield self.make_requests_from_url(self.BASE_URL + username)
            except Exception as e:
                logger.error("Could not get influencers from redis")
                logger.exception(e)
def get_extracted(value, index):
    """Return value[index], or {} when the lookup fails.

    Catches only the lookup-related failures (bad index/key, or a value that
    is not subscriptable) instead of the original bare `except:`, which also
    swallowed KeyboardInterrupt/SystemExit and hid real bugs.
    """
    try:
        return value[index]
    except (IndexError, KeyError, TypeError):
        return {}
from django.contrib import admin
from .models import Productos, Proveedores, ContactoProveedores, TelefonosContactoProv, Servicios, Categoria, PreciosVentaServicio, PreciosVentaProducto
# Register the inventory models with the default admin site.
for _model in (
    Productos,
    Proveedores,
    ContactoProveedores,
    TelefonosContactoProv,
    Servicios,
    Categoria,
    PreciosVentaServicio,
    PreciosVentaProducto,
):
    admin.site.register(_model)
from pprint import pprint
import xml.dom.minidom as minidom
import xml.dom
from xml.dom.minidom import Element
from xml.dom.minidom import NodeList
from log_4_j.event import Event
from log_4_j.dom_location_info import DomLocationInfo
class DomEvent(Event):
    """Event built from a log4j XML <log4j:event> DOM element.

    Pulls the message CDATA, the optional throwable, and the optional
    location info out of the element, substituting empty placeholder
    elements when the optional children are absent, then initializes the
    base Event with the element's attributes.
    """

    def __init__(self, dom_element: Element) -> None:
        self._dom_element = dom_element
        # Message text lives in the first child (CDATA) of <log4j:message>;
        # fall back to '' when the message element is empty.
        message_cdata = dom_element.getElementsByTagName('log4j:message')[0].firstChild
        message_cdata_string = ''
        if message_cdata is not None:
            message_cdata_string = message_cdata.data
        # Optional <log4j:throwable>: use it when present, otherwise create
        # an empty stand-in element.
        # NOTE(review): createElementNS is given qualified name 'throwable'
        # while lookups use 'log4j:throwable' — confirm the placeholder's tag
        # name matches what downstream consumers expect.
        throwable_list = self._dom_element.getElementsByTagName('log4j:throwable')
        if len(throwable_list) == 1:
            throwable_element = throwable_list[0]
        else:
            throwable_element = dom_element.ownerDocument.createElementNS('log4j', 'throwable')
        # Optional <log4j:locationInfo>, same placeholder strategy.
        location_info_list = self._dom_element.getElementsByTagName('log4j:locationInfo')
        if len(location_info_list) == 1:
            location_info_element = location_info_list[0]
        else:
            location_info_element = self._dom_element.ownerDocument.createElementNS('log4j', 'locationInfo')
        location_info = DomLocationInfo(location_info_element)
        Event.__init__(
            self,
            dom_element.getAttribute("logger"),
            dom_element.getAttribute("timestamp"),
            dom_element.getAttribute("level"),
            dom_element.getAttribute("thread"),
            message_cdata_string,
            throwable_element,
            location_info
        )
#!/usr/bin/env python
# coding: utf-8

# In[1]:
import math
# English word-chain game ("끝말잇기"): sample inputs for solution(n, words).
words=["tank","kick","know","wheel","land","dream","mother","robot","tank"]
#words=["hello", "observe", "effect", "take", "either", "recognize", "encourage", "ensure", "establish", "hang", "gather", "refer", "reference", "estimate", "executive"]
#words=["hello","one","even","never","now","world","draw","white"]
n = 3
def solution(n, words):
    """Word-chain game referee for n players.

    Players take turns saying words in round-robin order. A player fails by
    repeating a word or by saying a word that does not start with the last
    letter of the previous word. Returns [player_number, turn_number] of the
    first failure, or [0, 0] if nobody fails.

    Assumes len(words) is a multiple of n (pop() would fail otherwise).
    """
    # Distribute words round-robin into one stack per player; insert(0, ...)
    # reverses each stack so pop() yields the words in play order.
    word = [[] for _ in range(n)]
    for i in range(len(words)):
        word[i % n].insert(0, words[i])
    total = len(word[0]) * n
    # First word is always legal; seed the played-word history with it.
    newwords = [word[0].pop()]
    for i in range(1, total):
        m = i % n
        new = word[m].pop()
        # Early returns replace the original's unreachable `break` statements
        # that followed each `return`.
        if new in newwords:
            # Repeated word: player m+1 fails on this turn.
            return [m + 1, (len(newwords) // n) + 1]
        if new[0] != newwords[-1][-1]:
            # Word does not chain from the previous word's last letter.
            return [m + 1, (len(newwords) // n) + 1]
        newwords.append(new)
    return [0, 0]
solution(n, words)

# In[ ]:
# Valid-parentheses check: push on '(', pop on ')'; popping an empty stack
# means an unmatched ')'. Leftover '(' at the end also makes s invalid.
s="(()("
answer=True
string = []
for element in s:
    if element == "(":
        string.append(element)
    else:
        try:
            check = string.pop()
        except:
            # Pop from empty stack: a ')' with no matching '('.
            answer=False
            break
if string == []:
    print(answer)
else:
    # Unmatched '(' remain: report the negated flag.
    print(not answer)
# In[9]:
# Printer-queue problem: jobs print in priority order; a job is re-queued
# while any waiting job has higher priority. Finds the print position of the
# job at `location`.
priorities=[2,1,3,2]
prior=[]
location = 2
index=[]
print_idx = []
# Reverse the job list into `prior` (with original indices in `index`) so
# pop() takes jobs from the front of the queue.
for i in range(len(priorities),0,-1):
    index.append(i-1)
    prior.append(priorities.pop())
while prior!=[]:
    check=prior.pop()
    idx = index.pop()
    # Scan the remaining queue for any higher-priority job.
    max_e = check
    for element in prior:
        if check < element:
            max_e = element
    if max_e == check:
        # Nothing more urgent is waiting: print this job.
        priorities.append(check)
        print_idx.append(idx)
    else:
        # Re-queue at the back (front of the reversed stacks).
        prior.insert(0,check)
        index.insert(0,idx)
print(print_idx.index(location)+1)
print(priorities, print_idx)
# In[15]:
# Level 2 "feature development": each task finishes after enough days of
# progress at its speed; tasks deploy in order, and a finished task waits for
# every earlier task. Counts how many tasks deploy together each release.
progresses = [93,30,55,90]
speeds = [1,30,5,90]
answer=[]
day = []
# Days needed per task, stored reversed so pop() yields tasks in order.
for i in range(len(progresses)):
    count = 0
    while progresses[i] < 100:
        progresses[i] += speeds[i]
        count+=1
    day.insert(0,count)
print(day)
while day != []:
    check=day.pop()
    count=1
    # Every following task that is ready no later than `check` ships with it.
    for i in range(len(day),0,-1):
        if day[i-1] < check:
            day.pop()
            count+=1
        else:
            break
    answer.append(count)
print(answer)
|
# -*- coding: utf-8 -*-
import time
import random
from scrapy.http import HtmlResponse
from scrapy import signals
from fake_useragent import UserAgent
class MultoneSpiderMiddleware(object):
    """Scrapy project-template spider middleware (pass-through defaults)."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class MultoneDownloaderMiddleware(object):
    """Scrapy project-template downloader middleware (pass-through defaults)."""
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class RandomUaMiddleware(object):
    """随机切换UA — attach a random User-Agent (and optional cookies)
    to every request that is not routed through selenium."""

    def process_request(self, request, spider):
        # Requests flagged for selenium get their headers from the real
        # browser, so only plain requests are rewritten here.
        middlewares = request.meta.get('middlewares')
        if middlewares != 'selenium':
            def _get_random_ua(ua_type='random', _retries=3):
                """Fetch a UA string from fake-useragent, retrying a few
                times on failure; returns None when no UA is available."""
                try:
                    ua_class = UserAgent()
                    ua = getattr(ua_class, ua_type)
                    return ua
                except Exception:
                    # Bug fix: the original recursed here but discarded
                    # the result (implicitly returning None) and had no
                    # recursion bound, risking a stack overflow if the
                    # UA source stayed unavailable.
                    if _retries > 0:
                        return _get_random_ua(ua_type, _retries - 1)
                    return None

            new_ua = _get_random_ua()
            if new_ua:
                request.headers.setdefault("User-Agent", new_ua)
            cookie = request.meta.get('cookies')
            if cookie:
                request.cookies = cookie
                print('Using Cookies ', cookie)
class JSPageMiddleware(object):
    """请求动态网页 — render JS-heavy pages via the spider's shared
    selenium browser instead of Scrapy's downloader."""

    def process_request(self, request, spider):
        middlewares = request.meta.get('middlewares')
        if middlewares == 'selenium':
            browser = spider.browser
            # Bug fix: the page-load timeout must be configured *before*
            # get(); the original set it afterwards, so the navigation it
            # was meant to bound ran with no timeout at all.
            browser.set_page_load_timeout(10)
            # browser.set_script_timeout(5)
            browser.get(request.url)
            # Crude politeness/anti-bot delay (0-10s).
            time.sleep(random.random() * 10)
            # Returning a Response here short-circuits the downloader.
            return HtmlResponse(url=browser.current_url, body=browser.page_source, encoding="utf-8",
                                request=request)
class UaIpMiddleware(object):
    """随机切换Ip,UA — rotate proxy IP and User-Agent per request.

    Uses an authenticating proxy (abuyun-style basic auth) plus
    fake-useragent for UA rotation, and retries redirected or
    out-of-range responses through the proxy.
    """

    def __init__(self, proxy_server, proxy_user, proxy_pass, ua_type, stats=None):
        # ``stats`` now defaults to None (backward compatible) so the
        # middleware can also be built without a crawler stats collector.
        self.proxy_server = proxy_server
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass
        self.ua_type = ua_type
        self.logger = BaseLogger()
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy factory hook: read proxy credentials from settings.
        proxy_config = crawler.settings.get("PROXY_CONFIG")
        proxy_server = proxy_config.get('proxyServer', "")
        proxy_user = proxy_config.get('proxyUser', "")
        proxy_pass = proxy_config.get('proxyPass', "")
        ua_type = crawler.settings.get('UA_TYPE', 'random')
        # Bug fix: the original omitted ``stats`` even though __init__
        # required it, so instantiation raised TypeError at startup.
        return cls(proxy_server=proxy_server, proxy_user=proxy_user,
                   proxy_pass=proxy_pass, ua_type=ua_type,
                   stats=crawler.stats)

    def _get_random_ua(self, _retries=3):
        """Return a random UA string, or None after repeated failures.

        Bug fix: the original recursed on failure but discarded the
        recursive result, so it could only ever return a UA obtained on
        the very first attempt.
        """
        for _ in range(max(1, _retries)):
            try:
                ua = getattr(UserAgent(), self.ua_type)
                if ua:
                    return ua
            except Exception:
                continue
        return None

    def process_response(self, request, response, spider):
        """Retry redirected / non-2xx responses through the proxy with a
        fresh UA; back off briefly on HTTP 429 (rate limiting)."""
        status = int(response.status)
        current_url = request.meta.get('current_url')
        if status == 429:
            # Bug fix: this branch was unreachable before, because the
            # generic ``status > 300`` retry was checked first.
            self.logger.info('阿步云访问过快,出现429状态')
            time.sleep(2)
            if self.stats:
                self.stats.inc_value("code429")
            return request
        if response.url != current_url or status > 300 or status < 200:
            # time.sleep(5*random.random())
            request.meta["proxy"] = self.proxy_server
            request.headers["Proxy-Authorization"] = self.gen_proxy()
            # request.headers["Accept-Encoding"] = "Gzip"
            request._url = current_url  # restore the original url and re-request it
            # Rotate the UA for the retry.
            new_ua = self._get_random_ua()
            if new_ua:
                request.headers.setdefault("User-Agent", new_ua)
            return request
        return response

    def process_request(self, request, spider):
        # Selenium-bound requests carry their own browser identity.
        middlewares = request.meta.get('middlewares')
        if middlewares != 'selenium':
            new_ua = self._get_random_ua()
            if new_ua:
                request.headers.setdefault("User-Agent", new_ua)
            request.meta["proxy"] = self.proxy_server
            request.headers["Proxy-Authorization"] = self.gen_proxy()

    def gen_proxy(self):
        """Return the HTTP basic-auth header value for the proxy account."""
        proxy_user = self.proxy_user
        proxy_pass = self.proxy_pass
        proxy_auth = "Basic " + base64.urlsafe_b64encode(bytes((proxy_user + ":" + proxy_pass), "ascii")).decode(
            "utf8")  # abuyun proxy auth
        return proxy_auth
|
class Pair(object):
    """An ordered pair (a, b) used by the maximum-chain-length problem;
    a chain step requires the next pair's ``a`` to exceed this ``b``."""
    def __init__(self, a, b):
        self.a = a
        self.b = b
def maxChainLength(arr, n):
    """Return the length of the longest chain formed from ``arr``.

    A pair (c, d) may follow (a, b) iff b < c.  Standard O(n^2)
    LIS-style dynamic programming; ``n`` is the number of pairs
    considered (normally len(arr)).  Returns 0 for an empty input.
    """
    # mcl[i] = length of the longest chain ending at arr[i]
    mcl = [1] * n
    for i in range(1, n):
        for j in range(i):
            if arr[i].a > arr[j].b and mcl[i] < mcl[j] + 1:
                mcl[i] = mcl[j] + 1
    # Fix: the original shadowed the builtin ``max`` with a local
    # variable; use the builtin directly instead.
    return max(mcl) if n else 0
# Demo: the longest chain here is (5,24) -> (27,40) -> (50,60), length 3.
arr = [Pair(5,24),Pair(15,25),Pair(27,40),Pair(50,60)]
print('Length of maximum size chain is', maxChainLength(arr,len(arr)))
def collatz(number):
    """Print and return the successor of ``number`` in the Collatz map.

    Odd n -> 3n + 1, even n -> n // 2.  For n == 1 the sequence has
    terminated, so 1 is returned (the original fell through and
    returned None here, which made the driver loop crash on an input
    of 1).
    """
    if number == 1:
        print(1)
        return 1
    if number % 2 == 1:
        num = 3 * number + 1
    else:
        num = number // 2
    print(num)
    return num
# Driver: read a starting value and iterate the Collatz map until the
# sequence reaches 1.  NOTE(review): assumes a positive integer input;
# 0 never terminates.
n = int(input('Enter a number: \n'))
while True:
    n = collatz(n)
    if n == 1:
        break
|
from django.forms import ModelForm
from django import forms
from models import *
class ParticipanteForm(ModelForm):
    """ModelForm over Participante exposing all model fields.

    NOTE(review): Django >= 1.8 requires an explicit ``fields`` or
    ``exclude`` on ModelForm Meta; this legacy form relies on the old
    implicit "all fields" behaviour — confirm the target Django version.
    """
    class Meta:
        model = Participante
class InscricaoForm(ModelForm):
    """ModelForm over Inscricao exposing all model fields.

    NOTE(review): same as ParticipanteForm — Django >= 1.8 needs an
    explicit ``fields``/``exclude`` on Meta.
    """
    class Meta:
        model = Inscricao
|
# -*- coding: utf-8 -*-
def test_vehicles(fake, vehicles):
    """The fixture must supply multiple vehicle dicts keyed by at least
    'Make' and 'Model'."""
    assert len(vehicles) > 1
    v = vehicles[0]
    assert 'Make' in v.keys()
    assert 'Model' in v.keys()
def test_make(fake, makes):
    """A generated make is a non-trivial string drawn from the known list."""
    make = fake.vehicle_make()
    assert len(make) > 1
    assert make in makes
def test_year(fake, years):
    """Years are generated as strings but must parse to a known int year."""
    year = fake.vehicle_year()
    assert len(year) > 1
    assert int(year) in years
def test_model(fake, models):
    """Model names may legitimately be a single character, hence >= 1."""
    model = fake.vehicle_model()
    assert len(model) >= 1
    assert model in models
def test_category(fake, categories):
    """A generated category is a non-trivial string from the known set."""
    category = fake.vehicle_category()
    assert len(category) > 1
    assert category in categories
def test_vehicle_make_model(fake):
    """'make model' usually splits into two words, but single-word values
    are tolerated (some make/model strings contain no space), hence >= 1."""
    ar_mm = fake.vehicle_make_model().split()
    assert len(ar_mm) >= 1
def test_vehicle_year_make_model(fake):
    """'year make model' should split into at least year + one more word."""
    ar_ymm = fake.vehicle_year_make_model().split()
    assert len(ar_ymm) >= 2
    # the first token must be an integral year
    assert float(ar_ymm[0]).is_integer()
def test_vehicle_year_make_model_cat(fake):
    """'year make model category' should split into at least 3 words."""
    ar_ymmc = fake.vehicle_year_make_model_cat().split()
    assert len(ar_ymmc) >= 3
    # the first token must be an integral year
    assert float(ar_ymmc[0]).is_integer()
|
# -*- coding:utf-8 -*-
class Solution(object):
    """LeetCode 79 "Word Search": DFS with in-place cell marking."""

    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        # Mirror the original's contract: an empty board OR an empty
        # word yields False.
        if not board or not word:
            return False
        rows, cols = len(board), len(board[0])
        return any(
            self.trackback(board, word, r, c)
            for r in range(rows)
            for c in range(cols)
        )

    def trackback(self, board, word, x, y):
        # An exhausted word means every character has been matched.
        if not word:
            return True
        # Reject out-of-bounds cells and mismatches; visited cells are
        # temporarily blanked so a path cannot reuse them.
        in_bounds = 0 <= x < len(board) and 0 <= y < len(board[0])
        if not in_bounds or board[x][y] != word[0]:
            return False
        saved, board[x][y] = board[x][y], ""
        rest = word[1:]
        found = (
            self.trackback(board, rest, x + 1, y)
            or self.trackback(board, rest, x - 1, y)
            or self.trackback(board, rest, x, y + 1)
            or self.trackback(board, rest, x, y - 1)
        )
        board[x][y] = saved  # restore for sibling search paths
        return found
print(Solution().exist([["A","B","C","E"],["S","F","C","S"],["A","D","E","E"]],"ABCE")) |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-11-03 23:12
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.manager
class Migration(migrations.Migration):
    """Auto-generated schema migration for the bot app.

    Adds GroupMeBot.groupname and changes the avatar/callback URL field
    defaults.  NOTE(review): ``validators=[django.core.validators.URLValidator]``
    passes the validator *class*, not an instance (``URLValidator()``),
    so these fields are effectively unvalidated — fix in the model plus
    a follow-up migration rather than by editing this historical one.
    """

    dependencies = [
        ('bot', '0005_auto_20161103_2107'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='groupmebot',
            managers=[
                ('botmanager', django.db.models.manager.Manager()),
            ],
        ),
        migrations.AddField(
            model_name='groupmebot',
            name='groupname',
            field=models.TextField(default='Tests'),
        ),
        migrations.AlterField(
            model_name='groupmebot',
            name='avatar_url',
            field=models.URLField(default='https://www.google.com', validators=[django.core.validators.URLValidator]),
        ),
        migrations.AlterField(
            model_name='groupmebot',
            name='callback_url',
            field=models.URLField(default='https://www.google.com', validators=[django.core.validators.URLValidator]),
        ),
    ]
|
class Aircraft:
    """Represents an aircraft with a bounded fuel tank.

    ``units`` selects how ``range_max`` is interpreted: kilometres for
    'metric', miles otherwise.
    """

    # Minimum safe fuel level for take-off; also used as the tank capacity.
    __MIN_FUEL = 100

    def __init__(self, flight_number="", code: str = None, units: str = None, range_max: int = None):
        self.code = code
        self.units = units
        # Robustness fix: the original unconditionally called
        # int(range_max), which raised TypeError for the default None.
        self.range_max = int(range_max) if range_max is not None else 0
        self.flight_number = flight_number
        self.__fuel = 0            # current fuel on board
        self.__fuelCheck = False   # result of the last fuel_check()
        self.__maxFuel = self.__MIN_FUEL  # tank capacity

    def get_maximum_range(self):
        """Return the maximum range of this aircraft in kilometres."""
        if self.units == 'metric':
            return self.range_max
        # Imperial range is stored in miles: 1 mile = 1.6093 km
        # (the original comment had the conversion backwards).
        return self.range_max * 1.6093

    def fuel_check(self):
        """Print and return whether the current fuel level is safe."""
        if self.__fuel < self.__MIN_FUEL:
            print("[", self.flight_number, "] Fuel Check Failed: Current fuel below safe limit:", self.__fuel,
                  " less than ", self.__MIN_FUEL)
            self.__fuelCheck = False
        else:
            print("[", self.flight_number, "] Fuel Check Complete. Current Fuel Level :", self.__fuel)
            self.__fuelCheck = True
        return self.__fuelCheck

    def take_off(self):
        """Run a pre-flight fuel check and report take-off clearance."""
        if self.fuel_check():
            print("[", self.flight_number, "] Cleared for take off! Fasten your seat-belt!")
        else:
            print("[", self.flight_number, "] Take off failed: complete pre-flight fuel check and refuel first.")
            print(self.fuel_check())

    def print_fuel_level(self):
        """Print the current fuel level."""
        print("Current fuel", self.__fuel)

    def add_fuel(self, volume):
        """Add ``volume`` of fuel, capped at the tank capacity.

        Returns the unused (overflow) volume: 0 when everything fits or
        when a negative volume is rejected.
        """
        unusedFuel = 0
        if volume < 0:
            print("No syphoning fuel!!")
        elif self.__fuel + volume <= self.__maxFuel:
            self.__fuel = self.__fuel + volume
        else:
            # Bug fix: the overflow is (old fuel + volume) - capacity.
            # The original computed ``volume - self.__fuel`` *after*
            # topping the tank up, which was wrong and could go negative.
            unusedFuel = self.__fuel + volume - self.__maxFuel
            self.__fuel = self.__maxFuel
        return unusedFuel

    def __repr__(self):
        return f'{self.code}'
|
# %%
import os
import sys
# Local tools -----------------------------------
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'tools')) # noqa
from MEG_worker import MEG_Worker
# %%
# Run the MEG preprocessing pipeline for subject S01 on band 'U07'.
worker = MEG_Worker(running_name='MEG_S01')
worker.pipeline(band_name='U07')
|
import numpy as np
# Demo: mean squared error between predictions ``a`` and one-hot
# targets ``b``; the last three prints are equivalent formulations.
a = np.array([[0.23875153, 0.58318603, 0.5816853, 0.19389093, 0.67800844], [0.20469368, 0.59248257, 0.38294148, 0.17688346, 0.89432704]])
b = np.array([[1, 0, 0, 0, 0], [0, 1, 0 , 0, 0]])
print(a - b)
c = (a - b)**2
print(c)
print(np.sum(c, axis=1) / 5)            # per-row MSE (5 classes)
print(np.mean(c, axis=1))               # same, via np.mean
print(np.mean(np.sum(c, axis=1) / 5))   # overall MSE
# !pip install ogb
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from ogb.linkproppred import LinkPropPredDataset, Evaluator
from logger import Logger
hasGPU = torch.cuda.is_available()
def gpu(x):
return x.cuda() if hasGPU else x
class MADGraph(nn.Module):
    """Multi-head link predictor over learned node embeddings.

    Each head embeds every node twice (``pos`` and ``field``).  An edge
    (src, dst) is scored by projecting position differences to sampled
    "witness" nodes onto the field vectors, optionally adding a learned
    ``uncertainty`` memory term derived from the dense adjacency, and
    optionally soft-min weighting witnesses by distance.
    """
    def __init__(
        self,
        n_nodes,
        node_feats,
        src,
        dst,
        in_feats=32,
        n_heads=4,
        n_samples=256,
        n_sentinels=8,
        memory=-1,
        softmin=True,
        n_nearest=0
    ):
        super(self.__class__, self).__init__()
        self.n_nodes = n_nodes
        self.node_feats = node_feats
        self.n_samples = n_samples
        self.n_heads = n_heads
        self.n_sentinels = n_sentinels
        # memory indexes the 3-way list in forward(): -1/2 = adjacency
        # term, 0 = none, 1 = plain uncertainty.
        self.memory = memory
        self.softmin = softmin
        self.n_nearest = n_nearest
        # Per-head learnable embeddings: (n_heads, n_nodes, node_feats).
        self.pos = nn.Parameter(gpu(
            torch.rand((n_heads, n_nodes, node_feats))))
        self.field = nn.Parameter(gpu(
            torch.rand((n_heads, n_nodes, node_feats))))
        self.uncertainty = nn.Parameter(gpu(torch.ones(1, 1, 1) * 5))
        # Dense symmetric +1/-1 adjacency.  NOTE(review): O(n_nodes^2)
        # memory — fine at ogbl-ddi scale, prohibitive for large graphs.
        edge = -1 * torch.ones(n_nodes, n_nodes)
        edge[src, dst] = 1
        edge[dst, src] = 1
        self.edge = gpu(edge)

    def forward(self, edge):
        # edge: (n_edges, 2) long tensor of (src, dst) pairs.
        src, dst = edge.T
        n = edge.shape[0]
        # Witness nodes: uniform random during training...
        mid0 = torch.randint(
            0, self.n_nodes, (self.n_heads, n, self.n_samples))
        mid1 = torch.randint(
            0, self.n_nodes, (self.n_heads, n, self.n_samples))
        if self.n_nearest and not self.training:
            # ...or the n_nearest neighbours (excluding the node itself,
            # hence the [:, :, 1:]) at evaluation time.
            mid0 = (
                self.pos[:, src].unsqueeze(2)
                - self.pos.unsqueeze(1)
            ).norm(dim=3).topk(
                1 + self.n_nearest, largest=False).indices[:, :, 1:]
            mid1 = (
                self.pos[:, dst].unsqueeze(2)
                - self.pos.unsqueeze(1)
            ).norm(dim=3).topk(
                1 + self.n_nearest, largest=False).indices[:, :, 1:]
        # (pos[src] - pos[witness]) projected onto field[dst], per head.
        srcdiff = self.pos[:, src].unsqueeze(2) - self.pos[
            torch.arange(self.n_heads).unsqueeze(1).unsqueeze(2), mid0]
        logits1 = (
            (
                srcdiff.unsqueeze(3)
                @ (self.field[:, dst].unsqueeze(2).unsqueeze(4))
            ).squeeze(3).squeeze(3)
            + [
                0,
                self.uncertainty,
                self.uncertainty * self.edge[
                    mid0, dst.unsqueeze(0).unsqueeze(2)],
            ][self.memory]
        )
        # Symmetric term with src and dst swapped.
        dstdiff = self.pos[:, dst].unsqueeze(2) - self.pos[
            torch.arange(self.n_heads).unsqueeze(1).unsqueeze(2), mid1]
        logits2 = (
            (
                dstdiff.unsqueeze(3)
                @ (self.field[:, src].unsqueeze(2).unsqueeze(4))
            ).squeeze(3).squeeze(3)
            + [
                0,
                self.uncertainty,
                self.uncertainty * self.edge[
                    src.unsqueeze(0).unsqueeze(2), mid1],
            ][self.memory]
        )
        logits = torch.cat((logits1, logits2), dim=2)
        if not self.softmin:
            # Plain average over witnesses, then over heads.
            return logits.mean(2).mean(0)
        # Soft-min weighting: softmax(1 - dist) gives nearer witnesses
        # larger weight; zero-logit sentinels at distance 1 damp it.
        dist = torch.cat((srcdiff, dstdiff), dim=2).norm(dim=3)
        if self.n_sentinels:
            logits = torch.cat((
                logits, gpu(torch.zeros(self.n_heads, n, self.n_sentinels))
            ), dim=2)
            dist = torch.cat((
                dist, gpu(torch.ones(self.n_heads, n, self.n_sentinels))
            ), dim=2)
        return (
            logits.unsqueeze(2) @ torch.softmax(1-dist, dim=2).unsqueeze(3)
        ).squeeze(2).squeeze(2).mean(0)
def sample(edge, batch_size=1024):
    """Yield shuffled mini-batches of rows from the ``edge`` tensor."""
    index_batches = DataLoader(range(edge.shape[0]), batch_size, shuffle=True)
    for batch_indices in index_batches:
        yield edge[batch_indices]
def main():
    """Train and evaluate MADGraph on OGB's ogbl-ddi link prediction.

    Downloads the dataset on first use, runs ``--runs`` independent
    trainings (re-initialising the same model each time), and reports
    Hits@{10,20,30} via the official OGB Evaluator.
    """
    parser = argparse.ArgumentParser(description='OGBL-DDI (MADGraph)')
    parser.add_argument('--lr', type=float, default=0.005)
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--eval_steps', type=int, default=5)
    parser.add_argument('--runs', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=4 * 1024)
    parser.add_argument('--dim', type=int, default=12)
    parser.add_argument('--heads', type=int, default=12)
    parser.add_argument('--samples', type=int, default=8)
    parser.add_argument('--nearest', type=int, default=8)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--sentinels', type=int, default=8)
    parser.add_argument('--memory', type=str, default='all')
    parser.add_argument('--softmin', type=bool, default=True)
    parser.add_argument('--output_csv', type=str, default='')
    args = parser.parse_args()
    print(args)
    DNAME = 'ogbl-ddi'
    dataset = LinkPropPredDataset(name=DNAME)
    graph = dataset[0]
    n_nodes = graph['num_nodes']
    data = dataset.get_edge_split()
    # Move every edge split to the GPU when available.
    for group in 'train valid test'.split():
        if group in data:
            sets = data[group]
            for key in ('edge', 'edge_neg'):
                if key in sets:
                    sets[key] = gpu(torch.from_numpy(sets[key]))
    # Down-sample the training edges to valid-set size for evaluation.
    data['eval_train'] = {'edge': data['train']['edge'][
        torch.randperm(data['train']['edge'].shape[0])[
            :data['valid']['edge'].shape[0]]]}
    model = MADGraph(
        n_nodes=n_nodes,
        node_feats=args.dim,
        src=data['train']['edge'][:, 0],
        dst=data['train']['edge'][:, 1],
        n_samples=args.samples,
        n_heads=args.heads,
        n_sentinels=args.sentinels,
        memory=['none', 'stat', 'all'].index(args.memory),
        softmin=args.softmin,
        n_nearest=args.nearest,
    )
    params = [p for net in [model] for p in net.parameters()]
    print('params:', sum(p.numel() for p in params))
    evaluator = Evaluator(name=DNAME)
    loggers = {
        'Hits@10': Logger(args.runs, args),
        'Hits@20': Logger(args.runs, args),
        'Hits@30': Logger(args.runs, args),
    }
    for run in range(args.runs):
        # Re-initialise parameters for an independent run.
        torch.manual_seed(args.seed + run)
        opt = optim.Adam(params, lr=args.lr)
        torch.nn.init.xavier_uniform_(model.pos.data)
        torch.nn.init.xavier_uniform_(model.field.data)
        model.uncertainty.data = model.uncertainty.data * 0 + 1
        for epoch in range(1, args.epochs + 1):
            model.train()
            for chunk in sample(data['train']['edge'], args.batch_size):
                opt.zero_grad()
                p_edge = torch.sigmoid(model(chunk))
                # Negative samples: uniformly random node pairs.
                edge_neg_chunk = gpu(
                    torch.randint(0, n_nodes, chunk.shape))
                p_edge_neg = torch.sigmoid(model(edge_neg_chunk))
                # Binary cross-entropy with a small epsilon for stability.
                loss = (
                    -torch.log(1e-5 + 1 - p_edge_neg).mean()
                    - torch.log(1e-5 + p_edge).mean())
                loss.backward()
                opt.step()
            if epoch % args.eval_steps:
                continue
            with torch.no_grad():
                model.eval()
                p_train = torch.cat([
                    model(chunk) for chunk in sample(
                        data['eval_train']['edge'], args.batch_size)])
                n_train = torch.cat([
                    model(chunk) for chunk in sample(
                        data['valid']['edge_neg'], args.batch_size)])
                p_valid = torch.cat([
                    model(chunk) for chunk in sample(
                        data['valid']['edge'], args.batch_size)])
                n_valid = n_train
                p_test = torch.cat([
                    model(chunk) for chunk in sample(
                        data['test']['edge'], args.batch_size)])
                n_test = torch.cat([
                    model(chunk) for chunk in sample(
                        data['test']['edge_neg'], args.batch_size)])
                for K in [10, 20, 30]:
                    evaluator.K = K
                    key = f'Hits@{K}'
                    h_train = evaluator.eval({
                        'y_pred_pos': p_train,
                        'y_pred_neg': n_train,
                    })[f'hits@{K}']
                    h_valid = evaluator.eval({
                        'y_pred_pos': p_valid,
                        'y_pred_neg': n_valid,
                    })[f'hits@{K}']
                    h_test = evaluator.eval({
                        'y_pred_pos': p_test,
                        'y_pred_neg': n_test,
                    })[f'hits@{K}']
                    loggers[key].add_result(run, (h_train, h_valid, h_test))
                    print(key)
                    print(f'Run: {run + 1:02d}, '
                          f'Epoch: {epoch:02d}, '
                          f'Loss: {loss:.4f}, '
                          f'Train: {100 * h_train:.2f}%, '
                          f'Valid: {100 * h_valid:.2f}%, '
                          f'Test: {100 * h_test:.2f}%')
                    print('---')
        for key in loggers.keys():
            print(key)
            loggers[key].print_statistics(run)
    for key in loggers.keys():
        print(key)
        loggers[key].print_statistics()
|
"""Miscellaneous utilities."""
from .consolidate_merge import consolidate_merge
from .strip_end import strip_end
# adapted from https://stackoverflow.com/a/31079085
__all__ = [
'consolidate_merge',
'strip_end',
]
|
import random
import copy
from matplotlib import pyplot as plt
import numpy as np
import csv
#----------------------------GLOBAL VARIABLES-----------------------#
# GA hyper-parameters; genes encode numR rules of conLen [low, high]
# ranges plus one output bit each (so each rule occupies conLen+1 slots).
p = 150 #population size
numR = 5 #number of rules
conLen = 6 #rule condition length
n = numR * (conLen+1) #gene size (set to num rules * condition and output for now)
initMutationChance = 0.07 #mutationchance initial mutation chance
lowestMutationChance = 0.03 #lowest mutation rate
mutationIncDecGeneration = 200 #Generation to start increasing/decreasing the mutation chance
mutationIncOrDec = False #Boolean to choose whether to increase or decrease the mutation rate per generation
mutationIncAmount = 0.001 #amount to increase the mutation by per generation
mutationDecAmount = 0.002 #amount to decrease the mutation by per generation
mutationRangeAmount = 0.05 #amount to increase or decrease the range for mutation of each gene
geneRangeMax = 0.999999 #maximum range between upper and lower boundaries
highestFitToCopy = int(p/5) #number of offspring to overwrite lowest fit individuals in population
initCrossoverChance = 0.95 #crossover chance
crossoverIncDecGeneration = 1000 #Generation to start increasing/decreasing the crossover chance
crossoverIncOrDec = True #Boolean to choose whether to increase or decrease the crossover rate per generation
crossoverIncAmount = 0.00001 #amount to increase the crossover by per generation
crossoverDecAmount = 0.001 #amount to decrease the crossover by per generation
elitismPercentage = 5 #percentage of best individuals to survive generations (and worst removed)
tournamentSelectionSize = 3 #size of individuals to compete in tournament selection
tournamentClusterSize = 3 #size of cluster size in tournament (population size divided by this number must be integer)
numFittestIndividuals = 1 #number of individuals to display as fittest
numToTrain = 1000 #number of items to train with
fileToWrite = 'Data3Runs6.csv' #where to write results
#----------------------------OBJECTS-----------------------#
class individual:
    """A GA candidate: ``gene`` is the flat encoding of numR rules
    (conLen [low, high] ranges + 1 output bit each) plus its fitness."""
    def __init__(self, gene, fitness):
        self.gene = gene
        self.fitness = fitness

    def getFitness(self):
        # Used as a ``key=`` function when sorting populations.
        return self.fitness
class rule:
    """A classifier rule: ``cond`` is a list of [low, high] ranges, one
    per input variable; ``out`` is the predicted output bit."""
    def __init__(self, cond, out):
        self.cond = cond
        self.out = out
class data:
    """One dataset record: ``vars`` are the float inputs, ``out`` the
    integer class label."""
    def __init__(self, vars, out):
        self.vars = vars
        self.out = out
#----------------------------LOADING DATA FILE-----------------------#
# Parse data3.txt: each line holds conLen space-separated floats followed
# by an integer label; records are shuffled once on load.
dataRead = []
parser = csv.reader(open("data3.txt"), delimiter=' ')
for line in parser:
    vars = []
    out = 0
    for i in range(0,conLen):
        vars.append(float(line[i]))
    out = int(line[conLen])
    dataRead.append(data(vars,out))
random.shuffle(dataRead)
#----------------------------SELECTION-----------------------#
def tournamentSelectionSizeAny(size, clusterSize):
    """Fill the global ``offspring`` via clustered tournament selection.

    The population is shuffled, split into ``clusterSize`` equal
    clusters, and in each cluster p/clusterSize tournaments of ``size``
    distinct random entrants are run; each tournament's fittest entrant
    is deep-copied into ``offspring`` (so len(offspring) ends up == p).
    """
    #Shuffle population in case of sorting
    random.shuffle(population)
    #Initialise cluster list
    clusterList = [[] for i in range(clusterSize)]
    index = 0
    #Sort the population into clusters of specified size
    for i in range(clusterSize):
        for j in range(int(p/clusterSize)):
            clusterList[i].append(copy.deepcopy(population[index]))
            index +=1
    #Loop for each cluster
    for k in range(clusterSize):
        for j in range(0,int(p/clusterSize)):
            selectionList = []
            highestFit = 0
            highestFitSelection = 0
            selected = 0
            # Draw ``size`` distinct entrant indices for this tournament.
            for i in range(0,size):
                if i == 0:
                    selectionList.append(random.randint(0,int(p/clusterSize) - 1))
                else:
                    while 1:
                        selected = random.randint(0,int(p/clusterSize) - 1)
                        if selected not in selectionList:
                            selectionList.append(selected)
                            break
            #Select highest fitness from each cluster/tournament append to offspring
            for i in range(0, len(selectionList)):
                if clusterList[k][selectionList[i]].fitness >= highestFit:
                    highestFit = clusterList[k][selectionList[i]].fitness
                    highestFitSelection = selectionList[i]
            offspring.append(copy.deepcopy(clusterList[k][highestFitSelection]))
#----------------------------CROSSOVER-----------------------#
def crossover(crossoverChance):
    """Apply single-point crossover in place to adjacent pairs of the
    global ``offspring`` list, each pair with probability
    ``crossoverChance``.  Assumes len(offspring) == p and p is even.
    """
    popSelected = 0
    tempOffspring = copy.deepcopy(offspring)
    while (popSelected < p):
        crossoverProbability = random.random()
        if (crossoverProbability <= crossoverChance):
            crossoverPoint = random.randint(1,n-1)
            # Swap the gene prefixes of the pair up to the cut point.
            for i in range (0,crossoverPoint):
                temp1 = tempOffspring[popSelected].gene[i]
                temp2 = tempOffspring[popSelected + 1].gene[i]
                tempOffspring[popSelected].gene[i] = temp2
                tempOffspring[popSelected + 1].gene[i] = temp1
        popSelected += 2
    offspring.clear()
    offspring.extend(copy.deepcopy(tempOffspring))
#----------------------------MUTATION-----------------------#
def mutation(mutationChance):
    """Mutate the global ``offspring`` in place.

    Output-bit genes (every 7th slot) are flipped; range genes get one
    bound nudged by up to 0.2 (clamped to [0.000001, 0.999999]) or, with
    a second independent ``mutationChance`` roll, re-randomised
    entirely.  Bounds stay ordered via ``sort()``.
    """
    for j in range(0,len(offspring)):
        for i in range(0,n):
            mutationProbability = random.random()
            if (mutationProbability < mutationChance):
                #Check if output bit
                if (i + 1) % 7 == 0:
                    if (offspring[j].gene[i] == 1):
                        offspring[j].gene[i] = 0
                    else:
                        offspring[j].gene[i] = 1
                else:
                    #Set whether to adjust the lower or upper bound, and also whether to increment or decrement based on amount
                    lowerOrUpper = random.randint(0,1)
                    incOrDec = random.randint(0,1)
                    amount = random.uniform(0,0.2)
                    mutationProbability = random.random()
                    #Small chance based on mutation chance to set gene to random floats
                    if mutationProbability < mutationChance:
                        offspring[j].gene[i][0] = round(random.random(),6)
                        offspring[j].gene[i][1] = round(random.random(),6)
                        offspring[j].gene[i].sort()
                    #Variable checks
                    elif lowerOrUpper == 0:
                        if incOrDec == 0:
                            offspring[j].gene[i][0] = round(offspring[j].gene[i][0] + amount,6)
                            if offspring[j].gene[i][0] > 0.999999:
                                offspring[j].gene[i][0] = 0.999999
                            offspring[j].gene[i].sort()
                        elif incOrDec == 1:
                            offspring[j].gene[i][0] = round(offspring[j].gene[i][0] - amount,6)
                            if offspring[j].gene[i][0] < 0.000001:
                                offspring[j].gene[i][0] = 0.000001
                            offspring[j].gene[i].sort()
                    elif lowerOrUpper == 1:
                        if incOrDec == 0:
                            offspring[j].gene[i][1] = round(offspring[j].gene[i][1] + amount,6)
                            if offspring[j].gene[i][1] > 0.999999:
                                offspring[j].gene[i][1] = 0.999999
                            offspring[j].gene[i].sort()
                        elif incOrDec == 1:
                            offspring[j].gene[i][1] = round(offspring[j].gene[i][1] - amount,6)
                            if offspring[j].gene[i][1] < 0.000001:
                                offspring[j].gene[i][1] = 0.000001
                            offspring[j].gene[i].sort()
#-----------------------------ELITISM FUNCTIONS-------------------------#
#Removes lowest fitness individuals from offspring
def removeLowestPercentile():
    """Drop the weakest elitismPercentage% individuals from ``offspring``."""
    offspring.sort(key = individual.getFitness)
    for i in range(0,round((p/100)*elitismPercentage)):
        del(offspring[0])
#Saves the highest fit individuals from population
def saveHighestPercentile():
    """Deep-copy the strongest elitismPercentage% of ``population`` into
    ``savedIndividuals`` (re-inserted later by addSavedIndiviualsToOffspring)."""
    population.sort(key = individual.getFitness, reverse = True)
    for i in range(0,round((p/100)*elitismPercentage)):
        savedIndividuals.append(copy.deepcopy(population[i]))
#Adds the highest fit individuals from population into offspring
def addSavedIndiviualsToOffspring():
    """Append the saved elites to ``offspring`` and clear the stash."""
    for i in range(0,len(savedIndividuals)):
        offspring.append(copy.deepcopy(savedIndividuals[i]))
    savedIndividuals.clear()
#-----------------------------FITNESS FUNCTIONS-------------------------#
#Used for training set
def fitnessfunc(selectedGene):
    """Return ``selectedGene``'s fitness on the global training set.

    The flat gene is decoded into the global ``rulebase`` (numR rules of
    conLen ranges + 1 output).  Each record is classified by the *first*
    rule whose ranges cover it; a covering rule with the wrong output
    ends matching for that record.  Fitness counts correct matches.
    """
    fitness = 0
    k = 0
    for i in range(numR):
        for j in range(conLen):
            rulebase[i].cond[j] = copy.deepcopy(selectedGene.gene[k])
            k+=1
        rulebase[i].out = selectedGene.gene[k]
        k+=1
    for j in range (len(trainingSet)):
        for i in range(numR):
            if condInRange(trainingSet[j],rulebase[i]):
                if(trainingSet[j].out == rulebase[i].out):
                    fitness+=1
                    break
                else:
                    break
    return fitness
#Used for testing set
def fitnessfuncTest(selectedGene):
    """Return ``selectedGene``'s fitness on the global testing set.

    Same first-matching-rule semantics as fitnessfunc(), but evaluated
    against ``testingSet`` (and copying ranges via slicing).
    """
    fitness = 0
    k = 0
    for i in range(0,numR):
        for j in range(0, conLen):
            rulebase[i].cond[j] = selectedGene.gene[k][:]
            k+=1
        rulebase[i].out = selectedGene.gene[k]
        k+=1
    for j in range (0,len(testingSet)):
        for i in range(0,numR):
            if(condInRange(testingSet[j],rulebase[i])):
                if(testingSet[j].out == rulebase[i].out):
                    fitness+=1
                    break
                else:
                    break
    return fitness
def condInRange(data, rule):
    """Return True when every variable of ``data`` lies inside the
    corresponding [low, high] range of ``rule`` (bounds inclusive)."""
    for idx in range(conLen):
        low = rule.cond[idx][0]
        high = rule.cond[idx][1]
        if not (low <= data.vars[idx] <= high):
            return False
    return True
#-----------------------------STAT FUNCTIONS-------------------------#
def getTotalFitness(pop):
    """Return the summed fitness of every individual in ``pop``.

    Bug fix: the original iterated ``range(0, p)`` (the global
    population size) instead of the actual length of ``pop``, which
    breaks whenever ``pop`` is a list whose size differs from ``p``.
    """
    return sum(ind.fitness for ind in pop)
def getBestFitness():
    """Return the highest fitness in the global ``population``
    (iterates exactly p entries)."""
    bestFitness = population[0].fitness
    for i in range (0,p):
        if (population[i].fitness > bestFitness):
            bestFitness = population[i].fitness
    return bestFitness
def getBestFitnessOffspring():
    """Return the highest fitness found in the global ``offspring`` list."""
    best = offspring[0].fitness
    for candidate in offspring:
        if candidate.fitness > best:
            best = candidate.fitness
    return best
def getAverageFitness():
    """Return the mean fitness of the global ``offspring``.

    NOTE(review): sums exactly ``p`` entries and divides by ``p``; only
    correct while len(offspring) == p (true after elitism re-insertion).
    """
    averageFitness = 0
    for i in range(0,p):
        averageFitness += offspring[i].fitness
    averageFitness = averageFitness / p
    return averageFitness
def getAverageFitnessPop():
    """Return the mean fitness of the global ``population`` (p entries)."""
    averageFitness = 0
    for i in range(0,p):
        averageFitness += population[i].fitness
    averageFitness = averageFitness / p
    return averageFitness
#-----------------------------RECOMBINATION-------------------------#
def copyOffspringToParent():
    """Overwrite the weakest members of ``population`` with the fittest
    ``highestFitToCopy`` offspring, skipping exact duplicates, then
    clear ``offspring``.

    ``population`` is sorted ascending first, so index i holds a weak
    individual being replaced by the strong offspring[i].
    """
    offspring.sort(key = individual.getFitness, reverse = True)
    population.sort(key = individual.getFitness)
    for i in range(0,highestFitToCopy):
        same = False
        for j in range(p):
            #Check individual does not exist already in population, discard if so
            if isIndividualSame(population[j], offspring[i]):
                same = True
                break
        if same == False:
            population[i] = copy.deepcopy(offspring[i])
    offspring.clear()
def isIndividualSame(indv1, indv2):
    """Return True when two individuals carry identical genes.

    Every 7th gene slot is a scalar output bit compared directly; the
    other slots are [low, high] range pairs compared element-wise.
    """
    for idx in range(n):
        g1 = indv1.gene[idx]
        g2 = indv2.gene[idx]
        if (idx + 1) % 7 == 0:
            if g1 != g2:
                return False
        elif g1[0] != g2[0] or g1[1] != g2[1]:
            return False
    return True
#-----------------------------MAIN GENERATION-------------------------#
def generation(numGenerations,run):
    """Run the GA for ``numGenerations`` generations and log run ``run``.

    Per generation: adapt mutation/crossover rates, stash elites,
    select, crossover, mutate, evaluate offspring, re-insert elites,
    record stats, then recombine offspring into the population.  Stops
    early at fitness 1000 (a perfect training score).  Per-generation
    stats and the fittest decoded rulebase are appended to fileToWrite.
    """
    mutationChance = initMutationChance
    crossoverChance = initCrossoverChance
    totalfit = []
    bestfit = []
    avgfit = []
    generation = []
    for i in range(0,numGenerations):
        #Adaptive mutation check
        if i > mutationIncDecGeneration:
            if mutationIncOrDec == True:
                mutationChance = mutationChance + mutationIncAmount
            elif mutationChance > lowestMutationChance:
                mutationChance = mutationChance - mutationDecAmount
        #Adaptive crossover check
        if i > crossoverIncDecGeneration:
            if crossoverIncOrDec == True:
                crossoverChance = crossoverChance + crossoverIncAmount
            else:
                crossoverChance = crossoverChance - crossoverDecAmount
        saveHighestPercentile()
        tournamentSelectionSizeAny(tournamentSelectionSize,tournamentClusterSize)
        crossover(crossoverChance)
        mutation(mutationChance)
        for j in range(0,len(offspring)):
            offspring[j].fitness = fitnessfunc(offspring[j])
        addSavedIndiviualsToOffspring()
        totalfit.append(getTotalFitness(offspring))
        bestfit.append(getBestFitnessOffspring())
        avgfit.append(getAverageFitness())
        generation.append(i)
        if getBestFitnessOffspring() == 1000:
            print("Max fitness achieved!")
            copyOffspringToParent()
            break
        copyOffspringToParent()
    #Sort pop to put highest fitness in position 0
    population.sort(key = individual.getFitness, reverse = True)
    #Create rule base to append to file
    for i in range(0,numFittestIndividuals):
        k=0
        for l in range(0,numR):
            for j in range(0, conLen):
                rulebase[l].cond[j] = population[i].gene[k]
                k+=1
            rulebase[l].out = population[i].gene[k]
            k+=1
        maxFitIndividuals.append(copy.deepcopy(rulebase))
    #Write information to CSV file
    with open(fileToWrite, 'a', newline='') as file:
        writer = csv.writer(file)
        run = ["Run", run+1]
        writer.writerow(run)
        writer.writerow(generation)
        writer.writerow(bestfit)
        writer.writerow(avgfit)
        rulenum = 1
        for rule in maxFitIndividuals[0]:
            toWrite = ["Rule: ", rulenum, rule.cond, rule.out]
            writer.writerow(toWrite)
            rulenum+=1
        maxFitIndividuals.clear()
#-----------------------------MAIN-------------------------#
def run():
    """Top-level driver: write the configuration header, then for each
    run build fresh train/test splits, train the GA, score the final
    population on the held-out test set, and log the best rulebase."""
    with open(fileToWrite, 'a', newline='') as file:
        writer = csv.writer(file)
        runInfoName = ["Population Size", "Num Rules", "Mutation Rate", "Crossover Rate","Tournament Cluster Size" ,"Tournament Size", "Max Offspring Recombined"]
        runInfo = [str(p),str(numR),str(initMutationChance), str(initCrossoverChance),str(tournamentClusterSize), str(tournamentSelectionSize), str(highestFitToCopy)]
        writer.writerow(runInfoName)
        writer.writerow(runInfo)
        writer.writerow(['Training'])
    runs = int(input("Enter number of runs: "))
    numGens = int(input("Enter number of generations: "))
    for i in range(runs):
        generateTrainTestSets()
        init()
        generation(numGens, i)
        maxFitIndividuals.clear()
        #Calculate fitness of testing set to final generation population
        for i in range(0,p):
            population[i].fitness = fitnessfuncTest(population[i])
        population.sort(key = individual.getFitness, reverse = True)
        #Create rule base to append to file
        for i in range(0,numFittestIndividuals):
            k=0
            for l in range(0,numR):
                for j in range(0, conLen):
                    rulebase[l].cond[j] = population[i].gene[k]
                    k+=1
                rulebase[l].out = population[i].gene[k]
                k+=1
            maxFitIndividuals.append(copy.deepcopy(rulebase))
        #Write each run to CSV file
        with open(fileToWrite, 'a', newline='') as file:
            writer = csv.writer(file)
            toWrite = ['Testing','Individual that achieved highest fitness' ]
            writer.writerow(toWrite)
            for i in range(0,1):
                rulenum = 1
                for rule in maxFitIndividuals[i]:
                    writeList = []
                    writeList.append("Rule:")
                    writeList.append(rulenum)
                    # Format each [low, high] range to 6 decimal places.
                    for j in range(conLen):
                        writeList.append(str.format('{0:.6f} - {1:.6f}',rule.cond[j][0], rule.cond[j][1]))
                    writeList.append(str.format('{0}',rule.out))
                    writer.writerow(writeList)
                    rulenum+=1
            writeList2 = ["Highest fitness individual matched ", population[0].fitness/len(trainingSet)*100," percent of test set"]
            writer.writerow(writeList2)
        print("Highest fitness individual matched ", (population[0].fitness/len(trainingSet))*100, " percent of test set")
#----------------INITIALISATIONS----------------#
# Train/test holders, refilled by generateTrainTestSets() each run.
trainingSet = []
testingSet = []
trainingSetIndexes = []
def generateTrainTestSets():
    """Reshuffle the loaded records and split them into the global
    training set (first numToTrain records) and testing set (the rest)."""
    trainingSet.clear()
    testingSet.clear()
    random.shuffle(dataRead)
    for i in range(0,len(dataRead)):
        if i < numToTrain:
            trainingSet.append(copy.deepcopy(dataRead[i]))
        else:
            testingSet.append(copy.deepcopy(dataRead[i]))
# Global GA state shared by the functions above; reset in init().
population = []
offspring = []
rulebase = []
savedIndividuals = []
maxFitIndividuals = []
#Initialise rulebase and population
def init():
    """Reset all global GA state and build a fresh random population.

    Gene layout per individual: n slots where every 7th slot is a 0/1
    output bit and the others are sorted [low, high] float pairs.
    """
    population.clear()
    offspring.clear()
    rulebase.clear()
    savedIndividuals.clear()
    maxFitIndividuals.clear()
    for i in range (0,numR):
        rulebase.append(rule([0] * conLen ,0))
    for k in range (0,p):
        population.append(individual([[0] * 2 for i in range(n)] ,0))
    #Initialise population
    for i in range (0,p):
        for j in range (0,n):
            if (j + 1) % 7 == 0:
                population[i].gene[j] = random.randint(0,1)
            else:
                population[i].gene[j][0] = round(random.random(),6)
                population[i].gene[j][1] = round(random.random(),6)
                population[i].gene[j].sort()
    # Evaluate the initial population on the training set.
    for i in range (0,p):
        population[i].fitness = fitnessfunc(population[i])
run()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.