text stringlengths 38 1.54M |
|---|
# -*- coding=utf-8
'''
Created on 2016年9月23日
牌桌麻将牌的管理器
包括:
1)发牌
2)牌桌上的出牌
3)宝牌
发牌说明:
发牌涉及到好牌点
@author: zhaol
'''
from majiang2.table_tile.test.table_tile_test import MTableTileTest
from poker.entity.dao import daobase
from freetime.util import log as ftlog
import json
class MTableTileTestLongNet(MTableTileTest):
    """Table-tile manager that deals pre-arranged hands for debugging.

    The scripted deal is read from the shared cache under the key
    ``put_card:<playMode>`` as a JSON object with per-seat hands
    (``seat1`` .. ``seatN``) and the remaining wall (``pool``).
    """

    def __init__(self, playerCount, playMode):
        super(MTableTileTestLongNet, self).__init__(playerCount, playMode)

    def initTiles(self):
        """初始化手牌,用于摆牌测试"""
        key = 'put_card:' + self.playMode
        ftlog.debug('MTableTileTestLongNet key:', key)
        tile_info = daobase.executeMixCmd('get', key)
        ftlog.debug('MTableTileTestLongNet.initTiles tile_info:', tile_info)
        if not tile_info:
            # No scripted deal configured for this play mode.
            ftlog.debug('MTableTileTestLongNet.initTiles failed...')
            return False
        tileObj = json.loads(tile_info)
        ftlog.debug('MTableTileTestLongNet.playerCount:', self.playerCount)
        seatHands = []
        for seatNo in range(1, self.playerCount + 1):
            seatTiles = tileObj.get('seat' + str(seatNo), [])
            ftlog.debug('MTableTileTestLongNet.initTiles seat' + str(seatNo) + ':', seatTiles, ' length:', len(seatTiles))
            # Sanity check: a configured hand may hold at most 16 tiles.
            if len(seatTiles) > 16:
                return False
            seatHands.append(seatTiles)
        wall = tileObj.get('pool', [])
        ftlog.debug('MTableTileTestLongNet.initTiles pool:', wall, ' length:', len(wall))
        self.setHandTiles(seatHands)
        self.setTiles(wall)
        return True
|
import random
from p1 import (
get_number_of_repeated_columns,
get_number_of_repeated_rows,
get_trace,
)
from p5 import gen_diag_recursively
def gen_latin_matrix(n):
    """Return an n x n matrix whose rows are random permutations of 1..n.

    Only the rows are guaranteed repeat-free; columns may still repeat,
    so the result is a *candidate* Latin matrix.
    """
    def shuffled_row():
        row = list(range(1, n + 1))
        random.shuffle(row)
        return row

    return [shuffled_row() for _ in range(n)]
def check_natual_latin_matrix(matrix):
    """Return True when no column of *matrix* contains a repeated value.

    Rows are already permutations by construction (see gen_latin_matrix),
    so only columns need to be checked here.
    """
    return not get_number_of_repeated_columns(matrix) > 0
def main(n, max_iters=1000000):
    """Randomly sample candidate Latin matrices of order *n* and collect the
    set of traces of those that are natural Latin matrices.

    Bug fix: the original early-exit was ``if result == (n*n - n + 1)``,
    which compares a *set* with an int and is therefore never true, so the
    loop always ran all iterations.  The intent was to stop once the
    expected number of distinct traces has been found — compare len().

    Args:
        n: matrix order.
        max_iters: sampling budget (kept at the original hard-coded value
            as a backward-compatible default).

    Returns:
        The set of traces found.
    """
    result = set()
    for _ in range(max_iters):
        matrix = gen_latin_matrix(n)
        # print(matrix)
        if check_natual_latin_matrix(matrix):
            trace = get_trace(matrix)
            result.add(trace)
        # Stop early once the presumed number of distinct traces is reached.
        if len(result) == (n*n - n + 1):
            break
    print(result)
    return result
if __name__ == '__main__':
    main(50)

# Observed trace sets for small orders:
#3: {3, 6, 9}
#4: {4, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16}
#5: {13, 15, 18, 19, 20, 25}
# Exploratory follow-up (disabled): enumerate diagonals summing to each trace.
# n = 5
# for k in {13, 15, 18, 19, 20, 25}:
#     print(k)
#     output = gen_diag_recursively(
#         diag=[0 for _ in range(n)],
#         current_sum=0,
#         ind=0,
#         target=k,
#     )
#     print(output)
|
import os
import shutil
from _datetime import datetime
from PIL import Image
class ImageOrganizer:
    """Sorts photos in the current working directory into YYYY/YYYY-MM-DD
    folders based on their EXIF shooting date, falling back to the file's
    modification time when no EXIF date is available.

    NOTE(review): the file imports ``from _datetime import datetime`` — the
    private C accelerator module; ``from datetime import datetime`` is the
    portable spelling. The symbol behaves identically on CPython.
    """

    # EXIF tag 36867 = DateTimeOriginal (date/time the photo was taken).
    _EXIF_DATETIME_ORIGINAL = 36867

    def folder_path_from_photo_date(self, file):
        """Return the 'YYYY/YYYY-MM-DD' target folder for *file*."""
        date = self.photo_shooting_date(file)
        return date.strftime('%Y') + '/' + date.strftime('%Y-%m-%d')

    def photo_shooting_date(self, file):
        """Return the EXIF shooting date of *file*, else its mtime.

        Fixes vs. original: Image.open()/._getexif() ran *outside* the
        try, so formats without EXIF support (e.g. PNG, which this class
        explicitly handles) raised AttributeError instead of falling back;
        the bare ``except:`` is narrowed; the image file handle is closed.
        """
        try:
            # _getexif() is a private PIL API; presumably absent on
            # non-JPEG/TIFF images — that is one of the fallback cases.
            with Image.open(file) as photo:
                info = photo._getexif()
            if info and self._EXIF_DATETIME_ORIGINAL in info:
                date = datetime.strptime(info[self._EXIF_DATETIME_ORIGINAL],
                                         '%Y:%m:%d %H:%M:%S')
            else:
                date = datetime.fromtimestamp(os.path.getmtime(file))
        except (AttributeError, TypeError, ValueError, OSError):
            # No EXIF support, malformed EXIF date, or unreadable image:
            # fall back to the filesystem timestamp.
            date = datetime.fromtimestamp(os.path.getmtime(file))
        return date

    def move_photo(self, file):
        """Move *file* into its date folder, creating the folder if needed."""
        new_folder = self.folder_path_from_photo_date(file)
        if not os.path.exists(new_folder):
            os.makedirs(new_folder)
        shutil.move(file, new_folder + '/' + file)

    def relocate_photo(self):
        """Move every jpg/jpeg/png file in the current directory."""
        image_suffixes = ('jpg', 'jpeg', 'png')
        for entry in os.listdir('.'):
            # str.endswith accepts a tuple of suffixes directly; the original
            # wrapped it in a redundant any(... for i in img) that evaluated
            # the same expression once per suffix.
            if entry.lower().endswith(image_suffixes):
                self.move_photo(entry)
# Script entry point: organize all images in the current working directory.
# NOTE: this runs on import as well as on direct execution.
org = ImageOrganizer()
org.relocate_photo()
|
from api_app import Api, App
from merge_rule import *
# Merge-rule shorthand used throughout the Api definitions below: r_<flags>
# d: default, c: common, u: unique, l: limit
r_d = MergeRule()
r_c = common_rule()
r_u = unique_rule()
r_c_u = chain_rule(r_c, r_u)
r_l_1 = limit_rule(1)
r_c_l1 = chain_rule(r_c,r_l_1)
def apps():
    """Return the list of currently enabled App configurations.

    All known app factories are listed; disabled ones are kept commented
    out so they can be re-enabled by uncommenting a single line.
    """
    flowfilters = [
        # app_zjxsp(),
        # app_bai_du_flash(),
        # app_cai_dan_sp(),
        # app_cheng_yu_da_fu_hao(),
        # app_cheng_yu_qu_wei_xiao(),
        # app_dong_fan_tt(),
        # app_hao_kan(),
        # app_huo_shan(),
        app_jin_ri_tou_tiao(),
        # app_kai_xin_xiao_tan_guo(),
        # app_ma_yi_kd(),
        # app_qu_jian_pan(),
        # app_qu_jian_pan_flash(),
        # app_qu_tou_tiao(),
        # app_qu_zhong_cai(),
        # app_tian_chi_xiao_xiu_cai(),
        # app_wan_zhe_da_nao(),
        # app_wei_xin(),
        # app_yang_ji_chang(),
        # app_you_xi_he_zi(),
        # app_zhong_qin_kd(),
        # app_zhu_lai_le(),
    ]
    return flowfilters
def api_common():
    """Endpoints shared by most apps: game token/reporting APIs plus the
    coin / withdrawal / account APIs."""
    common = [
        # Games
        Api(r'/x/user/token', log='/x/user/token - 获取g_token'),
        Api(r'/x/open/game', log='/x/open/game - 获取ticket', f_p_arg=['app_id']),
        # report_type={round,level}
        Api('/x/game-report/special_report', log='special_report', f_name='game_special_report',f_b_arg=['app_id'],f_b_kwarg={'report_type':'round'}),
        Api('/x/game-report/duration_report', log='duration_report', f_name='game_duration_report',f_b_arg=['start_ts','duration'],f_b_kwarg={'report_type':'duration_addition'}),
        Api('/x/gapp/task/list',log='游戏 - 任务列表', f_b_arg=['app_id', 'app']),
        Api('/x/gapp/task/take-reward',log='游戏 - take-reward - 领金币', f_b_arg=['task_id', 'app_id', 'app']),
        # Coins / withdrawal / account
        Api('/qapptoken', log='/qapptoken - 获取access_token', f_name='get_access_token', f_p_arg=['app_id']),
        Api('/withdraw/getCoinLog',log='金币明细', f_p_arg=['page','page_size']),
        Api('/withdraw/getBindInfo',log='取现 - 用户账户信息'),
        Api('/withdraw/sku/list',log='取现 - 可取现金额列表'),
        Api('/withdraw/order/create',log='取现 - 取现', f_b_arg=['sku_id']),
        Api('/withdraw/order/listApp',log='取现 - 提现列表'),
        Api('/user/withdraw/days',log='取现 - 条件'),
    ]
    return common
def api_sign():
    """Daily sign-in endpoints shared by several game-center apps."""
    return [
        Api('/x/game-center/gapp/sign-in', log='签到'),
        Api('/x/game-center/gapp/sign-in-double', log='签到 - double'),
    ]
def api_baidu():
    """Activity/task endpoints shared by the Baidu-family apps."""
    urls = [
        '/activity/acad/bubblead',
        Api(r'/activity/tasks/active', params_as_all=True, f_p_arg=['productid', 'tid']),
        Api(r'/activity/acad/rewardad', f_p_arg=['productid', 'tid'] ), # watch video
        Api(r'/activity/tasks/taskreward'),
    ]
    return urls
def api_qu_jian_pan():
    """Qu Jianpan (趣键盘) endpoints, shared by the normal and 'flash' apps."""
    urls = [
        r'/qjp-app/user/info',
        # Card flip
        r'/qjp-app/game/fanpai/basicInfo',
        r'/qjp-app/game/fanpai/getAward',
        r'/qjp-app/game/fanpai/awardDouble',
        r'/qjp-app/game/fanpai/',
        # Piggy bank
        r'/qjp-app/game/savingsBank/taskInfo',
        r'/qjp-app/game/savingsBank/signIn',
        r'/qjp-app/game/savingsBank/collectPigMoney',
        r'/qjp-app/game/savingsBank/exchangePigMoney',
        Api(r'/qjp-app/game/savingsBank/unlockDouble',f_b_arg=['taskType'], content_type='json'),
        Api(r'/qjp-app/game/savingsBank/finishTask',f_b_arg=['taskCode'], content_type='json'),
        Api(r'/qjp-app/game/savingsBank/doubleBox',f_b_arg=['ticket'], content_type='json'),
        # Piglet scratch cards
        r'/qjp-app/game/guagua/',
        # Piglet prize wheel
        r'/qjp-app/pig/turntable/info',
        # type Integer 3
        r'/qjp-app/pig/turntable/draw',
        Api('/qjp-app/pig/turntable/receiveVideoReward', f_b_arg=['ticket']),
        r'/qjp-app/pig/turntable/',
        # Big prize wheel
        r'/gk/draw/info',
        r'/gk/draw/extract',
        Api('/gk/draw/double', f_b_arg=['ticket']),
        r'/gk/draw/package',
        r'/gk/draw/pkdouble',
        # Convenience-store game - entry point removed from the app
        # Api('/gk/game/bianlidian/receiveBox', f_b_arg=['packageId']),
        # Api('/gk/game/bianlidian/draw/double', f_b_arg=['ticket']),
        # Api('/gk/game/bianlidian/receiveGift', log='便利店 - xxx金币礼包碎片', f_b_arg=['ticket']),
        # Api('/gk/game/bianlidian/receiveMediumCoin', log='便利店 - 随机金币奖励', f_b_arg=['ticket']),
        # r'/gk/game/bianlidian/',
        # Whack-a-mole - entry point removed from the app
        # r'/gk/game/dadishu/',
        r'/qujianpan/',
        # Entry point removed from the app
        # r'/gk/garbage/',
    ]
    return urls
# ''' Tianci Xiao Xiucai (word-filling game) - in-app game endpoints
def api_tczyqtt():
    """Tianci Xiao Xiucai game endpoints as embedded in host apps."""
    c_tczyqtt = [
        # Game - word-filling scholar
        Api('/api/v1/tczyqtt/login', log='填词小秀才 - 登录 - 获取open_id', f_p_arg=['ticket'], api_ok={'code':[1]}),
        Api('/api/v1/tczyqtt/sign',log='填词小秀才 - 签到'),
        Api('/api/v1/tczyqtt/lottery',log='填词小秀才 - lottery', api_ok={'code':[1]}),
        Api('/api/v1/tczyqtt/exchange',log='填词小秀才 - 红包满20元兑换成金币'),
        Api('/api/v1/tczyqtt/get_reward',log='填词小秀才 - 任务完成', f_p_arg=['activity_id'], api_ok={'code':[1]}),
        Api('/api/v1/tczyqtt/open_redpacket',log='填词小秀才 - 红包', api_ok={'code':[1]}),
        Api('/api/v1/tczyqtt/draw_a_char',log='填词小秀才 - 抽字', api_ok={'code':[1]}),
        Api('/api/v1/tczyqtt/add_coin',log='填词小秀才 - add_coin'),
        '/api/v1/tczyqtt/'
    ]
    return c_tczyqtt
def app_zjxsp():
    """zjxsp app: float-gold endpoints plus the shared Baidu activity APIs."""
    urls = [
        '/user/getallfloatgold',
        Api(r'/user/drawfloatgold', f_p_arg=['floatGoldId']),
    ]
    urls.extend(api_baidu())
    return App(urls, 'zjxsp')
# ''' Baidu - Baidu Speed Edition '''
def app_bai_du_flash():
    """Baidu Speed Edition (百度极速版)."""
    urls = [
        Api(r'/api/task/1/task/381/complete', f_p_arg=['rewardVideoPkg']), # watch video
    ]
    urls.extend(api_baidu())
    return App(urls, 'bai-du-flash')
# ''' Baidu - Haokan '''
def app_hao_kan():
    """Baidu Haokan video app (好看)."""
    urls = [
        r'activity/acusercheckin', # daily sign-in
        r'signIn/new/sign', # game-center sign-in
        Api(r'api/task/1/task/379/complete', f_p_arg=['rewardVideoPkg']), # watch video
    ]
    urls.extend(api_baidu())
    return App(urls, 'hao-kan')
# ''' Caidan video '''
def app_cai_dan_sp():
    """Caidan video app (彩蛋视频)."""
    urls = [
        Api('/h5/task/index',log='任务信息'),
        Api('/task/sign',log='sign - 签到、金币信息'),
        Api('/task/timer_submit',log='看视频 - 得金币', f_b_enc={'qdata'}, f_b_arg=['qdata'], f_merge_key=r_u),
        Api('/h5/task/submit',log='日常福利 - 观看小视频', body_as_all=True, f_merge_key=r_d),
        Api('/h5/reduce/reward',log='瓜分他人金币', body_as_all=True, f_merge_key=r_d),
        Api('/h5/reward/prize',log='iphone免费抽', body_as_all=True, f_merge_key=r_d),
        Api('/h5/active_value/reward_prize',log='活跃奖励', body_as_all=True, f_merge_key=r_d),
    ]
    urls.extend(api_common())
    return App(urls, 'cai-dan-sp')
# ''' Idiom Tycoon
def app_cheng_yu_da_fu_hao():
    """Idiom Tycoon game (成语大富豪)."""
    urls = [
        Api('/x/cocos/gapp-game-init', params_as_all=True, f_merge_key=r_c_l1),# the returned url contains the ticket
        Api('/api/Login', log='2-登录游戏', f_name='api_login', f_b_arg=['ticket','game_id']),
        Api('/api/GetQCoin', log='获取金币数', f_b_arg=['session_id']),
        Api('/api/AddCoin', log='成语大富豪 - 金币',f_b_arg=['AddCoinNum','session_id']),
        Api('/api/AddSecondCoin', log='成语大富豪 - 金币 - AddSecondCoin',f_b_arg=['AddCoinNum','session_id']),
    ]
    urls.extend(api_common())
    urls.extend(api_sign())
    return App(urls, 'cheng-yu-da-fu-hao')
# ''' Fun Idiom Match
def app_cheng_yu_qu_wei_xiao():
    """Fun Idiom Match game (成语趣味消)."""
    urls = [
        Api('/chengyu_app/login', log='成语趣味消 - 登入', f_p_arg=['ticket']),
        Api('/chengyu_app/signin', log='签到'),
        Api('/chengyu_app/draw_fuca', log='抽字'),
        Api('/chengyu_app/addcoin', f_b_arg=['open_id','add_num']),
        Api('/chengyu_app/update_task', f_b_arg=['task_index']),
        Api('/chengyu_app/get_task_award', f_b_arg=['task_index']),
        Api('/chengyu_app/'),
    ]
    urls.extend(api_common())
    urls.extend(api_sign())
    return App(urls, 'cheng-yu-qu-wei-xiao')
# ''' Dongfang Toutiao '''
def app_dong_fan_tt():
    """Dongfang Toutiao news app (东方头条)."""
    urls = [
        r'sign/news_take_s',
        r'timesaward/timesaward/get_award',
        r'answer_question_new/get_question',
        r'answer_question_new/add_user_bonus',
        r'zhuanpan_v3/get_zhuanpan_new',
        r'zhuanpan_v3/get_gold',
        r'hit_susliks/hit_susliks/start_play_game',
        r'hit_susliks/hit_susliks/finish_play_game',
        r'hit_susliks/hit_susliks/set_user_video_num',
        r'hit_susliks/hit_susliks/lucky_draw',
        r'turn_over_packet/packet/add_packet_bonus',
    ]
    return App(urls, 'dong-fan-tt')
# ''' Huoshan Speed Edition '''
def app_huo_shan():
    """Huoshan Speed Edition (火山极速版)."""
    urls = [
        Api('/luckycat/hotsoon/v1/task/done/excitation_ad_treasure_box', log='火山-开宝箱-看视频', f_name='task_done_excitation_ad_treasure_box'),
        Api('/luckycat/hotsoon/v1/task/page', log='火山-获取任务状态'),
        Api('/luckycat/hotsoon/v1/task/done/treasure_task', log='火山-开宝箱'),
        Api('/luckycat/hotsoon/v1/task/done/show_money', log='火山-晒收入', params_as_all=True),
        Api('/luckycat/hotsoon/v1/task/done/excitation_ad', log='火山-看视频赚海量金币', params_as_all=True),
        Api('/luckycat/hotsoon/v1/task/done/daily_read_1m', log='火山-1分钟', params_as_all=True),
        Api('/luckycat/hotsoon/v1/task/done/daily_read_2m', log='火山-2分钟', params_as_all=True),
        Api('luckycat/v1/task/page/', log='火山-获取任务状态', params_as_all=True, f_merge_key=r_c_l1),
        Api('luckycat/v1/task/sign_in/', log='火山-每日签到', params_as_all=True, f_merge_key=r_c_l1),
        Api('luckycat/v1/task/open_treasure_box/', log='火山-开宝箱', params_as_all=True),
        Api('luckycat/v1/task/done_task/', log='火山-开宝箱-看视频', params_as_all=True, body_as_all=True),
        Api('luckycat/v1/landing/add_amount/', log='火山-晒收入', params_as_all=True),
        Api('luckycat/v1/task/get_read_bonus/',params_as_all=True),
        Api('api/ad/v1/inspire/', log='火山-获取广告', params_as_all=True),
    ]
    return App(urls, 'huo-shan', api_ok={'code':[0],'err_no':[0]})
# ''' Happy Candy Crush '''
def app_kai_xin_xiao_tan_guo():
    """Happy Candy Crush game (开心消糖果)."""
    urls = [
        Api('/x/cocos/gapp-game-init', params_as_all=True, f_merge_key=r_c_l1),# the returned url contains the ticket
        Api('/happy/qtt/apkuserinfo', log='/happy/qtt/apkuserinfo - 获取open_id', f_p_arg=['ticket']),
        '/happy/protocol'
    ]
    urls.extend(api_common())
    urls.extend(api_sign())
    return App(urls, app_name='kai-xin-xiao-tan-guo')
# ''' Qu Jianpan '''
def app_qu_jian_pan():
    """Qu Jianpan keyboard app (趣键盘): only the shared endpoint set."""
    return App(list(api_qu_jian_pan()), 'qu-jian-pan')
# ''' Qu Jianpan Speed Edition '''
def app_qu_jian_pan_flash():
    """Qu Jianpan Speed Edition (趣键盘极速版): same shared endpoint set."""
    return App(list(api_qu_jian_pan()), 'qu-jian-pan-flash')
# ''' Qu Tou Tiao '''
def app_qu_tou_tiao():
    """Qu Tou Tiao news app (趣头条) and its many embedded mini-games."""
    urls = [
        # Coins / account / withdrawal
        Api(r'/member/getMemberIncome',log='收益详情', f_p_arg=['page','last_time']),
        Api(r'/cash/order/list',log='取现 - 提现列表'),
        Api(r'/member/getMemberInfo',log='取现 - 用户账户信息'),
        Api(r'/mall/item/ItemList',log='取现 - 可取现金额列表'),
        Api(r'/cash_order/create',log='取现 - 提现', f_p_enc={'qdata'}, f_merge_key=r_u),
        Api(r'/sign/sign', log='每日签到', params_as_all=True, f_merge_key=r_c_l1),
        Api(r'/mission/intPointReward', log='时段签到', params_as_all=True, api_ok={'code':[-312]}, f_merge_key=r_c_l1),
        Api(r'/taskcenter/getReward', log='任务完成 - 领金币', params_as_all=True, f_merge_key=r_c_l1),
        r'/x/game-center/user/sign-in',
        r'/x/game-center/user/last-sign-coin',
        Api('/x/task/v2/take-reward', log='任务完成 - 领金币', f_name='game_take_reward',f_b_arg=['task_id']),
        r'/newuserline/activity/signRewardNew', # challenge sign-in
        Api(r'/mission/receiveTreasureBox', log='趣头条-开宝箱', api_ok={'code':[-1710]}),
        Api(r'/content/readV2',params_as_all=True, f_merge_key=r_u),
        Api(r'/app/re/taskCenter/info/v1/get', log='任务信息', params_as_all=True, p_as_all_limit=1, f_merge_key=r_c_l1),
        Api(r'/app/user/info/personal/v1/get', log='用户信息', params_as_all=True, p_as_all_limit=1, f_merge_key=r_c_l1),
        Api(r'/coin/service', body_as_all=True, f_merge_key=r_u),
        r'/readtimer/report',
        # Api(r'motivateapp/mtvcallback', params_as_all=True),
        Api(r'/x/feed/getReward', log='信息流-惊喜红包', params_as_all=True, api_ok={'code':[-308]}, f_merge_key=r_c_l1),
        # Tiantianle daily lottery
        Api(r'/lotteryGame/status', log='天天乐-信息'),
        Api(r'/tiantianle/video', log='天天乐-增加机会', params_as_all=True, f_merge_key=r_c_l1),
        Api(r'/lotteryGame/order', log='天天乐-投注'),
        # Golden pig; "withdraw" = take money out (of the bank)
        r'/x/v1/goldpig/info',
        r'/x/v1/goldpig/foundLostPig', # golden pig - recover lost pig
        r'/x/v1/goldpig/bubbleWithdraw', # golden pig - watch video
        r'/x/v1/goldpig/withdraw', # golden pig
        # Piggy bank
        Api(r'/finance/piggybank/taskReward',api_ok={'code':[-2004]}), # piggy bank
        Api(r'/finance/piggybank/draw', log='存钱罐 - 活期金币转出到钱包', f_b_arg=['amount']),
        # Game - vegetable planting
        r'/x/tree-game/task-list',
        r'/x/tree-game/left-plant-num',
        r'/x/tree-game/plant-ok',
        r'/x/tree-game/add-plant',
        r'/x/tree-game/fertilizer/add',
        r'/x/tree-game/fertilizer/use',
        r'/x/tree-game/water-plants',
        r'/x/tree-game/my-gift-box/draw-lottery',
        r'/x/tree-game/my-gift-box/receive-prize',
        r'/x/tree-game/task-update',
        r'/x/tree-game/add-task-drips',
        Api(r'/x/tree-game/task/pop/take-reward',f_b_arg=['task_id']),#task_id=10,11,12
        r'/x/tree-game/truck/sold',
        r'/x/tree-game/truck/ad-award',
        # r'/x/tree-game/',
        r'/x/task/encourage/activity/grant', # game - coin-split event
        r'api/loginGame',
        r'api/qttAddCoin',
        # Game - idioms
        Api(r'/api/Login', log='猜成语赚钱 - 登录'),
        r'api/AddCoin',
        # Game - idiom match (xiao-xiao-le)
        Api('/chengyu/login', log='成语消消乐 - 登录 - 获取open_id', f_b_arg=['ticket']),
        Api('/chengyu/addcoin', log='成语消消乐 - 金币', f_b_arg=['add_num']),
        Api('/chengyu/update_red_packet', log='成语消消乐 - 过关得现金', f_b_arg=['level']),
        # Game - vegetable chopping
        Api(r'/x/open/coin/add', body_as_all=True, f_merge_key=chain_rule(sort_rule(lambda item: int(item['coin_num']), reverse=True),r_u)),
        # Game - candy
        Api(r'/happy/qtt/userinfo', log='游戏 - 糖果 - 获取open_id', f_p_arg=['ticket']),
        Api(r'/happy/protocol', log='游戏 - 糖果 - 获取金币', f_b_arg=['data']),
        # Game - fishing
        Api(r'/xyx_sdk/gw/partner_login', log='游戏 - 钓鱼 - 登录', body_as_all=True, f_merge_key=r_c_l1),
        Api(r'/qtt/coin/withdraw', log='游戏 - 钓鱼 - 获取金币'),
        # Game - brain
        Api('/api/v1/z6h5/sign', log='游戏 - 大脑 - 签到'),
        Api('/api/v1/z6h5/lottery', log='游戏 - 大脑 - 获取红包'),
        Api('/api/v1/z6h5/login', log='王者大脑 - 获取open_id', f_p_arg=['ticket']),
        Api('/api/v1/z6h5/sign',log='王者大脑 - 签到'),
        Api('/api/v1/z6h5/lottery',log='王者大脑 - lottery'),
        Api('/api/v1/z6h5/exchange',log='王者大脑 - 红包满20元兑换成金币'),
        Api('/api/v1/z6h5/get_reward',log='王者大脑 - 任务完成', f_p_arg=['activity_id']),
        Api('/api/v1/z6h5/open_redpacket',log='王者大脑 - 红包'),
        Api('/api/v1/z6h5/add_coin',log='王者大脑 - add_coin', params_as_all=True, f_merge_key=r_u),
        Api('/api/v1/z6h5/get_rank',log='王者大脑 - 判案比赛-排行信息'),
        Api('/api/v1/z6h5/get_rank_reward',log='王者大脑 - 判案比赛-领奖'),
        Api('/api/v1/z6h5/upload_rank',log='王者大脑 - 判案比赛 - 排行', f_p_arg=['score']),
        '/api/v1/z6h5/',
        Api(r'/press_trigger',log='幸运大转盘'),
        # Golden pig
        Api(r'/actcenter/piggy/videoConfirm',log='合成金猪 - 气泡', f_p_arg=['tag']),
        r'/actcenter/piggy/',
        Api(r'/search/searchContentNew',log='搜索内容得金币', params_as_all=True, p_as_all_limit=3, f_merge_key=chain_rule(r_c, limit_rule(3))),
    ]
    urls.extend(api_common())
    urls.extend(api_tczyqtt())
    return App(urls, 'qu-tou-tiao')
# ''' Qu Zhong Cai '''
def app_qu_zhong_cai():
    """Qu Zhong Cai farming game (趣种菜)."""
    urls = [
        Api('/x/tree-game/user', log='趣种菜 - 获取用户信息 - s_token'),
        Api('/x/tree-game/gapp/info', log='趣种菜 - 信息'),
        Api('/x/tree-game/gapp/box/my/rand-reward', log='趣种菜 - 拆礼物 - 点击'),
        Api('/x/tree-game/gapp/box/my/take-reward', log='趣种菜 - 拆礼物 - 收获'),
        Api('/x/tree-game/gapp/add-plant', log='趣种菜 - 植物 - 种下'),
        Api('/x/tree-game/gapp/plant-ok', log='趣种菜 - 植物 - 收获'),
        Api('/x/tree-game/gapp/water-plants', log='趣种菜 - 植物 - 浇水'),
        Api('/x/tree-game/gapp/remove-bug', log='趣种菜 - 植物 - 杀虫'),
        # Card-flip game
        Api('/x/middle/flop/info', log='趣种菜 - 翻翻乐 - 信息'),
        Api('/x/middle/flop/start', log='趣种菜 - 翻翻乐 - 开始'),
        '/x/middle/flop/',
        # Water pool
        Api('/x/tree-game/gapp/pool/info', log='趣种菜 - 水池 - 信息'),
        Api('/x/tree-game/gapp/pool/with-draw', log='趣种菜 - 水池 - 存到水壶'),
        Api('/x/tree-game/gapp/pool/speed-up', log='趣种菜 - 水池 - 加速'),
        # '/x/tree-game/gapp/pool/',
        # Rabbit event
        '/x/tree-game/gapp/activity/rabbit/',
        Api('/x/tree-game/gapp/activity/carrot/take-reward', log='趣种菜 - 植物 - 点我'),
        # Api('/x/tree-game/'),
    ]
    urls.extend(api_common())
    urls.extend(api_sign())
    return App(urls, 'qu-zhong-cai')
# ''' Golden Pig game box '''
def app_you_xi_he_zi():
    """Golden Pig game box (金猪游戏盒子) and its embedded games."""
    urls = [
        # Games
        Api('/x/task/v3/list', log='游戏任务列表'),
        Api('/x/cash/time-bonus/info', log='时段金币 - 信息'),
        Api('/x/cash/time-bonus/get', log='时段金币 - 领取', f_b_arg=['index']),
        Api('/x/cash/task-bonus/get', log='红包 - 领取', f_p_arg=['cnt']),
        Api('/x/cash/daily-bonus/get', log='签到 - 奖励', body_as_all=True, f_merge_key=r_c_l1),
        Api('/x/task/v2/take-reward', log='任务完成 - 领金币', f_name='game_take_reward',f_b_arg=['task_id']),
        # Lottery - game carnival
        r'/x/raffle/detail',
        r'/x/raffle/roll',
        r'/x/raffle/add-times',
        # Golden pig; "withdraw" = take money out (of the bank)
        r'/x/v1/goldpig/info',
        r'/x/v1/goldpig/foundLostPig', # golden pig - recover lost pig
        r'/x/v1/goldpig/bubbleWithdraw', # golden pig - watch video
        r'/x/v1/goldpig/withdraw', # golden pig
        # Game - Idiom Tycoon
        Api('qttgame.midsummer.top/api/Login', log='2-登录游戏', f_name='api_login', f_b_arg=['ticket','game_id']),
        Api('qttgame.midsummer.top/api/AddCoin', log='成语大富豪 - 金币',f_b_arg=['AddCoinNum','session_id']),
        # Game - idiom match (xiao-xiao-le)
        Api('/chengyu/login', log='成语消消乐 - 登录 - 获取open_id', f_b_arg=['ticket']),
        Api('/chengyu/addcoin', log='成语消消乐 - 金币', f_b_arg=['add_num']),
        Api('/chengyu/update_red_packet', log='成语消消乐 - 过关得现金', f_b_arg=['level']),
        # Game - vegetable chopping
        Api('/x/open/coin/add', log='切菜 - 金币', body_as_all=True, f_merge_key=chain_rule(sort_rule(lambda item: int(item['coin_num']), reverse=True),r_u)),
        # Game - candy
        Api(r'/happy/qtt/userinfo', log='游戏 - 糖果 - 获取open_id', f_p_arg=['ticket']),
        Api(r'/happy/protocol', log='游戏 - 糖果 - 获取金币', f_b_arg=['data']),
        # Game - fishing
        Api(r'/qtt/coin/withdraw', log='游戏 - 钓鱼 - 获取金币'),
        # Game - King Brain
        Api('/api/v1/z6h5/login', log='王者大脑 - 获取open_id', f_p_arg=['ticket']),
        Api('/api/v1/z6h5/sign',log='王者大脑 - 签到'),
        Api('/api/v1/z6h5/lottery',log='王者大脑 - lottery'),
        Api('/api/v1/z6h5/exchange',log='王者大脑 - 红包满20元兑换成金币'),
        Api('/api/v1/z6h5/get_reward',log='王者大脑 - 任务完成', f_p_arg=['activity_id']),
        Api('/api/v1/z6h5/open_redpacket',log='王者大脑 - 红包'),
        Api('/api/v1/z6h5/add_coin',log='王者大脑 - add_coin', params_as_all=True, f_merge_key=r_u),
        Api('/api/v1/z6h5/get_rank',log='王者大脑 - 判案比赛-排行信息'),
        Api('/api/v1/z6h5/get_rank_reward',log='王者大脑 - 判案比赛-领奖'),
        Api('/api/v1/z6h5/upload_rank',log='王者大脑 - 判案比赛 - 排行', f_p_arg=['score']),
        '/api/v1/z6h5/',
    ]
    urls.extend(api_common())
    urls.extend(api_sign())
    urls.extend(api_tczyqtt())
    return App(urls, 'you-xi-he-zi')
# ''' Jinri Toutiao '''
def app_jin_ri_tou_tiao():
    """Jinri Toutiao news app (今日头条) - the only app currently enabled."""
    urls = [
        Api('/task/page_data/', f_name='task_page_data'),
        Api('/score_task/v1/task/sign_in/', f_name='task_sign_in'),
        Api('/score_task/v1/task/open_treasure_box', f_name='task_open_treasure_box'),
        Api('/score_task/v1/task/new_excitation_ad', f_name='task_new_excitation_ad', f_b_arg=['task_id'], params_as_all=True),
        Api('/score_task/v1/task/get_read_bonus/', f_name='task_get_read_bonus', params_as_all=True, f_p_arg=['group_id']),
        # 'score_task/v1/task/done_task/',
        # 'score_task/v1/landing/add_amount/',
        # 'score_task/v1/user/profit_detail/',
        # # Novels
        Api('/api/novel/book/directory/list/v1', log='书目录', f_p_arg=['book_id']),
        Api('score_task/v1/novel/bonus/', f_b_arg=['item_id']), # coins for reading novels
        # # Search
        # 'search/suggest/homepage_suggest/',
        # 'search/suggest/initial_page/',
        # 'api/search/content/',
        # Api('/search/', log='搜索', f_p_arg=['keyword']),
        # # Walking (step counter)
        Api(r'score_task/v1/walk/count/', f_b_arg=['count']),
        r'score_task/v1/walk/',
        # # Sleep tracking
        'score_task/v1/sleep/status/',
        'score_task/v1/sleep/start/',
        'score_task/v1/sleep/stop/',
        'score_task/v1/sleep/done_task/', # coins for sleeping
        # # Farm
        Api('/ttgame/game_farm/home_info', f_name='farm_home_info', api_ok={'status_code':[0]}),
        r'ttgame/game_farm/',
        # # Eating
        r'score_task/lite/v1/eat/eat_info/',
        r'score_task/lite/v1/eat/done_eat/',
        'api/news/feed/v47/', # Android video tab page
        # 'api/news/feed/v64/', # iOS video tab page
        # # 'score_task/v1',
        # 'score_task/v2',
    ]
    return App(urls, 'jin-ri-tou-tiao')
def app_ma_yi_kd():
    """Mayi Kandian news app (蚂蚁看点)."""
    urls = [
        Api(r'article/treasure_chest', log='时段签到', f_b_enc={'p'}, content_type='multipart_form'),
        Api(r'/user/shai_income_task_award',log='晒收'),
        r'TaskCenter/daily_sign',
        # r'WebApi/',
        r'WebApi/Stage/task_reward',
        r'WapPage/get_video_status',
        r'WebApi/RotaryTable/turn_rotary_new',
        r'WebApi/RotaryTable/turn_reward',
        r'WebApi/RotaryTable/video_double',
        r'WebApi/RotaryTable/chestReward',
        Api(r'/WebApi/sleep/sleep_start',log='睡觉 - 开始'),
        Api(r'/WebApi/sleep/get_sleep_score',log='睡觉 - 醒来'),
        Api(r'article/haotu_video',log='看视频得金币', f_b_enc={'p'}, content_type='multipart_form'),
        Api(r'article/complete_article',log='读文章得金币', f_b_enc={'p'}, content_type='multipart_form'),
        Api(r'/v5/user/rewar_video_callback', log='视频广告 - 得金币', f_b_enc={'p'}, content_type='multipart_form'),
        Api(r'/v5/article/complete_welfare_score.json', log='福袋 - 得金币', f_b_enc={'p'}, content_type='multipart_form'),
        Api(r'/v5/user/adlickstart.json',log='点击广告领金币 - 开始', f_b_enc={'p'}, content_type='multipart_form'),
        Api(r'/v5/user/adlickend.json',log='点击广告领金币 - 结束', f_b_enc={'p'}, content_type='multipart_form'),
        Api(r'/v5/user/task_second_callback.json',f_b_enc={'p'}, content_type='multipart_form'),
        Api(r'/v3/user/userinfo.json', log='用户信息', params_as_all=True, p_as_all_limit=1, content_type='multipart_form'),
        Api(r'/user/income_ajax', log='收益详情', f_p_arg=['page'], content_type='multipart_form'),
        # Quiz - new version
        r'/v6/Answer/getData.json',
        r'/v5/answer/first_reward',
        r'/v6/Answer/answer_question.json',
        r'/v5/answer/answer_reward.json',
        # Quiz - old version
        # r'WebApi/Answer/getData',
        # r'WebApi/Answer/answer_question',
        # r'WebApi/Answer/answer_reward',
        # r'WebApi/Answer/video_double',
        # r'WebApi/Answer/fill_energy',
    ]
    return App(urls, 'ma-yi-kd')
# ''' Tianci Xiao Xiucai standalone app '''
def app_tian_chi_xiao_xiu_cai():
    """Tianci Xiao Xiucai word-filling app (填词小秀才)."""
    urls = [
        Api('/api/v1/tczyapp/login', log='填词小秀才 - 获取open_id', f_p_arg=['ticket']),
        Api('/api/v1/tczyapp/sign',log='填词小秀才 - 签到'),
        Api('/api/v1/tczyapp/lottery',log='填词小秀才 - lottery'),
        Api('/api/v1/tczyapp/exchange',log='填词小秀才 - 红包满20元兑换成金币'),
        Api('/api/v1/tczyapp/get_reward',log='填词小秀才 - 任务完成', f_p_arg=['activity_id']),
        Api('/api/v1/tczyapp/open_redpacket',log='填词小秀才 - 红包'),
        Api('/api/v1/tczyapp/draw_a_char',log='填词小秀才 - 抽字'),
        Api('/api/v1/tczyapp/add_coin',log='填词小秀才 - add_coin', params_as_all=True, f_merge_key=r_u),
        Api('/api/v1/tczyapp/get_rank',log='填词小秀才 - 判案比赛-排行信息'),
        Api('/api/v1/tczyapp/get_rank_reward',log='填词小秀才 - 判案比赛-领奖'),
        Api('/api/v1/tczyapp/upload_rank',log='填词小秀才 - 判案比赛 - 排行', f_p_arg=['score']),
        '/api/v1/tczyapp/'
    ]
    urls.extend(api_common())
    urls.extend(api_sign())
    return App(urls, 'tian-chi-xiao-xiu-cai')
# ''' King Brain standalone app '''
def app_wan_zhe_da_nao():
    """King Brain quiz app (王者大脑)."""
    urls = [
        Api('/api/v1/z6qtt/login', log='王者大脑 - 获取open_id', f_p_arg=['ticket']),
        Api('/api/v1/z6qtt/sign',log='王者大脑 - 签到'),
        Api('/api/v1/z6qtt/lottery',log='王者大脑 - lottery'),
        Api('/api/v1/z6qtt/exchange',log='王者大脑 - 红包满20元兑换成金币'),
        Api('/api/v1/z6qtt/get_reward',log='王者大脑 - 任务完成', f_p_arg=['activity_id']),
        Api('/api/v1/z6qtt/open_redpacket',log='王者大脑 - 红包'),
        Api('/api/v1/z6qtt/add_coin',log='王者大脑 - add_coin', params_as_all=True, f_merge_key=r_u),
        Api('/api/v1/z6qtt/get_rank',log='王者大脑 - 判案比赛-排行信息'),
        Api('/api/v1/z6qtt/get_rank_reward',log='王者大脑 - 判案比赛-领奖'),
        Api('/api/v1/z6qtt/upload_rank',log='王者大脑 - 判案比赛 - 排行', f_p_arg=['score']),
        '/api/v1/z6qtt/'
    ]
    urls.extend(api_common())
    urls.extend(api_sign())
    return App(urls, 'wan-zhe-da-nao')
# ''' WeChat '''
def app_wei_xin():
    """WeChat payment-roll capture (registered under slug 'wei-xin-zhi-fu')."""
    endpoints = [
        Api('/userroll/userrolllist', params_as_all=True, f_merge_key=r_c_l1)
    ]
    return App(endpoints, 'wei-xin-zhi-fu')
# ''' Happy Chicken Farm app '''
def app_yang_ji_chang():
    """Happy Chicken Farm game (欢乐养鸡场)."""
    urls = [
        Api('/x/middle/open/user/ticket', log='欢乐养鸡场 - 获取s_token'),
        Api('/x/chicken/info', log='欢乐养鸡场 - 信息'),
        Api('/x/chicken/task/take-award', log='达标领奖励'),
        Api('/x/chicken/feed', log='喂饲料'),
        Api('/x/chicken/get-fodder', log='领饲料', f_b_arg=['id','pos','again']),
        Api('/x/chicken/mood/use-object', log='打赏'),
        '/x/chicken/video/accomplish',
        # Card-flip game
        Api('/x/middle/flop/info', log='欢乐养鸡场 - 翻翻乐 - 信息'),
        Api('/x/middle/flop/start', log='欢乐养鸡场 - 翻翻乐 - 开始'),
        '/x/middle/flop/',
        # Egg smashing
        Api('/x/chicken/add-hit-count', log='欢乐养鸡场 - 增加砸蛋机会'),
        Api('/x/chicken/hit-egg/award', log='欢乐养鸡场 - 砸蛋 - 领奖', f_b_arg=['again']),
        '/x/chicken/'
    ]
    urls.extend(api_common())
    return App(urls, 'yang-ji-chang')
# ''' Zhongqin Kandian '''
def app_zhong_qin_kd():
    """Zhongqin Kandian news app (中青看点)."""
    urls = [
        Api(r'/WebApi/TimePacket/getReward', f_name='time_packet', log='计时红包'),
        r'/webApi/AnswerReward/',
        Api(r'/v5/Game/GameVideoReward.json', log='2次可选领取 - 看广告视频', f_b_enc={'p'},f_merge_key=r_u),
        Api(r'/taskCenter/getAdVideoReward',log='任务中心 - 看视频'),
        Api(r'/v5/article/complete.json',log='看视频得金币', f_b_enc={'p'}, f_b_arg=['p'], content_type='urlencoded_form'),
        Api(r'/v5/CommonReward/toGetReward.json',log='可领取 - ', f_b_enc={'p'}, f_b_arg=['p'], f_merge_key=r_c_u),
        Api(r'/v5/CommonReward/toDouble.json',log='可领取 - 双倍', f_b_enc={'p'}, f_b_arg=['p'], f_merge_key=r_c_u),
        Api(r'/WebApi/Task/receiveBereadRed',log='任务中心 - 领红包'),
        Api(r'/WebApi/EverydayShare/share_back',log='每日分享奖励', f_b_arg=['uid'], ),
        # Daily lucky draw
        '/WebApi/RotaryTable/turnRotary',
        '/WebApi/RotaryTable/',
        Api('/wap/user/balance', log='用户金币数量'),
        # Old version
        # Api(r'/WebApi/invite/openHourRed',log='开宝箱', body_as_all=True),
        # Api(r'getTimingRedReward.json', log='时段签到', f_name='hourly_sign', ),
    ]
    return App(urls, 'zhong-qin-kd')
def app_zhu_lai_le():
    """Zhu Lai Le game (猪来了)."""
    endpoints = [Api(r'/pig/protocol', log='猪来了')]
    return App(endpoints, 'zhu-lai-le')
def helper_app_from_path(from_or_to_path: str) -> App:
    """Instantiate the app_* factory whose slug appears in *from_or_to_path*.

    Looks through module globals for functions named ``app_<name>`` and
    matches ``<name>`` (underscores turned into dashes) against the path.

    Fixes vs. original: the prefix is stripped with slicing instead of
    ``str.replace('app_', '')``, which could also mangle a later
    occurrence of ``app_`` inside the name; the no-match case now returns
    None explicitly instead of falling off the end.
    """
    for name, value in globals().items():
        # type(apps) is the plain-function type; this skips imported classes.
        if name.startswith('app_') and isinstance(value, type(apps)):
            slug = name[len('app_'):].replace('_', '-')
            if slug in from_or_to_path:
                return value()
    return None
def helper_health_check():
    """Report every Api that captures/encodes params or body but has no
    f_merge_key configured (such entries cannot be deduplicated on merge).

    Fixes vs. original: removed the dead leading ``pass`` statement,
    replaced ``== None`` with ``is None``, and used plain dict truthiness
    instead of ``len(...)``.
    """
    no_merge_rule = {}
    for app in apps():
        a: App = app
        # Only Api instances carry merge configuration; plain URL strings don't.
        it = filter(lambda item: isinstance(item[1], Api), a.url_a_dict.items())
        for _, apii in it:
            api: Api = apii
            if api.f_b_enc or api.f_p_enc or api.params_as_all or api.body_as_all:
                if api.f_merge_key is None:
                    no_merge_rule.setdefault(a.app_name, []).append(api)
    if no_merge_rule:
        print('没有配置 f_merge_key')
        for app_name, apis in no_merge_rule.items():
            for api in apis:
                print(f'\t{app_name}\t{api.url}')
if __name__ == "__main__":
    # helper_app_from_path('/Users/zhoujie/Desktop/dev/tian-chi-xiao-xiu-cai/session_huawei.py')
    # Scenario: print all module globals sorted by name, e.g. to refresh the
    # ordering of the apps() list by copy/paste.
    for k, v in sorted(globals().items(), key=lambda item: item[0]):
        print(f'{k}(),')
    # helper_health_check()
|
import shutil
import pdb
from operator import add
import euroki
# Example Front module
# Script that drafts an 8 HP Eurorack faceplate ('io-faceplate') using the
# project's euroki drawing API: outline, mounting holes, rails, a rear PCB,
# then pots and jacks at explicit [x, y] millimetre positions.
# ---------------------------------------------------------------------
er = euroki.euroKi('io-faceplate') # Give the pj a name
er.drawOutline(8) # 8 HP module
er.drawMountingHoles()
er.drawRails()
# Drill diameters (mm) for each component type.
er.pot = 7.1 # Alpha 9mm pots
er.jack = 6.1 # Thonk 3.5mm jack
er.button = 4.9 # 1RRED PB Cap
er.LP = 3.1 # VLP-300-F light pipe
er.initPCB([39, 98]) # This is the PCB that will go behind the panel
er.drawPCB() # Draw this
# Now Just draw jacks and pots where you want
er.drawPot([7, 37])
er.drawPot([32, 37])
er.drawPot([20, 55])
er.drawPot([7, 72])
er.drawPot([32, 72])
er.drawJack([7, 88])
er.drawJack([32, 88])
er.fin() # Cleanup
# ---------------------------------------------------------------------
|
from __future__ import absolute_import, division, print_function, unicode_literals
import matplotlib.pyplot as plt
import tensorflow as tf
# --- Dataset setup ---------------------------------------------------------
# CSV with 7 Hu image-moment features per sample plus an integer class label.
train_dataset_fp = "hu2222.csv"
column_names = ['Hu1', 'Hu2', 'Hu3', 'Hu4', 'Hu5', 'Hu6', 'Hu7', 'Class']
feature_names = column_names[:-1]
label_name = column_names[-1]
print("Features: {}".format(feature_names))
print("Label: {}".format(label_name))
class_names = ['0', '1', '2', '3']
batch_size = 256
train_dataset = tf.data.experimental.make_csv_dataset(
    train_dataset_fp,
    batch_size,
    column_names=column_names,
    label_name=label_name,
    num_epochs=1)
# Pull one batch to inspect the raw (column-dict, labels) structure.
features, labels = next(iter(train_dataset))
def pack_features_vector(features, labels):
    """Stack the per-column feature tensors into a single (batch, 7) tensor."""
    packed = tf.stack(list(features.values()), axis=1)
    return packed, labels
# Repack each batch from a column-dict into a dense feature matrix.
train_dataset = train_dataset.map(pack_features_vector)
features, labels = next(iter(train_dataset))
# MLP: 7 Hu moments in -> 4 class logits out.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(256, activation=tf.nn.selu, input_shape=(7,)), # input shape required
    tf.keras.layers.Dense(128, activation=tf.nn.tanh),
    tf.keras.layers.Dense(128, activation=tf.nn.tanh),
    tf.keras.layers.Dense(4, )
])
# Smoke-test the untrained model on one batch; the bare expressions below
# are leftover notebook-style inspection and have no effect as a script.
predictions = model(features)
predictions[:7]
tf.nn.softmax(predictions[:7])
# from_logits=True because the last layer has no softmax.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
def loss(model, x, y, training):
    """Sparse categorical cross-entropy of model(x) against labels y."""
    # training=training only matters for layers that behave differently
    # during training versus inference (e.g. Dropout).
    logits = model(x, training=training)
    return loss_object(y_true=y, y_pred=logits)
# Sanity-check: loss on one batch before any training.
l = loss(model, features, labels, training=False)
def grad(model, inputs, targets):
    """Return (batch loss, gradients w.r.t. the model's trainable variables)."""
    with tf.GradientTape() as tape:
        batch_loss = loss(model, inputs, targets, training=True)
    gradients = tape.gradient(batch_loss, model.trainable_variables)
    return batch_loss, gradients
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# One manual optimization step to verify the loss decreases.
loss_value, grads = grad(model, features, labels)
print("Step: {}, Initial Loss: {}".format(optimizer.iterations.numpy(),
    loss_value.numpy()))
optimizer.apply_gradients(zip(grads, model.trainable_variables))
print("Step: {}, Loss: {}".format(optimizer.iterations.numpy(),
    loss(model, features, labels, training=True).numpy()))
# --- Custom training loop --------------------------------------------------
train_loss_results = []
train_accuracy_results = []
num_epochs =140
for epoch in range(num_epochs):
    epoch_loss_avg = tf.keras.metrics.Mean()
    epoch_accuracy = tf.keras.metrics.SparseCategoricalAccuracy()
    # Training loop
    for x, y in train_dataset:
        # Optimize the model
        loss_value, grads = grad(model, x, y)
        optimizer.apply_gradients(zip(grads, model.trainable_variables))
        # Track progress
        epoch_loss_avg(loss_value) # Add current batch loss
        # Compare predicted label to actual label
        # training=True is needed only if there are layers with different
        # behavior during training versus inference (e.g. Dropout).
        epoch_accuracy(y, model(x, training=True))
    # End epoch
    train_loss_results.append(epoch_loss_avg.result())
    train_accuracy_results.append(epoch_accuracy.result())
    if epoch % 20 == 0:
        print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(epoch,
            epoch_loss_avg.result(),
            epoch_accuracy.result()))
# Plot the loss and accuracy curves over epochs.
fig, axes = plt.subplots(2, sharex=True, figsize=(12, 8))
fig.suptitle('Training Metrics')
axes[0].set_ylabel("Loss", fontsize=14)
axes[0].plot(train_loss_results)
axes[1].set_ylabel("Accuracy", fontsize=14)
axes[1].set_xlabel("Epoch", fontsize=14)
axes[1].plot(train_accuracy_results)
plt.show()
|
def frequency_count(input_string):
    """Return a dict mapping each character of *input_string* to its count."""
    frequency = {}
    for letter in input_string:
        # dict.get avoids the try/except dance for first occurrences.
        frequency[letter] = frequency.get(letter, 0) + 1
    return frequency
print( frequency_count("CoE 161") )
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils import spectral_norm
from torch.nn.init import xavier_uniform_
import torch.nn.init as init
def init_weights(m):
    """Xavier-initialize Linear/Conv2d weights and zero their biases.

    Intended to be used with ``module.apply(init_weights)``.
    """
    # isinstance (rather than type(m) ==) is the idiomatic check and also
    # covers subclasses of Linear/Conv2d.
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        xavier_uniform_(m.weight)
        # Layers created with bias=False have m.bias is None; the original
        # code crashed on them.
        if m.bias is not None:
            m.bias.data.fill_(0.)
def snconv2d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
    """2-d convolution wrapped with spectral normalization."""
    conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     dilation=dilation, groups=groups, bias=bias)
    return spectral_norm(conv)
def snconv3d(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
    """3-d convolution wrapped with spectral normalization."""
    conv = nn.Conv3d(in_channels=in_channels, out_channels=out_channels,
                     kernel_size=kernel_size, stride=stride, padding=padding,
                     dilation=dilation, groups=groups, bias=bias)
    return spectral_norm(conv)
def snlinear(in_features, out_features, bias=True):
    """Fully connected layer wrapped with spectral normalization."""
    return spectral_norm(nn.Linear(in_features, out_features, bias=bias))
def sn_embedding(num_embeddings, embedding_dim):
    """Embedding table wrapped with spectral normalization."""
    return spectral_norm(nn.Embedding(num_embeddings, embedding_dim))
class Self_Attn(nn.Module):
    """ Self attention Layer (SAGAN-style).

    theta/phi project the input to C/8 channels and g to C/2; phi and g are
    max-pooled so attention is computed against a 4x-smaller set of keys.
    The attended features are scaled by a learned scalar gate ``sigma``
    (initialized to 0, so the block starts as an identity) and added back
    to the input.
    """
    def __init__(self, in_channels):
        super(Self_Attn, self).__init__()
        self.in_channels = in_channels
        # 1x1 spectrally-normalized projections for queries/keys/values/output.
        self.snconv1x1_theta = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_phi = snconv2d(in_channels=in_channels, out_channels=in_channels//8, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_g = snconv2d(in_channels=in_channels, out_channels=in_channels//2, kernel_size=1, stride=1, padding=0)
        self.snconv1x1_attn = snconv2d(in_channels=in_channels//2, out_channels=in_channels, kernel_size=1, stride=1, padding=0)
        # Halves spatial size of keys/values before the attention matmul.
        self.maxpool = nn.MaxPool2d(2, stride=2, padding=0)
        self.softmax = nn.Softmax(dim=-1)
        # Learned residual gate, starts at zero.
        self.sigma = nn.Parameter(torch.zeros(1))
    def forward(self, x):
        """
        inputs :
            x : input feature maps(B X C X W X H)
        returns :
            out : self attention value + input feature
                  (note: only ``out`` is returned; the attention map is not)
        """
        _, ch, h, w = x.size()
        # Theta path: one query per spatial location, shape (B, C/8, h*w).
        theta = self.snconv1x1_theta(x)
        theta = theta.view(-1, ch//8, h*w)
        # Phi path: keys, max-pooled to h*w//4 locations.
        phi = self.snconv1x1_phi(x)
        phi = self.maxpool(phi)
        phi = phi.view(-1, ch//8, h*w//4)
        # Attn map: (h*w) x (h*w//4); softmax normalizes over the key axis.
        attn = torch.bmm(theta.permute(0, 2, 1), phi)
        attn = self.softmax(attn)
        # g path: values, also max-pooled.
        g = self.snconv1x1_g(x)
        g = self.maxpool(g)
        g = g.view(-1, ch//2, h*w//4)
        # Attn_g: attention-weighted sum of values per query location.
        attn_g = torch.bmm(g, attn.permute(0, 2, 1))
        attn_g = attn_g.view(-1, ch//2, h, w)
        attn_g = self.snconv1x1_attn(attn_g)
        # Gated residual connection (sigma starts at 0).
        out = x + self.sigma*attn_g
        return out
class ConditionalBatchNorm2d(nn.Module):
    """BatchNorm2d whose affine gain/bias are predicted from a condition vector.

    The condition ``y`` (size ``space``) is mapped through two spectrally
    normalized linear layers to per-channel gain (offset by 1) and bias.
    NOTE: ``num_classes`` is accepted for interface compatibility but unused.
    """
    def __init__(self, num_features, num_classes, space=240):
        super().__init__()
        self.num_features = num_features
        # The normalization itself is non-affine; conditioning supplies the affine part.
        self.bn = nn.BatchNorm2d(num_features, momentum=0.001, affine=False)
        self.gain = snlinear(in_features=space, out_features=num_features)
        self.bias = snlinear(in_features=space, out_features=num_features)
    def forward(self, x, y):
        batch = y.size(0)
        per_ch_gain = (1 + self.gain(y)).view(batch, -1, 1, 1)
        per_ch_bias = self.bias(y).view(batch, -1, 1, 1)
        normalized = self.bn(x)
        return normalized * per_ch_gain + per_ch_bias
class InplaceShift(torch.autograd.Function):
    """Temporal channel shift performed in place (TSM-style).

    For input of shape (n, t, c, h, w): the first ``fold`` channels are
    shifted one step earlier in time, the next ``fold`` channels one step
    later; everything is written directly into the input's storage via a
    single reusable scratch buffer to avoid extra allocations.
    """
    @staticmethod
    def forward(ctx, input, fold):
        # Remember the fold size for the backward pass.
        ctx.fold_ = fold
        n, t, c, h, w = input.size()
        # Scratch buffer reused for both shift directions.
        buffer = input.data.new(n, t, fold, h, w).zero_()
        # First fold channels: take value from the NEXT time step (shift back).
        buffer[:, :-1] = input.data[:, 1:, :fold]
        input.data[:, :, :fold] = buffer
        buffer.zero_()
        # Second fold channels: take value from the PREVIOUS time step (shift forward).
        buffer[:, 1:] = input.data[:, :-1, fold: 2 * fold]
        input.data[:, :, fold: 2 * fold] = buffer
        return input
    @staticmethod
    def backward(ctx, grad_output):
        # The backward of a shift is the opposite shift, also done in place.
        fold = ctx.fold_
        n, t, c, h, w = grad_output.size()
        buffer = grad_output.data.new(n, t, fold, h, w).zero_()
        buffer[:, 1:] = grad_output.data[:, :-1, :fold]
        grad_output.data[:, :, :fold] = buffer
        buffer.zero_()
        buffer[:, :-1] = grad_output.data[:, 1:, fold: 2 * fold]
        grad_output.data[:, :, fold: 2 * fold] = buffer
        # No gradient w.r.t. the ``fold`` argument.
        return grad_output, None
class GenBlock(nn.Module):
    """Generator residual block: cond-BN -> ReLU -> upsample -> conv, twice,
    with an upsampled 1x1-projected shortcut. Optionally applies a temporal
    channel shift (TSM-style) to the input first.
    """
    def __init__(self, in_channels, out_channels, num_classes, space=256, fold=3):
        super(GenBlock, self).__init__()
        # Fold divisor for the temporal shift: 1/fold of channels shifted
        # each direction in time.
        self.f=fold
        self.cond_bn1 = ConditionalBatchNorm2d(in_channels, num_classes)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        self.cond_bn2 = ConditionalBatchNorm2d(out_channels, num_classes)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1)
        # 1x1 conv that matches the shortcut's channel count to the output.
        self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0)
    def forward(self, x, labels, shift):
        # Keep the original input for the shortcut branch.
        x0 = x
        if shift:
            # n_segment=16 frames per clip; in-place shift to save memory.
            # NOTE(review): assumes the batch dimension is a multiple of 16 -
            # verify against the caller's clip length.
            x = self.shift(x, 16, fold_div=self.f, inplace=True)
        x = self.cond_bn1(x, labels)
        x = self.relu(x)
        x = F.interpolate(x, scale_factor=2, mode='nearest') # upsample
        x = self.snconv2d1(x)
        x = self.cond_bn2(x, labels)
        x = self.relu(x)
        x = self.snconv2d2(x)
        # Shortcut branch: upsample then 1x1 projection.
        x0 = F.interpolate(x0, scale_factor=2, mode='nearest') # upsample
        x0 = self.snconv2d0(x0)
        out = x + x0
        return out
    def shift(self, x, n_segment, fold_div=3, inplace=False):
        """Shift 1/fold_div of channels backward and forward along time."""
        nt, c, h, w = x.size()
        # Batch is laid out as (n_batch * n_segment); recover the time axis.
        n_batch = nt // n_segment
        x = x.view(n_batch, n_segment, c, h, w)
        fold = c // fold_div
        if inplace:
            out = InplaceShift.apply(x, fold)
        else:
            out = torch.zeros_like(x)
            out[:, :-1, :fold] = x[:, 1:, :fold] # shift left
            out[:, 1:, fold: 2 * fold] = x[:, :-1, fold: 2 * fold] # shift right
            out[:, :, 2 * fold:] = x[:, :, 2 * fold:] # not shift
        return out.view(nt, c, h, w)
class Generator(nn.Module):
    """Generator.

    BigGAN/SAGAN-style image generator: a class embedding is concatenated to
    the noise vector, projected to a 4x4 feature map, then upsampled 2x per
    GenBlock (5 blocks: 4 -> 128) with self-attention at 32x32 resolution.
    The first two blocks also apply the temporal channel shift.
    """
    def __init__(self, z_dim, g_conv_dim, num_classes, fold):
        super(Generator, self).__init__()
        self.z_dim = z_dim
        self.g_conv_dim = g_conv_dim
        # +120 for the concatenated class embedding.
        self.snlinear0 = snlinear(in_features=z_dim+120, out_features=g_conv_dim*16*4*4)
        self.embed = sn_embedding(num_classes, 120)
        self.block1 = GenBlock(g_conv_dim*16, g_conv_dim*16, num_classes, fold=fold)
        self.block2 = GenBlock(g_conv_dim*16, g_conv_dim*8, num_classes, fold=fold)
        self.block3 = GenBlock(g_conv_dim*8, g_conv_dim*4, num_classes, fold=fold)
        self.self_attn = Self_Attn(g_conv_dim*4)
        self.block4 = GenBlock(g_conv_dim*4, g_conv_dim*2, num_classes, fold=fold)
        self.block5 = GenBlock(g_conv_dim*2, g_conv_dim, num_classes, fold=fold)
        self.bn = nn.BatchNorm2d(g_conv_dim, eps=1e-5, momentum=0.0001, affine=True)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=g_conv_dim, out_channels=3, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()
        xavier_uniform_(self.embed.weight)
        # Weight init
        self.apply(init_weights)
    def forward(self, z, labels):
        # n x z_dim
        embed = self.embed(labels)
        # The concatenated (z, embedding) vector conditions every block.
        # NOTE(review): GenBlock's cond-BN expects a 240-dim condition, which
        # assumes z_dim == 120 - confirm against the training configuration.
        labels_ = torch.cat((z, embed), 1)
        act0 = self.snlinear0(labels_)            # n x g_conv_dim*16*4*4
        act0 = act0.view(-1, self.g_conv_dim*16, 4, 4) # n x g_conv_dim*16 x 4 x 4
        # Only the first two blocks apply the temporal shift (shift=True).
        act1 = self.block1(act0, labels_, True)    # n x g_conv_dim*16 x 8 x 8
        act2 = self.block2(act1, labels_, True)    # n x g_conv_dim*8 x 16 x 16
        act3 = self.block3(act2, labels_, False)  # n x g_conv_dim*4 x 32 x 32
        act3 = self.self_attn(act3)         # n x g_conv_dim*4 x 32 x 32
        act4 = self.block4(act3, labels_, False)  # n x g_conv_dim*2 x 64 x 64
        act5 = self.block5(act4, labels_, False)  # n x g_conv_dim  x 128 x 128
        act5 = self.bn(act5)                # n x g_conv_dim  x 128 x 128
        act5 = self.relu(act5)              # n x g_conv_dim  x 128 x 128
        act6 = self.snconv2d1(act5)         # n x 3 x 128 x 128
        act6 = self.tanh(act6)              # n x 3 x 128 x 128
        return act6
    def load_my_state_dict(self, state_dict):
        """Copy a checkpoint, SKIPPING class-conditional parameters.

        Parameters whose names contain 'embed', 'gain', 'bias', 'snlinear0',
        'cond_bn1' or 'cond_bn2' are left at their current values - only
        params passing all three nested filters are copied.
        """
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if ('embed' not in name and 'gain' not in name):
                if ('bias' not in name and 'snlinear0' not in name):
                    if ('cond_bn1' not in name and 'cond_bn2' not in name):
                        param = param.data
                        own_state[name].copy_(param)
class DiscOptBlock(nn.Module):
    """First discriminator block ("optimized" BigGAN block).

    Main path: conv -> ReLU -> conv -> avg-pool; shortcut: avg-pool then
    a 1x1 projection. The two paths are summed.
    """
    def __init__(self, in_channels, out_channels):
        super(DiscOptBlock, self).__init__()
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=3, stride=1, padding=1)
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels,
                                  kernel_size=3, stride=1, padding=1)
        self.downsample = nn.AvgPool2d(2)
        # 1x1 projection so the shortcut matches the output channel count.
        self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        residual = self.snconv2d1(x)
        residual = self.relu(residual)
        residual = self.snconv2d2(residual)
        residual = self.downsample(residual)
        shortcut = self.downsample(x)
        shortcut = self.snconv2d0(shortcut)
        return residual + shortcut
class DiscBlock(nn.Module):
    """Discriminator residual block with optional down-sampling.

    The shortcut receives a 1x1 projection whenever the block down-samples
    or changes the channel count, mirroring the main path.
    """
    def __init__(self, in_channels, out_channels):
        super(DiscBlock, self).__init__()
        self.relu = nn.ReLU(inplace=True)
        self.snconv2d1 = snconv2d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=3, stride=1, padding=1)
        self.snconv2d2 = snconv2d(in_channels=out_channels, out_channels=out_channels,
                                  kernel_size=3, stride=1, padding=1)
        self.downsample = nn.AvgPool2d(2)
        # True when the shortcut needs a channel projection even without pooling.
        self.ch_mismatch = in_channels != out_channels
        self.snconv2d0 = snconv2d(in_channels=in_channels, out_channels=out_channels,
                                  kernel_size=1, stride=1, padding=0)
    def forward(self, x, downsample=True):
        shortcut = x
        out = self.relu(x)
        out = self.snconv2d1(out)
        out = self.relu(out)
        out = self.snconv2d2(out)
        if downsample:
            out = self.downsample(out)
        if downsample or self.ch_mismatch:
            shortcut = self.snconv2d0(shortcut)
            if downsample:
                shortcut = self.downsample(shortcut)
        return out + shortcut
class Discriminator(nn.Module):
    """Discriminator.

    Projection discriminator for 128x128 RGB images: residual down-sampling
    blocks with self-attention at 32x32, global-sum pooling, then an
    unconditional linear score plus a class-embedding projection term.
    """
    def __init__(self, d_conv_dim, num_classes):
        super(Discriminator, self).__init__()
        self.d_conv_dim = d_conv_dim
        self.opt_block1 = DiscOptBlock(3, d_conv_dim)
        self.block1 = DiscBlock(d_conv_dim, d_conv_dim*2)
        self.self_attn = Self_Attn(d_conv_dim*2)
        self.block2 = DiscBlock(d_conv_dim*2, d_conv_dim*4)
        self.block3 = DiscBlock(d_conv_dim*4, d_conv_dim*8)
        self.block4 = DiscBlock(d_conv_dim*8, d_conv_dim*16)
        self.block5 = DiscBlock(d_conv_dim*16, d_conv_dim*16)
        self.relu = nn.ReLU(inplace=True)
        # Unconditional real/fake score.
        self.snlinear1 = snlinear(in_features=d_conv_dim*16, out_features=1)
        # Class embedding used for the projection term.
        self.sn_embedding1 = sn_embedding(num_classes, d_conv_dim*16)
        # Weight init
        self.apply(init_weights)
        xavier_uniform_(self.sn_embedding1.weight)
    def forward(self, x, labels):
        # n x 3 x 128 x 128
        h0 = self.opt_block1(x) # n x d_conv_dim   x 64 x 64
        h1 = self.block1(h0)    # n x d_conv_dim*2 x 32 x 32
        h1 = self.self_attn(h1) # n x d_conv_dim*2 x 32 x 32
        h2 = self.block2(h1)    # n x d_conv_dim*4 x 16 x 16
        h3 = self.block3(h2)    # n x d_conv_dim*8 x 8 x 8
        h4 = self.block4(h3)    # n x d_conv_dim*16 x 4 x 4
        # Last block keeps the spatial size (no down-sampling).
        h5 = self.block5(h4, downsample=False)  # n x d_conv_dim*16 x 4 x 4
        h5 = self.relu(h5)              # n x d_conv_dim*16 x 4 x 4
        # Global sum pooling over the spatial dimensions.
        h6 = torch.sum(h5, dim=[2,3])   # n x d_conv_dim*16
        output1 = torch.squeeze(self.snlinear1(h6)) # n x 1
        # Projection: inner product between pooled features and class embedding.
        h_labels = self.sn_embedding1(labels)   # n x d_conv_dim*16
        proj = torch.mul(h6, h_labels)          # n x d_conv_dim*16
        output2 = torch.sum(proj, dim=[1])      # n x 1
        # Out: unconditional score + projection term.
        output = output1 + output2              # n x 1
        return output
    def load_my_state_dict(self, state_dict):
        """Copy a checkpoint, skipping any parameter whose name contains 'embed'."""
        own_state = self.state_dict()
        for name, param in state_dict.items():
            if 'embed' not in name:
                # backwards compatibility for serialized parameters
                param = param.data
                own_state[name].copy_(param)
class Discriminator_3D(nn.Module):
    """Video (3-d conv) projection discriminator.

    Four stride-2 spectral-norm 3-d convolutions halve time and space each
    step, features are sum-pooled over the spatial axes, and the score is an
    unconditional linear term plus a class-embedding projection.
    NOTE(review): the ``attention`` and ``T`` constructor arguments are
    accepted but never used in this class body.
    """
    def __init__(self, d_conv_dim, num_classes, attention, T=16):
        super(Discriminator_3D, self).__init__()
        self.main = nn.Sequential(
            # input is (nc) x T x 96 x 96; positional args are
            # kernel_size=4, stride=2, padding=1 for every conv below.
            snconv3d(3, d_conv_dim, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x T/2 x 48 x 48
            snconv3d(d_conv_dim, d_conv_dim * 2, 4, 2, 1, bias=False),
            nn.BatchNorm3d(d_conv_dim * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x T/4 x 24 x 24
            snconv3d(d_conv_dim * 2, d_conv_dim * 4, 4, 2, 1, bias=False),
            nn.BatchNorm3d(d_conv_dim * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x T/8 x 12 x 12
            snconv3d(d_conv_dim * 4, d_conv_dim * 8, 4, 2, 1, bias=False),
            nn.BatchNorm3d(d_conv_dim * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x T/16 x 6 x 6
        )
        self.linear = snlinear(d_conv_dim * 8, 1)
        self.embed = sn_embedding(num_classes, d_conv_dim*8)
        # Weight init
        self.apply(init_weights)
        xavier_uniform_(self.embed.weight)
    def forward(self, input, class_id):
        output = self.main(input)
        # Sum-pool over height/width, flattening the remaining time axis
        # into the batch dimension.
        output = torch.sum(output, dim=[3,4]).view(-1, output.size(1))
        output_linear = torch.squeeze(self.linear(output))
        y = class_id.long()
        embed = self.embed(y)
        # Projection term: per-sample inner product with the class embedding.
        prod = (output * embed).sum(1)
        return output_linear + prod
class GRU(nn.Module):
    """Frame-sequence generator that unrolls a GRUCell for ``n_frames`` steps.

    Each step's hidden state is projected back to the input size with a
    spectrally normalized linear layer and fed in as the next input; every
    output is batch-normalized (non-affine) before being stacked into a
    (n_frames, batch, input_size) tensor.

    ``initHidden(batch_size)`` must be called before ``forward``.
    NOTE(review): ``self.drop`` is defined but never applied in forward;
    kept for state-dict compatibility.
    """
    def __init__(self, input_size, hidden_size, dropout=0, gpu=True):
        super(GRU, self).__init__()
        output_size = input_size
        self._gpu = gpu
        self.hidden_size = hidden_size
        # define layers
        self.gru = nn.GRUCell(input_size, hidden_size)
        self.drop = nn.Dropout(p=dropout)
        self.linear = snlinear(hidden_size, output_size)
        self.bn = nn.BatchNorm1d(output_size, affine=False)
    def forward(self, inputs, n_frames):
        """Unroll the cell; returns a (n_frames, batch, input_size) tensor."""
        outputs = []
        for i in range(n_frames):
            self.hidden = self.gru(inputs, self.hidden)
            inputs = self.linear(self.hidden)
            outputs.append(inputs)
        outputs = [self.bn(elm) for elm in outputs]
        return torch.stack(outputs)
    def initWeight(self, init_forget_bias=1):
        """Xavier-init weights and bias the GRU update gate toward remembering."""
        for name, params in self.named_parameters():
            if 'weight' in name:
                # In-place initializers replace init.xavier_uniform /
                # init.constant, which are deprecated (and later removed).
                init.xavier_uniform_(params)
            # GRUCell parameters are named bias_ih / bias_hh (no '_l' layer
            # suffix - that suffix belongs to nn.GRU), so match without it;
            # the old '_l' patterns never matched and the biases were zeroed.
            elif 'gru.bias_ih' in name:
                b_ir, b_iz, b_in = params.chunk(3, 0)
                init.constant_(b_iz, init_forget_bias)
            elif 'gru.bias_hh' in name:
                b_hr, b_hz, b_hn = params.chunk(3, 0)
                init.constant_(b_hz, init_forget_bias)
            else:
                init.constant_(params, 0)
    def initHidden(self, batch_size):
        """Reset the recurrent state to zeros (on GPU when self._gpu is set)."""
        # torch.zeros replaces the deprecated Variable wrapper; behavior is
        # identical on modern PyTorch.
        self.hidden = torch.zeros(batch_size, self.hidden_size)
        if self._gpu:
            self.hidden = self.hidden.cuda()
|
# https://atcoder.jp/contests/abc032/tasks/abc032_b
# Count the number of distinct substrings of length k in s.
s = input()
k = int(input())
distinct = {s[i:i + k] for i in range(len(s) - k + 1)}
print(len(distinct))
|
""" NLPIA Chapter 2 Section 2.1 Code Listings and Snippets """
import pandas as pd
sentence = "Thomas Jefferson began building Monticello at the age of twenty-six."
sentence.split()
# ['Thomas', 'Jefferson', 'began', 'building', 'Monticello', 'at', 'the', 'age', 'of', 'twenty-six.']
# As you can see, this simple Python function already does a decent job tokenizing the example sentence. A couple more vanilla python statements and you can create numerical vector representations for each word.
sorted(dict([(token, 1) for token in sentence.split()]).items())
[('Jefferson', 1),
('Monticello', 1),
('Thomas', 1),
('age', 1),
('at', 1),
('began', 1),
('building', 1),
('of', 1),
('the', 1),
('twenty-six.', 1)]
# A slightly better data structure
sentence = "Thomas Jefferson began building Monticello at the age of 26."
df = pd.DataFrame(pd.Series(dict([(token, 1) for token in sentence.split()])), columns=['sent']).T
df
# 26. Jefferson Monticello Thomas age at began building of the
# 0 1 1 1 1 1 1 1 1 1 1
# And a pandas dataframe is great for holding multiple texts (sentences, tweets, or documents)
sentences = "Construction was done mostly by local masons and carpenters.\n" \
"He moved into the South Pavilion in 1770.\n" \
"Turning Monticello into a neoclassical masterpiece in the Palladian style was his perennial project.\n"
for i, sent in enumerate(sentences.split('\n')):
df['sent{}'.format(i)] = dict([(token, 1) for token in sent.split()])
|
## Python Script to pull turnstile data from http://web.mta.info/developers/turnstile.html
## The purpose of this script is to extract the data off the web and combine it into one file.
## The combined file can be cleaned and used for data exploration and forecasting.
## Template for extracting data taken from:
## https://towardsdatascience.com/mta-turstile-data-my-first-taste-of-a-data-science-project-493b03f1708a
## changes made: Data pulled from start date to current date rather than for X number of weeks
# import packages
import pandas as pd
# First week of the dataset (files are published weekly, week ending start_date).
# pd.Timestamp replaces pd.datetime, which was deprecated and then removed
# from pandas; behavior is otherwise identical.
start_date = filedate = pd.Timestamp(2020, 4, 7)
# URL template for the weekly MTA turnstile files.
filename_regex = "http://web.mta.info/developers/data/nyct/turnstile/turnstile_{}.txt"
filelist = []
while filedate < pd.Timestamp.now():
    # Build the yymmdd suffix used in the weekly file names.
    filedate_str = str(filedate.year)[2:4] + str(filedate.month).zfill(2) + str(filedate.day).zfill(2)
    filename = filename_regex.format(filedate_str)
    # Download the weekly file; DATE is parsed so .dt accessors work below.
    df = pd.read_csv(filename, parse_dates=['DATE'], keep_date_col=True)
    filelist.append(df)
    # advance to the next week
    filedate += pd.Timedelta(days=7)
mta_test = pd.concat(filelist, axis=0, ignore_index=True)
# The source data has trailing whitespace in the EXITS column header.
mta_test.rename(columns={'EXITS ':'EXITS'}, inplace=True)
# Aggregating the data to the day.
## Counters are cumulative, so daily traffic per turnstile is max - min.
mta_entries = mta_test.groupby(['STATION','C/A','UNIT','SCP','DATE']).ENTRIES.max() - mta_test.groupby(['STATION','C/A','UNIT','SCP','DATE']).ENTRIES.min()
mta_exits = mta_test.groupby(['STATION','C/A','UNIT','SCP','DATE']).EXITS.max() - mta_test.groupby(['STATION','C/A','UNIT','SCP','DATE']).EXITS.min()
## flattens the data
mta_entries_flat = mta_entries.reset_index()
mta_exits_flat = mta_exits.reset_index()
mta_entries_exits = pd.merge(mta_entries_flat, mta_exits_flat, how='outer')
## append weekday and traffic columns to the dataset
mta_entries_exits['WEEKDAY'] = mta_entries_exits['DATE'].dt.day_name()
mta_entries_exits['TRAFFIC'] = mta_entries_exits['ENTRIES'] + mta_entries_exits['EXITS']
mta_entries_exits['WEEKDAY_INDEX'] = mta_entries_exits['DATE'].dt.weekday
## aggregate by station rather than by individual turnstile
mta_bystation = mta_entries_exits.groupby(['STATION','DATE','WEEKDAY', 'WEEKDAY_INDEX']).sum().reset_index()
PennTS = mta_bystation.loc[mta_bystation.STATION == '34 ST-PENN STA']
PennTS.to_csv('PennTS.csv')
|
from typing import Optional
class TreeNode:
    """Plain binary-tree node."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def isSymmetric(self, root: Optional[TreeNode]) -> bool:
        """True iff the tree is a mirror of itself.

        BFS level by level: a tree is symmetric exactly when every level's
        value sequence (with None for missing children) is a palindrome.
        """
        level = [root]
        while level:
            snapshot = [node.val if node else None for node in level]
            if snapshot != snapshot[::-1]:
                return False
            next_level = []
            for node in level:
                if node:
                    next_level.append(node.left)
                    next_level.append(node.right)
            level = next_level
        return True
|
from rudra.utils.validation import check_field_exists, check_url_alive
import pytest
def test_check_field_exists():
    """check_field_exists reports missing fields for lists and dicts and
    raises ValueError for non-container input."""
    list_payload = ['a', 'b', 'c']
    assert 'd' in check_field_exists(list_payload, ['a', 'd'])
    assert not check_field_exists(list_payload, ['a', 'c'])
    dict_payload = {'a': 1, 'b': 2, 'c': 3}
    assert 'd' in check_field_exists(dict_payload, ['a', 'd'])
    with pytest.raises(ValueError):
        check_field_exists(111, ['a'])
def test_check_url_alive():
    """check_url_alive is truthy for a reachable host, falsy for a bogus one.

    NOTE: network-dependent test - requires outbound connectivity.
    """
    assert check_url_alive('https://google.com')
    assert not check_url_alive('https://234j23ksadasca.com')
|
from __future__ import division
import json
import pycountry
from django.shortcuts import render
from django.db.models import Sum
from models import *
from auth.decorators import loginRequired
from common.utils import getHttpResponse as HttpResponse
from common.utils import getUnixTimeMillisec
from common.decorators import allowedMethods
from django.views.decorators.csrf import csrf_exempt
import datetime
from dateutil import parser
from django.db.models import Q
from dateutil.relativedelta import relativedelta
try:
from django.db.models.loading import get_model
except ImportError:
from django.apps import apps
get_model = apps.get_model
@allowedMethods(["GET"]) #only GET requests will be allowed
#@loginRequired #check for login
def test_success(request):
return HttpResponse(final_data)
@allowedMethods(["POST"])
def test_fail(request):
return HttpResponse("sample error resp", error=1)
def get_filtered_graph_data(model, country, project, start_date, end_date):
    """Aggregate attendance actual/target at the requested drill-down level.

    Levels (checked in order):
      * no country                  -> one region-wide total ('South East Asia')
      * country, no project         -> per-project totals for that country
      * country == project == 'all' -> per-country totals
      * country and project         -> totals for that single project
    ``start_date``/``end_date`` bound the ``day`` field when provided.
    Returns ``(graph_data, return_key)`` where ``return_key`` names the level
    and is None when nothing matched (graph_data then stays empty).
    """
    model_objs = get_model('api', model).objects.all()
    return_key = None
    graph_data = []
    # Maps return_key to the row field used as each chart point's name.
    filter_map = {
        'projects': 'project',
        'countries': 'countrycode',
        'project': 'project'
    }
    # Level 1: no country -> a single aggregate across the whole region.
    if not country:
        query = Q()
        if start_date:
            query = query & Q(day__gte=start_date)
        if end_date:
            query = query & Q(day__lte=end_date)
        # An empty Q() is falsy, hence the explicit branch.
        if query:
            query_data = [model_objs.filter(query).aggregate(
                actual=Sum('attendance_achvd'), target=Sum('attendance_target'))]
        else:
            query_data = [model_objs.all().aggregate(actual=Sum('attendance_achvd'),
                target=Sum('attendance_target'))]
        # Only report the region total when any target exists at all.
        if query_data[0]['target']:
            return_key = 'South East Asia'
    # Level 2: country chosen, no project -> per-project breakdown.
    if country and not project:
        query = Q(countrycode=country)
        if start_date:
            query = query & Q(day__gte=start_date)
        if end_date:
            query = query & Q(day__lte=end_date)
        query_data = model_objs.filter(query).values('project').annotate(
            actual=Sum('attendance_achvd'), target=Sum('attendance_target'))
        return_key = 'projects'
    # Level 3: both provided; 'all'/'all' means per-country, otherwise a single project.
    if country and project:
        if country == 'all' and project == 'all':
            query = Q()
            if start_date:
                query = query & Q(day__gte=start_date)
            if end_date:
                query = query & Q(day__lte=end_date)
            if query:
                query_data = model_objs.filter(query).\
                    values('countrycode').annotate(actual=Sum('attendance_achvd'),
                    target=Sum('attendance_target'))
                return_key = 'countries'
            else:
                query_data = model_objs.all().values('countrycode').annotate(
                    actual=Sum('attendance_achvd'),target=Sum('attendance_target'))
                return_key = 'countries'
        else:
            query = Q(project=project)
            if start_date:
                query = query & Q(day__gte=start_date)
            if end_date:
                query = query & Q(day__lte=end_date)
            query_data = model_objs.filter(query).values('project').annotate(
                actual=Sum('attendance_achvd'), target=Sum('attendance_target'))
            return_key = 'project'
    # Shape the aggregated rows into chart points; drilldown is enabled for
    # every level above a single project.
    if return_key:
        for datum in query_data:
            graph_point = {
                'name': 'South East Asia' if return_key=='South East Asia' else datum[filter_map[return_key]],
                'actual': datum['actual'],
                'target': datum['target'],
                # NOTE(review): raises ZeroDivisionError/TypeError when target
                # is 0/None at the non-region levels - confirm the data
                # guarantees a positive target here.
                'y': round((datum['actual']/datum['target'])*100, 2),
                'drilldown': True if return_key != 'project' else False
            }
            graph_data.append(graph_point)
    return graph_data, return_key
@loginRequired
@allowedMethods(["GET"])
def attendance(request):
    """Attendance KPI endpoint: actual vs. target, drillable by country/project."""
    params = request.GET
    country = params.get('country', None)
    project = params.get('project', None)
    day = params.get('day', None)  # accepted but not used downstream
    start_date = params.get('start_date', None)
    end_date = params.get('end_date', None)
    graph_data, detail = get_filtered_graph_data('kpi', country, project, start_date, end_date)
    return HttpResponse({
        'name': detail,
        'colorByPoint': True,
        'data': graph_data,
    })
@loginRequired
@csrf_exempt
@allowedMethods(["GET"])
def osa(request):
    """On-shelf-availability rows with a computed percent per record."""
    rows = Kpi.objects.all().values(
        'countrycode', 'project', 'moc', 'day', 'osa_target', 'osa_available')
    for row in rows:
        try:
            row['osa_percent'] = round((row['osa_available'] / row['osa_target']) * 100, 2)
        except ZeroDivisionError:
            # Zero target means no meaningful percentage.
            row['osa_percent'] = 0
        row['day'] = row['day'].strftime('%d-%m-%Y')
    return HttpResponse(list(rows))
@loginRequired
def pop(request):
    """Point-of-purchase rows with a computed percent per record."""
    rows = Kpi.objects.all().values(
        'countrycode', 'project', 'moc', 'day', 'pop_target', 'pop_available')
    for row in rows:
        try:
            row['pop_percent'] = round((row['pop_available'] / row['pop_target']) * 100, 2)
        except ZeroDivisionError:
            row['pop_percent'] = 0
        row['day'] = row['day'].strftime('%d-%m-%Y')
    return HttpResponse(list(rows))
@loginRequired
def npd(request):
    """New-product-distribution rows with a computed percent per record."""
    rows = Kpi.objects.all().values(
        'countrycode', 'project', 'moc', 'day', 'npd_target', 'npd_available')
    for row in rows:
        try:
            row['npd_percent'] = round((row['npd_available'] / row['npd_target']) * 100, 2)
        except ZeroDivisionError:
            row['npd_percent'] = 0
        row['day'] = row['day'].strftime('%d-%m-%Y')
    return HttpResponse(list(rows))
@loginRequired
def outlets(request):
    """Outlet-coverage rows with a computed percent per record."""
    rows = Kpi.objects.all().values(
        'countrycode', 'project', 'moc', 'day', 'outlets_done', 'outlets_total')
    for row in rows:
        try:
            row['outlets_percent'] = round((row['outlets_done'] / row['outlets_total']) * 100, 2)
        except ZeroDivisionError:
            row['outlets_percent'] = 0
        row['day'] = row['day'].strftime('%d-%m-%Y')
    return HttpResponse(list(rows))
def format_nodes(node_list, node_map):
    """Build sankey node dicts from names.

    Also fills ``node_map`` (mutated in place) with name -> index so links
    can be resolved later.
    """
    formatted = []
    for position, name in enumerate(node_list):
        node_map[name] = position
        formatted.append({'node': position, 'name': name})
    return formatted
def get_nodes_sankey(kpis):
    """Return (nodes, node_map) for a sankey chart.

    The nodes are the distinct country codes of *kpis* followed by the
    distinct projects; ``node_map`` maps each name to its node index.
    """
    countries = list(kpis.distinct().values_list('countrycode', flat=True))
    projects = list(kpis.distinct().values_list('project', flat=True))
    node_map = {}
    # format_nodes fills node_map as a side effect.
    # (Dead 'nodes = []' pre-assignment from the original removed.)
    nodes = format_nodes(countries + projects, node_map)
    return nodes, node_map
def get_node_links_sankey(kpis, node_map):
    """Build sankey links (country -> project) weighted by promo attainment %.

    Rows without any promo target are skipped entirely.
    """
    aggregated = kpis.values(
        'countrycode', 'project').annotate(
        available=Sum('promo_available'), target=Sum('promo_target'))
    links = []
    for row in aggregated:
        link = {
            'source': node_map[row['countrycode']],
            'value': round((row['available'] / row['target']) * 100, 0) if row['target'] else 0,
            'target': node_map[row['project']],
        }
        if row['target']:
            links.append(link)
    return links
#@loginRequired
@csrf_exempt
@allowedMethods(["GET"])
def promo(request):
    """Sankey data for promo attainment, filterable by date range/country/project."""
    params = request.GET
    start_date = params.get('start_date', None)
    end_date = params.get('end_date', None)
    country = params.get('country', None)
    project = params.get('project', None)
    filters = Q()
    if start_date:
        filters &= Q(day__gte=parser.parse(start_date).date())
    if end_date:
        filters &= Q(day__lte=parser.parse(end_date).date())
    if country:
        filters &= Q(countrycode=country)
    if project:
        filters &= Q(project=project)
    # An empty Q() is falsy, so an unfiltered request sees all rows.
    kpis = Kpi.objects.filter(filters) if filters else Kpi.objects.all()
    nodes, node_map = get_nodes_sankey(kpis)
    links = get_node_links_sankey(kpis, node_map)
    return HttpResponse({"nodes": nodes, "links": links})
# #@loginRequired
# @csrf_exempt
# @allowedMethods(["GET"])
# def promo(request):
# start_date = request.GET.get('start_date', None)
# end_date = request.GET.get('end_date', None)
# country = request.GET.get('country', None)
# project = request.GET.get('project', None)
# query = Q()
# if start_date:
# start_date = parser.parse(start_date).date()
# query = query & Q(day__gte=start_date)
# if end_date:
# end_date = parser.parse(end_date).date()
# query = query & Q(day__lte=end_date)
# if country:
# query = query & Q(countrycode = country)
# if project:
# query = query & Q(project = project)
# if query:
# kpis = Kpi.objects.filter(query)
# else:
# kpis = Kpi.objects.all()
# kpi_values = kpis.values('day').annotate(
# actual=Sum('promo_available'), target=Sum('promo_target'))
# resp_list = []
# for kpi_val in kpi_values:
# try:
# kpi_val['percent'] = round(
# (kpi_val['actual']/kpi_val['target'])*100, 2)
# except ZeroDivisionError as e:
# kpi_val['percent'] = 0
# kpi_val['day'] = getUnixTimeMillisec(kpi_val['day'])
# resp_list.append([kpi_val['day'], kpi_val['percent']])
# return HttpResponse(resp_list)
@loginRequired
def heatmap(request):
    """Per-country attendance attainment for a map/heatmap widget.

    ``format=dict`` (the default) returns [{'name', 'code', 'value'}] with
    ISO alpha-2 codes resolved via pycountry (unresolvable countries are
    dropped); any other format returns ['COUNTRY,percent'] string lists.
    """
    _format = request.GET.get('format', 'dict')
    start_date = request.GET.get('start_date', None)
    end_date = request.GET.get('end_date', None)
    country = request.GET.get('country', None)
    project = request.GET.get('project', None)
    # Build optional filters; an empty Q() is falsy, hence the branch below.
    query = Q()
    if start_date:
        query = query & Q(day__gte=start_date)
    if end_date:
        query = query & Q(day__lte=end_date)
    if country:
        query = query & Q(countrycode = country)
    if project:
        query = query & Q(project = project)
    if query:
        query_data = Kpi.objects.filter(query).\
            values('countrycode').annotate(
            actual=Sum('attendance_achvd'), target=Sum('attendance_target'))
    else:
        query_data = Kpi.objects.all().values('countrycode').\
            annotate(actual=Sum('attendance_achvd'), target=Sum('attendance_target'))
    data_list = []
    # Country name -> ISO alpha-2 code lookup built from pycountry.
    py_countries = {}
    for country in pycountry.countries:
        py_countries[country.name] = country.alpha_2
    # Local spellings that differ from pycountry's official names.
    stnd_dict = {'Hongkong': 'Hong Kong', 'Taiwan' : 'Taiwan, Province of China',
        'Vietnam' : 'Viet Nam'}
    for data in query_data:
        if _format == 'dict':
            cnt_name = data['countrycode'].title()
            cnt_code = stnd_dict.get(cnt_name, cnt_name)
            data_dict = {}
            data_dict['name'] = cnt_name
            data_dict['code'] = py_countries.get(cnt_code, 'Unknown code')
            # NOTE(review): raises ZeroDivisionError/TypeError if target is
            # 0/None - confirm upstream data guarantees.
            data_dict['value'] = round((data['actual']/data['target'])*100, 0)
            # Drop countries that could not be mapped to an ISO code.
            if not data_dict['code'] == 'Unknown code':
                data_list.append(data_dict)
        else:
            data_list.append([data['countrycode'] + ',' + str(round((data['actual']/data['target'])*100, 2))])
    return HttpResponse(data_list)
def get_countries(request):
    """Return the distinct country codes present in the Kpi table."""
    # values_list(..., flat=True) yields the codes directly instead of
    # 1-tuples, replacing the manual unpacking loop.
    countries = Kpi.objects.filter().values_list('countrycode', flat=True).distinct()
    return HttpResponse(list(countries))
def get_projects(request):
    """Return distinct project names, optionally restricted to ?country=.

    ``country=all`` (the default) returns projects across every country.
    """
    country = request.GET.get('country', 'all')
    qs = Kpi.objects.filter() if country == 'all' else Kpi.objects.filter(countrycode=country)
    # flat=True yields names directly instead of 1-tuples.
    projects = qs.values_list('project', flat=True).distinct()
    return HttpResponse(list(projects))
|
# Rohde & Schwarz instruments
# Just add each specialization as needed
from .hmc804x import *
from .ngx200 import *
# USB Vendor ID
# There may be better sources but this one was a good start on USB Vendor IDs
# https://devicehunt.com/all-usb-vendors
USB_VID = '0AAD'  # Rohde & Schwarz USB vendor ID
# Dictionary of product/model ID (PID) and model name to allow look up of
# derived Device types: each instrument is registered under both its USB PID
# and its model-name string so callers can resolve it either way.
# NOTE(review): NGL201/NGL202/NGM201/NGM202 are not visibly imported here -
# presumably re-exported by the star imports above; verify.
dmms = {}
logic_analyzers = {}
oscilloscopes = {}
power_supplies = { NGX200.USB_PID : NGX200, # Should default to mock behaviors
    'NGX200' : NGX200, # Should default to mock behaviors
    NGL201.USB_PID : NGL201,
    'NGL201' : NGL201,
    NGL202.USB_PID : NGL202,
    'NGL202' : NGL202,
    NGM201.USB_PID : NGM201,
    'NGM201' : NGM201,
    NGM202.USB_PID : NGM202,
    'NGM202' : NGM202,
    HMC804X.USB_PID : HMC804X, # Should default to mock behaviors
    'HMC804X' : HMC804X, # Should default to mock behaviors
    HMC8041.USB_PID : HMC8041,
    'HMC8041' : HMC8041,
    HMC8042.USB_PID : HMC8042,
    'HMC8042' : HMC8042,
    HMC8043.USB_PID : HMC8043,
    'HMC8043' : HMC8043
    }
|
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 31 11:56:04 2020

Quick look at the iris dataset: load it into a DataFrame and draw a
pair plot colored by class.

@author: ariel
"""
from sklearn.datasets import load_iris
import pandas as pd
import seaborn as sns
iris_dataset = load_iris()
# Feature matrix with named columns, plus the integer class label.
iris_dataframe = pd.DataFrame(iris_dataset['data'], columns = iris_dataset.feature_names)
iris_dataframe['target'] = iris_dataset['target']
# Pairwise scatter matrix; hue separates the three species.
sns.pairplot(iris_dataframe,hue='target')
|
def solve(a, b):
    """Return True if ``a + b`` parses to a perfect square.

    ``a`` and ``b`` arrive as strings from the input line, so ``a + b`` is
    their digit concatenation and ``int()`` parses the joined number.
    """
    from math import isqrt  # local import keeps module-level deps unchanged
    r = int(a + b)
    # r is a perfect square iff floor(sqrt(r))**2 == r. Replaces the
    # original O(sqrt(r)) odd-number subtraction loop; same result for
    # every r >= 0 (including r == 0).
    root = isqrt(r)
    return root * root == r
# Read the two tokens and report whether their concatenation is a square.
a, b = input().split()
if solve(a, b):
    print("Yes")
else:
    print("No")
|
#!/usr/bin/python3
import argparse
import rsa
from ECC import ECC
from Point import Point
import binascii
import time
def create_arguments():
    """Create command line argument for this program.

    Returns the parsed argparse.Namespace (reads sys.argv).
    """
    arg_parser = argparse.ArgumentParser(
        description='Public Key based Cryptosystem'
    )
    # Positional arguments: operation and cipher family.
    arg_parser.add_argument("mode", help="Mode used in this program, can be encrypt/decrypt/keygen.\
If keygen is used and --")
    arg_parser.add_argument("cipher", help="Cipher used in this program, can be RSA/ECC")
    # Optional key/file locations.
    arg_parser.add_argument("-p", "--public_key", help="Public key file location")
    arg_parser.add_argument("-l", "--key_length", help="Length of key")
    arg_parser.add_argument("-v", "--private_key", help="Private key file location")
    arg_parser.add_argument("-f", "--file", help="File input used in this program")
    arg_parser.add_argument("-o", "--output", help="Output generated after encryption")
    return arg_parser.parse_args()
def process_rsa(args):
    """Dispatch RSA key generation, encryption or decryption based on args.

    Raises:
        Exception: on an unsupported mode or a missing key/file argument.
    """
    mode = args.mode.lower()  # hoisted: compared repeatedly below
    if mode == "keygen":
        filename_pub = args.public_key if args.public_key is not None else "key.pub"
        filename_priv = args.private_key if args.private_key is not None else "key.priv"
        key_length = int(args.key_length) if args.key_length is not None else 32
        rsa.keygen(filename_pub, filename_priv, length = key_length)
    elif mode in ("encrypt", "decrypt"):
        if args.file is None:
            raise Exception("No file input on " + args.mode + "ion process")
        if mode == "encrypt" and args.public_key is None:
            raise Exception("No public key given on " + args.mode + "ion process")
        if mode == "decrypt" and args.private_key is None:
            raise Exception("No private key given on " + args.mode + "ion process")
        output = args.output if args.output is not None else "result.encrypted"
        # BUG FIX: the original leaked the input file handle; use a context
        # manager so it is closed promptly.
        with open(args.file, 'rb') as fin:
            data = fin.read()
        print("Plaintext:\n", data)
        if mode == "encrypt":
            key = rsa.RSAPublicKey(from_file = True, filename = args.public_key)
            enc = True
        else:
            key = rsa.RSAPrivateKey(from_file = True, filename = args.private_key)
            enc = False
        result = rsa.process(encrypt=enc, data=data, RSA_key=key)
        print("Ciphertext:\n", binascii.hexlify(result))
        print("Size:", len(result), 'bytes')
        with open(output, 'wb') as fout:
            fout.write(result)
    else:
        # BUG FIX: report the offending mode (the original printed args.cipher)
        raise Exception("Unsupported Mode " + args.mode)
def process_ecc(args):
    """Dispatch ECC key generation, encryption or decryption based on args.

    The curve parameters (a, b, p), base point G and multiplier k are read
    interactively from stdin before any mode-specific work.
    """
    a, b, p = input("Insert a, b, and p variable for elliptic graph:\n").split()
    ecc = ECC(int(a), int(b), int(p))
    x, y = input("Insert base point G value:\n").split()
    ecc.set_g(Point(int(x),int(y)))
    k = input("Insert k value:\n")
    ecc.set_k(int(k))
    # BUG FIX: compare case-insensitively, consistent with process_rsa (the
    # original compared args.mode verbatim, so e.g. "Encrypt" was accepted
    # for RSA but rejected for ECC).
    mode = args.mode.lower()
    if (mode == "keygen"):
        filename_pub = args.public_key if args.public_key is not None else "key.pub"
        n = input("Insert n value:\n")
        result = ecc.generate_pkey(int(n))
        # BUG FIX: close the key file promptly (the original leaked the handle).
        with open(filename_pub, 'wb') as fout:
            fout.write(result)
        print("Public key: ", binascii.hexlify(result))
        print("Size:", len(result), 'bytes')
    elif (mode == "encrypt"):
        if args.file is None:
            raise Exception("No file input on " + args.mode + "ion process")
        if args.public_key is None:
            raise Exception("No public key given on " + args.mode + "ion process")
        output = args.output if args.output is not None else "result.encrypted"
        with open(args.file, 'rb') as fin:
            data = fin.read()
        print("Plaintext:\n", data)
        result = ecc.encrypt_data(data, args.public_key)
        print("Ciphertext:\n", binascii.hexlify(result))
        print("Size:", len(result), 'bytes')
        with open(output, 'wb') as fout:
            fout.write(result)
    elif (mode == "decrypt"):
        if args.file is None:
            raise Exception("No file input on " + args.mode + "ion process")
        output = args.output if args.output is not None else "result.encrypted"
        n = input("Insert n value\n")
        with open(args.file, 'rb') as fin:
            data = fin.read()
        print("Ciphertext:\n", binascii.hexlify(data))
        result = ecc.decrypt_data(data, int(n))
        print("Plaintext:\n", result)
        print("Size:", len(result), 'bytes')
        with open(output, 'wb') as fout:
            fout.write(result)
    else:
        # BUG FIX: report the offending mode (the original printed args.cipher)
        raise Exception("Unsupported Mode " + args.mode)
if __name__ == '__main__':
    # Parse CLI options, run the requested cipher, and report the wall time.
    args = create_arguments()
    start = time.time()
    cipher = args.cipher.upper()
    if cipher == "RSA":
        process_rsa(args)
    elif cipher == "ECC":
        process_ecc(args)
    else:
        raise Exception("Unsupported Cipher " + args.cipher)
    end = time.time()
    print("Time Elapsed", end - start, 'second')
|
#! /usr/bin/env python
import rospy
# For the state machine
from StateMachine import Smach
# To graph the state machine diagram
# import pygraphviz
# ROS messages and services
from std_msgs.msg import String, Int32
# from eagle_one_test.msg import State
# Initialize the state machine and variables
smach = Smach()
state = String()
transition = String()
# Callback functions to handle the data along the /smach/transition topic
def transition_cb(msg):
    """Record the latest requested transition; consumed by the main loop below."""
    transition.data = msg.data
# ROS initializations
rospy.init_node('qc_smach_server')
rospy.loginfo("Hulk Smach!") # Let's us know that this has loaded
rate = rospy.Rate(100) # 100Hz
# Setup the publishers and subscribers
state_pub = rospy.Publisher('/smach/state', String, queue_size=100000)
transition_sub = rospy.Subscriber('/smach/transition', String, transition_cb)
counter_pub = rospy.Publisher('/smach/counter', Int32, queue_size=1000)
counter = 0
if __name__=='__main__':
    # Main server loop: apply the most recently requested transition, then
    # publish the resulting state and an increasing heartbeat counter at 100 Hz.
    while not rospy.is_shutdown():
        smach.change_state(transition.data)
        counter_pub.publish(counter)
        print("New state: %s" % smach.state)
        state.data = smach.state
        state_pub.publish(state)
        counter += 1
        rate.sleep()
|
#! /usr/bin/env python2
class Cls(object):
    """Trivial mutable value holder used to demonstrate shallow-copy aliasing."""
    def __init__(self, value):
        self.value = value
x = [Cls(value) for value in range(10)]
# y = x[:]
# list(x) makes a *shallow* copy: y is a new list object, but its elements
# are the same Cls instances as in x.
y = list(x)
# pop() only affects y's own list; x still has all ten elements.
y.pop()
# Mutating the shared first element is visible through both x and y.
y[0].value = 42
print [datum.value for datum in x]
print [datum.value for datum in y]
|
from __future__ import print_function, division
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.ensemble import BaggingRegressor
from sklearn.externals import six
import numpy as np
import pandas as pd
__all__ = [
'BaggedRegressorImputer',
'CustomPandasTransformer',
'DummyEncoder'
]
class CustomPandasTransformer(BaseEstimator, TransformerMixin):
    """Base class for transformers that operate on pandas DataFrames."""

    def _validate_input(self, X):
        """Return X unchanged, raising TypeError unless it is a DataFrame."""
        if isinstance(X, pd.DataFrame):
            return X
        raise TypeError("X must be a DataFrame, but got type=%s" % type(X))

    @staticmethod
    def _validate_columns(X, cols):
        """Raise ValueError if any of cols is absent from X.columns."""
        present = set(X.columns)  # set for O(1) lookup
        missing = [c for c in cols if c not in present]
        if missing:
            raise ValueError("all columns must be present in X")
class DummyEncoder(CustomPandasTransformer):
    """A custom one-hot encoding class that handles previously unseen
    levels and automatically drops one level from each categorical
    feature to avoid the dummy variable trap.
    Parameters
    ----------
    columns : list
        The list of columns that should be dummied
    sep : str or unicode, optional (default='_')
        The string separator between the categorical feature name
        and the level name.
    drop_one_level : bool, optional (default=True)
        Whether to drop one level for each categorical variable.
        This helps avoid the dummy variable trap.
    tmp_nan_rep : str or unicode, optional (default="N/A")
        Each categorical variable adds a level for missing values
        so test data that is missing data will not break the encoder
    """
    def __init__(self, columns, sep='_', drop_one_level=True,
                 tmp_nan_rep='N/A'):
        self.columns = columns
        self.sep = sep
        self.drop_one_level = drop_one_level
        self.tmp_nan_rep = tmp_nan_rep
    def fit(self, X, y=None):
        """Fit one LabelEncoder per column plus a single OneHotEncoder.

        NOTE(review): relies on legacy APIs — DataFrame.append and
        OneHotEncoder(sparse=...) were removed in newer pandas/sklearn;
        this code targets the older versions in use here.
        """
        # validate the input, and get a copy of it
        X = self._validate_input(X).copy()
        # load class attributes into local scope
        tmp_nan = self.tmp_nan_rep
        # validate all the columns present
        cols = self.columns
        self._validate_columns(X, cols)
        # begin fit
        # for each column, fit a label encoder
        lab_encoders = {}
        for col in cols:
            # replace NaNs with the sentinel level before label-encoding
            vec = [tmp_nan if pd.isnull(v)
                   else v for v in X[col].tolist()]
            # if the tmp_nan value is not present in vec, make sure it is
            # so the transform won't break down
            svec = list(set(vec))
            if tmp_nan not in svec:
                svec.append(tmp_nan)
            le = LabelEncoder()
            lab_encoders[col] = le.fit(svec)
            # transform the column, re-assign
            X[col] = le.transform(vec)
        # fit a single OHE on the transformed columns - but we need to ensure
        # the N/A tmp_nan vals make it into the OHE or it will break down later.
        # this is a hack - add a row of all transformed nan levels
        ohe_set = X[cols]
        ohe_nan_row = {c: lab_encoders[c].transform([tmp_nan])[0] for c in cols}
        ohe_set = ohe_set.append(ohe_nan_row, ignore_index=True)
        ohe = OneHotEncoder(sparse=False).fit(ohe_set)
        # assign fit params
        self.ohe_ = ohe
        self.le_ = lab_encoders
        self.cols_ = cols
        return self
    def transform(self, X):
        """One-hot encode the fitted columns, dropping one level per feature
        (when drop_one_level) and mapping NaNs to the sentinel level."""
        check_is_fitted(self, 'ohe_')
        X = self._validate_input(X).copy()
        # fit params that we need
        ohe = self.ohe_
        lenc = self.le_
        cols = self.cols_
        tmp_nan = self.tmp_nan_rep
        sep = self.sep
        drop = self.drop_one_level
        # validate the cols and the new X
        self._validate_columns(X, cols)
        col_order = []
        drops = []
        for col in cols:
            # get the vec from X, transform its nans if present
            vec = [tmp_nan if pd.isnull(v)
                   else v for v in X[col].tolist()]
            le = lenc[col]
            vec_trans = le.transform(vec) # str -> int
            X[col] = vec_trans
            # get the column names (levels) so we can predict the
            # order of the output cols
            le_clz = le.classes_.tolist()
            classes = ["%s%s%s" % (col, sep, clz) for clz in le_clz]
            col_order.extend(classes)
            # if we want to drop one, just drop the last
            if drop and len(le_clz) > 1:
                drops.append(classes[-1])
        # now we can get the transformed OHE
        ohe_trans = pd.DataFrame.from_records(data=ohe.transform(X[cols]),
                                              columns=col_order)
        # set the index to be equal to X's for a smooth concat
        ohe_trans.index = X.index
        # if we're dropping one level, do so now
        if drops:
            ohe_trans = ohe_trans.drop(drops, axis=1)
        # drop the original columns from X
        X = X.drop(cols, axis=1)
        # concat the new columns
        X = pd.concat([X, ohe_trans], axis=1)
        return X
class BaggedRegressorImputer(CustomPandasTransformer):
    """Fit bagged regressor models for each of the impute columns in order
    to impute the missing values.
    Parameters
    ----------
    impute_cols : list
        The columns to impute
    base_estimator : object or None, optional (default=None)
        The base estimator to fit on random subsets of the dataset.
        If None, then the base estimator is a decision tree.
    n_estimators : int, optional (default=10)
        The number of base estimators in the ensemble.
    max_samples : int or float, optional (default=1.0)
        The number of samples to draw from X to train each base estimator.
        - If int, then draw `max_samples` samples.
        - If float, then draw `max_samples * X.shape[0]` samples.
    max_features : int or float, optional (default=1.0)
        The number of features to draw from X to train each base estimator.
        - If int, then draw `max_features` features.
        - If float, then draw `max_features * X.shape[1]` features.
    bootstrap : boolean, optional (default=True)
        Whether samples are drawn with replacement.
    bootstrap_features : boolean, optional (default=False)
        Whether features are drawn with replacement.
    n_jobs : int, optional (default=1)
        The number of jobs to run in parallel for both `fit` and `predict`.
        If -1, then the number of jobs is set to the number of cores.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    verbose : int, optional (default=0)
        Controls the verbosity of the building process.
    """
    def __init__(self, impute_cols, base_estimator=None, n_estimators=10,
                 max_samples=1.0, max_features=1.0, bootstrap=True,
                 bootstrap_features=False, n_jobs=1,
                 random_state=None, verbose=0):
        self.impute_cols = impute_cols
        self.base_estimator = base_estimator
        self.n_estimators = n_estimators
        self.max_samples = max_samples
        self.max_features = max_features
        self.bootstrap = bootstrap
        self.bootstrap_features = bootstrap_features
        self.n_jobs = n_jobs
        self.random_state = random_state
        self.verbose = verbose
    def fit(self, X, y=None):
        """Fit one BaggingRegressor per impute column on the rows where that
        column is present, using all non-impute columns as features.

        NOTE(review): assumes the non-impute feature columns are complete
        (no NaN) in the training rows — confirm upstream.
        """
        # validate that the input is a dataframe
        X = self._validate_input(X) # don't need a copy this time
        # validate the columns exist in the dataframe
        cols = self.impute_cols
        self._validate_columns(X, cols)
        # this dictionary will hold the models
        regressors = {}
        # this dictionary maps the impute column name(s) to the vecs
        targets = {c: X[c] for c in cols}
        # drop off the columns we'll be imputing as targets
        X = X.drop(cols, axis=1) # these should all be filled in (no NaN)
        # iterate the column names and the target columns
        for k, target in six.iteritems(targets):
            # split X row-wise into train/test where test is the missing
            # rows in the target
            test_mask = pd.isnull(target)
            train = X.loc[~test_mask]
            train_y = target[~test_mask]
            # fit the regressor
            regressors[k] = BaggingRegressor(
                base_estimator=self.base_estimator,
                n_estimators=self.n_estimators,
                max_samples=self.max_samples,
                max_features=self.max_features,
                bootstrap=self.bootstrap,
                bootstrap_features=self.bootstrap_features,
                n_jobs=self.n_jobs,
                random_state=self.random_state,
                verbose=self.verbose, oob_score=False,
                warm_start=False).fit(train, train_y)
        # assign fit params
        self.regressors_ = regressors
        return self
    def transform(self, X):
        """Return a copy of X with each impute column's missing values filled
        by its fitted regressor's predictions."""
        check_is_fitted(self, 'regressors_')
        X = self._validate_input(X).copy() # need a copy
        cols = self.impute_cols
        self._validate_columns(X, cols)
        # fill in the missing
        models = self.regressors_
        for k, model in six.iteritems(models):
            target = X[k]
            # split X row-wise into train/test where test is the missing
            # rows in the target
            test_mask = pd.isnull(target)
            # if there's nothing missing in the test set for this feature, skip
            if test_mask.sum() == 0:
                continue
            test = X.loc[test_mask].drop(cols, axis=1) # drop impute cols
            # generate predictions
            preds = model.predict(test)
            # impute!
            X.loc[test_mask, k] = preds
        return X
|
import math
import sys
# Global registry of Obstacle instances consulted by checkIntersectObstacles().
Obstacles = []
class Point:
    """A configuration sample: (x, y) position plus an orientation angle."""
    def __init__(self, x_coord, y_coord, thetha_coord):
        self.x_coord = x_coord
        self.y_coord = y_coord
        self.thetha_coord = thetha_coord
# For simplicity, the obstacle has the thetha occupied in [0,2*pi].
# Thus, if there's a rectangular obstacle, for any car orientation inside it, assume it's occupied by obstacle
class Obstacle:
    # The rectangle obstacle spans x coord horizontally from start_x to end_x.
    # It also spans y coord vertically from start_y to end_y
    def __init__(self, startx, starty, endx, endy):
        """Store a normalized rectangle with x_start <= x_end and y_start <= y_end."""
        # BUG FIX: the original x-swap assigned to a typo'd name
        # ('starttime' instead of 'startx'), so whenever endx < startx the
        # rectangle collapsed to zero width (x_start == x_end == startx).
        if endx < startx:
            startx, endx = endx, startx
        if endy < starty:
            starty, endy = endy, starty
        self.x_start = startx
        self.y_start = starty
        self.x_end = endx
        self.y_end = endy
def contained(point, obstacle):
    """Return True if the point lies strictly inside the obstacle rectangle.

    Points exactly on the boundary are treated as outside.
    """
    inside_x = obstacle.x_start < point.x_coord < obstacle.x_end
    inside_y = obstacle.y_start < point.y_coord < obstacle.y_end
    return inside_x and inside_y
def intersect(point_start, point_end, obstacle):
    """Return True if the segment from point_start to point_end crosses the obstacle.

    First checks endpoint containment, then intersects the segment's supporting
    line with each of the rectangle's four sides (left/right sides via the
    x-projection, bottom/top sides via the y-projection).
    """
    if(contained(point_start,obstacle) or contained(point_end,obstacle)):
        return True
    x_difference = point_end.x_coord - point_start.x_coord
    y_difference = point_end.y_coord - point_start.y_coord
    y_min = min(point_start.y_coord, point_end.y_coord)
    y_max = max(point_start.y_coord, point_end.y_coord)
    x_min = min(point_start.x_coord, point_end.x_coord)
    x_max = max(point_start.x_coord, point_end.x_coord)
    # Quick reject: the segment's bounding box misses the rectangle entirely.
    if(y_max <= obstacle.y_start or y_min >= obstacle.y_end or x_max <= obstacle.x_start or x_min >= obstacle.x_end):
        return False
    #y of intersection between the projection of the line to the left side of obstacle
    if(x_difference != 0):
        ystart_intersect = point_start.y_coord + ((obstacle.x_start - point_start.x_coord)/x_difference)*y_difference
        if(obstacle.y_start < ystart_intersect and ystart_intersect < obstacle.y_end):
            if(y_min <= ystart_intersect and ystart_intersect <= y_max):
                return True
    #y of intersection between the projection of the line to the right side of obstacle
    if(x_difference != 0):
        yend_intersect = point_start.y_coord + ((obstacle.x_end - point_start.x_coord)/x_difference)*y_difference
        if(obstacle.y_start < yend_intersect and yend_intersect < obstacle.y_end):
            if(y_min <= yend_intersect and yend_intersect <= y_max):
                return True
    #x of intersection between the projection of the line to the bottom side of obstacle
    if(y_difference != 0):
        xstart_intersect = point_start.x_coord + ((obstacle.y_start - point_start.y_coord)/y_difference)*x_difference
        if(obstacle.x_start < xstart_intersect and xstart_intersect < obstacle.x_end):
            if(x_min <= xstart_intersect and xstart_intersect <= x_max):
                return True
    #x of intersection between the projection of the line to the top side of obstacle
    # BUG FIX: guard the division for horizontal segments (the original
    # computed this unconditionally and raised ZeroDivisionError when
    # y_difference == 0), and read obstacle.x_start instead of the
    # nonexistent attribute obstacle.t_start (AttributeError).
    if(y_difference != 0):
        xend_intersect = point_start.x_coord + ((obstacle.y_end - point_start.y_coord)/y_difference)*x_difference
        if(obstacle.x_start < xend_intersect and xend_intersect < obstacle.x_end):
            if(x_min <= xend_intersect and xend_intersect <= x_max):
                return True
    return False
def intersect_bounded_dist_time(obstacle1, obstacle2):
    """Return True if any of obstacle1's four edges intersects obstacle2.

    Edges are checked in the order left, bottom, right, top, short-circuiting
    on the first hit.
    """
    left = (Point(obstacle1.x_start, obstacle1.y_start, 0),
            Point(obstacle1.x_start, obstacle1.y_end, 0))
    bottom = (Point(obstacle1.x_start, obstacle1.y_start, 0),
              Point(obstacle1.x_end, obstacle1.y_start, 0))
    right = (Point(obstacle1.x_end, obstacle1.y_start, 0),
             Point(obstacle1.x_end, obstacle1.y_end, 0))
    top = (Point(obstacle1.x_start, obstacle1.y_end, 0),
           Point(obstacle1.x_end, obstacle1.y_end, 0))
    for edge_start, edge_end in (left, bottom, right, top):
        if intersect(edge_start, edge_end, obstacle2):
            return True
    return False
def intersect_bounded(obstacle1, obstacle2):
    """Symmetric rectangle-overlap test via edge intersection in both directions."""
    if intersect_bounded_dist_time(obstacle1, obstacle2):
        return True
    return intersect_bounded_dist_time(obstacle2, obstacle1)
def checkIntersectObstacles(point1, point2):
    """Return True if the segment point1->point2 hits any registered obstacle.

    Consults the module-level Obstacles list.
    """
    for obstacle in Obstacles:
        if intersect(point1, point2, obstacle):
            return True
    return False
|
import boto3
import os
import json
from botocore.exceptions import ClientError
from boto3.dynamodb.conditions import Key, Attr
from decimal import Decimal
# Module-level AWS clients, created once at import time and shared by the
# classes below.
dynamodb = boto3.resource('dynamodb', region_name='us-west-2')
lambdaClient = boto3.client('lambda')
class RentCalculator:
    """Estimates monthly rent for a property.

    Primary source is the RealtyMole lambda ('rentimport-<stage>-getRent');
    if that fails or returns no price, falls back to averaged stats stored
    in the 'RentalStats' DynamoDB table.
    """
    def __init__(self):
        self.table = dynamodb.Table('RentalStats')
    def get_rent(self, address, zipcode, bedrooms, bathrooms, square_footage, mp_style, max_days, max_comps):
        """Return a dict with 'price', 'source' and (when available) 'comps'."""
        try:
            payload = {
                'address': address,
                'bedrooms': bedrooms,
                'bathrooms': bathrooms,
                'sqft': square_footage,
                'mp_style': mp_style,
                'days_old': max_days,
                'comp_count': max_comps
            }
            response = lambdaClient.invoke(
                FunctionName=f'rentimport-{os.environ["stage"]}-getRent',
                Payload=json.dumps(payload)
            )
            data = json.loads(response['Payload'].read().decode("utf-8"))
            # NOTE(review): if 'comps' is absent this raises TypeError and
            # drops into the fallback below — presumably intended; confirm.
            data['comps'] = [RentCalculator.__convert_to_comp__(c) for c in data.get('comps')]
            data['source'] = 'RealtyMole'
            if not data.get('price'):
                data['price'] = self.get_rental_stats_price(zipcode, bedrooms)
                data['source'] = 'Rentometer'
            return data
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed SystemExit
            # and KeyboardInterrupt; narrowed to Exception.
            print('there was an error getting the rent from RealtyMole. Trying Rentometer')
            data = {
                'price': self.get_rental_stats_price(zipcode, bedrooms),
                'source': 'Rentometer'
            }
            return data
    # Get average monthly rent for property by zipcode and bedrooms
    def get_rental_stats_price(self, zipcode, bedrooms):
        """Return an estimated price for (zipcode, bedrooms), or 0.0 on error.

        Picks the stats row whose bedroom count is closest to the request and
        adjusts the price by 10% per bedroom of difference.
        """
        print('retrieving rental stats price from dynamodb for zip ' + str(zipcode) + ' beds ' + str(bedrooms))
        result = 0.0
        try:
            response = self.table.query(
                KeyConditionExpression=Key('address').eq(int(zipcode))
            )
        except ClientError as e:
            print("error retrieving rental stats")
            print(e.response['Error']['Message'])
            return result
        else:
            # Get rows if any returned and compute price based on closest match to bedrooms function arg
            # Set up to search for closest match in # of beds
            diff_beds = 10
            match_beds = 0
            match_price = 0.0
            for i in response['Items']:
                # Look for closest number of bedrooms with non-zero price for the zipcode
                zip_beds = float(i['bedrooms'])
                beds_price = float(i.get('median', 0))
                beds_diff = abs(zip_beds - bedrooms)
                if (beds_diff < diff_beds and beds_price > 0.0):
                    match_beds = int(zip_beds)
                    match_price = beds_price
                    diff_beds = beds_diff
            # If found a non-zero price for some number of bedrooms in the zipcode, use funky formula to computer guesstimate.
            if (match_price > 0.0):
                # Price will be increased or decreased by 10% times difference in number of bedrooms
                delta_price = match_price * ((float(abs(match_beds - bedrooms))) * 0.10)
                if (match_beds < bedrooms):
                    result = match_price + delta_price
                else:
                    result = match_price - delta_price
            return result
    @staticmethod
    def __convert_to_comp__(realty_mole_comp):
        """Wrap a raw RealtyMole comp dict in a RealtyMoleComp object."""
        return RealtyMoleComp(realty_mole_comp)
class RealtyMoleComp():
    """Flat attribute view of a single RealtyMole rental comparable record."""
    def __init__(self, c):
        # Map our attribute names to the keys used in the RealtyMole payload;
        # missing keys become None.
        field_to_key = (
            ('address', 'formattedAddress'),
            ('lat', 'latitude'),
            ('lon', 'longitude'),
            ('price', 'price'),
            ('listing_date', 'publishedDate'),
            ('zipcode', 'zipcode'),
            ('city', 'city'),
            ('county', 'county'),
            ('state', 'state'),
            ('days', 'daysOld'),
            ('miles', 'distance'),
            ('mp_style_name', 'propertyType'),
            ('bed', 'bedrooms'),
            ('bath', 'bathrooms'),
        )
        for attr, key in field_to_key:
            setattr(self, attr, c.get(key))
        self.sqft = c.get('squareFootage', 0)
        # Guard against division by zero when square footage is missing/zero.
        self.price_sqft = float(self.price / self.sqft) if self.sqft > 0 else 0
|
from . import views
from django.urls import re_path
urlpatterns=[
    re_path(r'^user_ask/$',views.user_ask,name='user_ask'),
    # Route for the user's favorites (collections)
    re_path(r'^user_love/$',views.user_love,name='user_love'),
    # Route for the user's comments
    re_path(r'^user_comment/$',views.user_comment,name='user_comment'),
    # Route for removing a favorite from the user center
    re_path(r'^user_deletelove/$',views.user_deletelove,name='user_deletelove'),
    # Route for marking unread messages as read in the user center
    re_path(r'^user_deletemessage/$',views.user_deletemessage,name='user_deletemessage'),
]
from flask import Flask
# NOTE(review): tensorflow and cv2 are imported but unused in this snippet —
# presumably used by routes defined elsewhere; confirm.
import tensorflow as tf
import cv2
app = Flask(__name__)
@app.route("/")
def main():
    """Health-check endpoint: returns a simple liveness message."""
    return "App is working"
if __name__ == "__main__":
    # Listen on all interfaces (default Flask port 5000).
    app.run(host="0.0.0.0")
rule edena_overlapping:
    """
    Modified:
        2020-08-17 10:05:36 Added '_se' in tool name. Should lead to deprecation in old code. TODO: Add '_se' everywhere.
    Doc:
        https://oit.ua.edu/wp-content/uploads/2016/10/edena_referencemanual120926.pdf
    Note:
        Reads provided to edena should have same length, hence quality trimming, e.g. with sickle, should be skipped
    Test:
        out/edena/overlapping_se/awk/sam_to_fastq/samtools/view_bam_to_sam/bedtools/intersect_-v_-b_bed-hg19-refgene-exons/samtools/index/samtools/sort/samtools/view_sam_to_bam/awk/extract_reads_with_insertions/bowtie2/se_-k_1_-q_hg19/bowtie/se_--chunkmbs_256_--best_--strata_-m_1_-n_2_ebwt-hg19/ln/alias/sst/all_samples/fastq/TH134_CD34_H3K27ac_unmapped.ovl
    """
    input:
        fastq="out/{filler}.fastq"
    output:
        # Overlap graph produced by edena's single-end ('-r') mode.
        "out/{tool}{extra}/{filler}.ovl"
    log:
        "out/{tool}{extra}/{filler}.log"
    benchmark:
        "out/{tool}{extra}/{filler}.benchmark.tsv"
    params:
        outdir="out/{tool}{extra}/{filler}",
        extra = params_extra
    wildcard_constraints:
        tool="edena/overlapping_se"
    threads:
        1
    # Actually only use one thread even if more are provided
    #MAX_THREADS
    conda:
        "../envs/edena.yaml"
    shell:
        "edena -nThreads {threads} {params.extra} -r {input.fastq} -p {params.outdir} &> {log}"
rule edena_overlapping_pe:
    """
    Created:
        2020-08-17 09:59:35
    Doc:
        https://oit.ua.edu/wp-content/uploads/2016/10/edena_referencemanual120926.pdf
    Note:
        Reads provided to edena should have same length, hence quality trimming, e.g. with sickle, should be skipped
    Test:
        out/edena/overlapping_pe/gunzip/to-stdout/ln/alias/sst/all_samples/fastq/856_H3K27ac.ovl
    """
    input:
        # Paired-end mates; passed together to edena via '-DRpairs'.
        fq_1="out/{filler}_1.fastq",
        fq_2="out/{filler}_2.fastq"
    output:
        "out/{tool}{extra}/{filler}.ovl"
    log:
        "out/{tool}{extra}/{filler}.log"
    benchmark:
        "out/{tool}{extra}/{filler}.benchmark.tsv"
    params:
        outdir="out/{tool}{extra}/{filler}",
        extra = params_extra
    wildcard_constraints:
        tool="edena/overlapping_pe"
    threads:
        1
    # Actually only use one thread even if more are provided
    #MAX_THREADS
    conda:
        "../envs/edena.yaml"
    shell:
        "edena -nThreads {threads} {params.extra} -DRpairs {input.fq_1} {input.fq_2} -p {params.outdir} &> {log}"
rule edena_assembling:
    """
    Doc:
        https://oit.ua.edu/wp-content/uploads/2016/10/edena_referencemanual120926.pdf
    Note:
        Reads provided to edena should have same length, hence quality trimming, e.g. with sickle, should be skipped
    Test:
        out/edena/assembling_-d_20_-c_20_-minCoverage_5/edena/overlapping/awk/sam_to_fastq/samtools/view_bam_to_sam/bedtools/intersect_-v_-b_bed-hg19-refgene-exons/samtools/index/samtools/sort/samtools/view_sam_to_bam/awk/extract_reads_with_insertions/bowtie2/se_-k_1_-q_hg19/bowtie/se_--chunkmbs_256_--best_--strata_-m_1_-n_2_ebwt-hg19/ln/alias/sst/all_samples/fastq/TH134_CD34_H3K27ac_unmapped_contigs.fasta
        out/edena/assembling/edena/overlapping/gunzip/to-stdout/bowtie/se_--chunkmbs_256_--best_--strata_-m_1_-n_2_ebwt-hg19/ln/alias/sst/all_samples/fastq/Jurkat_SRR1057274_H3K27ac_unmapped_contigs.fasta
        out/edena/assembling/edena/overlapping/gunzip/to-stdout/bowtie/se_--chunkmbs_256_--best_--strata_-m_1_-n_2_ebwt-hg19-main-chr/ln/alias/sst/all_samples/fastq/Jurkat_SRR1057274_H3K27ac_unmapped_contigs.fasta
        out/edena/assembling_-d_20_-c_20_-minCoverage_5/edena/overlapping/gunzip/to-stdout/ln/alias/sst/all_samples/fastq/Jurkat_SRR1057274_H3K27ac_contigs.fasta
    """
    input:
        # Overlap graph produced by one of the overlapping rules above ('-e').
        ovl="out/{filler}.ovl"
    output:
        "out/{tool}{extra}/{filler}_contigs.fasta",
        "out/{tool}{extra}/{filler}_contigs.lay",
    log:
        "out/{tool}{extra}/{filler}.log"
    benchmark:
        "out/{tool}{extra}/{filler}.benchmark.tsv"
    params:
        outdir="out/{tool}{extra}/{filler}",
        extra = params_extra
    wildcard_constraints:
        tool="edena/assembling"
    threads:
        MAX_THREADS
    conda:
        "../envs/edena.yaml"
    shell:
        "edena -nThreads {threads} {params.extra} -e {input.ovl} -p {params.outdir}"
# After assembling:
# /opt/bao/bin/bowtie2/bowtie2 --rfg 1,1 -p 5 -k 1 -q -f -x /opt/bao/bin/bowtie2/indexes/hg19/hg19 -U $file.unmapped/$file.unmapped_contigs.fasta -S $file.unmapped/$file.unmapped_contigs.sam
# out/bowtie2/se_--rfg_1,1_-k_1_-q_-f_hg19/edena/assembling_-d_20_-c_20_-minCoverage_5/edena/overlapping/awk/sam_to_fastq/samtools/view_bam_to_sam/bedtools/intersect_-v_-b_bed-hg19-refgene-exons/samtools/index/samtools/sort/samtools/view_sam_to_bam/awk/extract_reads_with_insertions/bowtie2/se_-k_1_-q_hg19/bowtie/se_--chunkmbs_256_--best_--strata_-m_1_-n_2_ebwt-hg19/ln/alias/sst/all_samples/fastq/TH134_CD34_H3K27ac_unmapped_contigs.sam
|
"""
Desafio 074
Problema: Crie um programa que vai gerar 5 números aleatórios e colocar em uma tupla.
Depois disso, mostre a listagem de números gerados e também indique o menor e
o maior valor que estão na tupla.
Resolução do problema:
"""
from random import randint
print('-' * 30 + f'\n{"SORTEIO DE VALORES": ^30}\n' + '-' * 30)
# Atribuindo valores sorteados a uma tupla
valorSorteado = ()
for cont in range(0, 5):
valorSorteado += randint(1, 10), # Pode-se utilizar uma
# Listando valores sorteados formatados sem Aspas e Parênteses
print('Valores Sorteados: ', end='')
for idx, valor in enumerate(valorSorteado):
print(f'{valorSorteado[idx]}', end=' ')
print(f'\nMAIOR VALOR: {max(valorSorteado)}\nMENOR VALOR: {min(valorSorteado)}')
|
#https://leetcode.com/problems/tree-diameter/
#Time Complexity: O(V+E)
# BUG FIX: outside the LeetCode harness, the bare `List` annotation raised
# NameError at definition time; import it (and hoist defaultdict) explicitly.
from typing import List
from collections import defaultdict


class Solution:
    """Diameter (longest path, in edges) of an undirected tree given as an edge list."""

    def treeDiameter(self, edges: List[List[int]]) -> int:
        """Return the number of edges on the longest path in the tree.

        Assumes nodes are labeled 0..n-1 and the DFS is rooted at node 0;
        an empty edge list yields 0.
        """
        # Build an undirected adjacency list.
        self.tree = defaultdict(list)
        for u, v in edges:
            self.tree[u].append(v)
            self.tree[v].append(u)
        self.result = 0
        self.dfs(0, None)
        return self.result

    def dfs(self, node, parent):
        """Return the longest downward path (counted in nodes) from `node`,
        updating self.result with the best through-path (in edges) seen."""
        path_lens = []  # longest node-count path into each child subtree
        for child in self.tree[node]:
            if child != parent:
                path_lens.append(self.dfs(child, node))
        if not path_lens:
            return 1  # leaf: a path of a single node
        if len(path_lens) == 1:
            self.result = max(self.result, path_lens[0])
            return path_lens[0] + 1
        # Combine the two deepest child paths through this node; their node
        # counts sum to the edge count of that path.
        best = max(path_lens)
        path_lens.remove(best)
        second = max(path_lens)
        self.result = max(self.result, best + second)
        return best + 1
|
import numpy as np # You can set an alias for the library you imported
from sklearn.datasets import load_iris
from sklearn import tree
'''
This imports is for visualizing the
decision tree later on
https://medium.com/@rnbrown/creating-and-visualizing-decision-trees-with-python-f8e8fa394176
https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/tree/export.py#L655
'''
from sklearn.externals.six import StringIO
import pydotplus
iris = load_iris()
'''
the first entry of each kind of flower
is at 0, 50, and 100. We'll omit these to
use as a tester after the training data to
ensure we are on the right track. We'll also
omit other random numbers for checking.
'''
# Hold out these row indices as a small test set; train on the rest.
test_idx = [0, 50, 100, 16, 25, 62, 32, 75, 99, 121]
train_target = np.delete(iris.target, test_idx)
train_data = np.delete(iris.data, test_idx, axis = 0)
'''
this is the testing data
'''
test_target = iris.target[test_idx]
test_data = iris.data[test_idx]
# Fit a decision tree on the training rows and predict the held-out rows;
# the printed labels can be compared by eye against test_target.
clf = tree.DecisionTreeClassifier()
clf.fit(train_data, train_target)
print(clf.predict(test_data))
# Export the fitted tree as GraphViz 'dot' text and render it to a PDF.
dot_data = StringIO()
tree.export_graphviz(clf, out_file=dot_data,
                     feature_names=iris.feature_names, class_names=iris.target_names,
                     filled=True, rounded=True,
                     impurity=False,special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph.write_pdf("iris.pdf")
# Examples of how to use the data set
'''
this prints out the features that describes the flowers
print (iris.feature_names)
'''
'''
this prints out the species of flower
print (iris.target_names)
'''
'''
this prints out the data for that flower
print (iris.data[0])
'''
'''
this prints out the number for the flower
in the target names
setosa = 0
versiclor = 1
virginica = 2
print (iris.target[0])
'''
'''
this iterates over the data set and tells you which
label each example should get based on the target name
and the features of that flower
for i in range(len(iris.target)):
    print ("Example %d: label %s, feature %s" % (i, iris.target[i], iris.data[i]))
'''
|
import ROOT as r
from ROOT import gROOT, TCanvas, TFile, TGraphErrors, SetOwnership, TVector3
import math, sys, optparse, array, copy, os
import gc, inspect, __main__
import numpy as np
# Locate this script's directory (WORKPATH) from its absolute path.
runningfile = os.path.abspath(__file__)
WORKPATH = ''
for level in runningfile.split('/')[:-1]:
    WORKPATH += level
    WORKPATH += '/'
# GALAPAGOPATH is WORKPATH truncated just after the 'Galapago-Framework'
# directory, so framework modules can be imported from anywhere below it.
GALAPAGOPATH = ''
for d in WORKPATH.split('/'):
    GALAPAGOPATH += d
    GALAPAGOPATH += '/'
    if d == 'Galapago-Framework': break
sys.path.insert(0, GALAPAGOPATH)
import include.Canvas as Canvas
import include.Sample as Sample
import include.helper as helper
import include.CutManager as CutManager
#print(WORKPATH, WORKPATH)
#print(GALAPAGOPATH, GALAPAGOPATH)
if __name__ == "__main__":
    # ROOT setup: load the TDR style macro and run without opening windows.
    gROOT.ProcessLine('.L ' + GALAPAGOPATH + 'include/tdrstyle.C')
    gROOT.SetBatch(1)
    r.setTDRStyle()
    ###########################
    ####   Parser object   ####
    ###########################
    parser = optparse.OptionParser(usage='usage: %prog [opts] FilenameWithSamples', version='%prog 1.0')
    #parser.add_option('-t', '--tag', action='store', type=str, dest='tag', default='', help='Output tag')
    parser.add_option('-e', '--era', action='store', type=str, dest='era', default='', help='2018 era')
    #parser.add_option('-n', '--nmax', action='store', type=int, dest='nmax', default=0, help='Path to file')
    (opts, args) = parser.parse_args()
    ##################################
    ####   Variable declaration   ####
    ##################################
    dr_logbin = np.logspace(-3, 1, 101)
    iso_logbin = np.logspace(-2, 2, 101)
    #### -----------------
    #### ---- Histograms
    #### -----------------
    hist_2d_BPregion = r.TH2F("hist_2d_BPregion", "", 150, -5, 5, 150, -5, 5)
    hist_2d_20region = r.TH2F("hist_2d_20region", "", 200, -20, 20, 200, -20, 20)
    hist_1d_r = r.TH1F("hist_1d_r_all", "", 100, 0, 10)
    #########################
    ####   Load sample   ####
    #########################
    treeA = Sample.Tree( fileName = helper.selectSamples(GALAPAGOPATH + 'dat/Samples_cern_UltraLegacy.dat', ['EGamma_Run2018' + opts.era], 'DATA'), name = '2018' + opts.era, isdata = 0 )
    ###################################
    ####   Loop over tree events   ####
    ###################################
    cm = CutManager.CutManager()
    for b in treeA.blocks:
        for s in b.samples:
            #for f in s.ftpaths:
            #    print(f)
            for t in s.ttrees:
                print('New tree with:', t.GetEntries())
                # NOTE(review): 'cutoff' is computed but never used below — confirm.
                cutoff = t.GetEntries()/10
                for e,ev in enumerate(t):
                    for j in range(0, ev.nEE):
                        iee = j # index to handle DMDM pair
                        # Transverse displacement of the e-e vertex candidate.
                        R = math.sqrt(ev.EE_vx[iee]*ev.EE_vx[iee] + ev.EE_vy[iee]*ev.EE_vy[iee])
                        # Trigger and kinematic selection for the candidate.
                        if not (ev.HLT_Diphoton30_18_R9IdL_AND_HE_AND_IsoCaloId_NoPixelVeto or ev.HLT_Diphoton30_22_R9Id_OR_IsoCaloId_AND_HE_R9Id_Mass90 or ev.HLT_DoublePhoton70): continue
                        if not ev.EE_leadingEt[iee] > 40: continue
                        if not ev.EE_subleadingEt[iee] > 25: continue
                        if not (abs(ev.ElectronCandidate_eta[ev.EE_idxA[iee]]) < 1.4 and abs(ev.ElectronCandidate_eta[ev.EE_idxB[iee]]) < 1.4): continue
                        if not ev.EE_normalizedChi2[iee] < 10: continue
                        # NOTE(review): eval() on the CutManager cut string — trusted input assumed.
                        if ev.EE_mass[iee] > 15 and eval(cm.EE_iso0l):
                            hist_1d_r.Fill(R)
                            if R > 1.5:
                                hist_2d_BPregion.Fill(ev.EE_vx[iee], ev.EE_vy[iee])
                                hist_2d_20region.Fill(ev.EE_vx[iee], ev.EE_vy[iee])
                        """
                        if not ev.DGM_ptError[ev.DMDM_idxB[imm]]/ev.DGM_pt[ev.DMDM_idxB[imm]] < 0.3: continue
                        :u
                        if not ev.DGM_ptError[ev.DMDM_idxA[imm]]/ev.DGM_pt[ev.DMDM_idxA[imm]] < 0.3: continue
                        if not ev.DGM_normChi2[ev.DMDM_idxA[imm]] < 7.5: continue
                        if not ev.DGM_normChi2[ev.DMDM_idxB[imm]] < 7.5: continue
                        if not ev.DGM_muonHits[ev.DMDM_idxA[imm]] > 11: continue
                        if not ev.DGM_muonHits[ev.DMDM_idxB[imm]] > 11: continue
                        if not ev.DGM_outerTrackerHits[ev.DMDM_idxA[imm]] > 8: continue
                        if not ev.DGM_outerTrackerHits[ev.DMDM_idxB[imm]] > 8: continue
                        """
    # Persist the histograms for later plotting.
    if not os.path.exists(WORKPATH + 'Results/'): os.makedirs(WORKPATH + 'Results/')
    outputFile = TFile(WORKPATH + 'Results/th1f_EE_QCD'+opts.era+'.root', 'RECREATE')
    #### Write everything to use later:
    hist_1d_r.Write()
    hist_2d_BPregion.Write()
    hist_2d_20region.Write()
    outputFile.Close()
|
import bs4
import webbrowser
import requests
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from download import download
import os
# Fake Headers: headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
def urlgen(model, size):
    """Build the adidas.com.au product URL that pre-selects a shoe size.

    Size codes start at 580 for shoe size 6.5 and increase by 20 for every
    half size. Returns the full URL (also printed for logging).
    """
    BaseSize = 580
    # BaseSize is for Shoe Size 6.5.
    # BUG FIX: use float() instead of int() so half sizes work — int(8.5)
    # truncates to 8 and produced the size code of the wrong shoe.
    ShoeSize = float(size) - 6.5
    ShoeSize = ShoeSize * 20
    RawSize = ShoeSize + BaseSize
    ShoeSizeCode = int(RawSize)
    url = 'http://www.adidas.com.au/' + str(model) + '.html?forceSelSize=' + str(model) + '_' + str(ShoeSizeCode)
    print(url)
    return url
def CheckStock(url):
    """Scrape the currently loaded product page for available sizes.

    Reads the page source from the global Selenium driver (a plain
    requests.get does not work because the size selector is rendered with
    JavaScript). Returns the list of size option labels.

    The *url* parameter is kept for interface compatibility but unused:
    the driver must already have the product page loaded.
    """
    req = driver.page_source
    page = bs4.BeautifulSoup(req, "html.parser")
    title = page.title.string
    print(title)
    SizeSelectorRaw = page.find("select", attrs={"aria-label": "Select size"})
    SizeSelectorRaw = SizeSelectorRaw.find_all("option")
    # Collect option labels; a for-loop replaces the manual while/index walk.
    ArrayOfSizes = []
    for option in SizeSelectorRaw:
        Text = option.get_text()
        print(Text)
        ArrayOfSizes.append(Text)
    ArrayOfSizes.remove("")  # drop the placeholder (empty) option
    print("Sizes Available:")
    print(ArrayOfSizes)
    # BUG FIX: the computed list was previously built but never returned,
    # making the function useless to callers.
    return ArrayOfSizes
def selectQuantity(url, no):
    """Open the quantity dropdown on the product page and pick entry *no*.

    *no* is used as a 1-based :nth-child index into the dropdown options.
    """
    dropdownSelector = "#app > div > div:nth-child(1) > div.empty_pdp_space_reserver___IFQzq > div > div.hero___2YuNz > div.container.hero_container___nM-YT > div.order_information___z33d1.col-s-12.col-l-8.col-hg-7 > div > div > form > div.row.no-gutters.size_quantity_row___1pgH7 > div.quantity_selector___1qWYG.col-s-3 > div > div"
    selectionSelectorAddon = " > div.gl-dropdown__options > ul > li:nth-child"
    # Expand the dropdown first, otherwise the option nodes are not clickable.
    driver.find_element_by_css_selector(dropdownSelector).click()
    option_css = dropdownSelector + selectionSelectorAddon + "(" + str(no) + ")"
    option_element = driver.find_element_by_css_selector(option_css)
    print("Selecting quantity...")
    option_element.click()
def findIframe():
    """Switch the driver context into the frame at index 3.

    NOTE(review): the frame index is hard-coded; this assumes the reCAPTCHA
    challenge iframe is always the fourth frame on the page — confirm.
    """
    driver.switch_to.frame(3)
def recaptcha():
    """Click through the reCAPTCHA widget and start its audio challenge."""
    # Click on checkbox to initiate challenge
    try:
        waitele = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#g-recaptcha > div > div > iframe")))
    finally:
        captchaButton = driver.find_element_by_css_selector("#g-recaptcha > div > div > iframe")
        captchaButton.click()
    # Switch to challenge iframe
    print("Scrolling...")
    driver.execute_script("window.scrollTo(0, 270)")
    findIframe()
    try:
        print("Searching for audio button...")
        waitele = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, "//*[@id='recaptcha-audio-button']")))
    finally:
        print("Found! Clicking...")
        audioChallenge = driver.find_element_by_class_name("rc-button-audio")
        audioChallenge.click()
    # BUG FIX: current_url is a property, not a method — calling it raised
    # "TypeError: 'str' object is not callable".
    url = driver.current_url
    # NOTE(review): downloadFile is not defined anywhere in this file; the
    # imported download() helper was presumably intended — confirm.
    downloadFile(url)
def addToCart():
    """Solve the captcha, click 'add to bag', then open the bag view."""
    recaptcha()
    # Wait for the add-to-bag button to become clickable; the finally block
    # clicks it regardless of whether the wait timed out.
    try:
        waitele = WebDriverWait(driver, 3).until(EC.element_to_be_clickable((By.CSS_SELECTOR, '#app > div > div:nth-child(1) > div.empty_pdp_space_reserver___IFQzq > div > div.hero___2YuNz > div.container.hero_container___nM-YT > div.order_information___z33d1.col-s-12.col-l-8.col-hg-7 > div > div > form > div.row.no-gutters.add_to_bag_container___16ts0 > button')))
    finally:
        addToCart = driver.find_element_by_css_selector('#app > div > div:nth-child(1) > div.empty_pdp_space_reserver___IFQzq > div > div.hero___2YuNz > div.container.hero_container___nM-YT > div.order_information___z33d1.col-s-12.col-l-8.col-hg-7 > div > div > form > div.row.no-gutters.add_to_bag_container___16ts0 > button')
        addToCart.click()
        print("Adding to Cart...")
        print("Successful!")
    # Wait for the confirmation modal's "view bag" link and follow it.
    try:
        print("Waiting for element to appear...")
        waitele = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "#modal-root > div.gl-modal.gl-modal--regular.gl-modal--mobile-full.gl-modal--active.glass-modal___1JNyq > div.gl-modal__dialog.no-gutters.col-l-12 > div > div > div > div.row.no-gutters.gl-hidden-s-m.undefined > div.col-l-12 > div > a.gl-cta.gl-cta--primary.gl-cta--full-width.gl-vspacing-s")))
    finally:
        viewbag = driver.find_element_by_css_selector("#modal-root > div.gl-modal.gl-modal--regular.gl-modal--mobile-full.gl-modal--active.glass-modal___1JNyq > div.gl-modal__dialog.no-gutters.col-l-12 > div > div > div > div.row.no-gutters.gl-hidden-s-m.undefined > div.col-l-12 > div > a.gl-cta.gl-cta--primary.gl-cta--full-width.gl-vspacing-s")
        print("Element found! Clicking...")
        viewbag.click()
def paypal():
    """Click the PayPal checkout button in the shopping cart."""
    paypal_button_css = "#content > div.cart-wrapper.row > div.container.clearfix > div.cart-right.col-4.co-delivery-right.vertical-callout-container.rbk-mobile-shadow-block > div.mobile-cart-summary.rbk-mobile-shadow-block.clear.clearfix > div > div.co-actions.cart-bottom-actions > a > button"
    driver.find_element_by_css_selector(paypal_button_css).click()
def card():
    """Card checkout — unimplemented stub.

    NOTE(review): the CSS selector is empty, so this raises a Selenium
    error if ever called; the selector still needs to be filled in.
    """
    checkoutCard = driver.find_element_by_css_selector("")
def payment(method):
    """Dispatch to the checkout flow matching *method* ('paypal' or 'card').

    Any other value is silently ignored, as in the original flow.
    """
    if method == "paypal":
        print("Wise choice :)")
        paypal()
    elif method == "card":
        card()
def Main(model, size, quantity):
    """End-to-end purchase flow: open page, pick quantity, add to cart, pay."""
    URL = urlgen(model, size)
    driver.get(URL)
    print("Loading page...")
    #CheckStock(URL)
    selectQuantity(URL, quantity)
    addToCart()
    # Ask the user interactively which payment flow to run.
    method = input("Payment Method? ")
    payment(method)
    print("End of script. Closing Driver...")
    driver.quit()
# Launch Chrome from the bundled chromedriver next to this script.
cwd = os.getcwd()
chrome_options = Options()
chrome_options.add_argument("--window-size=1920,1080")
driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=cwd + '/chromedriver')

productCode = "G27805" #input("Input Product Code: ")
Size = 8 #input("Input Size of Shoe: ")
Amount = 1 #input("Input quantity of pairs: ")

try:
    Main(productCode, Size, Amount)
# BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit
# and hid the actual failure; catch Exception and report it before quitting.
except Exception as exc:
    print("Error occurred, closing Driver.")
    print(exc)
    driver.quit()
|
from BaseUserAPITest import BaseUserAPITest
from opentera.db.models.TeraParticipantGroup import TeraParticipantGroup
from opentera.db.models.TeraParticipant import TeraParticipant
from opentera.db.models.TeraSession import TeraSession
import datetime
class UserQueryParticipantGroupTest(BaseUserAPITest):
    """Integration tests for the user-facing participant-group endpoint."""

    test_endpoint = '/api/user/groups'

    def setUp(self):
        super().setUp()

    def tearDown(self):
        super().tearDown()

    def test_get_endpoint_no_auth(self):
        """GET without any credentials is rejected with 401."""
        with self._flask_app.app_context():
            response = self.test_client.get(self.test_endpoint)
            self.assertEqual(401, response.status_code)

    def test_get_endpoint_invalid_http_auth(self):
        """GET with missing/invalid HTTP basic auth is rejected with 401."""
        with self._flask_app.app_context():
            response = self._get_with_user_http_auth(self.test_client)
            self.assertEqual(401, response.status_code)

    def test_get_endpoint_invalid_token_auth(self):
        """GET with a missing/invalid token is rejected with 401."""
        with self._flask_app.app_context():
            response = self._get_with_user_token_auth(self.test_client)
            self.assertEqual(401, response.status_code)

    def test_query_specific_group_as_admin(self):
        """Admin can fetch group 1 in both full and minimal (list=1) form."""
        with self._flask_app.app_context():
            response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                     params='id_group=1')
            self.assertEqual(response.status_code, 200)
            json_data = response.json
            self.assertEqual(len(json_data), 1)
            self._checkJson(json_data=json_data[0], minimal=False)

            # list=1 requests the minimal representation
            response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                     params='id_group=1&list=1')
            self.assertEqual(response.status_code, 200)
            json_data = response.json
            self.assertEqual(len(json_data), 1)
            self._checkJson(json_data=json_data[0], minimal=True)

    def test_query_specific_group_as_user(self):
        """A user without access gets an empty list; one with access gets the group."""
        with self._flask_app.app_context():
            # user4 has no access to group 1 in the test fixtures
            response = self._get_with_user_http_auth(self.test_client, username='user4', password='user4',
                                                     params='id_group=1')
            self.assertEqual(response.status_code, 200)
            json_data = response.json
            self.assertEqual(len(json_data), 0)

            # user3 does have access
            response = self._get_with_user_http_auth(self.test_client, username='user3', password='user3',
                                                     params='id_group=1')
            self.assertEqual(response.status_code, 200)
            json_data = response.json
            self.assertEqual(len(json_data), 1)
            self._checkJson(json_data=json_data[0], minimal=False)

            response = self._get_with_user_http_auth(self.test_client, username='user3', password='user3',
                                                     params='id_group=1&list=1')
            self.assertEqual(response.status_code, 200)
            json_data = response.json
            self.assertEqual(len(json_data), 1)
            self._checkJson(json_data=json_data[0], minimal=True)

    def test_query_for_project_as_admin(self):
        """Admin sees every group of project 1 (count checked against the DB)."""
        with self._flask_app.app_context():
            response = self._get_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                     params='id_project=1')
            self.assertEqual(response.status_code, 200)
            json_data = response.json
            target_count = len(TeraParticipantGroup.get_participant_group_for_project(1))
            self.assertEqual(len(json_data), target_count)
            for group_data in json_data:
                self._checkJson(json_data=group_data, minimal=False)

    def test_query_for_project_as_user(self):
        """Project queries honour per-user access: empty for user4, full for user3."""
        with self._flask_app.app_context():
            response = self._get_with_user_http_auth(self.test_client, username='user4', password='user4',
                                                     params='id_project=1')
            self.assertEqual(response.status_code, 200)
            json_data = response.json
            self.assertEqual(len(json_data), 0)

            response = self._get_with_user_http_auth(self.test_client, username='user3', password='user3',
                                                     params='id_project=1')
            self.assertEqual(response.status_code, 200)
            json_data = response.json
            target_count = len(TeraParticipantGroup.get_participant_group_for_project(1))
            self.assertEqual(len(json_data), target_count)
            for group_data in json_data:
                self._checkJson(json_data=group_data, minimal=False)

    def test_post_and_delete(self):
        """Exercise create, update and delete flows including permission checks."""
        with self._flask_app.app_context():
            # Missing the 'participant_group' wrapper object -> 400
            json_data = {
                'participant_group_name': 'Testing123',
            }
            response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                      json=json_data)
            self.assertEqual(response.status_code, 400, msg="Missing group struct")

            json_data = {
                'participant_group': {
                    'participant_group_name': 'Testing123'
                }
            }
            response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                      json=json_data)
            self.assertEqual(response.status_code, 400, msg="Missing id_participant_group")

            # id 0 means "create new", but id_project is still required
            json_data['participant_group']['id_participant_group'] = 0
            response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                      json=json_data)
            self.assertEqual(response.status_code, 400, msg="Missing id_project")

            json_data['participant_group']['id_project'] = 1
            response = self._post_with_user_http_auth(self.test_client, username='user4', password='user4',
                                                      json=json_data)
            self.assertEqual(response.status_code, 403, msg="No access to project")

            response = self._post_with_user_http_auth(self.test_client, username='user3', password='user3',
                                                      json=json_data)
            self.assertEqual(response.status_code, 200, msg="Post new")  # All ok now!
            part_data = response.json[0]
            self._checkJson(part_data)
            group_id = part_data['id_participant_group']

            # Test update
            json_data = {
                'participant_group': {
                    'id_participant_group': group_id,
                    'id_project': 3
                }
            }
            response = self._post_with_user_http_auth(self.test_client, username='user3', password='user3',
                                                      json=json_data)
            self.assertEqual(response.status_code, 403, msg="No access to new project")

            json_data['participant_group']['id_project'] = 2
            response = self._post_with_user_http_auth(self.test_client, username='user4', password='user4',
                                                      json=json_data)
            self.assertEqual(response.status_code, 403, msg="No access to group")

            response = self._post_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                      json=json_data)
            self.assertEqual(response.status_code, 200, msg="Update completed")
            part_data = response.json[0]
            self._checkJson(part_data)
            self.assertEqual(part_data['id_project'], 2)

            # Test delete
            # Attach two participants to the group, one with a session, to
            # verify the delete constraints.
            part1 = TeraParticipant()
            part1.from_json({'participant_name': 'Test Part #1',
                             'id_participant_group': group_id,
                             'id_project': 2})
            TeraParticipant.insert(part1)

            part2 = TeraParticipant()
            part2.from_json({'participant_name': 'Test Part #2',
                             'id_participant_group': group_id,
                             'id_project': 2})
            TeraParticipant.insert(part2)

            part2_session = TeraSession()
            part2_session.from_json({'id_session_type': 1,
                                     'session_name': 'Session #1',
                                     'session_start_datetime': datetime.datetime.now(),
                                     'session_status': 0,
                                     'id_creator_participant': part2.id_participant
                                     }
                                    )
            TeraSession.insert(part2_session)

            response = self._delete_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                        params={'id': group_id})
            self.assertEqual(response.status_code, 500, msg="Can't delete, has participants with sessions")

            TeraSession.delete(part2_session.id_session)
            response = self._delete_with_user_http_auth(self.test_client, username='user4', password='user4',
                                                        params={'id': group_id})
            self.assertEqual(response.status_code, 403, msg="Can't delete, forbidden")

            id_part1 = part1.id_participant
            id_part2 = part2.id_participant
            response = self._delete_with_user_http_auth(self.test_client, username='admin', password='admin',
                                                        params={'id': group_id})
            self.assertEqual(response.status_code, 200, msg="Delete OK")

            # Check that all participants were also deleted
            self.assertEqual(TeraParticipant.get_participant_by_id(id_part1), None)
            self.assertEqual(TeraParticipant.get_participant_by_id(id_part2), None)

    def _checkJson(self, json_data, minimal=False):
        """Validate the structure of one returned participant-group object."""
        self.assertGreater(len(json_data), 0)
        self.assertTrue(json_data.__contains__('id_participant_group'))
        self.assertTrue(json_data.__contains__('id_project'))
        self.assertTrue(json_data.__contains__('participant_group_name'))
        if minimal:
            # Minimal (list) form carries the count but not the project name
            self.assertTrue(json_data.__contains__('group_participant_count'))
            self.assertFalse(json_data.__contains__('project_name'))
        else:
            self.assertFalse(json_data.__contains__('group_participant_count'))
            self.assertTrue(json_data.__contains__('project_name'))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = '太阳黑子活动折线图'
__author__ = 'zhangjingjun'
__mtime__ = '2017/11/3'
# ----------Dragon be here!----------
┏━┓ ┏━┓
┏━┛ ┻━━━━━━┛ ┻━━┓
┃ ━ ┃
┃ ━┳━┛ ┗━┳━ ┃
┃ ┻ ┃
┗━━━┓ ┏━━━━┛
┃ ┃神兽保佑
┃ ┃永无BUG!
┃ ┗━━━━━━━━━┓
┃ ┣━┓
┃ ┏━┛
┗━━┓ ┓ ┏━━━┳━┓ ┏━┛
┃ ┫ ┫ ┃ ┫ ┫
┗━┻━┛ ┗━┻━┛
"""
import requests
from reportlab.graphics.shapes import *
from reportlab.lib.colors import purple, PCMYKColor, black, pink, green, blue
from reportlab.graphics.charts.lineplots import LinePlot
from reportlab.graphics.charts.legends import LineLegend
from reportlab.graphics import renderPDF
URL = "http://services.swpc.noaa.gov/text/predicted-sunspot-radio-flux.txt"
COMMENT_CHARS = "#:"

# Canvas size
drawing = Drawing(500, 500)

# Download the NOAA prediction table and parse the whitespace-separated
# numeric rows, skipping blank lines and comment lines (starting '#' or ':').
data = []
r = requests.get(URL)
c = r.text
cont = c.split("\n")
for line in cont:
    # NOTE: `line.strip() != ""` already covers the old `not line.isspace()`
    # test, which was redundant and has been removed.
    if line.strip() != "" and not (line[0] in COMMENT_CHARS):
        data.append([float(n) for n in line.split()])
print(data)

# Columns: 0 = year, 1 = month, 2 and 3 = the two predicted series
# (presumably sunspot number and radio flux — confirm against the file header).
swo = [row[2] for row in data]
print(swo)
ri = [row[3] for row in data]
times = [row[0] + row[1] / 12.0 for row in data]

lp = LinePlot()
# Lower-left origin of the plot area, relative to the canvas
lp.x = 100
lp.y = 200
# Axis lengths
lp.height = 125
lp.width = 300
lp.data = []
lp.lines[0].strokeColor = colors.blue
lp.lines[1].strokeColor = colors.yellow

# Pair each series with the time axis. list(zip(...)) produces exactly the
# same (time, value) tuple lists the previous manual append loops built.
lp.data.append(list(zip(times, swo)))
lp.data.append(list(zip(times, ri)))
print(lp.data)
drawing.add(lp)

try:
    # Chart label: position, text, font size, colour
    drawing.add(String(250, 150, 'Sunspots', fontSize=14, fillColor=colors.red))
    renderPDF.drawToFile(drawing, 'report2.pdf', 'Sunspots')
except PermissionError:
    # The target PDF is open in another program
    print("文件被打开,请关闭文件重新生成")
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-06 17:58
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the nombre_cache_clientes and tiempo_cache_clientes fields
    from the cacheconfiguration model."""

    dependencies = [
        ('web_configurations', '0015_auto_20171006_1257'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='cacheconfiguration',
            name='nombre_cache_clientes',
        ),
        migrations.RemoveField(
            model_name='cacheconfiguration',
            name='tiempo_cache_clientes',
        ),
    ]
|
# -*- coding:utf8 -*-
# 作者 yanchunhuo
# 创建时间 2018/01/19 22:36
# github https://github.com/yanchunhuo
from base.web_ui.demoProject.web_ui_demoProject_client import WEB_UI_DemoProject_Client
from page_objects.web_ui.demoProject.pages.indexPage import IndexPage
from assertpy import assert_that
class TestIndex:
    """UI smoke test for the demo project's index/search page."""

    def setup_class(self):
        # Launch the browser client once for the whole class and run an
        # initial search; search_kw presumably returns the results page
        # object — confirm against IndexPage.
        self.demoProjectClient = WEB_UI_DemoProject_Client()
        self.searchPage=IndexPage(self.demoProjectClient.browserOperator).search_kw('apitest')

    def test_search_kw(self):
        """Searching a keyword sets the browser title to '<kw>_百度搜索'."""
        self.searchPage.search_kw('apitest12')
        assert_that('apitest12_百度搜索').is_equal_to(self.demoProjectClient.browserOperator.getTitle())

    def teardown_class(self):
        # Close the browser once all tests in the class have run.
        self.demoProjectClient.browserOperator.close()
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDHarvester import DQMEDHarvester
# Control trigger
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
# One SUSY_HLT_alphaT DQM analyzer per monitored HLT path. Each instance
# differs only in the trigger path name, the pre/final filter modules, and
# the HT / alphaT turn-on thresholds used for the efficiency plots.
SUSY_HLT_HT200_alphaT0p51 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT200_PFAlphaT0p51_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT150CaloAlphaT0p51', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT200PFAlphaT0p51', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    caloHtThrTurnon = cms.untracked.double(200),
    caloAlphaTThrTurnon = cms.untracked.double(0.53),
    pfHtThrTurnon = cms.untracked.double(225),
    pfAlphaTThrTurnon = cms.untracked.double(0.53),
)

# Primary triggers
SUSY_HLT_HT200_alphaT0p57 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT200_DiPFJetAve90_PFAlphaT0p57_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT150CaloAlphaT0p54', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT200PFAlphaT0p57', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    caloHtThrTurnon = cms.untracked.double(200),
    caloAlphaTThrTurnon = cms.untracked.double(0.61),
    pfHtThrTurnon = cms.untracked.double(225),
    pfAlphaTThrTurnon = cms.untracked.double(0.65),
)

SUSY_HLT_HT250_alphaT0p55 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT250_DiPFJetAve90_PFAlphaT0p55_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT225CaloAlphaT0p53', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT250PFAlphaT0p55', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    pfAlphaTThrTurnon = cms.untracked.double(0.6),
    pfHtThrTurnon = cms.untracked.double(275),
    caloAlphaTThrTurnon = cms.untracked.double(0.57),
    caloHtThrTurnon = cms.untracked.double(250),
)

SUSY_HLT_HT300_alphaT0p53 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT300_DiPFJetAve90_PFAlphaT0p53_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT275CaloAlphaT0p525', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT300PFAlphaT0p53', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    pfAlphaTThrTurnon = cms.untracked.double(0.56),
    pfHtThrTurnon = cms.untracked.double(325),
    caloAlphaTThrTurnon = cms.untracked.double(0.55),
    caloHtThrTurnon = cms.untracked.double(300),
)

SUSY_HLT_HT350_alphaT0p52 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),#ak4PFJetsCHS
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT350_DiPFJetAve90_PFAlphaT0p52_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT325CaloAlphaT0p515', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT350PFAlphaT0p52', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    pfAlphaTThrTurnon = cms.untracked.double(0.55),
    pfHtThrTurnon = cms.untracked.double(375),
    caloAlphaTThrTurnon = cms.untracked.double(0.53),
    caloHtThrTurnon = cms.untracked.double(350),
)

SUSY_HLT_HT400_alphaT0p51 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT400_DiPFJetAve90_PFAlphaT0p51_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT375CaloAlphaT0p51', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT400PFAlphaT0p51', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    pfAlphaTThrTurnon = cms.untracked.double(0.54),
    pfHtThrTurnon = cms.untracked.double(425),
    caloAlphaTThrTurnon = cms.untracked.double(0.53),
    caloHtThrTurnon = cms.untracked.double(400),
)

# Backup triggers
SUSY_HLT_HT200_alphaT0p63 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT200_DiPFJetAve90_PFAlphaT0p63_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT175CaloAlphaT0p59', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT200PFAlphaT0p63', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    caloHtThrTurnon = cms.untracked.double(200),
    caloAlphaTThrTurnon = cms.untracked.double(0.61),
    pfHtThrTurnon = cms.untracked.double(225),
    pfAlphaTThrTurnon = cms.untracked.double(0.65),
)

SUSY_HLT_HT250_alphaT0p58 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT250_DiPFJetAve90_PFAlphaT0p58_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT225CaloAlphaT0p55', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT250PFAlphaT0p58', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    pfAlphaTThrTurnon = cms.untracked.double(0.6),
    pfHtThrTurnon = cms.untracked.double(275),
    caloAlphaTThrTurnon = cms.untracked.double(0.57),
    caloHtThrTurnon = cms.untracked.double(250),
)

SUSY_HLT_HT300_alphaT0p54 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT300_DiPFJetAve90_PFAlphaT0p54_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT250CaloAlphaT0p53', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT300PFAlphaT0p54', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    pfAlphaTThrTurnon = cms.untracked.double(0.56),
    pfHtThrTurnon = cms.untracked.double(325),
    caloAlphaTThrTurnon = cms.untracked.double(0.55),
    caloHtThrTurnon = cms.untracked.double(300),
)

SUSY_HLT_HT350_alphaT0p53 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),#ak4PFJetsCHS
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT350_DiPFJetAve90_PFAlphaT0p53_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT300CaloAlphaT0p51', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT350PFAlphaT0p53', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    pfAlphaTThrTurnon = cms.untracked.double(0.55),
    pfHtThrTurnon = cms.untracked.double(375),
    caloAlphaTThrTurnon = cms.untracked.double(0.53),
    caloHtThrTurnon = cms.untracked.double(350),
)

SUSY_HLT_HT400_alphaT0p52 = DQMEDAnalyzer('SUSY_HLT_alphaT',
    trigSummary = cms.InputTag("hltTriggerSummaryAOD",'', 'HLT'), #to use with test sample
    pfJetCollection = cms.InputTag("ak4PFJetsCHS"),
    caloJetCollection = cms.InputTag("ak4CaloJets"),
    TriggerResults = cms.InputTag('TriggerResults','','HLT'), #to use with test sample
    HLTProcess = cms.string('HLT'),
    TriggerPath = cms.string('HLT_PFHT400_DiPFJetAve90_PFAlphaT0p52_v'),
    TriggerPathAuxiliaryForHadronic = cms.string('HLT_IsoMu22_eta2p1_v'),
    TriggerPreFilter = cms.InputTag('hltHT325CaloAlphaT0p51', '', 'HLT'),
    TriggerFilter = cms.InputTag('hltPFHT400PFAlphaT0p52', '', 'HLT'),
    PtThrJet = cms.untracked.double(40.0),
    EtaThrJet = cms.untracked.double(3.0),
    pfAlphaTThrTurnon = cms.untracked.double(0.54),
    pfHtThrTurnon = cms.untracked.double(425),
    caloAlphaTThrTurnon = cms.untracked.double(0.53),
    caloHtThrTurnon = cms.untracked.double(400),
)
# Harvesting step: compute turn-on efficiencies from the num/den histograms
# produced by the analyzers above, one subdirectory per monitored path.
SUSYoHLTalphaToPOSTPROCESSING = DQMEDHarvester("DQMGenericClient",
    subDirs = cms.untracked.vstring(
        # BUG FIX: a missing comma after the first entry made Python
        # implicitly concatenate the first two string literals into a
        # single invalid path, so neither directory was ever harvested.
        # NOTE(review): this first entry does not match the first analyzer's
        # TriggerPath ('HLT_PFHT200_PFAlphaT0p51_v') — confirm which is right.
        "HLT/SUSYBSM/HLT_PFHT200_DiPFJetAve90_PFAlphaT0p51_v",
        "HLT/SUSYBSM/HLT_PFHT200_DiPFJetAve90_PFAlphaT0p57_v",
        "HLT/SUSYBSM/HLT_PFHT250_DiPFJetAve90_PFAlphaT0p55_v",
        "HLT/SUSYBSM/HLT_PFHT300_DiPFJetAve90_PFAlphaT0p53_v",
        "HLT/SUSYBSM/HLT_PFHT350_DiPFJetAve90_PFAlphaT0p52_v",
        "HLT/SUSYBSM/HLT_PFHT400_DiPFJetAve90_PFAlphaT0p51_v",
        "HLT/SUSYBSM/HLT_PFHT200_DiPFJetAve90_PFAlphaT0p63_v",
        "HLT/SUSYBSM/HLT_PFHT250_DiPFJetAve90_PFAlphaT0p58_v",
        "HLT/SUSYBSM/HLT_PFHT300_DiPFJetAve90_PFAlphaT0p54_v",
        "HLT/SUSYBSM/HLT_PFHT350_DiPFJetAve90_PFAlphaT0p53_v",
        "HLT/SUSYBSM/HLT_PFHT400_DiPFJetAve90_PFAlphaT0p52_v",
    ),
    verbose = cms.untracked.uint32(2),  # Set to 2 for all messages
    resolution = cms.vstring(""),
    efficiency = cms.vstring(
        "pfHtTurnOn_eff 'Turn-on vs PF HT; HT (GeV); #epsilon' pfHtTurnOn_num pfHtTurnOn_den",
        "pfAlphaTTurnOn_eff 'Turn-on vs PF alpha T; AlphaT (GeV); #epsilon' pfAlphaTTurnOn_num pfAlphaTTurnOn_den",
        # "caloHtTurnOn_eff 'Turn-on vs Calo HT; HT (GeV); #epsilon' caloHtTurnOn_num caloHtTurnOn_den",
        # "caloAlphaTTurnOn_eff 'Turn-on vs Calo alpha T; AlphaT (GeV); #epsilon' caloAlphaTTurnOn_num caloAlphaTTurnOn_den",
    )
)
|
from subprocess import Popen, TimeoutExpired, PIPE
import shlex
def run_cmd(args, input=None, timeout=10):
    """Run a single command and return (stdout, stderr) as text.

    args: command-line string, tokenized with shlex.split (no shell).
    input: optional text to pipe into the process's stdin.
    timeout: seconds to wait before killing the process.

    Raises TimeoutExpired (after killing the process) on timeout.
    Trailing newlines are stripped from stdout only.
    """
    stdin = PIPE if input else None
    with Popen(shlex.split(args), stdin=stdin, stdout=PIPE, stderr=PIPE,
               encoding='utf-8') as proc:
        try:
            outs, errs = proc.communicate(input=input, timeout=timeout)
        except TimeoutExpired:
            print('TimeoutExpired')
            proc.kill()
            # Bare raise preserves the original traceback (was `raise e`).
            raise
    return outs.rstrip('\n'), errs
def sh(cmds, timeout=10, get_err=True):
    """Run a ' | ' separated pipeline by chaining run_cmd through stdin.

    Each stage's stdout becomes the next stage's stdin. Returns the final
    stdout, plus the concatenated stderr of all stages when get_err is True.
    TimeoutExpired from any stage propagates to the caller.
    """
    input = None
    li_errs = []
    for args in cmds.split(' | '):
        # The old `try: ... except TimeoutExpired as e: raise e` wrapper was
        # a no-op and has been removed; the exception propagates unchanged.
        input, errs = run_cmd(args, input, timeout=timeout)
        li_errs.append(errs)
    if get_err:
        return input, ''.join(li_errs)
    return input
def send_notif(title, msg, icon=None):
    """Show a desktop notification via notify-send.

    icon defaults to 'media-memory' when not given.
    NOTE(review): titles/messages containing single quotes will break the
    shlex tokenization performed in sh() — confirm inputs are plain text.
    """
    chosen = icon or 'media-memory'
    sh("notify-send --icon={} '{}' '{}'".format(chosen, title, msg))
def get_free_mem():
    """Return free memory from `free -w -m`, scaled down by 1000.

    Parses the last whitespace-separated field of the second output line
    (presumably the 'available' column of the Mem row — confirm on the
    target distro). Dividing MiB by 1000 yields an approximation of
    decimal GB rather than GiB.
    """
    free_go = int(sh("free -w -m", get_err=False)
                  .split('\n')[1].split(' ')[-1]) / 1000
    return free_go
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayOpenPublicLabelUserQueryResponse(AlipayResponse):
    """Response for the open.public.label.user.query API.

    Exposes a single field, label_ids, populated from the parsed response.
    """

    def __init__(self):
        super(AlipayOpenPublicLabelUserQueryResponse, self).__init__()
        self._label_ids = None

    @property
    def label_ids(self):
        return self._label_ids

    @label_ids.setter
    def label_ids(self, value):
        self._label_ids = value

    def parse_response_content(self, response_content):
        """Parse the raw content and pick up label_ids when present."""
        parsed = super(AlipayOpenPublicLabelUserQueryResponse, self).parse_response_content(response_content)
        if 'label_ids' in parsed:
            self.label_ids = parsed['label_ids']
|
from FOL.Formulae import Formulae
from FOL.Alphabet import l_inference
class Sequence:
    """A sequent of first-order formulae: antecedent ⊢ succedent."""

    def __init__(self, antecedent, succedent):
        # Every element on either side must be a Formulae instance.
        for side in (antecedent, succedent):
            for formula in side:
                assert isinstance(formula, Formulae)
        self.antecedent = antecedent  # left part
        self.succedent = succedent    # right part

    def text(self):
        """Render the sequent as 'A1, A2 ⊢ S1, S2' using l_inference."""
        left = ', '.join(formula.text() for formula in self.antecedent)
        right = ', '.join(formula.text() for formula in self.succedent)
        return left + ' ' + l_inference + ' ' + right

    def print(self):
        print(self.text())
|
import os
from dotenv import load_dotenv
import event
import asyncio
from asgiref.sync import async_to_sync
def print_camion():
    """Event handler: announce that a truck ("camion") was registered."""
    print("camion")
def print_something(something: str):
    """Event handler: echo the event payload verbatim."""
    print(something)
# Register both handlers for the "camion_registered" event.
event.connect("camion_registered", print_camion)
event.connect("camion_registered", print_something)
async def trigger_camion_registered():
    # Fire the event; both connected handlers receive this payload.
    await event.trigger("camion_registered", "something about trucks")
print("async_to_sync ...")
# async_to_sync drives the coroutine to completion from synchronous code.
# NOTE(review): this runs at import time as a module side effect — confirm intended.
async_to_sync(trigger_camion_registered)()
def token_print():
    """Load variables from .env, then print the TOKEN environment variable."""
    load_dotenv()
    token = os.getenv("TOKEN")
    print("TOKEN", token)
def run():
    """Script entry point: show the configured TOKEN."""
    token_print()
from Base.Base import Base
from Page.UIElements import UIElements
class PersonPage(Base):
    """Page object for the personal-center screen."""

    def __init__(self, driver):
        Base.__init__(self, driver)

    def get_shop_cart(self):
        """Return the coupon text content."""
        # timeout of 10s keeps the failure wait short when reading result elements
        element = self.get_element(UIElements.person_shop_cart_id, timeout=10)
        return element.text

    def click_setting_btn(self):
        """Click the settings button."""
        self.click_element(UIElements.person_setting_btn_id)
|
#the module NLTK (Natural Language Toolkit) is used for natural language processing
import nltk
#In many languages, words appear in several inflected forms. For example, in English, the verb 'to walk'
#may appear as 'walk', 'walked', 'walks' or 'walking'. The base form, 'walk', that one might look up in
#a dictionary, is called the lemma for the word. Lemmatization attempts to select the correct lemma depending
#on the context.
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
#JSON (JavaScript Object Notation) is a lightweight open standard data-interchange file format, that uses
#human readable text for transmitting data. Although you may conclude from the name that it's a Javascript
#data format. Well, not exactly, JSON is a text format that is completely language independent and uses
#conventions that are familiar of most popular programming languages such as Python.
import json
#Pickling is used to store python objects. It is the process of converting a Python object (lists, dictionaries, tuples, etc)
#into byte streams that can be saved to disks or can be transferred over a network. In de-serialization or unpickling the byte
#streams saved on file contains the necessary information to reconstruct the original python object.
import pickle
#NumPy is a library for the Python programming language, adding support for large, multi-dimensional arrays and matrices,
#along with a large collection of high-level mathematical functions to operate on these arrays.
import numpy as np
#Keras is an open-source library that provides a Python interface for artificial neural networks. Keras acts as an
#interface for the TensorFlow library. It is used to create a deep learning model for both regression and classification problems.
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import random
words = []        # full token vocabulary (deduplicated later)
classes = []      # distinct intent tags
documents = []    # (token_list, tag) pairs forming the corpus
ignore_words = ['?', '!']
# Context manager closes the intents file promptly (the original
# open(...).read() leaked the file handle).
with open('intents.json') as intents_file:
    intents = json.load(intents_file)
for intent in intents['intents']:
    for pattern in intent['patterns']:
        # split each pattern sentence into word tokens
        w = nltk.word_tokenize(pattern)
        words.extend(w)
        # add documents in the corpus
        documents.append((w, intent['tag']))
        # add to our classes list
        if intent['tag'] not in classes:
            classes.append(intent['tag'])
# Lemmatize, lowercase and deduplicate the vocabulary; sorted(set(...))
# already yields a list, so the extra list() wrapper was redundant.
words = sorted(set(lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_words))
# sort classes
classes = sorted(set(classes))
# documents = combination between patterns and intents
print(len(documents), "documents")
# classes = intents
print(len(classes), "classes", classes)
# words = all words, vocabulary
print(len(words), "unique lemmatized words", words)
# Persist vocabulary and labels; with-blocks close the files (the original
# pickle.dump(..., open(...)) leaked both handles).
with open('words.pkl', 'wb') as words_file:
    pickle.dump(words, words_file)
with open('classes.pkl', 'wb') as classes_file:
    pickle.dump(classes, classes_file)
# Build the supervised training set: one (bag-of-words, one-hot tag) pair
# per document.
training = []
# one-hot template, one slot per intent tag
output_empty = [0] * len(classes)
for doc in documents:
    # lemmatize each token of the pattern to match the vocabulary
    pattern_words = [lemmatizer.lemmatize(word.lower()) for word in doc[0]]
    # binary bag-of-words over the full vocabulary
    bag = [1 if w in pattern_words else 0 for w in words]
    # '1' only at the index of this document's tag
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    training.append([bag, output_row])
# shuffle our features
random.shuffle(training)
# BUGFIX: np.array() on the ragged [bag, output_row] pairs raises on
# NumPy >= 1.24 (inhomogeneous shape); extract the two columns as plain
# lists instead — the downstream np.array(train_x) calls still work.
train_x = [pair[0] for pair in training]
train_y = [pair[1] for pair in training]
print("Training data created")
# Build a small feed-forward classifier with the Keras Sequential API:
# data flows layer to layer in the order the layers are added.
model = Sequential()
# Layer 1: input layer with 128 neurons over the bag-of-words vector
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
# Dropout layers reduce overfitting by randomly disabling half the units
model.add(Dropout(0.5))
# Layer 2: hidden layer with 64 neurons
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
# Layer 3: one output neuron per intent; softmax yields a probability
# distribution over the intents
model.add(Dense(len(train_y[0]), activation='softmax'))
# Stochastic gradient descent with Nesterov accelerated gradient.
# NOTE(review): the `lr` argument is deprecated in newer Keras releases in
# favor of `learning_rate` — confirm the pinned Keras version before changing.
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# fit() trains the model; the returned history is kept for the save call
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
# persist the trained model to disk
model.save('chatbot_model.h5', hist)
print("model created")
|
import sys
def test():
    """Smoke test: largest_number([21, 2]) must produce 221."""
    sample = [21, 2]
    answer = largest_number(sample)
    print('===== {}'.format(answer))
    assert int(answer) == 221
def is_greater_or_equal(num1, num2):
    """True when num1 should precede num2 to maximize the concatenation."""
    s1, s2 = str(num1), str(num2)
    return int(s1 + s2) >= int(s2 + s1)
def largest_number(a):
    """Return the largest number formed by concatenating the items of a.

    Accepts ints or numeric strings; returns the digits as a string
    ('' for an empty input). Unlike the original selection-sort loop,
    this sorts with a pairwise comparator in O(n log n) and no longer
    mutates the caller's list.
    """
    from functools import cmp_to_key

    def _first_wins(x, y):
        # x should precede y when the concatenation "xy" is the larger number
        xy = int(str(x) + str(y))
        yx = int(str(y) + str(x))
        return (xy < yx) - (xy > yx)

    ordered = sorted(a, key=cmp_to_key(_first_wins))
    return ''.join(str(item) for item in ordered)
def main():
    """Read whitespace-separated tokens from stdin and print the answer.

    The first token is the element count (per the exercise's input
    format); the remaining tokens are the numbers themselves.
    """
    # Renamed from `input`, which shadowed the builtin of the same name.
    raw = sys.stdin.read()
    tokens = raw.split()
    a = tokens[1:]
    print(largest_number(a))


if __name__ == '__main__':
    # test()
    main()
#The code for G(a). Author:Alapan Das
import math as m
def primality(p):
    """Return 1 if p is prime, else 0, by trial division up to sqrt(p).

    Fixes the original's behavior of reporting 0 and 1 as prime; the
    callers in this file only pass p >= 3, so they are unaffected.
    Also cuts the loop from O(p) to O(sqrt(p)).
    """
    if p < 2:
        return 0
    for i in range(2, int(m.sqrt(p)) + 1):
        if p % i == 0:
            return 0
    return 1
def factor(n):
    """Return [(p, e)] pairs for the odd prime factors p of n, 3 <= p <= n/2.

    NOTE(review): exponents are only detected up to 3 (r runs 1..4); if p**4
    divides n no exponent is appended and zip() silently truncates the pair
    list. Primes p > n/2 (including n itself when prime) are never reported.
    Confirm both limitations are intended by the caller G().
    """
    l=[]
    a=[]
    for P in range(3,int(n/2)+1):
        if primality(P)==1:
            if n%P==0:
                l.append(P)
                # exponent of P in n: first r with P**r not dividing n, minus 1
                for r in range(1, 5):
                    s=m.pow(P,r)
                    if n%s!=0:
                        a.append(r-1)
                        break
                    else:
                        continue
            else:
                continue
        else:
            continue
    T=list(zip(l,a))
    return T
# S appears unused below — presumably left over from an earlier version.
S=[]
def G(a):
    """Compute z = a*(r0/2*g - 1), where r0 is the exponent of 2 in a and g
    is the product of (e + 1 - e/p) over the odd prime factors (p, e) of a.

    NOTE(review): r0 only gets bound when some 2**k (k <= 49) fails to divide
    a, which holds for any a < 2**50 — confirm inputs stay in that range.
    """
    # r0 = exponent of 2 in a: first k with 2**k not dividing a, minus 1
    for k in range(1, 50):
        s=m.pow(2, k)
        if a%s!=0:
            r0=k-1
            break
        else:
            continue
    if r0==0:
        # odd a: the result is defined to be 0
        z=0
    else:
        g=1
        T=factor(a)
        r=len(T)
        for i in range(0, r):
            # contribution of the i-th odd prime factor (p=T[i][0], e=T[i][1])
            g=g*(T[i][1]+1-(T[i][1])/(T[i][0]))
        z=a*(r0/2*g-1)
    return z
# Tabulate G over the even numbers 2, 4, ..., 1998 and print (a, round(G(a))).
K = [2 * k for k in range(1, 1000)]
L = [round(G(a)) for a in K]
M = list(zip(K, L))
print(M)
|
import matplotlib.pyplot as plt
# Annotation styles for the decision-tree plot.
# boxstyle selects the outline shape; fc is the grey fill level as a string.
# style for internal decision nodes
decisionNode = dict(boxstyle = "sawtooth", fc ="0.8")
# style for leaf (terminal) nodes
leafNode = dict(boxstyle="round4",fc= "0.8")
# arrow drawn from the child node back toward its parent
arrow_args = dict(arrowstyle="<-")
# Draw one annotated node box plus the arrow connecting it to its parent.
#   nodeText: text shown inside the box
#   centerPt: where the box is drawn; parentPt: where the arrow starts
#   nodeType: decisionNode or leafNode style dict
# Uses the axes stored on createPlot.axl by createPlot().
def plotNode(nodeText, centerPt,parentPt, nodeType):
    createPlot.axl.annotate(nodeText,xy=parentPt,xycoords='axes fraction',xytext = centerPt,textcoords= 'axes fraction',va = "center",ha ="center",bbox=nodeType,arrowprops=arrow_args)
def createPlot(inTree):
    """Create the figure and draw the whole tree inTree, then show it."""
    fig = plt.figure(1,facecolor='white')
    fig.clf()
    # remove the axis tick marks
    axprops = dict(xticks =[],yticks =[])
    # frameon=False removes the plot frame
    createPlot.axl = plt.subplot(111,frameon = False, **axprops)
    # layout bookkeeping kept as attributes on plotTree:
    # total width/depth of the tree, and the current drawing cursor
    plotTree.totalW = float(getNumLeafs(inTree))
    plotTree.totalD = float(getTreeDepth(inTree))
    plotTree.xOff = -0.5/plotTree.totalW;plotTree.yOff = 1.0;
    plotTree(inTree,(0.5,1.0),'')
    plt.show()
# Count the leaf nodes of a nested-dict decision tree.
def getNumLeafs(myTree):
    """Return the number of leaf nodes in myTree."""
    root = list(myTree.keys())[0]
    leaves = 0
    for child in myTree[root].values():
        # a dict child is a subtree; anything else is a leaf
        if isinstance(child, dict):
            leaves += getNumLeafs(child)
        else:
            leaves += 1
    return leaves
# Compute the depth (number of decision levels) of the tree.
def getTreeDepth(myTree):
    """Return the depth of myTree; a single split counts as depth 1."""
    root = list(myTree.keys())[0]
    best = 0
    for child in myTree[root].values():
        depth = 1 + getTreeDepth(child) if isinstance(child, dict) else 1
        best = max(best, depth)
    return best
# Canned trees for testing the plotting code.
def retrieveTree(i):
    """Return the i-th predefined test tree."""
    listOfTrees = [
        {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}},
        {'no surfacing': {0: 'no', 1: {'flippers': {0: {'head': {0: 'no', 1: 'yes'}}, 1: 'no'}}}},
    ]
    return listOfTrees[i]
# Write txtString halfway between a parent node and its child.
def plotMidText(cntrPt,parenPt,txtString):
    """Annotate the midpoint of the parent-child edge with txtString."""
    mid_x = cntrPt[0] + (parenPt[0] - cntrPt[0]) / 2.0
    mid_y = cntrPt[1] + (parenPt[1] - cntrPt[1]) / 2.0
    # draw the text on the axes created by createPlot()
    createPlot.axl.text(mid_x, mid_y, txtString)
def plotTree(myTree,parentPt,nodeTxt):
    """Recursively draw subtree myTree, connected to parentPt.

    Relies on attributes set by createPlot(): plotTree.totalW/totalD
    (tree width and depth) and plotTree.xOff/yOff (the drawing cursor).
    """
    numLeafs = getNumLeafs(myTree)
    depth = getTreeDepth(myTree)
    firstStr = list(myTree.keys())[0]
    # center this subtree over its leaves (xOff lags by half a slot)
    cntrPt = (plotTree.xOff +(1.0 + float(numLeafs))/2.0/plotTree.totalW,plotTree.yOff)
    # edge label between the parent and this node
    plotMidText(cntrPt,parentPt,nodeTxt)
    plotNode(firstStr,cntrPt,parentPt,decisionNode)
    secondDict = myTree[firstStr]
    # descend one level before drawing children
    plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
    for key in secondDict.keys():
        if type(secondDict[key]).__name__ == 'dict':
            # recurse into a decision subtree
            plotTree(secondDict[key],cntrPt,str(key))
        else:
            # leaf: advance the x cursor one slot, then draw it
            plotTree.xOff = plotTree.xOff + 1.0/plotTree.totalW
            plotNode(secondDict[key],(plotTree.xOff,plotTree.yOff),cntrPt,leafNode)
            plotMidText((plotTree.xOff,plotTree.yOff),cntrPt,str(key))
    # back up one level after finishing this subtree
    plotTree.yOff = plotTree.yOff +1.0/plotTree.totalD
|
class MyQueue:
    """FIFO queue built from two LIFO stacks (amortized O(1) per operation)."""

    def __init__(self):
        self.stack_in = []   # receives pushes
        self.stack_out = []  # serves pops in reversed (FIFO) order

    def push(self, x: int) -> None:
        """Enqueue x."""
        self.stack_in.append(x)

    def pop(self) -> int:
        """Dequeue and return the front element, or None when empty."""
        if self.empty():
            return None
        if not self.stack_out:
            # Transferring reverses the order, exposing the oldest element.
            while self.stack_in:
                self.stack_out.append(self.stack_in.pop())
        return self.stack_out.pop()

    def peek(self) -> int:
        """Return the front element without removing it (None when empty)."""
        ans = self.pop()
        # BUGFIX: the original pushed ans back unconditionally, so peeking an
        # empty queue appended None to stack_out and corrupted empty()/pop().
        if ans is not None:
            self.stack_out.append(ans)
        return ans

    def empty(self) -> bool:
        """True when the queue holds no elements."""
        return not (self.stack_in or self.stack_out)
|
import datetime
import threading
import Queue
import json
import os
import thread
from server_config import *
import database_handler
import hashlib
import time
def read_log_file(filename):
    """Return the text of *filename*, or a fallback message on failure."""
    content = "Couldn't load log file"
    try:
        with open(filename, 'r') as log_file:
            content = log_file.read()
    except Exception:
        LOGGER.error("Couldn't read log file")
    finally:
        # NOTE: returning from finally also masks any exception raised above,
        # so this function never raises — callers always get a string.
        return content
class AdminAuthentication:
    """Runs the admin credential check for a connected client."""

    def __init__(self, client, db):
        """
        Authenticates an administrator
        :param client:
        :type client: ClientHandler
        :param db:
        :type db: database_handler.DatabaseHandler
        """
        self.client = client
        self.credentials = None  # parsed credentials dict, set by parse_response
        self.db = db

    def run(self, message):
        """
        Runs the appropriate sequence for admin authentication
        :param message: Admin message containing credentials
        """
        if self.parse_response(message):
            if self.validate_credentials():
                self.client.change_auth_status(self.client.auth_statuses["auth_complete"])
            else:
                LOGGER.warning("Username, password or unique id incorrect at client: {client}".format(
                    client=str(self.client)))
                return
        else:
            LOGGER.warning("Error receiving admin credentials: {client}".format(
                client=str(self.client)))
            return

    def parse_response(self, response):
        """
        Parses the client credentials into a status code and a json object.
        :param response: Full response from client (contains status code and json data)
        :type response: str
        :return: If response is ok and parsed successfully
        :rtype: bool
        """
        error_msg = str(PROTOCOL_STATUS_CODES["error"]) + " Error in authentication(protocol error), Try again."
        if response.find(" ") == -1:
            self.client.send(error_msg)
            return False
        status, credentials = response.split(" ", 1)
        # BUGFIX: the original's `not status.isdigit() and int(status) == 1`
        # evaluated int(status) on non-numeric input, raising ValueError
        # instead of rejecting the message.
        if not status.isdigit():
            self.client.send(error_msg)
            return False
        try:
            credentials_dict = json.loads(credentials)
        except Exception:
            self.client.send(error_msg)
            return False
        if "username" not in credentials_dict or "password" not in credentials_dict:
            self.client.send(error_msg)
            return False
        self.credentials = credentials_dict
        return True

    def validate_credentials(self):
        """
        Validates the admin credentials (uId, username, password) with the database
        :return: If credentials are matching the database
        :rtype: bool
        """
        admin_info = self.db.get_data("admins", "uId", self.credentials["uId"])
        response_text = "Username or password incorrect, try again."
        if admin_info is None or len(admin_info) == 0:
            response_text = "You cannot login as administrator from this computer. (unique id incorrect)"
            self.client.send(
                str(PROTOCOL_STATUS_CODES["error"]) + " Error in authentication, {message}".format(
                    message=response_text))
            # explicit False (the original fell back to an implicit None)
            return False
        admin_info = admin_info[0]
        salt = admin_info["salt"]
        db_pass = admin_info["password"]
        db_username = admin_info["username"]
        password = self.credentials["password"]
        # compare salted SHA-512 digest with the stored hash
        new_hashed = hashlib.sha512(password + salt)
        if db_pass == new_hashed.hexdigest() and db_username.lower() == self.credentials["username"].lower():
            self.client.send(str(PROTOCOL_STATUS_CODES["ok"]) + " Authenticated successfully.")
            self.client.uid = self.credentials["uId"]
            return True
        else:
            self.client.send(
                str(PROTOCOL_STATUS_CODES["error"]) + " Error in authentication, {message}".format(
                    message=response_text))
            return False
class Server(object):
    """
    The main server, responsible connecting all of the peaces together.
    Responsible for accepting clients and sending them to the appropriate procedure.
    """
    def __init__(self):
        # Sets up the database, the listening socket and the worker threads,
        # then blocks joining them — constructing a Server runs it.
        super(Server, self).__init__()
        self.client_list = []
        # single lock shared by all database operations
        self.db = database_handler.DatabaseHandler(threading.Lock())
        # queue of [client, data] pairs produced by ClientHandler threads
        self.client_messages = Queue.Queue()
        self.server_socket = network.network_base.NetworkBase()
        self.init_server_socket(int(SERVER_PORT))
        if os.path.isfile(ADMIN_LIST_FILENAME):
            # NOTE(review): the admin list file is detected but never loaded
            # here — confirm whether this branch is intentionally a stub.
            pass
        accept_th = self.start_accepting()
        message_th = self.handle_messages()
        LOGGER.info("The server is running on port {port}".format(port=SERVER_PORT))
        accept_th.join()
        message_th.join()
        for client in self.client_list:
            client.join()
    def init_server_socket(self, port):
        """
        Binds the server to the desired port.
        :param port: desired port
        """
        self.server_socket.bind(str(port))
        self.server_socket.listen(5)
    def start_accepting(self):
        """
        Starts a thread for accepting clients
        :return: Thread which accept clients in a loop
        :rtype: threading.Thread
        """
        accept_th = threading.Thread(target=self.accept_clients)
        accept_th.daemon = True
        accept_th.start()
        return accept_th
    def accept_clients(self):
        """
        Accept new client (designed to run in a thread)
        """
        while True:
            client_socket, client_addr = self.server_socket.accept()
            client = ClientHandler(client_socket, self.client_messages, self, client_addr, self.db)
            self.client_list.append(client)
            client.daemon = True
            client.start()
    def handle_messages(self):
        """
        Starts a thread for client messages interpretation
        :return: Thread which interprets messages according to the protocol
        :rtype: threading.Thread
        """
        message_th = MessageHandler(self.client_messages, self)
        message_th.daemon = True
        message_th.start()
        return message_th
    def remove_client(self, client):
        """
        Remove client from the server
        :param client: Client to kill
        """
        if client in self.client_list:
            LOGGER.info("Client removed: {uid}".format(uid=client.uid))
            client.kill()
            self.client_list.remove(client)
class ClientHandler(threading.Thread):
    """
    Created for every client, responsible for every communication with the client, authentication and more.
    """
    auth_statuses = {"auth_no": SERVER_CONFIG.getint("default", "auth_no"),
                     "auth_ok": SERVER_CONFIG.getint("default", "auth_ok"),
                     "auth_complete": SERVER_CONFIG.getint("default", "auth_complete")}
    client_types = {"unknown": -1, "hook": 0, "injector": 1, "admin": 2}

    def __init__(self, client_socket, client_messages, server, client_addr, db):
        """
        :param client_socket: The socket of the client as accepted
        :type client_socket: network.network_base.NetworkBase
        :param client_messages: A queue of messages to push messages to
        :type client_messages: Queue.Queue
        :param server: The server object
        :type server: server.Server
        :param client_addr: Client full address [ip:port]
        :type client_addr: str
        :param db: Database object
        :type db: database_handler.DatabaseHandler
        """
        super(ClientHandler, self).__init__()
        self.db = db
        self.client_socket = client_socket
        self.client_messages = client_messages
        self.server = server
        self.addr = client_addr
        self.authenticated = self.auth_statuses["auth_no"]
        self.status = None
        # uid defaults to the address until authentication supplies a real one
        self.uid = self.addr
        self.client_info = {}
        self.client_type = self.client_types["unknown"]
        self.__kill = threading.Event()
        LOGGER.info("New client connected: {address}".format(address=self.addr))
        self.db.new_connection(self.client_type, self.addr, self.uid, datetime.datetime.now(), self.authenticated)

    def run(self):
        """
        Receive new message and put it in the stack
        """
        while True:
            if self.__kill.is_set():
                thread.exit()
                return
            data = self.client_socket.recv(MAX_RECV)
            data = self.receive_full_data(data)
            if data is not None:
                self.client_messages.put([self, data])

    def receive_full_data(self, data):
        """
        Designed to follow the protocol standard and receive messages from the client correctly.
        This method helps splitting the received data into different client messages using the `content-length`
        :param data: Message from the client, this message may be a some messages in the same variable
        :type data: str
        :return: Last message to push to the stack
        """
        length = None
        if data.find(" ") == -1:
            if not self.closed_socket(data):
                return data
        possible_length = data.split(" ")[0]
        try:
            possible_length_loaded = json.loads(possible_length)
            if "content-length" in possible_length_loaded:
                length = possible_length_loaded["content-length"]
                data = data[len(possible_length) + 1:]
        except Exception:  # Don't care about the exception, probably ValueError.
            return data
        # BUGFIX: when the prefix parsed but lacked "content-length", length
        # stayed None and the comparisons below recursed forever on Python 2
        # (None orders below every int); return the raw data instead.
        if length is None:
            return data
        if length == len(data):
            return data
        if length < len(data):
            # more than one message arrived: queue the first, recurse on the rest
            return_data = data[0:length]
            data = data[length:]
            self.client_messages.put([self, return_data])
            return self.receive_full_data(data)
        if length > len(data):
            # message truncated: pull the remaining bytes from the socket
            data += self.client_socket.recv(length - len(data))
            return data
        return None  # If client disconnected

    def closed_socket(self, data):
        """
        Remove client from server if it has disconnected
        :param data: raw data received from the socket
        :return: True when the client sent the close marker
        """
        if data == SOCKET_CLOSE_DATA:
            self.server.remove_client(self)
            return True
        return False

    def send(self, data):
        """
        Send data to the client.
        :param data: Data to send
        """
        try:
            self.client_socket.send(str(data))
        except Exception as e:
            # BUGFIX: the original re-raised *before* logging, leaving the log
            # call unreachable (and its format() call had no named argument).
            LOGGER.error("Error sending data to client [{error}]".format(error=e.args))
            raise

    def is_auth(self):
        """
        Check if client has fully authenticated
        :return: Whether the client is authenticated
        :rtype: bool
        """
        return self.authenticated == self.auth_statuses["auth_complete"]

    def kill(self):
        """
        Kill client
        """
        self.__kill.set()

    def change_auth_status(self, new_status):
        """
        Change the client authentication step
        :param new_status: The new authentication step
        """
        LOGGER.info("Client {client} authentication status has changed to state: {state}".format(client=str(self),
                                                                                                 state=new_status))
        self.authenticated = new_status
        self.db.update_data("connections", ["userType", "authenticated"], [self.client_type, self.authenticated], "uId",
                            self.uid)
        if new_status == self.auth_statuses["auth_complete"]:
            LOGGER.info(
                "Client {client} has authenticated successfully".format(client=str(self)))

    def authenticate(self, message):
        """
        Main function for authenticating a client. Same function for all client types.
        Calls the appropriate function for authentication.
        :param message: Client message (authentication step)
        :rtype: None
        """
        if self.authenticated == self.auth_statuses["auth_complete"]:
            return
        # step 1: hello handshake determines the client type
        if not self.is_auth() and message.strip() in [CLIENT_HELLO, INJECTOR_HELLO, ADMIN_HELLO]:
            self.change_auth_status(self.auth_statuses["auth_ok"])
            self.send(SERVER_HELLO)
            if message.strip() == CLIENT_HELLO:
                self.client_type = self.client_types["hook"]
            elif message.strip() == INJECTOR_HELLO:
                self.client_type = self.client_types["injector"]
            elif message.strip() == ADMIN_HELLO:
                self.client_type = self.client_types["admin"]
            return
        # admins authenticate with credentials; other types just send their uid
        if self.client_type == self.client_types["admin"]:
            admin = AdminAuthentication(self, self.db)
            admin.run(message)
            return
        if self.authenticated == self.auth_statuses["auth_ok"]:
            response = message.splitlines()
            self.status, uid = response[0].strip().split(" ")
            if int(self.status) != (PROTOCOL_STATUS_CODES["authentication"]):
                return
            self.change_auth_status(self.auth_statuses["auth_complete"])
            self.uid = uid
            self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " Authentication complete")
            LOGGER.info("{client_info} has authenticated".format(client_info=self))
            return
        return

    def new_incident(self, incident):
        """
        Called when the client perform an illegal action.
        :param incident: incident info
        :type incident: dict
        """
        try:
            incident_json = json.loads(incident)
        except Exception as e:
            LOGGER.error("Error loading incident information[{errmsg}], data received:{data}".format(errmsg=str(e.args),
                                                                                                     data=incident))
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " Incident structure incorrect. Try again")
            return
        self.db.add_incident(incident_json, self.uid)
        LOGGER.info("New incident at " + self.addr)
        self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " Incident added successfully")
        # TODO: Alert admin

    def send_rules(self):
        """
        Send the rules requested by the client
        """
        if self.client_type == self.client_types["hook"]:
            self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " " + json.dumps(self.db.get_rules()))
        elif self.client_type == self.client_types["injector"]:
            # injectors only need the distinct process names to hook into
            processes_black_list = list(set([rule["processName"] for rule in self.db.get_rules()]))
            black_list_by_proto = {"inject_to": processes_black_list}
            self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " " + json.dumps(black_list_by_proto))
        elif self.client_type == self.client_types["admin"]:
            self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " " + json.dumps(self.db.get_rules()))
        LOGGER.info("Rules sent successfully to client: {client}".format(client=str(self)))

    def validate_rule(self, rule, must_fields=()):
        """
        Check admin permission and that the rule JSON contains must_fields.
        :param rule: JSON-encoded rule
        :param must_fields: keys that must be present in the rule
        :rtype: bool
        """
        if self.client_type != self.client_types["admin"]:
            # BUGFIX: format() was called positionally against the named
            # placeholder {client}, raising KeyError instead of logging.
            LOGGER.warning(
                "Client tried to perform operation without the appropriate permissions: {client}".format(
                    client=str(self)))
            return False
        try:
            rule = json.loads(rule)
        except Exception as e:
            # BUGFIX: e.message is Python-2-only and format() lacked the
            # keyword; use str(e) with an explicit msg= argument.
            LOGGER.error("Error with with rule structure, problem loading json: {msg}".format(msg=str(e)))
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " RULE STRUCTURE INCORRECT")
            return False
        for item in must_fields:
            if item not in rule:
                # BUGFIX: format() was called with no arguments for {client}
                LOGGER.error("Rule structure is incorrect {client}".format(client=str(self)))
                self.send(str(PROTOCOL_STATUS_CODES["error"]) + " RULE STRUCTURE INCORRECT")
                return False
        return True

    def add_rule(self, rule):
        """
        Adds a new rule to the database
        :param rule: rule data in JSON format
        :type rule: str
        """
        if not self.validate_rule(rule, ["processName", "ruleType", "actionToTake", "ruleContent"]):
            return
        try:
            rule = json.loads(rule)
        except ValueError as e:
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " Error parsing rule. Try again.")
            LOGGER.error("Error parsing rule: {errmsg}".format(errmsg=str(e.args)))
            return
        except Exception as e:
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " Error parsing rule. Try again.")
            LOGGER.error("Unknown error while parsing rule: {errmsg}".format(errmsg=str(e.args)))
            return
        self.db.add_rule(rule)
        LOGGER.info("New rule added to database by {client}.".format(client=str(self)))
        self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " Rule added successfully")

    def protocol_error(self, msg=""):
        """
        Notify the client if a protocol error has occurred.
        :param msg: Error information to send
        """
        self.send(
            str(PROTOCOL_STATUS_CODES["error"]) + " Error interpreting request (protocol error){msg}".format(
                msg=": " + msg))

    def update_rule(self, rule):
        """
        Update a rule in the database by the rule id
        :param rule: A rule represented as a json dictionary
        :type rule: str
        """
        if not self.validate_rule(rule, ["processName", "ruleType", "actionToTake", "ruleContent", "id"]):
            return
        try:
            rule = json.loads(rule)
        except ValueError as e:
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " Error parsing rule. Try again.")
            LOGGER.error("Error parsing rule: {errmsg}".format(errmsg=str(e.args)))
            return
        except Exception as e:
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " Error parsing rule. Try again.")
            LOGGER.error("Unknown error while parsing rule: {errmsg}".format(errmsg=str(e.args)))
            return
        self.db.modify_rule(rule["id"], rule)
        LOGGER.info("Rule #{id} modified successfully by {client}.".format(id=rule["id"], client=str(self)))
        self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " Rule #{id} modified successfully".format(id=rule["id"]))

    def send_log(self, info):
        """
        Send the log of the network module or the server to a client, must be an admin.
        :param info: Which logging file the admin wants
        :return:
        """
        if self.client_type != self.client_types["admin"]:
            # BUGFIX: format() called positionally against {client} (KeyError)
            LOGGER.warning(
                "Client tried to perform operation without the appropriate permissions: {client}".format(
                    client=str(self)))
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " Unauthorized action")
            return False
        log_files = {"server": "server.log", "network": "network.log"}
        if info not in log_files.keys():
            LOGGER.error("Log file requested does not exist! ({file})".format(file=info))
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " The log file requested doesn't exist")
            return
        log_content = read_log_file(log_files[info])
        data_to_send = str(PROTOCOL_STATUS_CODES["ok"]) + " " + log_content
        LOGGER.info(
            "Log file ({file}) sent to client successfully -> {client}".format(file=log_files[info], client=str(self)))
        self.send(data_to_send)

    def remove_rule(self, rule):
        """
        Remove a rule from the database based on the rule id
        :param rule: A rule represented as a json dictionary
        :type rule: str
        """
        if not self.validate_rule(rule, ["id"]):
            return
        try:
            rule = json.loads(rule)
        except ValueError as e:
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " Error parsing rule. Try again.")
            LOGGER.error("Error parsing rule: {errmsg}".format(errmsg=str(e.args)))
            return
        except Exception as e:
            self.send(str(PROTOCOL_STATUS_CODES["error"]) + " Error parsing rule. Try again.")
            LOGGER.error("Unknown error while parsing rule: {errmsg}".format(errmsg=str(e.args)))
            return
        self.db.delete_rule(rule["id"])
        LOGGER.info("Rule #{id} removed successfully by {client}.".format(id=rule["id"], client=str(self)))
        self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " Rule #{id} removed successfully".format(id=rule["id"]))

    def send_connection_history(self):
        """
        Send all of the table of connections history
        """
        if self.client_type != self.client_types["admin"]:
            # BUGFIX: format() called positionally against {client} (KeyError)
            LOGGER.warning(
                "Client tried to perform operation without the appropriate permissions: {client}".format(
                    client=str(self)))
            return
        LOGGER.info("Connections history sent successfully to client: {client}".format(client=str(self)))
        self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " " + json.dumps(self.db.get_connections()))

    def send_incidents_history(self):
        """
        Send all of the table of incidents history
        """
        if self.client_type != self.client_types["admin"]:
            # BUGFIX: format() called positionally against {client} (KeyError)
            LOGGER.warning(
                "Client tried to perform operation without the appropriate permissions: {client}".format(
                    client=str(self)))
            return
        LOGGER.info("Incidents history sent successfully to client: {client}".format(client=str(self)))
        self.send(str(PROTOCOL_STATUS_CODES["ok"]) + " " + json.dumps(self.db.get_incidents()))

    def __str__(self):
        # Reverse-lookup of the type name works on both Python 2 and 3
        # (the original indexed dict views, which fails on Python 3).
        type_name = [name for name, code in self.client_types.items()
                     if code == self.client_type][0]
        if self.uid != self.addr:
            return "{type} | {address} | {uid}".format(address=self.addr, uid=self.uid, type=type_name)
        return "{type} | {address}".format(address=self.addr, type=type_name)
class MessageHandler(threading.Thread):
    """Dequeues client messages and dispatches them by protocol status code."""

    def __init__(self, client_messages, server):
        """
        :param client_messages: queue of [client, data] pairs
        :param server: the owning Server instance
        """
        super(MessageHandler, self).__init__()
        self.client_messages = client_messages
        # BUGFIX: the server reference was accepted but silently dropped.
        self.server = server

    def run(self):
        """
        Get a message from the client messages queue and send it to interpretation.
        """
        while True:
            self.interpret_message(self.client_messages.get())

    @staticmethod
    def interpret_message(message):
        """
        Interprets a message according to the protocol status code
        :param message: [client, data] pair to interpret
        """
        client, data = message
        if not client.is_auth():
            client.authenticate(data)
            return
        if data.find(" ") == -1:
            if data.isdigit():
                status = int(data)
                # BUGFIX: info was left unbound on this path, raising
                # NameError in any branch below that uses it.
                info = ""
            else:
                return
        else:
            try:
                status, info = data.split(" ", 1)
                info = info.strip()
                if status.isdigit():
                    status = int(status)
                else:
                    LOGGER.warning(
                        "Protocol error in client {client}. Message: {msg}".format(client=str(client), msg=data))
                    client.protocol_error("Couldn't find request status code")
                    return
            except Exception as e:
                LOGGER.warning("Protocol error in client {client}. Message: {msg}".format(client=str(client), msg=data))
                client.protocol_error(str(e.args))
                return
        # TODO: Transform this to a dictionary with status code as key and method pointer as value
        if status == PROTOCOL_STATUS_CODES["incident_info"]:
            client.new_incident(info)
        elif status == PROTOCOL_STATUS_CODES["get_rules"]:
            client.send_rules()
        elif status == PROTOCOL_STATUS_CODES["add_rule"]:
            client.add_rule(info)
        elif status == PROTOCOL_STATUS_CODES["update_rule"]:
            client.update_rule(info)
        elif status == PROTOCOL_STATUS_CODES["delete_rule"]:
            client.remove_rule(info)
        elif status == PROTOCOL_STATUS_CODES["get_log"]:
            client.send_log(info)
        elif status == PROTOCOL_STATUS_CODES["get_connection_history"]:
            client.send_connection_history()
        elif status == PROTOCOL_STATUS_CODES["get_incident_history"]:
            client.send_incidents_history()
|
# Sum of factorials: 1! + 2! + ... + 10!.
# Renamed from `sum`/`tmp` — `sum` shadowed the builtin of the same name.
total = 0
factorial = 1
for i in range(1, 11):
    factorial *= i
    total += factorial
print("运算结果是:{}".format(total))
class 부모:
    """Parent class: announces its own construction."""

    def __init__(self):
        print("부모생성")
class 자식(부모):
    """Child class: prints its own message, then the parent's via super()."""

    def __init__(self):
        # Child message first, then explicitly delegate to the parent
        # initializer — super() gives access to the base class.
        print("자식생성")
        super().__init__()
# Instantiating the child prints "자식생성" first, then super().__init__()
# runs the parent initializer which prints "부모생성".
나 = 자식()
import numpy as np
import ctypes
from scipy.optimize import minimize
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
import test_math
# Problem dimensions and hyper-parameters for the optimizeA test sweep.
m = int(7e0)   # rows of X
n = int(15e0)  # columns of X
k = int(4e0)   # latent dimension
lam = 2.5      # L2 regularization strength
w = 3.2        # global observation weight
nthreads = 8
np.random.seed(123)  # reproducible random matrices
X = np.random.gamma(1,1, size=(m,n))
W = np.random.gamma(1,1, size=(m,n))
def get_sol():
    """Invoke the C routine under test (test_math.py_optimizeA).

    Reads the sweep state from module globals (is_B, A0/B0, lda/ldb, xtype,
    has_weight, NA_as_zero, near_dense and the Xcsr*/Xcsc* arrays) which are
    (re)assigned by the parameter loops further down the file. When is_B is
    set, A/B roles and the CSR/CSC layouts are swapped so the same routine
    optimizes B instead of A.
    """
    if not is_B:
        return test_math.py_optimizeA(
            A0.copy(),
            B0.copy(),
            int(m), int(n), int(k),
            int(lda), int(ldb),
            Xcsr_p.astype(ctypes.c_size_t) if xtype=="sparse" else empty_size_t,
            Xcsr_i.astype(ctypes.c_int) if xtype=="sparse" else empty_int,
            Xcsr.copy() if xtype=="sparse" else empty_1d,
            Xpass.copy() if xtype=="dense" else empty_2d,
            empty_1d if not has_weight else (Wpass.reshape(-1) if xtype=="dense" else Wcsr),
            0,
            lam, w,
            NA_as_zero,
            near_dense,
            int(nthreads),
            buffer1
        )
    else:
        # swapped call: optimize B by transposing the problem (CSC layout)
        return test_math.py_optimizeA(
            B0.copy(),
            A0.copy(),
            int(n), int(m), int(k),
            int(ldb), int(lda),
            Xcsc_p.astype(ctypes.c_size_t) if xtype=="sparse" else empty_size_t,
            Xcsc_i.astype(ctypes.c_int) if xtype=="sparse" else empty_int,
            Xcsc.copy() if xtype=="sparse" else empty_1d,
            Xpass.copy() if xtype=="dense" else empty_2d,
            empty_1d if not has_weight else (Wpass.reshape(-1) if xtype=="dense" else Wcsc),
            1,
            lam, w,
            NA_as_zero,
            near_dense,
            int(nthreads),
            buffer1
        )
def py_evalA(A, B, X, W):
    """Reference objective for the A factor: (weighted) squared reconstruction
    error plus an L2 penalty on A, halved.

    Uses the module globals m, lda, k, w, lam, has_weight and NA_as_zero set
    up by the test loop; missing entries (NaN) contribute nothing unless
    NA_as_zero treats them as observed zeros.
    """
    A_mat = A.reshape((m, lda))[:, :k]
    B_mat = B[:, :k]
    X_eval = X.copy()
    if NA_as_zero:
        X_eval[np.isnan(X)] = 0
    resid = X_eval - A_mat.dot(B_mat.T)
    resid[np.isnan(X_eval)] = 0
    if has_weight:
        W_eval = W.copy()
        # NA_as_zero counts missing entries as zeros with unit weight;
        # otherwise their weight is zeroed out entirely.
        W_eval[np.isnan(X)] = 1 if NA_as_zero else 0
        loss = w * np.sum(W_eval * resid**2)
    else:
        loss = w * np.sum(resid**2)
    loss += lam * np.sum(A_mat**2)
    return loss / 2
def py_evalB(B, A, X, W):
    """Reference objective for the B factor: (weighted) squared reconstruction
    error plus an L2 penalty on B, halved.

    Mirror image of py_evalA; uses the module globals n, ldb, k, w, lam,
    has_weight and NA_as_zero set up by the test loop.
    """
    A_mat = A[:, :k]
    B_mat = B.reshape((n, ldb))[:, :k]
    X_eval = X.copy()
    if NA_as_zero:
        X_eval[np.isnan(X)] = 0
    resid = X_eval - A_mat.dot(B_mat.T)
    resid[np.isnan(X_eval)] = 0
    if has_weight:
        W_eval = W.copy()
        # NA_as_zero counts missing entries as zeros with unit weight;
        # otherwise their weight is zeroed out entirely.
        W_eval[np.isnan(X)] = 1 if NA_as_zero else 0
        loss = w * np.sum(W_eval * resid**2)
    else:
        loss = w * np.sum(resid**2)
    loss += lam * np.sum(B_mat**2)
    return loss / 2
# Zero-length placeholder arrays passed to the C routine for unused arguments.
empty_1d = np.empty(0, dtype=ctypes.c_double)
empty_2d = np.empty((0,0), dtype=ctypes.c_double)
empty_int = np.empty(0, dtype=ctypes.c_int)
empty_size_t = np.empty(0, dtype=ctypes.c_size_t)
# Scratch workspace for the optimizer.
buffer1 = np.empty(int(1e6), dtype=ctypes.c_double)
def dense_to_sp(X, W, m, n):
    """Convert a dense m-by-n matrix X (NaN = missing) and weights W into
    CSR and CSC component arrays with ctypes-compatible dtypes.

    Returns a tuple:
        (Xcsr indptr, Xcsr indices, Xcsr data, Wcsr data,
         Xcsc indptr, Xcsc indices, Xcsc data, Wcsc data)
    """
    mask = ~np.isnan(X)
    X_sp = X[mask].reshape(-1)
    W_sp = W[mask].reshape(-1)
    flat_mask = mask.reshape(-1)
    X_sp_row = np.repeat(np.arange(m), n)[flat_mask].astype(ctypes.c_int)
    X_sp_col = np.tile(np.arange(n), m)[flat_mask].astype(ctypes.c_int)
    # Fix: pass an explicit shape -- without it coo_matrix infers the shape
    # from the largest index present, so a trailing all-NaN row or column
    # was silently dropped and the CSR/CSC indptr arrays came out too short.
    Xcoo = coo_matrix((X_sp, (X_sp_row, X_sp_col)), shape=(m, n))
    Wcoo = coo_matrix((W_sp, (X_sp_row, X_sp_col)), shape=(m, n))
    Xcsr = csr_matrix(Xcoo)
    Xcsc = csc_matrix(Xcoo)
    Wcsr = csr_matrix(Wcoo)
    Wcsc = csc_matrix(Wcoo)
    return (
        Xcsr.indptr.astype(ctypes.c_size_t),
        Xcsr.indices.astype(ctypes.c_int),
        Xcsr.data.astype(ctypes.c_double),
        Wcsr.data.astype(ctypes.c_double),
        Xcsc.indptr.astype(ctypes.c_size_t),
        Xcsc.indices.astype(ctypes.c_int),
        Xcsc.data.astype(ctypes.c_double),
        Wcsc.data.astype(ctypes.c_double)
    )
# Exhaustive grid over layout/weighting/padding configurations, comparing the
# C optimizer (get_sol) against scipy.optimize.minimize on the same objective.
ld_pad = [0, 2]
nz_try = [0, 3, int(m*n*0.5)]
xtry = ["dense", "sparse"]
ndtry = [False, True]
wtry = [False, True]
natry = [False, True]
for xtype in xtry:
    for nz in nz_try:
        for ldA in ld_pad:
            for ldB in ld_pad:
                for near_dense in ndtry:
                    for has_weight in wtry:
                        for NA_as_zero in natry:
                            # Fix: the original tested "nz_try==0", comparing
                            # the whole list against 0 (always False), so the
                            # intended skip of near-dense runs with no missing
                            # entries never happened.
                            if (near_dense) and (nz == 0):
                                continue
                            if (NA_as_zero) and (xtype!="sparse"):
                                continue
                            lda = k + ldA
                            ldb = k + ldB
                            np.random.seed(123)
                            A0 = np.random.gamma(1,1, size=(m,lda))
                            B0 = np.random.normal(size = (n,ldb))
                            # Knock out nz random entries of X as missing.
                            Xpass = X.copy()
                            Xpass[np.random.randint(m, size=nz), np.random.randint(n, size=nz)] = np.nan
                            Wpass = W.copy()
                            Xcsr_p, Xcsr_i, Xcsr, Wcsr, Xcsc_p, Xcsc_i, Xcsc, Wcsc = dense_to_sp(Xpass, W, m, n)
                            # Solve A with both implementations, then B.
                            is_B = False
                            res_scipyA = minimize(py_evalA, A0.copy().reshape(-1), (B0, Xpass, Wpass))["x"]
                            res_moduleA = get_sol().reshape(-1)
                            is_B = True
                            res_scipyB = minimize(py_evalB, B0.copy().reshape(-1), (A0, Xpass, Wpass))["x"]
                            res_moduleB = get_sol().reshape(-1)
                            # Compare both by parameter distance and by
                            # objective-value gap.
                            diffA = np.linalg.norm(res_scipyA - res_moduleA)
                            diffB = np.linalg.norm(res_scipyB - res_moduleB)
                            dfA = py_evalA(res_moduleA, B0, Xpass, Wpass) - py_evalA(res_scipyA, B0, Xpass, Wpass)
                            dfB = py_evalB(res_moduleB, A0, Xpass, Wpass) - py_evalB(res_scipyB, A0, Xpass, Wpass)
                            is_wrong = (diffA>1e1) or (dfA>5e0) or (diffB>1e1) or (dfB>5e0) \
                                or np.any(np.isnan(res_moduleA)) or np.any(np.isnan(res_moduleB))
                            if is_wrong:
                                print("*****ERROR BELOW*****\n\n\n")
                            print("[X %s] [w:%d] [nz:%d] [na:%d] [nd:%d] [pa:%d] [pb:%d] - err:%.2f,%.2f - df:%.2f,%.2f"
                                % (xtype[0], has_weight, nz, NA_as_zero, near_dense, ldA, ldB, diffA, diffB, dfA, dfB),
                                flush=True)
                            if is_wrong:
                                print("\n\n\n*****ERROR ABOVE*****")
|
# Repeating-key XOR demo: key and two-line plaintext.
key = "ICE"
m_1 = """Burning 'em, if you ain't quick and nimble
I go crazy when I hear a cymbal"""
# XOR two characters by code point and return the result as a 2-digit hex string
def chr_XOR(a, b):
    """XOR the code points of two characters; return a two-digit hex string.

    Idiom fix: format(..., '02x') zero-pads in one step, replacing the manual
    hex()/strip-'0x'/conditional-pad sequence (works on Python 2 and 3).
    """
    return format(ord(a) ^ ord(b), '02x')
# XOR one string against another block-by-block using a repeating key
def encrypt_string(key, PT):
    """XOR-encrypt plaintext PT against a repeating key; return lowercase hex.

    Fix: the key index was advanced with a hard-coded "% 3", which only worked
    for the 3-character demo key "ICE" (shorter keys raised IndexError, longer
    keys were truncated). It now cycles over the actual key length.
    """
    CT = ""
    K_L = len(key)
    count = 0
    for PT_chr in PT:
        K_chr = key[count]
        CT = CT + format(ord(K_chr) ^ ord(PT_chr), '02x')
        count = (count + 1) % K_L
    return CT
print encrypt_string(key,m_1)  # Python 2 print statement: emits the hex ciphertext
|
import pymongo
from pymongo import MongoClient
from pymongo.errors import AutoReconnect
from xml.dom.minidom import parseString
from xml.dom.minidom import parse
from lxml import etree
try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import pandas as pd
import os
def parseInfo(list):
    """Fetch a GROBID XML document from MongoDB and dump it to data/<key>.xml,
    then extract body/figure text into data/<key>.txt.

    list: two-element sequence [key, value] -- key names the output files,
    value is the Mongo ``_id`` to look up.  (NOTE(review): the parameter name
    shadows the ``list`` builtin.)
    """
    print(len(list))
    key = str(list[0])
    print("key",key)
    value = list[1]
    print("value",value)
    # host = '10.10.11.1'
    # client = MongoClient(host, 27017)
    try:
        client = MongoClient("mongodb://readonly:readonly@10.10.11.1:27017")
        table = client.crawlerPdf.text
        query = {"_id": value}
        data = table.find_one(query, {"grobid": 1})["grobid"]
        # print(type(data))
    # NOTE(review): AutoReconnect is imported above but the handler catches
    # ConnectionFailure; also mode 'w' overwrites parseFailed.txt on every
    # failure instead of appending -- confirm both are intended.
    except pymongo.errors.ConnectionFailure:
        with open('parseFailed.txt', 'w') as f:
            f.write(key+':'+value)
            f.write('\n')
        print("mogoDB AutoReconnect exception")
    else:
        if (data):
            xml_dom = parseString(data)
            fxmlName = 'data/' + key + '.xml'
            ftxtName = 'data/' + key + '.txt'
            try:
                with open(fxmlName, 'w', encoding='UTF-8') as fh:
                    xml_dom.writexml(fh, indent='', addindent='\t', newl='\n', encoding='UTF-8')
                    print('写入xml OK!')
            except Exception as err:
                print('错误信息:{0}'.format(err))
            tree = ET.parse(fxmlName)
            root = tree.getroot()
            # Locate the <body> element under the TEI <text> node.
            # NOTE(review): if no matching element exists, ``body`` stays
            # unbound and the loop below raises NameError -- confirm inputs
            # always contain a body.
            for rchild in root:
                if "text" in rchild.tag:
                    for rchildren in rchild:
                        if "body" in rchildren.tag:
                            body = rchildren
                            break
            print(body)
            with open(ftxtName, 'w') as f:
                for child in body:
                    if "div" in child.tag:
                        for children in child:
                            # if ("head" in children.tag):
                            #     if (children.text):
                            #         f.write(children.text)
                            #         f.write('\n')
                            #         print(children.text)
                            if ("head" in children.tag or "p" in children.tag):
                                if (children.text):
                                    f.write(children.text)
                                    f.write('\n')
                                    print(children.text)
                    if "figure" in child.tag:
                        for children in child:
                            if ("figDesc" in children.tag or "table" in children.tag):
                                if (children.text):
                                    f.write(children.text)
                                    f.write('\n')
                                    print(children.text)
                            # if ("table" in children.tag):
                            #     if (children.text):
                            #         f.write(children.text)
                            #         f.write('\n')
                            #         print(children.text)
# if os.path.exists(fxmlName):
# os.remove(fxmlName)
# parseInfo([369567474,'/home/raw_data/pdfs/Wiley/a4/a4fd823ba9da0a49c74ad6a8dd62a28ba11a6f3c'])
|
#! /usr/bin/env python
import re
import os
import sys
import time
import threading
import commands
import pickle
from Bio import SeqIO
from argparse import ArgumentParser
sys.path.append("/hellogene/scgene01/user/chenjiehu/bin/recovery/module/")
import pipeline
__author__ = 'Qingyuan Zhang(zhangqingyuan@scgene.com)'
__version__ = 'V1.1'
__date__ = 'August 22nd, 2018'
# Absolute paths to the external QC binaries used by getQ20/fastqc below.
FASTQC = '/hellogene/scgene01/bio/software/FastQC/fastqc'
ITOOLS = '/hellogene/scgene01/bio/bin/iTools'
def read_params(argvs):
    """Parse the pipeline's command-line options and return them as a dict.

    -t       task: 'dir' creates the workspace, 'x' runs the analysis
    -pop     population name (default 'Unknown')
    -ploidy  sample ploidy as a string (default '2')
    """
    parser = ArgumentParser(description = 'RAD pipline' +__version__+ '(' +__date__ + ')'
                            'AUTHOR:'+__author__)
    arg = parser.add_argument
    arg('-t', choices =['x','dir'], type = str, default = None, metavar = 'task')
    arg('-pop', type = str, default = 'Unknown', metavar = 'The population name')
    arg('-ploidy', type = str, default = '2', metavar = 'The ploidy of samples')
    return vars(parser.parse_args())
def creat_workspace():
    """Create the numbered RAD working directories and copy the MISA scripts."""
    if not os.path.exists('RAD'):
        os.mkdir('RAD')
    os.system('mkdir RAD/01assemble RAD/02join2ref RAD/03fqIndex RAD/04align RAD/05sampe RAD/06division RAD/07sort RAD/08ECSelect RAD/09Haplotype RAD/10Misa RAD/11SNP RAD/12SSR ')
    os.system('cp /hellogene/scgene01/bio/software/misa/misa* RAD/10Misa/')
def extract_fq_from_dir():
    """Map each read-1 fastq filename under RAD/03fqIndex to its read-2 mate.

    Files are paired by naming convention: "<prefix>1.fq" pairs with
    "<prefix>2.fq".
    """
    return {
        name: name.split('1.fq')[0] + '2.fq'
        for name in os.listdir('RAD/03fqIndex')
        if name.endswith('1.fq')
    }
def getQ20(fq):
    """Run iTools Fqtools stat over every read pair to collect base-quality stats.

    fq maps read-1 filename -> read-2 filename (both under RAD/03fqIndex).
    The combined report is written to RAD/qc/baseinfo.txt.
    """
    if not os.path.exists('RAD/qc'):
        os.mkdir('RAD/qc')
    cmd = ITOOLS + ' Fqtools stat -MinBaseQ ! -CPU 4 '
    for f in sorted(fq):
        cmd += ' -InFq' + ' ' + 'RAD/03fqIndex/' + f + ' -InFq ' + ' ' + 'RAD/03fqIndex/' + fq[f]
    cmd += ' -OutStat RAD/qc/baseinfo.txt'
    if os.path.exists(ITOOLS):
        os.system(cmd)
    else:
        print '-- Cannot find iTools in path "/hellogene/scgene01/bio/bin/bwa"'
        exit(1)
def fastqc(fq):
    """Submit a FastQC job over all read pairs via the pipeline job manager."""
    cmd = FASTQC + ' -o RAD/qc/fastqc --extract -t 16 -f fastq'
    if not os.path.exists('RAD/qc/fastqc'):
        os.mkdir('RAD/qc/fastqc')
    for f in sorted(fq):
        cmd += (' RAD/03fqIndex/' + f + ' ' + 'RAD/03fqIndex/' + fq[f])
    with open('fastqc.sh', 'w') as sh:
        sh.write(cmd)
    pipeline.man_jobs(['fastqc.sh'], '-cwd -l vf=8g,p=16')
    # NOTE(review): this unconditional exit aborts the whole process right
    # after the FastQC job is submitted -- confirm it is not leftover debugging.
    exit(1)
def statRepli(fq):
    """Estimate the read-duplication rate from the first ~600k reads per pair.

    For every read pair a fixed window ([START:END]) of read-1 and read-2
    sequence is concatenated; exact duplicates of that fingerprint are
    counted and the per-sample rate is written to RAD/qc/Repli.txt.
    """
    START = 30
    END = 60
    LINE = 4*600000
    outfile = open('RAD/qc/Repli.txt', 'w')
    for f in sorted(fq):
        seledic = {}
        fq1 = open('RAD/03fqIndex/%s' % ( f))
        fq2 = open('RAD/03fqIndex/%s' % (fq[f]))
        for i in xrange(1,LINE):
            # sequence lines sit at positions 2, 6, 10, ... of a fastq file
            if (i+2) % 4 == 0:
                read1 = fq1.readline()
                read2 = fq2.readline()
                seq1 = read1[START: END]
                seq2 = read2[START: END]
                seq = "%s%s" % (seq1, seq2)
                if seq in seledic:
                    seledic[seq] += 1
                else:
                    seledic[seq] = 1
            else:
                fq1.readline()
                fq2.readline()
        rep = 0
        for i in seledic.values():
            if i!= 1:
                rep += (i-1)
        reprate = round(float(rep)*4*100/LINE, 2)
        outfile.write('%s\tRepetition Rate: %s%%\n' %(f.split('1.fq')[0],reprate))
    outfile.close()
def info2xls1(baseinfo, replic):
    """Merge the iTools base statistics and the duplication rates into
    RAD/qc/Basicinfo.xls (one read-1 line and one read-2 line per sample).

    baseinfo: path to the iTools Fqtools stat report.
    replic:   path to the Repli.txt duplication report.
    """
    outfile = open('RAD/qc/Basicinfo.xls', 'w')
    outfile.write('#%s \t%s \t%s \t%s \t%s \t%s \t%s' %('FastqFile',
        'ReadsNum', 'BaseNum', 'GC', 'Q20', 'Q30', 'RepRate') + '\n')
    infolist = {}
    repline = open(replic, 'r')
    for line in repline:
        sample_name = line.strip().split()[0]
        reprate = line.strip().split()[-1]
        infolist[sample_name] = [reprate]
    baseinfo = open(baseinfo, 'r')
    for i in baseinfo:
        # "##<file>" headers start each per-file section of the iTools report;
        # the sample key is the filename with the 1/2.fq suffix stripped.
        if i.startswith('##'):
            samplename = i.strip().strip('#')
            infolist[re.split(r"[12].\w*fq", samplename)[0]].append(samplename)
        if i.startswith('#ReadNum'):
            numlist = re.findall(r'\d+', i)
            infolist[re.split(r"[12].\w*fq", samplename)[0]].append(numlist[0])
            infolist[re.split(r"[12].\w*fq", samplename)[0]].append(numlist[1])
        if i.startswith('#GC'):
            gc = i.split()[1]
            infolist[re.split(r"[12].\w*fq", samplename)[0]].append(gc)
        if i.startswith('#BaseQ') and re.search(r'Q20', i):
            Q20 = i.split()[-1]
            infolist[re.split(r"[12].\w*fq", samplename)[0]].append(Q20)
        if i.startswith('#BaseQ') and re.search(r'Q30', i):
            Q30 = i.split()[-1]
            infolist[re.split(r"[12].\w*fq", samplename)[0]].append(Q30)
    for x in sorted(infolist):
        # entries [1:7] are read-1 stats, [7:] read-2 stats, [0] the rep rate
        outfile.write('\t'.join(infolist[x][1:7])+'\n')
        outfile.write('\t'.join(infolist[x][7:]) + '\t' + infolist[x][0]+'\n')
    outfile.close()
def join_survey_scaffolds():
    """Join survey scaffolds into one reference, then build the bwa index and
    the picard sequence dictionary for it."""
    os.system('python /hellogene/scgene02/RD/RAD/RADPlus/bin/joinRef.py RAD/01assemble/* > RAD/02join2ref/genome.fa')
    os.system('bwa index RAD/02join2ref/genome.fa')
    os.system('java -jar /hellogene/scgene02/RD/resequencing/bin/picard-tools-1.119/CreateSequenceDictionary.jar R=RAD/02join2ref/genome.fa O=RAD/02join2ref/genome.dict')
def ala_align(fq):
    """Write and submit one bwa aln/sampe job script per read pair.

    Each script aligns both mates against the joined reference and produces
    RAD/05sampe/<pair>.sam with a read-group derived from the sample name.
    """
    jobs = []
    for f in fq:
        fq_name = f.split('1.fq')[0]
        sample_name = f.split('_')[0]
        with open('RAD/04align/aln_%s.sh' % fq_name, 'w') as sh:
            sh.write('bwa aln -t 16 -f RAD/04align/%s.sai RAD/02join2ref/genome.fa RAD/03fqIndex/%s\n'%(f,f))
            sh.write('bwa aln -t 16 -f RAD/04align/%s.sai RAD/02join2ref/genome.fa RAD/03fqIndex/%s\n'%(fq[f], fq[f]))
            sh.write("bwa sampe -r '@RG\\tID:%s\\tSM:%s' -f RAD/05sampe/%s.sam RAD/02join2ref/genome.fa RAD/04align/%s.sai RAD/04align/%s.sai RAD/03fqIndex/%s RAD/03fqIndex/%s\n" % (sample_name, sample_name, fq_name, f, fq[f], f, fq[f]))
            sh.write('sleep 1s')
        jobs.append('RAD/04align/aln_%s.sh' % fq_name)
    pipeline.man_jobs(jobs, '-cwd -l vf=8g,p=16')
def cut_filter_sort():
    """Split sampe output per sample, sort the BAMs, then wait for the manual
    08ECSelect configuration before running enzyme-cut selection."""
    os.system('python /hellogene/scgene02/RD/RAD/RADPlus/RADSeq/splitSamPE.py RAD/05sampe/ RAD/06division/')
    os.system('python /hellogene/scgene01/user/chenjiehu/bin/recovery/resequencing/resequencing.py sort RAD/06division/ RAD/07sort/')
    exist = False
    if os.path.exists('RAD/08ECSelect/EClib.group') and os.path.exists('RAD/08ECSelect/EClib.list'):
        exist = True
    else:
        print 'Suspend! Please check 08ECSelect directory and replenish the configuration file.'
    # Poll every minute until the operator provides both config files.
    while not exist:
        time.sleep(60)
        if os.path.exists('RAD/08ECSelect/EClib.group') and os.path.exists('RAD/08ECSelect/EClib.list'):
            exist = True
    os.system('python /hellogene/scgene02/RD/RAD/RADPlus/RADSeq/ECSelect2.py RAD/08ECSelect/EClib.list RAD/08ECSelect/EClib.group RAD/02join2ref/genome.fa RAD/08ECSelect/')
    with open('RAD/08ECSelect/EClib.group') as f0:
        values = f0.read().strip().split()
        enzyme = values[0]
        n1 = values[1]
        n2 = values[2]
    # Keep only fragments without ambiguous bases (N).
    with open('RAD/08ECSelect/True-MseI.fa', 'w') as f1:
        for seq_record in SeqIO.parse('RAD/08ECSelect/%s-%s-%s.fa' % (enzyme, n1, n2), 'fasta'):
            if not 'N' in seq_record.seq:
                f1.write('>%s\n%s\n' % (seq_record.id, seq_record.seq))
    os.system('python /hellogene/scgene02/RD/RAD/RADPlus/RADSeq/targetIntervals.py RAD/08ECSelect/True-%s.fa > RAD/09Haplotype/target.list' % enzyme)
def HaplotypeCaller_GATK():
    """Generate (and partially submit) GATK job scripts: per-sample
    HaplotypeCaller, a combined GenotypeGVCFs step, and variant filtration."""
    bam = os.listdir('RAD/07sort')
    commands.getstatusoutput('samtools faidx RAD/02join2ref/genome.fa')
    index = []
    GATK = []
    cmd = 'java -Xmx10G -jar /hellogene/scgene02/RD/resequencing/bin/GenomeAnalysisTK-3.8-0/GenomeAnalysisTK.jar -T HaplotypeCaller -R RAD/02join2ref/genome.fa'
    gvcfs= 'java -Xmx10G -jar /hellogene/scgene02/RD/resequencing/bin/GenomeAnalysisTK-3.8-0/GenomeAnalysisTK.jar -T GenotypeGVCFs -R RAD/02join2ref/genome.fa'
    for fa in bam:
        if fa.endswith('_map.sort.bam'):
            sample_name = fa.split('_map.sort.bam')[0]
            with open('RAD/07sort/index_%s.sh' % fa,'w') as sh:
                sh.write('samtools index RAD/07sort/%s' % fa)
            with open('RAD/09Haplotype/GATK_%s.sh' % fa, 'w') as out:
                out.write(cmd + ' -I RAD/07sort/%s -o RAD/09Haplotype/%s.g.vcf --emitRefConfidence GVCF -L RAD/09Haplotype/target.list -nct 4' % (fa,sample_name))
            index.append('RAD/07sort/index_%s.sh' % fa)
            GATK.append('RAD/09Haplotype/GATK_%s.sh' % fa)
            gvcfs += ' -V RAD/09Haplotype/%s.g.vcf' %(sample_name)
    # NOTE(review): the index/GATK submissions are commented out, so only the
    # final filtration job actually runs -- confirm this is the intended state.
    #pipeline.man_jobs(index, '-cwd -l vf=4g,p=1')
    #pipeline.man_jobs(GATK, '-cwd -l vf=4g,p=4')
    gvcfs += ' -o RAD/09Haplotype/All.vcf'
    with open('RAD/09Haplotype/combine.sh', 'w') as f:
        f.write('%s\n' % gvcfs)
    with open('RAD/09Haplotype/selectVar.sh', 'w') as f:
        #f.write('java -Xmx10G -jar /hellogene/scgene02/RD/resequencing/bin/GenomeAnalysisTK-3.8-0/GenomeAnalysisTK.jar -T SelectVariants -R RAD/02join2ref/genome.fa -V RAD/09Haplotype/All.vcf -selectType SNP -o RAD/09Haplotype/raw_snps.vcf\n')
        f.write('java -Xmx10G -jar /hellogene/scgene02/RD/resequencing/bin/GenomeAnalysisTK-3.8-0/GenomeAnalysisTK.jar -T VariantFiltration -R RAD/02join2ref/genome.fa -V RAD/09Haplotype/raw_snps.vcf --filterExpression "QUAL < 30.0 || QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0" --filterName "my_snp_filter" -o RAD/09Haplotype/filtered_snps.vcf')
    #pipeline.man_jobs(['RAD/09Haplotype/combine.sh'], '-cwd -l vf=10g,p=1')
    pipeline.man_jobs(['RAD/09Haplotype/selectVar.sh'], '-cwd -l vf=10g,p=1')
def misa():
    """Run the MISA SSR finder on the joined reference (inside RAD/10Misa)."""
    os.system('ln -s ../02join2ref/genome.fa RAD/10Misa/genome.fa')
    os.chdir('RAD/10Misa/')
    os.system('perl misa.pl genome.fa')
    os.chdir('../../')
def SNPpattern():
    """Filter GATK SNPs against MISA repeat regions and the enzyme target
    intervals, then write RAD/11SNP/SNP.xls with per-sample genotypes.

    Filters applied: biallelic SNVs only, FILTER == PASS, at most 20% of
    samples with depth < 4, not inside a perfect SSR repeat, and at most one
    SNP per target interval.
    """
    repeat_dic = {}
    with open('RAD/10Misa/genome.fa.misa') as f:
        for line in f:
            if re.search("^ID", line):
                continue
            lines = line.rstrip().split()
            chrom = lines[0]
            repeat_type = lines[2]
            # only perfect repeats ("p...") are masked
            if not re.search("^p", repeat_type):
                continue
            repeat_ele = lines[3].split(')')[-1]
            start = int(lines[-2])
            end = int(lines[-1])
            if chrom not in repeat_dic:
                repeat_dic[chrom] = {}
            for i in xrange(start, end + 1):
                repeat_dic[chrom][i] = repeat_ele
    scale = {}
    with open('RAD/09Haplotype/target.list') as f:
        for line in f:
            line = line.rstrip().split(":")
            chrom = line[0]
            start, end = line[1].split("-")
            # NOTE(review): start/end remain strings, so the pos comparisons
            # below mix int and str -- legal only under Python 2, where such
            # comparisons are by type name, not numeric value. Confirm.
            if chrom not in scale:
                scale[chrom] = {}
            if (start, end) not in scale[chrom]:
                scale[chrom][start, end] = 0
    SNP = {}
    with open('RAD/09Haplotype/filtered_snps.vcf') as f:
        for line in f:
            if re.search(r'^##', line):
                continue
            if line[0] == '#':
                sample_name = line.strip().split()[9:]
                continue
            line = line.strip().split()
            ref = line[3]
            alt = line[4]
            if len(ref) > 1 or len(alt) >1:
                continue
            Filter = line[6]
            if Filter != 'PASS':
                continue
            sample = line[9:]
            F = 0
            for i in sample:
                if i[0] == '.':
                    continue
                DP = i.split(':')[2]
                if int(DP) < 4:
                    F += 1
            if float(F)/len(sample) > 0.2:
                continue
            chrom = line[0]
            pos = int(line[1])
            if pos in repeat_dic[chrom]:
                continue
            target = 0
            for m in sorted(scale[chrom]):
                if pos > m[1]:
                    break
                if pos >= m[0] and pos <= m[1]:
                    scale[chrom][m] += 1
                    if scale[chrom][m] > 1:
                        target = 1
            if target == 1:
                continue
            if chrom not in SNP:
                SNP[chrom] = {}
            if pos not in SNP[chrom]:
                SNP[chrom][pos] = []
            SNP[chrom][pos].append([ref, alt, sample])
    with open('RAD/11SNP/SNP.xls', 'w') as f:
        f.write('#Genome\tPos\tSNP_Type\tSNP\tSample_Info\n')
        for i in sorted(SNP, key = lambda x:int(x.split('genome')[-1])):
            for j in sorted(SNP[i]):
                print SNP[i][j]
                ref = SNP[i][j][0][0]
                alt = SNP[i][j][0][1]
                SNP_Type = SNPtype(ref, alt)
                snp = ref + '/' + alt
                f.write('%s\t%s\t%s\t%s\t' % (i, j, SNP_Type, snp))
                n = 0
                # NOTE(review): the loop variable y is never used -- every
                # iteration re-tests the FIRST genotype ([0]) and indexes
                # sample_name[n] starting at 1, skipping sample 0. Confirm
                # whether "y" was meant instead of "SNP[i][j][0][2]".
                for y in SNP[i][j][0][2]:
                    n += 1
                    if SNP[i][j][0][2][0] == '.':
                        f.write('%s:NA\t' % sample_name[n])
                    if SNP[i][j][0][2][:2] == '0/0':
                        f.write('%s:%s\t' % (sample_name[n], ref*2))
                    if SNP[i][j][0][2][:2] == '1/1':
                        f.write('%s:%s\t' % (sample_name[n], alt*2))
                    if SNP[i][j][0][2][:2] == '0/1':
                        f.write('%s:%s\t' % (sample_name[n], ''.join(sorted(ref + alt))))
                # NOTE(review): writes the literal text "%s" -- probably
                # intended f.write('\n').
                f.write('%s\n')
def SSR():
    """Merge all per-sample sorted BAMs, then run SSR detection and primer
    design on the merged alignment."""
    sort_bam = os.listdir('RAD/07sort')
    with open('RAD/07sort/sort_bam.list', 'w') as f:
        for b in sort_bam:
            if b.endswith('_map.sort.bam'):
                f.write('RAD/07sort/%s\n' % b)
    with open('RAD/07sort/merge_bam.sh', 'w') as f:
        f.write('samtools merge -c -p -@ 32 -b RAD/07sort/sort_bam.list RAD/07sort/All_map.sort.bam\n')
        f.write('samtools index -@ 32 RAD/07sort/All_map.sort.bam')
    pipeline.man_jobs(['RAD/07sort/merge_bam.sh'], '-cwd -l vf=8g,p=32')
    os.system('python /hellogene/scgene02/RD/RAD/RADPlus/RADSeq/SSRFindPerSample.py RAD/02join2ref/genome.fa RAD/10Misa/genome.fa.misa RAD/07sort/All_map.sort.bam 2 > RAD/12SSR/All.SSR.txt')
    os.system('python /hellogene/scgene02/RD/RAD/RADPlus/RADSeq/PrimerDesign.py RAD/12SSR/All.SSR.txt RAD/12SSR/Primer')
def analyse_SNP(ploidy):
    """Compute per-SNP observed/expected heterozygosity and PIC and write
    SNP_Analyse.txt, plus Structure- and CSV-format genotype exports.

    ploidy: sample ploidy as a string; only used by the commented-out R block.
    """
    table = {}
    SNP_info = {}
    SNP_file = os.listdir('RAD/11SNP')
    sample_name = []
    location = []
    loc_dic = {}
    # Sample names come from the per-sample .xls files; the leading
    # non-digit prefix of a name is treated as its sampling location.
    for doc in SNP_file:
        if doc.endswith('.xls'):
            sample_name.append(doc.split('.xls')[0])
            location.append(re.split(r'[0-9]', doc)[0])
    location = list(set(sorted(location)))
    count = 0
    for point in location:
        count += 1
        loc_dic[point] = str(count)
    with open('RAD/11SNP/Primer/SNP.xls') as f:
        f.next()
        for line in f:
            line = line.strip().split()
            SNP_ID = line[0]
            SNP = line[2]
            n_Sample = int(line[3])
            Sample_info = line[4]
            # keep loci genotyped in at least 80% of the samples
            if n_Sample >= 0.8*(len(sample_name)):
                hetero = 0
                SNP_info[SNP_ID] = Sample_info, SNP
                b1 = SNP[0]; b2 = SNP[2]
                pb1 = 0; pb2 = 0
                for i in re.findall('[AGCT]/[AGCT]', Sample_info):
                    pair = i.split('/')
                    if pair[0] != pair[1]:
                        hetero += 1
                    pb1 += i.count(b1)
                    pb2 += i.count(b2)
                pb1 = float(pb1)/ (2*len(re.findall('[AGCT]/[AGCT]', Sample_info)))
                pb2 = float(pb2)/ (2*len(re.findall('[AGCT]/[AGCT]', Sample_info)))
                observed_heterozygosity = float(hetero)/n_Sample
                expected_heterozygosity = 1-(observed_heterozygosity*observed_heterozygosity+(1-observed_heterozygosity)*(1-observed_heterozygosity))
                PIC = 1 - (pb1*pb1+pb2*pb2) - 2*(pb1*pb1*pb2*pb2)
                table[SNP_ID] = [str(observed_heterozygosity), str(expected_heterozygosity), str(PIC)]
    # Structure export: alleles encoded as integers, -9 for missing.
    base_dic = {"A":'11', "C":'12', "G":'13', "T":'14'}
    with open('RAD/11SNP/SNP_Structure.txt', 'w') as s:
        s.write('\t\t%s\n' % '\t'.join(sorted(SNP_info)))
        for ind in sample_name:
            loc = re.split(r'[0-9]', ind)[0]
            mark1 = []; mark2 = []
            for x in sorted(SNP_info):
                if ind in SNP_info[x][0]:
                    allele = SNP_info[x][0].split(ind)[-1].split(';')[0].split(':')[-1]
                    mark1.append(base_dic[allele[0]])
                    mark2.append(base_dic[allele[2]])
                if ind not in SNP_info[x][0]:
                    mark1.append('-9')
                    mark2.append('-9')
            s.write('%s\t%s\t%s\n' % (ind, loc_dic[loc], '\t'.join(mark1)))
            s.write('%s\t%s\t%s\n' % (ind, loc_dic[loc], '\t'.join(mark2)))
    with open('RAD/11SNP/SNP.csv', 'w') as w:
        w.write('Sample_ID\tLocation\t%s\n' % '\t'.join([str(x) for x in xrange(1, len(SNP_info)+1)]))
        for sample in sorted(sample_name):
            location = re.split(r'[0-9]', sample)[0]
            w.write('%s\t%s\t' % (sample, location))
            for j in sorted(SNP_info):
                # NOTE(review): SNP_info[j] is a (Sample_info, SNP) tuple, so
                # "sample in SNP_info[j]" is tuple membership (and .split on a
                # tuple would raise) -- likely SNP_info[j][0] was intended.
                if sample in SNP_info[j]:
                    allele = sorted(SNP_info[j].split(sample)[-1].split(';')[0].split(':')[-1].split('/'))
                    allele = allele[0] + allele[1]
                if sample not in SNP_info[j]:
                    allele = 'NA'
                w.write('%s\t' % allele)
            w.write('\n')
    '''
    with open('RAD/11SNP/SNP.R', 'w') as r:
        r.write('library("poppr")\n')
        r.write('mydata <- read.table("SNP.csv", header = TRUE, check.names = FALSE)\n\
dim(mydata)\n\
ind <- mydata$Sample_ID\n\
dim(mydata)\n\
locus <- mydata[, -c(1, %s:ncol(mydata))]\n' % len(SNP_info))
        r.write('data <- df2genind(locus, ploidy = %s, ind.names = ind, sep = "\\t")\n' % ploidy)
        r.write('dt <- missingno(data, type = "loci", cutoff = 0.05, quiet = FALSE)\n')
        r.write('pop <- summary(dt)\n')
    #os.system('Rscript RAD/11SNP/SNP.R > RAD/11SNP/expected_heterozygosity.txt')
    with open('RAD/11SNP/expected_heterozygosity.txt') as e:
        for line in e:
            if line.startswith('// Expected heterozygosity:'):
                line = line.strip().split(':')[-1].split()
                print len(line )
    '''
    with open("RAD/11SNP/SNP_Analyse.txt", 'w') as o:
        o.write('SNP_ID\tobserved_heterozygosity\texpected_heterozygosity\tPIC\n')
        for tab in sorted(table):
            o.write('%s\t%s\n' % (tab, '\t'.join((table[tab]))))
def analyse_SSR(ploidy):
    """Compute per-SSR observed/expected heterozygosity and PIC from the
    primer polymorphism tables and write SSR_Analyse.txt plus a
    Structure-format export.

    ploidy: sample ploidy as a string; only used by the commented-out R block.
    """
    pop = []
    sample = []
    SSR_info = {}
    pop = []
    pop_dic = {}
    Primer = os.listdir('RAD/12SSR/Primer')
    sample_file = os.listdir('RAD/11SNP/')
    # Sample names and their location prefixes come from the SNP .xls files.
    for i in sample_file:
        if i.endswith('.xls'):
            sample.append(i.split('.')[0])
            pop.append(re.split(r'[0-9]', i)[0])
    pop = list(set(sorted(pop)))
    count = 0
    for i in pop:
        count += 1
        pop_dic[i] = count
    '''
    for j in sample:
        pop.append(re.split(r'[0-9]', j)[0])
    for f in sorted(Primer):
        if re.match(r'Polymorphism_p[1-9].xls', f) != None:
            ssr_file = re.match(r'Polymorphism_p[1-9].xls', f).group()
            with open('RAD/12SSR/Primer/%s' % ssr_file) as ssr:
                for line in ssr:
                    if line[0] != '#':
                        line = line.strip().split()
                        l_sample = re.findall(r'[0-9]*/[0-9]*', line[6])
                        if len(l_sample) > 0.8*(len(sample)):
                            Sample_info = line[6].split(';')
                            for x in Sample_info:
                                if j in x:
                                    if j not in SSR_info:
                                        SSR_info[j] = []
                                    SSR_info[j].append([line[0],re.split(r'[:/;?]',x)])
                                if j not in ''.join(Sample_info):
                                    if j not in SSR_info:
                                        SSR_info[j] = []
                                    try:
                                        if SSR_info[j][-1] != [line[0], [j, '0', '0']]:
                                            SSR_info[j].append([line[0],[j,'0','0']])
                                    except IndexError:
                                        pass
    with open('RAD/12SSR/SSR.csv', 'w') as p:
        loci = []
        for x in sorted(SSR_info):
            for y in SSR_info[x]:
                loci.append(y[0].replace('_', '-'))
            break
        p.write('%s\t%s\t%s\n' % (len(loci), len(SSR_info), len(pop)))
        p.write('ind\tPop\t%s\n' % '\t'.join(loci*2))
        for m in sorted(SSR_info):
            location = re.split(r'[0-9]', m)[0]
            p.write('%s\t%s\t' % (m, location))
            for n in SSR_info[m]:
                p.write('%s\t%s\t' % (n[1][1], n[1][2]))
            p.write('\n')
    with open('RAD/12SSR/SSR.R', 'w') as r:
        r.write('library("poppr")\n')
        r.write('Mydata <- read.genalex("SSR.csv", ploidy = %s, geo = FALSE, region = FALSE, genclone = TRUE, sep = ",", recode = FALSE)\n' % ploidy)
        r.write('popdata <- summary(mydata)\n')
        r.write('pop <- missingno(popdata, type ="loci", cutoff = 0.05, quiet = FALSE)')
    '''
    table = {}
    for f in sorted(Primer):
        if re.match(r'Polymorphism_p[1-9].xls', f) != None:
            ssr_file = re.match(r'Polymorphism_p[1-9].xls', f).group()
            with open('RAD/12SSR/Primer/%s' % ssr_file) as ssr:
                for line in ssr:
                    if line[0] != '#':
                        line = line.strip().split()
                        SSR_ID = line[0]
                        Sample_Info = line[6]
                        # "a/b" repeat-count genotypes, one per sample
                        l_sample = re.findall(r'[0-9]*/[0-9]*', line[6])
                        # keep loci genotyped in more than 80% of samples
                        if len(l_sample) > 0.8*(len(sample)):
                            SSR_info[SSR_ID] = Sample_Info
                            poly = []
                            frequency = []
                            hetero = 0
                            for repeat in l_sample:
                                rr = repeat.split('/')
                                poly.append(rr[0])
                                poly.append(rr[1])
                                if rr[0] != rr[1]:
                                    hetero += 1
                            observed_heterozygosity = float(hetero)/len(l_sample)
                            for ele in set(sorted(poly)):
                                n = poly.count(ele)
                                frequency.append(float(n)/len(poly))
                            pic = PIC(len(set(sorted(poly))), frequency)
                            He = expected_heterozygosity(len(set(sorted(poly))), frequency)
                            table[SSR_ID] = [str(observed_heterozygosity), str(He), str(pic)]
    with open('RAD/12SSR/SSR_Analyse.txt', 'w') as o:
        o.write('SSR_ID\tobserved_heterozygosity\texpected_heterozygosity\tPIC\n')
        for tab in sorted(table):
            o.write('%s\t%s\n' %(tab, '\t'.join(table[tab])))
    # Structure export: allele scores, -9 for missing samples.
    with open('RAD/12SSR/SSR_Structure.txt', 'w') as s:
        s.write('\t\t%s\n' % '\t'.join(sorted(table)))
        for ind in (sample):
            pop = re.split(r'[0-9]', ind)[0]
            mark1 = []; mark2 = []
            for i in sorted(table):
                if ind in SSR_info[i]:
                    score = SSR_info[i].split(ind)[-1].split(';')[0].split(':')[-1]
                    score = score.split('/')
                    mark1.append(score[0])
                    mark2.append(score[1])
                if ind not in SSR_info[i]:
                    mark1.append('-9')
                    mark2.append('-9')
            s.write('%s\t%s\t%s\n' % (ind, pop_dic[pop], '\t'.join(mark1)))
            s.write('%s\t%s\t%s\n' % (ind, pop_dic[pop], '\t'.join(mark2)))
def SNPtype(ref, alt):
    """Classify a single-base substitution as a transition or transversion.

    Returns None (implicitly) when either allele is not a single base.
    """
    if len(ref) == 1 and len(alt) == 1 :
        # A<->G and C<->T are transitions; every other pairing is a
        # transversion.
        pair = ''.join(sorted(ref + alt))
        mutation = "Transitions" if pair in ("AG", "CT") else "Transversions"
        return mutation
def PIC(n, frequency):
    """Polymorphism information content over the first n allele frequencies.

    Faithfully mirrors the original adjacent-pair formulation: the cross term
    combines only consecutive frequencies (frequency[i] with frequency[i+1]),
    not every unordered pair.
    """
    homo = 0.0
    cross = 0.0
    for idx in range(n):
        p = frequency[idx]
        homo += p * p
        # the original let an IndexError terminate the cross term at the
        # final element; guard explicitly instead
        if idx + 1 < len(frequency):
            q = frequency[idx + 1]
            cross += 2 * p * p * q * q
    return 1 - homo - cross
def expected_heterozygosity(n, frequency):
    """Expected heterozygosity He = 1 - sum(p_i^2) over the first n allele
    frequencies.

    Idiom fix: replaces the Python-2-only ``xrange`` and manual accumulation
    with ``sum`` over a generator; behavior is identical under Python 2 and
    the function now also runs under Python 3.
    """
    return 1 - sum(frequency[i] * frequency[i] for i in range(n))
def qc(fq):
    """Run the full QC stage: base-quality stats, FastQC, duplication rate,
    and the merged summary spreadsheet."""
    getQ20(fq)
    fastqc(fq)
    statRepli(fq)
    info2xls1('RAD/qc/baseinfo.txt', 'RAD/qc/Repli.txt')
def survey():
    """Build the joined survey reference (wrapper for join_survey_scaffolds)."""
    join_survey_scaffolds()
    return 0
def RAD(fq):
    """Run the RAD analysis stages; all but SNPpattern are currently
    commented out (presumably already completed runs)."""
    #ala_align(fq)
    #cut_filter_sort()
    #HaplotypeCaller_GATK()
    #misa()
    SNPpattern()
    #SSR()
def main():
    """Entry point: '-t dir' builds the workspace, '-t x' runs the analysis."""
    args = read_params(sys.argv)
    if args['t'] == 'dir':
        creat_workspace()
    if args['t'] == 'x':
        fq = extract_fq_from_dir()
        threads = []
        # QC and survey used to run on worker threads; currently disabled.
        #t1 = threading.Thread(target = qc, args = (fq,))
        #threads.append(t1)
        #t2 = threading.Thread(target = survey)
        #threads.append(t2)
        #for t in threads:
        #    t.start()
        #for t in threads:
        #    t.join()
        RAD(fq)
        #analyse_SNP(args['ploidy'])
        analyse_SSR(args['ploidy'])
if __name__ == '__main__':
main()
|
import os, sys
from pprint import pprint
import GaussianRunPack
# Driver script: read a structure filename from argv and run a Gaussian DFT job.
usage ='Usage; %s infile' % sys.argv[0]
try:
    infilename = sys.argv[1]
except IndexError:
    # Fix: the bare "except:" also swallowed SystemExit/KeyboardInterrupt;
    # only a missing command-line argument should print the usage message.
    print (usage); sys.exit()
#if len(sys.argv)==3: option = sys.argv[2]
#For reading log file ####
#option = "eae homolumo"
#test_sdf = GaussianRunPack.GaussianDFTRun('B3LYP', 'STO-3G', 12, option,infilename,0)
#outdic = test_sdf.Extract_values(infilename,0,0,0,0,0,1,1,0,0,0,0)
#print (outdic)
#########################
#for starting from gaussian calculation
#option = "opt deen nmr ipe eae homolumo dipole uv fluor"
option = "opt stable2o2 eae ipe"  # job steps requested from the runner
solvent = "2.05"                  # solvent dielectric (string, as the runner expects)
test_sdf = GaussianRunPack.GaussianDFTRun('B3LYP', 'STO-3G', 12, option, solvent, infilename, 0)
outdic = test_sdf.run_gaussian()
print (outdic)
#######################################
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#!!!!!------------------------------------!!!!!
# Execute this in "su", or it won't work
#!!!!!------------------------------------!!!!!
#<<<<<startInit>>>>>
#for Displaying; show some frames
import wx
#for GPIO(requiers sudo)
import RPi.GPIO as GPIO
#for showing movies
from omxplayer import OMXPlayer
#for sleep/time managing(time has sleep)
import time
#from time import sleep
#for Picture
from PIL import Image
#for shutdown
import os
import sys
#for read csv files
import csv
#import pandas as pd
#<<<<<endInit>>>>>
#<<<<<startSetValues>>>>>
#temporarily disabled or permanently
#player = OMXPlayer('/home/pi/Desktop/ForTesUno.mp4')
# wx button IDs, used to dispatch inside click_button().
STATID=1111
RSETID=3333
MENUID=2222
PoffID=4444
# CSV-backed lookup tables: real key -> movie name, phantom -> real key.
EnvDictionary='MvDict.csv'
ConvDictionary='CnvDict.csv'
element_array = ("element_1", "element_2", "element_3", "element_4")
#these two should be received from another device.
#below values are just for test
realData="forteskey"
phantomData=00000000
movieName="ForTes"
application = wx.App()
frame = wx.Frame(None, wx.ID_ANY, u"test", size=(420,340))
frame.SetBackgroundColour("#000000")
MenuApp=wx.App()
MenuFrame=wx.Frame(frame, wx.ID_ANY, u"MENU", size=(200,200))
# NOTE(review): "dict" shadows the builtin of the same name.
dict={}
dictCNV={}
#<<startCalcRelativePath>>
def relPath(pathpath):
    """Resolve pathpath relative to this script's directory and return it.

    Fix: the original computed the normalized joined path but never returned
    it, so every call evaluated to None.
    """
    absoPath = os.path.dirname(os.path.abspath(__file__))
    return os.path.normpath(os.path.join(absoPath, pathpath))
#<<endCalcRelativePath>>
#<<<<<endSetValues>>>>>
#<<<<<startDictInit>>>>>
#Conversion table: realData >> movie path. Seed the CSV on first run, then
#load it into the in-memory dict.
if os.path.exists(EnvDictionary) is False:
    with open(EnvDictionary,'ab') as tempTf:
        writer=csv.writer(tempTf,lineterminator='\n')
        writer.writerow([realData,movieName])
        print "NonExisting of the Dictionary"
with open(EnvDictionary,'rb') as tempF:
    tempDict=csv.reader(tempF)
    for row in tempDict:
        print "row: "
        print row
        dict[row[0]]=row[1]
#Conversion table: phantomData >> realData. Same seed-then-load pattern.
if os.path.exists(ConvDictionary) is False:
    with open(ConvDictionary,'ab') as tempTfC:
        writerC=csv.writer(tempTfC,lineterminator='\n')
        writerC.writerow([realData,phantomData])
        print "NonExisting of the Dictionary at CNV"
with open(ConvDictionary,'rb') as tempFC:
    tempDictConv=csv.reader(tempFC)
    for rowC in tempDictConv:
        dictCNV[rowC[0]]=rowC[1]
#<<<<<endDictInit>>>>>
#<<<<<startDefineFunctions>>>>>
#event when you push the button
def click_button(event):
    """Dispatch a main-panel button press by its widget ID
    (STAT = play movie, MENU = open list box, RSET = reset colours,
    Poff = close and exit)."""
    if event.GetId() == STATID:
        STATbutton.SetBackgroundColour("#0000FF")#JustForTest
        img=Image.open("picts/"+dict[realData]+".png")#load picture(Because OMXplayer takes a bit time)
        img.show()#show picture(it's not a good way: it calls the default image app on each OS)
        #<<<startPlaying>>>
        player = OMXPlayer("movs/"+dict[realData]+".mp4")
        player.play()
        time.sleep(3)
        player.pause()
        #after quit() the player cannot be re-opened
        player.quit()
        #<<<endPlaying>>>
    elif event.GetId() == MENUID:
        MENUbutton.SetBackgroundColour("#FF0000")
        #<<<startMenuWXsetUP>>>
        #MenuApp = wx.App()
        #MenuFrame = wx.Frame(None, wx.ID_ANY, u"MENU", size=(200,200))
        MenuPanel = wx.Panel(MenuFrame, wx.ID_ANY)
        MenuPanel.SetBackgroundColour("#AFAFAF")
        #<<startMakingListBox>>
        listbox_1 = wx.ListBox(MenuPanel, wx.ID_ANY, size=(200,200), choices=element_array, style=wx.LB_ALWAYS_SB)
        listbox_1.Bind(wx.EVT_LISTBOX, listbox_select)
        #<<endMakingListBox>>
        layout = wx.BoxSizer(wx.HORIZONTAL)
        layout.Add(listbox_1, flag=wx.GROW | wx.ALL, border=3)
        MenuPanel.SetSizer(layout)
        #<<<endMenuWXsetUP>>>
        #<<<startMenuDisp>>>
        MenuFrame.Show()
        MenuApp.MainLoop()
        #<<<endMenuDisp>>>
    elif event.GetId() == RSETID:
        # NOTE(review): these colour strings lack the leading '#' used
        # elsewhere ("AFAFAF") -- confirm they render as intended.
        STATbutton.SetBackgroundColour("AFAFAF")
        MENUbutton.SetBackgroundColour("AFAFAF")
        POFFbutton.SetBackgroundColour("AFAFAF")
    elif event.GetId() == PoffID:
        POFFbutton.SetBackgroundColour("#00FF00")
        frame.Close()
        #Close won't work now
        #Enable the row below and the system will be shut down.
        #maybe we should show some confirmation first
        #os.system("sudo shutdown -h now")
        sys.exit()
#event when you push menu
def listbox_select(event):
    """Print the chosen menu entry, then close the menu window."""
    obj = event.GetEventObject()
    print obj.GetStringSelection()
    #MenuFrame.Close() needs MenuFrame as a global
    MenuFrame.Close()
#<<<<<endDefineFunctions>>>>>
#<<<<<startWXsetUp>>>>>
#Build the main panel with the four control buttons and start the event loop.
panel = wx.Panel(frame, wx.ID_ANY)
# NOTE(review): colour string lacks the leading '#' used elsewhere ("FF0000").
panel.SetBackgroundColour("FF0000")
#set buttons on the Panel
STATbutton = wx.Button(panel, STATID, u"STAT", size=(50,150))
RSETbutton = wx.Button(panel, RSETID, u"RSET", size=(50,150))
MENUbutton = wx.Button(panel, MENUID, u"MENU", size=(50,150))
POFFbutton = wx.Button(panel, PoffID, u"Poff", size=(50,150))
#wx.EVT_BUTTON calls the click_button() handler defined above
frame.Bind(wx.EVT_BUTTON, click_button, STATbutton)
#alternatively: STATbutton.Bind(wx.EVT_BUTTON, someNewFunction)
frame.Bind(wx.EVT_BUTTON, click_button, MENUbutton)
frame.Bind(wx.EVT_BUTTON, click_button, RSETbutton)
frame.Bind(wx.EVT_BUTTON, click_button, POFFbutton)
#arrange the buttons in a 2x2 grid
layout = wx.GridSizer(2,2)
layout.Add(STATbutton,flag=wx.SHAPED | wx.ALIGN_LEFT)
layout.Add(MENUbutton,flag=wx.SHAPED | wx.ALIGN_RIGHT)
layout.Add(RSETbutton,flag=wx.SHAPED | wx.ALIGN_LEFT)
layout.Add(POFFbutton,flag=wx.SHAPED | wx.ALIGN_RIGHT)
panel.SetSizer(layout)
#<<<<<endWXsetUP>>>>>
#<<<<<startApplicationLoop>>>>>
frame.Show()
application.MainLoop()
#<<<<<endApplicationLoop>>>>>
#libraries
import os
import sys
import json
import spotipy
import webbrowser
import spotipy.util as util
from json.decoder import JSONDecodeError
# Obtain a user token (clearing a stale cache file on failure), then run a
# simple search with client-credentials auth.
username = sys.argv[1]
scope = 'user-read-private user-read-playback-state user-modify-playback-state'
# Fix: the original had a bare "SPOTIPY_REDIRECT_URI" expression here, which
# raised NameError at runtime. The redirect URI must instead be supplied via
# the SPOTIPY_REDIRECT_URI environment variable (read by spotipy.util).
try:
    token = util.prompt_for_user_token(username, scope)
except (AttributeError, JSONDecodeError):
    # Fix: "exept" was a SyntaxError. A corrupt/stale token cache raises
    # these errors, so delete it and prompt again.
    os.remove(f".cache-{username}")
    token = util.prompt_for_user_token(username, scope)
# Local imports for the search below (the original referenced pprint and
# SpotifyClientCredentials without ever importing them).
import pprint
from spotipy.oauth2 import SpotifyClientCredentials
if len(sys.argv) > 1:
    search_str = sys.argv[1]
else:
    search_str = 'Radiohead'
sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials())
result = sp.search(search_str)
pprint.pprint(result)
#!/usr/bin/env python
# coding: utf-8
# In[12]:
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import random
# In[6]:
# Generate 9 folds with random pairs of patients for cross validation
def get_folds(data):
    """Shuffle patients and pair them into 2-patient cross-validation folds.

    Generalized from a hard-coded 9x2 reshape: any even number of patients
    now works (18 patients still produces the original 9 folds of 2).

    Args:
        data: mapping keyed by patient identifier (only the keys are used).

    Returns:
        numpy array of shape (len(data) // 2, 2) containing the shuffled keys.

    Raises:
        ValueError: if the number of patients is odd and cannot be paired.
    """
    keynames = list(data.keys())  # key names represent individual patients
    if len(keynames) % 2:
        raise ValueError("need an even number of patients to pair into folds")
    random.shuffle(keynames)  # shuffle them into a random order
    return np.reshape(keynames, [len(keynames) // 2, 2])
# In[7]:
## intakes training and test predictors, then scales them all down to be 0 mean and 1 variance (according to what is observed in training set)
def scale_data(xtrain,xtest):
    """Standardize features to zero mean / unit variance, fitting on train only.

    NOTE: mutates the passed DataFrames in place via `.loc[:, :]` assignment
    (callers get the same objects back), and also returns the fitted scaler so
    later data can be transformed consistently.
    """
    scaler = StandardScaler() # generate scaler
    xtrain.loc[:,:] = scaler.fit_transform(xtrain) # fit and transform on training
    xtest.loc[:,:] = scaler.transform(xtest) # naively transform test data without fitting to it
    return xtrain, xtest, scaler
# In[8]:
### takes in the by patient data and the folds by which to seperate, outputs training features/labels and testing features/labels for the current fold being tested
def get_train_test(data,folds,current_fold):
    """Split the per-patient data into train/test sets for one CV fold.

    Args:
        data: dict mapping patient id -> DataFrame containing a 'BG_PH'
            label column plus predictor columns.
        folds: (n_folds, 2) array of patient-id pairs (see get_folds).
        current_fold: row index of the fold held out for testing.

    Returns:
        test_y, test_x, train_y, train_x, scaler — predictors standardized by
        scale_data (scaler fitted on the training rows only).
    """
    ## get the test data
    # extract patients in current fold for testing
    test_data1 = data[folds[current_fold,0]]
    test_data2 = data[folds[current_fold,1]]
    # combine into one testing df
    test_data = pd.concat([test_data1,test_data2],ignore_index=True)
    # split into predictors and labels
    test_y = test_data['BG_PH']
    test_x = test_data.drop(columns=['BG_PH'])
    ## extract patients not in current fold for training
    # drop the testing fold and flatten the patient array for iteration
    train_pats = np.delete(folds,current_fold,axis=0).flatten()
    train_data = pd.DataFrame() # holder df for training data
    # combine into one training df
    for pat in train_pats:
        train_data = pd.concat([train_data,data[pat]], ignore_index=True)
    train_y = train_data['BG_PH']
    train_x = train_data.drop(columns=['BG_PH'])
    train_x, test_x, scaler = scale_data(train_x,test_x)
    return test_y, test_x, train_y, train_x, scaler
# In[9]:
def get_RMSE(actual, predicted):
    """Return the root-mean-squared error of *predicted* against *actual*."""
    squared_error = (actual - predicted) ** 2
    return np.sqrt(np.mean(squared_error))
# In[13]:
def PCA_transform(train_x, test_x):
    """Standardize both sets (fit on train only) and project them with PCA.

    BUG FIX: removed a leftover debug `print(train_x.head())`.

    Args:
        train_x, test_x: predictor DataFrames (mutated in place by scale_data).

    Returns:
        (train_pca, test_pca, explained_variance_ratio_): the projected
        arrays and the per-component explained-variance ratios.
    """
    train_x, test_x, __ = scale_data(train_x, test_x)  # scaler itself not needed here
    pca = PCA()  # full PCA with no automatic truncation
    train_pca = pca.fit_transform(train_x)  # fit the pca and transform the training values
    test_pca = pca.transform(test_x)  # transform the test data without fitting to it
    return train_pca, test_pca, pca.explained_variance_ratio_
# In[ ]:
|
"""
By default the read() method returns the whole text, but you can also specify how many characters you want to return.
"""
#read a 3 characters from the file.
f = open("car.txt","rt")
print(f.read(3))
f.close()
#read a line frome the file.
f = open("car.txt")
print(f.readline()) #it gives first line from the file.
print(f.readline()) #it gives second line from the file.
f.close()
#By looping throught file you can read whole file line by line.
f = open("car.txt")
for x in f:
print( x )
|
from datetime import date
class Student:
    """A student with a classification-based registration window.

    Attributes are private (name-mangled); use the accessors. ``dob`` is
    expected in 'MM/DD/YYYY' form (only calc_age parses it).
    """

    def __init__(self, id, name, dob, classification):
        self.__studentid = id
        self.__name = name
        self.__dob = dob                   # 'MM/DD/YYYY'
        self.__class = classification      # Senior/Junior/Sophomore/Freshmen
        self.__age = 0                     # filled in by calc_age()
        self.__register = ''               # filled in by register()

    def get_age(self):
        return self.__age

    def get_studentid(self):
        return self.__studentid

    def get_name(self):
        return self.__name

    def get_dob(self):
        return self.__dob

    def get_class(self):
        return self.__class

    def get_register(self):
        return self.__register

    def register(self):
        """Set the registration-window string for this classification."""
        if(self.__class == 'Senior'):
            self.__register = "Seniors - 11/1 thru 11/3"
        elif(self.__class == 'Junior'):
            self.__register = "Juniors - 11/4 thru 11/6"
        elif(self.__class == 'Sophomore'):
            # BUG FIX: corrected "Sophormores" / "thre" typos
            self.__register = "Sophomores - 11/7 thru 11/9"
        elif(self.__class == 'Freshmen'):
            self.__register = "Freshmen - 11/10 thru 11/12"
        else:
            self.__register = "classification not found"

    def calc_age(self):
        """Compute the student's age from dob.

        BUG FIX: the original used only the birth year, over-counting by one
        for anyone whose birthday has not yet occurred this calendar year.
        """
        today = date.today()
        month, day, year = (int(part) for part in self.__dob.split('/'))
        # subtract 1 when (month, day) is still ahead of today's date
        self.__age = today.year - year - ((today.month, today.day) < (month, day))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 15:13:50 2019
# repair data for KES paper
# Purpose: get data from corpus
# | pure NPs | pure NPs + pure VBs |
# | syntactic NPs | syntactic NPs + cooccurred VBs |
# output:
l2_frequency(nounlistinFile) | 309,463 extracted terms
l2_frequency(verblistinFile) | 192,462 extracted terms
l2_frequency(nounlistinFile2) | 291,488 extracted terms #For all NPS: delete empty, single letter
l2_frequency(verblistinFile2) | 133,149 extracted terms # For all vbs: clean out the lemmas in stopwords
l2_frequency(NpsPerfileList) | 291,488 extracted NPs #no source data
l2_frequency(VBsPerfileList) | 133,149 extracted VBs #no source data
l2_frequency(NPs_SubjObj_PerfileList) | 113,140 extracted NPs
l2_frequency(VBs_SubjObj_PerfileList) | 116,617 extracted NPs
l2_frequency(VBs_SubjObj_PerfileList2) | 92,338 extracted NPs # For all vbs: clean out the lemmas in stopwords
l2_frequency(NPsVBs_PerfileList) | 480,394 extracted NPs
l2_frequency(NPsVBs_SubjObj_PerfileList) | 205,478 extracted NPs
s
@author: zoe
"""
# --- standard library ---
import copy  # BUG FIX: copy.deepcopy is used below but was never imported
import os    # BUG FIX: os.getcwd is used below but was never imported
import re
from collections import Counter
from itertools import chain  # to unnest the elements in list
from itertools import compress  # to find first true in boolean

# --- third party ---
import numpy as np
import pandas as pd
import spacy
import xlsxwriter
from fuzzywuzzy import fuzz
from spacy.lang.en.stop_words import STOP_WORDS
#from spacy.en import STOP_WORDS #from spacy.lang.en.stop_words import STOP_WORDS | version>2.0

nlp = spacy.load('en_core_web_sm')
#nlp = spacy.load('en')
################################## Target data ################################
# pure NPs
# pure NPs + pure Verbs
# complete info
# 1. load data (abstracts of the CS subset, Y1 == 0)
file_wos_location = os.getcwd()+'/SourceData/Data.xlsx'
wosData = pd.read_excel(file_wos_location)
wosData_cs = wosData[wosData.Y1==0] #[6514 rows x 7 columns]
# 2. pure NPs
# from <LDA withOrout hypernym pattern.py> (2,3 mins)
# (2.1) recognize NPs with features; for every abstract collect:
#   verblistinFile: (text, lemma, dep) per VERB token
#   nounlistinFile: (chunk text, cleaned phrase, root lemma, root dep, head lemma)
nounlistinFile = []
verblistinFile = []
noisy_pos_tags = ['SYM','NUM','PUNCT','SPACE','SCONJ','X']
for file in wosData_cs.Abstract:
    doc = nlp(file)
    verbRes = []
    for token in doc:
        if token.pos_ == "VERB":
            verbRes.append((token.text, token.lemma_, token.dep_))
            # print(token.text, token.pos_)
    verblistinFile.append(verbRes)
    nounRes =[]
    for chunk in doc.noun_chunks:
        i=[]
        j=[]
        indWoNoise=[]
        nounPrase = ""
        # 2.1 find index of stop words / noisy-POS tokens in the NP
        for words in chunk:
            i.append(words.lower_ in STOP_WORDS) #solution3: perfect
            j.append(words.pos_ in noisy_pos_tags)
        # keep tokens that are neither stop words nor noisy POS
        indWoNoise=np.bitwise_not(np.bitwise_or(i,j))
        # 2.2 delete stop words in NP; lemmatize the first and last kept token
        # (NOTE: the loop variable `i` shadows the boolean list above, which
        # is no longer needed at this point)
        for i,tokens in enumerate(list(compress(chunk, indWoNoise ))):
            if i==0:
                nounPrase=str(tokens.lemma_)
            elif i!=0 and i != len(list(compress(chunk, indWoNoise )))-1:
                if str(tokens)=="'s":
                    nounPrase=nounPrase+str(tokens)  # attach possessive without a space
                else:
                    nounPrase=nounPrase+" "+str(tokens)
            elif i!=0 and i == len(list(compress(chunk, indWoNoise )))-1:
                if str(tokens)=="'s":
                    nounPrase=nounPrase+str(tokens)
                else:
                    nounPrase=nounPrase+" "+str(tokens.lemma_)
        nounRes.append((chunk.text, nounPrase, chunk.root.lemma_, chunk.root.dep_,chunk.root.head.lemma_))
    nounlistinFile.append(nounRes)
np.save(os.getcwd()+'/intermediateRes/nounlistinFile1', nounlistinFile)
#nounlistinFile = np.load(os.getcwd()+'/intermediateRes/nounlistinFile1'+'.npy')
# l2_frequency(nounlistinFile) | 309,463 extracted terms
np.save(os.getcwd()+'/intermediateRes/verblistinFile', verblistinFile)
#verblistinFile = np.load(os.getcwd()+'/intermediateRes/verblistinFile'+'.npy')
# l2_frequency(verblistinFile) | 192,462 extracted terms
# (2.2)
# For all NPs: delete empty and single-letter phrases
nounlistinFile2=[]
for file in nounlistinFile:
    nounRes2=[]
    for item in file:
        if len(item[1]) not in [0,1]:
            nounRes2.append(item)
    nounlistinFile2.append(nounRes2)
np.save(os.getcwd()+'/intermediateRes/nounlistinFile2', nounlistinFile2)
#nounlistinFile2 = np.load(os.getcwd()+'/intermediateRes/nounlistinFile2'+'.npy')
# l2_frequency(nounlistinFile2) | 291,488 extracted terms
# For all vbs: clean out the lemmas that are stop words
verblistinFile2 = []
for file in verblistinFile:
    verbRes2=[]
    for item in file:
        if item[1] not in STOP_WORDS:
            verbRes2.append(item)
    verblistinFile2.append(verbRes2)
np.save(os.getcwd()+'/intermediateRes/verblistinFile2', verblistinFile2)
#verblistinFile2 = np.load(os.getcwd()+'/intermediateRes/verblistinFile2'+'.npy')
# l2_frequency(verblistinFile2) | 133,149 extracted terms
# (2.3) extract NPs and NPs with syntactic roles | extract VBs and VBs with syntactic roles (clean verbs)
NpsPerfileList = []
#VBsPerfileList = []
NPs_SubjObj_PerfileList = []
VBs_SubjObj_PerfileList = []
for files in nounlistinFile2:
    NpsPerfile = []
#    VBsPerfile = []
    NPs_SubjObj_Perfile = []
    VBs_SubjObj_Perfile = []
    for words in files:
        NpsPerfile.append(words[1])
#        VBsPerfile.append(words[4])
        if words[3] in ['dobj','nsubjpass','nsubj']: # NP only occurred with specific syntactic roles
            NPs_SubjObj_Perfile.append(words[1])
        if words[3] in ['dobj','nsubjpass','nsubj']: # verbs only occurred with specific syntactic roles
            VBs_SubjObj_Perfile.append(words[4])
    NpsPerfileList += [NpsPerfile]
#    VBsPerfileList += [VBsPerfile]
    NPs_SubjObj_PerfileList += [NPs_SubjObj_Perfile]
    VBs_SubjObj_PerfileList += [VBs_SubjObj_Perfile]
# all (stop-word-cleaned) verb lemmas per file
VBsPerfileList = []
for files in verblistinFile2:
    VBsPerfile = []
    for words in files:
        VBsPerfile.append(words[1])
    VBsPerfileList += [VBsPerfile]
# clean the pointed VBs (nouns and stopwords are included) | verblistinFile2 | all verbs in files
VBs_SubjObj_PerfileList2 = copy.deepcopy(VBs_SubjObj_PerfileList)
for inx, file in enumerate(VBs_SubjObj_PerfileList):
    intersect = np.intersect1d(file, [i[1] for i in verblistinFile2[inx]])
    # only unique terms are stored in "intersect", but we need to keep term
    # frequency, hence the filtering list comprehension below
    if intersect.size != 0:
        VBs_SubjObj_PerfileList2[inx] = [i for i in file if i in intersect]
# (2.4) merge them into files | all NPs+VBs | syntactic-role NPs+VBs
NPsVBs_PerfileList = copy.deepcopy(NpsPerfileList)
for idx, files in enumerate(NpsPerfileList):
    NPsVBs_PerfileList[idx].extend(VBsPerfileList[idx])
NPsVBs_SubjObj_PerfileList = copy.deepcopy(NPs_SubjObj_PerfileList)
for idx, files in enumerate(NPs_SubjObj_PerfileList):
    NPsVBs_SubjObj_PerfileList[idx].extend(VBs_SubjObj_PerfileList2[idx])
# -------why we cannot use this explicit way (pointer from NPs) to extract verbs-----------
# problem: all verbs cannot be found in the way (NPs --> verbs)
# reason1: there are half of them is preposition
# reason2: not all verbs are pointed here
# reason3: for all the pointed terms, we can delete prepositions, but it is difficult to delete all nouns (no context, they are prone to be wrong tagged)
# for example: " rate" and "use" are tagged by Nouns
## for all VBs (and NPs) delete preposition; delete nouns
## input : STOP_WORDS | VBsPerfileList (if we calculate)
#
## test: whether NPs includes prepositions (answer: no)
#counter = 0 # the number of files including prepositions
#for files in NpsPerfileList:
# test = np.array(set(files)) # # STOP_WORDS is "set", NpsPerfileList is "list"
# if np.intersect1d(np.array(STOP_WORDS), test).size != 0:
# counter += 1
#
## purpose: for all VBs (and NPs) delete preposition
#VBsPerfileList2 = copy.deepcopy(VBsPerfileList)
#for inx, files in enumerate(VBsPerfileList):
# intersect = np.intersect1d(np.array(list(STOP_WORDS)), np.array(files),return_indices=True) # STOP_WORDS is "set", NpsPerfileList is "list"
# # result of intersection: |1st: value | 2nd: indice in 1st occurrence array| 3rd: indice in 2nd occurrence array |
# # the intersection between "sets" does not work, but "list" works
# if intersect[0].size != 0: # intersect.size can not work, because the results are tuples
## problem with "np.delete" # np.delete does not work
## np.delete(,intersect[2]) #the last occurrence in overlapping is slide [2]
# VBsPerfileList2[inx] = np.setdiff1d(np.array(VBsPerfileList2[inx]),intersect[0])
## transform from numpy array into list
#VBsPerfileList2 = [list(i) for i in VBsPerfileList2]
## l2_frequency(VBsPerfileList)- l2_frequency(VBsPerfileList2) 183,144 preposition terms are deleted
##np.save(os.getcwd()+'/intermediateRes/VBsPerfileList2', VBsPerfileList2)
##VBsPerfileList2 = np.load(os.getcwd()+'/intermediateRes/VBsPerfileList2'+'.npy')
## l2_frequency(VBsPerfileList2) | 108,344 extracted NPs
#
#
##purpose: for all VBs (and NPs) delete nouns
#VBsPerfileList3 = copy.deepcopy(VBsPerfileList2)
#for files in VBsPerfileList3[1:2]:
# for terms in files:
## print(terms)
# test = nlp(str(terms))[0]
## if test.pos_ == "NOUN":
# print(test.text, test.lemma_, test.pos_, test.tag_, test.dep_,test.shape_, test.is_alpha, test.is_stop)
# Persist every per-file term list for reuse; the commented np.load lines show
# how to restore each one (counts are l2_frequency tallies).
np.save(os.getcwd()+'/intermediateRes/NpsPerfileList', NpsPerfileList)
#NpsPerfileList = np.load(os.getcwd()+'/intermediateRes/NpsPerfileList'+'.npy')
# l2_frequency(NpsPerfileList) | 291,488 extracted NPs
np.save(os.getcwd()+'/intermediateRes/VBsPerfileList', VBsPerfileList)
#VBsPerfileList = np.load(os.getcwd()+'/intermediateRes/VBsPerfileList'+'.npy')
# l2_frequency(VBsPerfileList) | 133,149 extracted VBs
np.save(os.getcwd()+'/intermediateRes/NPs_SubjObj_PerfileList', NPs_SubjObj_PerfileList)
#NPs_SubjObj_PerfileList = np.load(os.getcwd()+'/intermediateRes/NPs_SubjObj_PerfileList'+'.npy')
# l2_frequency(NPs_SubjObj_PerfileList) | 113,140 extracted NPs
np.save(os.getcwd()+'/intermediateRes/VBs_SubjObj_PerfileList', VBs_SubjObj_PerfileList)
#VBs_SubjObj_PerfileList = np.load(os.getcwd()+'/intermediateRes/VBs_SubjObj_PerfileList'+'.npy')
# l2_frequency(VBs_SubjObj_PerfileList) | 113,140 extracted NPs
np.save(os.getcwd()+'/intermediateRes/VBs_SubjObj_PerfileList2', VBs_SubjObj_PerfileList2)
#VBs_SubjObj_PerfileList2 = np.load(os.getcwd()+'/intermediateRes/VBs_SubjObj_PerfileList2'+'.npy')
# l2_frequency(VBs_SubjObj_PerfileList2) | 92,274 extracted NPs
np.save(os.getcwd()+'/intermediateRes/NPsVBs_PerfileList', NPsVBs_PerfileList)
#NPsVBs_PerfileList = np.load(os.getcwd()+'/intermediateRes/NPsVBs_PerfileList'+'.npy')
# l2_frequency(NPsVBs_PerfileList) | 424,637 extracted NPs
#TODO: solved: l2_frequency(NPsVBs_PerfileList) = l2_frequency(NpsPerfileList)+l2_frequency(VBsPerfileList)
# 291488 + 133149 = 424637
np.save(os.getcwd()+'/intermediateRes/NPsVBs_SubjObj_PerfileList', NPsVBs_SubjObj_PerfileList)
#NPsVBs_SubjObj_PerfileList = np.load(os.getcwd()+'/intermediateRes/NPsVBs_SubjObj_PerfileList'+'.npy')
# l2_frequency(NPsVBs_SubjObj_PerfileList) | 205,414 extracted NPs
|
# -*- coding: utf-8 -*-
# ------------------------------------------------------------
# TheGroove360 / XBMC Plugin
# Canale
# ------------------------------------------------------------
import os
import re
import time
import urllib
import urlparse
from core import httptools
from core import config
from core import logger
from core import scrapertools
from core import servertools
from core.item import Item
from core.tmdb import infoSod
__channel__ = "mondolunatico_due"  # internal channel id attached to every Item
host = "http://mondolunatico.org/"  # base URL of the scraped site (trailing slash)
# NOTE(review): `host` already ends with '/', so this produces '...org//pass/...';
# confirm the server tolerates the double slash before changing it.
captcha_url = '%s/pass/CaptchaSecurityImages.php?width=100&height=40&characters=5' % host
def mainlist(item):
    """Build the channel root menu: film listings (cinema/new), year and
    category browsers, the TV-series listing, and two search entries."""
    logger.info("[.mondolunatico_due] mainlist")
    itemlist = [Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Cinema[/COLOR]",
                     action="peliculas",
                     url="%sstream/genre/al-cinema/" % host,
                     extra="movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/popcorn_cinema_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Novita'[/COLOR]",
                     action="peliculas",
                     url="%sstream/movies/" % host,
                     extra="movie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/hd_movies_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film - [COLOR orange]Anno[/COLOR]",
                     action="cat_years",
                     url="%sstream/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_year_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Film & Serie TV - [COLOR orange]Categorie[/COLOR]",
                     action="categorias",
                     url="%sstream/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genres_P.png"),
                Item(channel=__channel__,
                     title="[COLOR azure]Serie TV - [COLOR orange]Aggiornate[/COLOR]",
                     action="peliculas_tv",
                     extra="serie",
                     url="%sstream/tvshows/" % host,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/new_tvshows_P.png"),
                Item(channel=__channel__,
                     title="[COLOR orange]Cerca Serie...[/COLOR]",
                     action="search",
                     extra="serie",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png"),
                Item(channel=__channel__,
                     title="[COLOR yellow]Cerca ...[/COLOR]",
                     action="search",
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/search_P.png")]
    return itemlist
# ==================================================================================================================================================
def categorias(item):
    """List the site's 'Film Per Genere' categories.

    Categories marked '*' on the site contain both films and TV series and
    are highlighted; a few service categories are skipped entirely.
    """
    logger.info("[.mondolunatico_due] categorias")
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    bloque = scrapertools.get_match(data, '<h2>Film Per Genere</h2>(.*?)</li></ul></nav></div>')
    # Extract the entries
    patron = '<li[^>]+><a href="([^"]+)"[^>]>([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(bloque)
    for scrapedurl, scrapedtitle in matches:
        if "*" in scrapedtitle:
            scrapedtitle = "[COLOR orange]" + scrapedtitle + "[/COLOR]"
            if "&" in scrapedtitle or "Mystery" in scrapedtitle or "Kids" in scrapedtitle:
                scrapedtitle = scrapedtitle + " ([COLOR orange]Film & Serie TV[/COLOR])"
        else:
            scrapedtitle = "[COLOR azure]" + scrapedtitle + "[/COLOR]"
        # service categories that are not browsable listings
        if "Download" in scrapedtitle or "Al Cinema" in scrapedtitle or "Reality" in scrapedtitle or "History" in scrapedtitle:
            continue
        scrapedtitle=scrapedtitle.replace("televisione film", "Film TV").replace("*", "").strip()
        scrapedtitle=scrapedtitle.replace("Richieste", "Film piu' Richiesti").replace("SubITA", "Film Sub-ITA").strip()
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/genre_P.png",
                 folder=True))
    return itemlist
# ==================================================================================================================================================
def cat_years(item):
    """Build one browsable menu entry per release year found on the page."""
    logger.info("[.mondolunatico_due] years")
    # Download the page and isolate the 'Release year' navigation block
    page = httptools.downloadpage(item.url).data
    year_block = scrapertools.get_match(page, '<h2>Release year</h2>(.*?)</li></ul></nav></div>')
    # Each entry is a (url, year-label) pair
    year_links = re.compile('<li><a href="([^"]+)">(.*?)</a></li>', re.DOTALL).findall(year_block)
    return [Item(channel=__channel__,
                 action="peliculas",
                 title="[COLOR azure]" + year_label + "[/COLOR]",
                 url=year_url,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/movie_year_P.png",
                 folder=True)
            for year_url, year_label in year_links]
# ==================================================================================================================================================
def search(item, texto):
    """Run a site search for *texto* and return the parsed result items.

    Any scraping failure is logged and an empty list returned so the global
    multi-channel search can keep going.
    """
    logger.info("[.mondolunatico_due] " + item.url + " search " + texto)
    item.url = host + "stream/?s=" + texto
    try:
        #if item.extra == "movie":
        return pelis_src(item)
    # Keep the search going in case of error
    except Exception:  # BUG FIX: was a bare except: — also swallowed SystemExit/KeyboardInterrupt
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
# ==================================================================================================================================================
def pelis_src(item):
    """Parse search results into movie/series items, with manual pagination.

    item.url may carry '{}<page>' appended; only the slice of regex matches
    for page *p* (numpage entries) is emitted, plus a 'Successivi >>' pager.
    """
    logger.info("[streamondemand-pureita mondolunatico_new] pelis_src")
    itemlist = []
    numpage = 14  # results shown per page
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    # Download the page
    data = httptools.downloadpage(item.url).data
    # Extract the contents
    patron = '<div class="thumbnail animation-2">\s*<a href="([^"]+)">\s*<img src="([^"]+)" alt="(.*?)" />.*?'
    patron += '<span class="rating">([^<]+)<\/span>.*?'
    patron += '<span class="year">([^<]+)</span>.*?'
    patron += '<div class="contenido">\s*<p>([^"]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for i, (scrapedurl, scrapedthumbnail, scrapedtitle, rating, year, scrapedplot) in enumerate(matches):
        # skip matches outside the current page window
        if (p - 1) * numpage > i: continue
        if i >= p * numpage: break
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        rating=rating.replace("IMDb ", "")
        rating=" ([COLOR yellow]" + rating + "[/COLOR])"
        if year:
            date = " (" + year + ")"
            years=" ([COLOR yellow]" + year + "[/COLOR])"
        # NOTE: when year == "", `"" in scrapedtitle` is always True, so
        # years/date are always defined before use below.
        if year in scrapedtitle:
            years =""
            date=""
        # 'tvshows' URLs are series: routed to the episode list
        if "tvshows" in scrapedurl:
            type=" ([COLOR yellow]Serie TV[/COLOR])"
        else:
            type=""
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="episodios" if "tvshows" in scrapedurl else "findvideos",
                 title="[COLOR azure]" + title + "[/COLOR]" + type + years + rating,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 fulltitle=title + date,
                 show=title,
                 folder=True), tipo="tv" if "tvshows" in scrapedurl else "movie"))
    # Build the pager
    if len(matches) >= p * numpage:
        # more local matches left: next page re-parses the same URL at p+1
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="pelis_src",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                 folder=True))
    else:
        # otherwise follow the site's own next-page link, if present
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" ><span class="icon-chevron-right">')
        if next_page != "":
            itemlist.append(
                Item(channel=__channel__,
                     action="pelis_src",
                     title="[COLOR orange]Successivi >>[/COLOR]",
                     url=next_page,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
    return itemlist
# ==================================================================================================================================================
def peliculas(item):
    """Parse a film listing page into items (rating/quality/year decorated),
    with the same '{}<page>' manual pagination scheme as pelis_src."""
    logger.info("[streamondemand-pureita mondolunatico_new] peliculas")
    itemlist = []
    numpage = 14  # results shown per page
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    # Download the page
    data = httptools.downloadpage(item.url).data
    # Extract the entries
    patron = '<div class="poster">\s*<img src="([^"]+)" \s*alt="([^"]+)">\s*'
    patron += '<div[^>]+>[^>]+></span>\s*([^<]+)<\/div>\s*[^>]+>\s*[^>]+>(.*?)<.*?'
    patron += 'a href="([^"]+)">[^>]+><\/div><\/a>.*?<\/a>\s*<\/h3>\s*<span>([^<]+)<\/span>.*?'
    patron += '<div class="texto">(.*?)</div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for i, (scrapedthumbnail, scrapedtitle, rating, quality, scrapedurl, year, scrapedplot ) in enumerate(matches):
        # skip matches outside the current page window
        if (p - 1) * numpage > i: continue
        if i >= p * numpage: break
        scrapedtitle = scrapedtitle.replace("+", "piu").replace("&", "e")
        if "Fichier" in scrapedtitle or "***" in scrapedtitle:
            continue
        # decorate quality / rating / year only when meaningful
        if quality ==" ":
            quality=""
        if quality:
            quality=quality.replace(".", " ").strip()
            quality = " ([COLOR yellow]" + quality + "[/COLOR])"
        else:
            quality=""
        if rating =="0":
            rating=""
        if rating:
            rating=rating.replace(",", ".").strip()
            rating = " ([COLOR yellow]" + rating + "[/COLOR])"
        else:
            rating=""
        if year:
            years = " ([COLOR yellow]" + year + "[/COLOR])"
            date =" ("+year+")"
        else:
            years =""
            date =""
        if year in scrapedtitle:
            years =""
            date=""
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="episodios" if "tvshows" in scrapedurl else "findvideos",
                 contentType="tv" if "tvshows" in scrapedurl else "movie",
                 title="[COLOR azure]" + title + "[/COLOR]" + years + quality + rating,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title + date,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo="tv" if "tvshows" in scrapedurl else "movie"))
    # Build the pager
    if len(matches) >= p * numpage:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="peliculas",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                 folder=True))
    else:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" ><span class="icon-chevron-right">')
        if next_page != "":
            itemlist.append(
                Item(channel=__channel__,
                     action="peliculas",
                     title="[COLOR orange]Successivi >>[/COLOR]",
                     url=next_page,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
    return itemlist
# ==================================================================================================================================================
def peliculas_list(item):
    """Parse a plain link list (<b><a ...>) into series items, with the same
    '{}<page>' manual pagination as the other listing functions."""
    logger.info("[streamondemand-pureita mondolunatico_new] peliculas_list")
    itemlist = []
    numpage = 14  # results shown per page
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    # Download the page
    data = httptools.downloadpage(item.url).data
    # Extract the contents
    patron = '<b><a href="([^"]+)">([^<]+)<\/a></li></b><br>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapedthumbnail = ""  # this listing carries no artwork/plot
    scrapedplot = ""
    for i, (scrapedurl, scrapedtitle) in enumerate(matches):
        # skip matches outside the current page window
        if (p - 1) * numpage > i: continue
        if i >= p * numpage: break
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="episodios",
                 contentType="tv",
                 title="[COLOR azure]" + title + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))
    # Build the pager
    if len(matches) >= p * numpage:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="peliculas_list",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                 folder=True))
    return itemlist
# ==================================================================================================================================================
def peliculas_tv(item):
    """Parse the TV-series listing into episode-browsable items, with the
    same '{}<page>' manual pagination as the other listing functions."""
    logger.info("[streamondemand-pureita mondolunatico_new] peliculas_tv")
    itemlist = []
    numpage = 14  # results shown per page
    p = 1
    if '{}' in item.url:
        item.url, p = item.url.split('{}')
        p = int(p)
    # Download the page
    data = httptools.downloadpage(item.url).data
    # Extract the entries
    patron = '<div class="poster">\s*<img src="([^"]+)" \s*'
    patron += 'alt="([^"]+)">\s*<div[^>]+>[^>]+><\/span>\s*([^<]+)<\/div>\s*'
    patron += '[^>]+>\s*<\/div>\s*<a href="([^"]+)">.*?'
    patron += '<\/h3>\s*<span>([^<]+)<\/span>.*?<div class="texto">(.*?)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for i, (scrapedthumbnail, scrapedtitle, rating, scrapedurl, year, scrapedplot ) in enumerate(matches):
        # skip matches outside the current page window
        if (p - 1) * numpage > i: continue
        if i >= p * numpage: break
        scrapedtitle=scrapedtitle.replace("(Cliccate La Scheda Info per vedere i link)", "")
        scrapedtitle=scrapedtitle.replace("’", "'").replace("Flash", "The Flash").strip()
        title = scrapertools.decodeHtmlentities(scrapedtitle)
        # decorate rating / year only when meaningful
        if rating =="0":
            rating=""
        if rating:
            rating=" ([COLOR yellow]" + rating + "[/COLOR])"
        else:
            rating=""
        if year:
            years = " ([COLOR yellow]" + year + "[/COLOR])"
            date =" ("+year+")"
        else:
            years =""
            date =""
        if year in scrapedtitle:
            years =""
            date=""
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="episodios",
                 contentType="tv",
                 title="[COLOR azure]" + title + "[/COLOR]" + years + rating,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=title + date,
                 show=title,
                 plot=scrapedplot,
                 folder=True), tipo='tv'))
    # Build the pager
    if len(matches) >= p * numpage:
        scrapedurl = item.url + '{}' + str(p + 1)
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="peliculas_tv",
                 title="[COLOR orange]Successivi >>[/COLOR]",
                 url=scrapedurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png",
                 folder=True))
    else:
        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)" ><span class="icon-chevron-right">')
        if next_page != "":
            itemlist.append(
                Item(channel=__channel__,
                     action="peliculas_tv",
                     title="[COLOR orange]Successivi >>[/COLOR]",
                     url=next_page,
                     thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/channels_icon_pureita/next_1.png"))
    return itemlist
# ==================================================================================================================================================
def episodios(item):
    """Collect episode links for a series page.

    Episode links live behind keeplinks.co protection pages and behind the
    site's own captcha-protected '/pass/index.php' redirector; both are
    resolved (the captcha interactively), their HTML is accumulated, and the
    episode anchors are then scraped from the combined text.
    """
    logger.info("streamondemand.mondolunatico_new episodios")
    itemlist = []
    # Download the page
    data = httptools.downloadpage(item.url).data
    html = []
    # NOTE(review): this loop runs the whole resolution twice over the same
    # `data`; duplicates are later filtered via `encontrados`, but confirm
    # whether the second pass is an intentional retry.
    for i in range(2):
        # keeplinks.co protection pages: setting the flag cookie reveals the links
        patron = 'href="(https?://www\.keeplinks\.co/p92/([^"]+))"'
        matches = re.compile(patron, re.DOTALL).findall(data)
        for keeplinks, id in matches:
            _headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                        ['Referer', keeplinks]]
            html.append(httptools.downloadpage(keeplinks, headers=_headers).data)
        # captcha-protected redirector pages on the site itself
        patron = r'="(%s/pass/index\.php\?ID=[^"]+)"' % host
        matches = re.compile(patron, re.DOTALL).findall(data)
        for scrapedurl in matches:
            tmp = httptools.downloadpage(scrapedurl).data
            if 'CaptchaSecurityImages.php' in tmp:
                # Download the captcha image and ask the user to solve it
                img_content = httptools.downloadpage(captcha_url).data
                captcha_fname = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
                with open(captcha_fname, 'wb') as ff:
                    ff.write(img_content)
                from platformcode import captcha
                keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
                keyb.doModal()
                if keyb.isConfirmed():
                    captcha_text = keyb.getText()
                    post_data = urllib.urlencode({'submit1': 'Invia', 'security_code': captcha_text})
                    tmp = httptools.downloadpage(scrapedurl, post=post_data).data
                # best-effort cleanup of the temporary captcha image
                try:
                    os.remove(captcha_fname)
                except:
                    pass
            html.append(tmp)
    data = '\n'.join(html)
    encontrados = set()  # de-duplicates episode titles across both passes
    patron = '<p><a href="([^"]+?)">([^<]+?)</a></p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        if "mondolunatico.org/goto" in scrapedtitle:
            scrapedtitle="Lista episodi in Fase di Ripristino >>"
        else:
            scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        # strip release-name noise from the file name to get a clean title
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).title()
        scrapedtitle=scrapedtitle.replace(".-.", " ").replace("Mkv", "").replace(".", " ").replace("Mp4", "").replace("Ac3", "").replace("Soft","")
        scrapedtitle=scrapedtitle.replace("By Bloody", "").replace("Avi", "").replace("Xvid", "").replace("Dvdrip", "").replace("_"," ").replace("Spft","")
        scrapedtitle=scrapedtitle.replace("Internal", "").replace("%2520", " ").replace("Html", "").replace("Dvdrip", "").strip()
        if "=" in scrapedtitle:
            continue
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 plot=item.plot,
                 show=item.show))
    # second anchor style used by some series pages
    patron = '<a href="([^"]+)" target="_blank" class="selecttext live">([^<]+)</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        if "mondolunatico.org/goto" in scrapedtitle:
            scrapedtitle="Lista Episodi in Fase di Ripristino >>"
        else:
            scrapedtitle = scrapedtitle.split('/')[-1]
        if not scrapedtitle or scrapedtitle in encontrados: continue
        encontrados.add(scrapedtitle)
        scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).title()
        scrapedtitle=scrapedtitle.replace(".-.", " ").replace("Mkv", "").replace(".", " ").replace("Mp4", "").replace("Ac3", "").replace("_"," ")
        scrapedtitle=scrapedtitle.replace("By Bloody", "").replace("Avi", "").replace("Xvid", "").replace("Dvdrip-", "").replace("Soft-", "").replace("Spft-","")
        scrapedtitle=scrapedtitle.replace("Internal", "").replace("%2520", " ").replace("Html", "").replace("Dvdrip", "").strip()
        if "=" in scrapedtitle:
            continue
        itemlist.append(
            Item(channel=__channel__,
                 extra=item.extra,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 plot=item.plot,
                 show=item.show))
    return itemlist
# ==================================================================================================================================================
def findvideos_tv(item):
    """Build playable items for a TV-series page.

    Collects captcha-gated episode links, follows internal episode-list
    pages, unwraps keeplinks.co/.eu protected links, and finally scans the
    accumulated HTML for any recognisable video hoster.
    """
    logger.info("[streamondemand-pureita mondolunatico_new] findvideos_tv")
    itemlist = []
    # Download the page (for 'serie' items the raw data is carried in item.url)
    data = item.url if item.extra == 'serie' else httptools.downloadpage(item.url).data
    # Extract the captcha-protected episode links
    patron = r'noshade>(.*?)<br>.*?<a href="(%s/pass/index\.php\?ID=[^"]+)"' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedtitle, scrapedurl in matches:
        scrapedtitle = scrapedtitle.replace('*', '').replace('Streaming', '').strip()
        # NOTE(review): `title` is assigned but never used — confirm it was
        # meant to feed the Item title below.
        title = '%s - [%s]' % (scrapedtitle, item.title)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title="[[COLOR orange]" + scrapedtitle + "[/COLOR]] -" + item.title,
                 url=scrapedurl,
                 thumbnail=item.thumbnail,
                 fulltitle=item.fulltitle,
                 show=item.show,
                 server='captcha',
                 folder=False))
    # Follow internal episode-list pages and append their HTML to `data`.
    patron = 'href="(%s/stream/links/\d+/)"' % host
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data += httptools.downloadpage(scrapedurl).data
    ### robalo fix obfuscator - start ####
    # Unwrap keeplinks protected pages: the cookie marks the links as
    # already confirmed so the target URLs appear in the returned HTML.
    patron = 'href="(https?://www\.keeplinks\.(?:co|eu)/p92/([^"]+))"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for keeplinks, id in matches:
        headers = [['Cookie', 'flag[' + id + ']=1; defaults=1; nopopatall=' + str(int(time.time()))],
                   ['Referer', keeplinks]]
        html = httptools.downloadpage(keeplinks, headers=headers).data
        data += str(scrapertools.find_multiple_matches(html, '</lable><a href="([^"]+)" target="_blank"'))
    ### robalo fix obfuscator - end ####
    # Append the content of every embedded frame, then scan everything
    # collected so far for known video hosters.
    patron = 'src="([^"]+)" frameborder="0"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl in matches:
        data += httptools.downloadpage(scrapedurl).data
    for videoitem in servertools.find_video_items(data=data):
        servername = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = "".join(['[[COLOR orange]' + servername.capitalize() + '[/COLOR]] - ', item.title])
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
        itemlist.append(videoitem)
    return itemlist
# ==================================================================================================================================================
def findvideos(item):
    """Resolve the dooplay player entries of a page into playable items."""
    logger.info("[streamondemand-pureita mondolunatico_new] findvideos")
    # Fetch the page once, then append every ajax player response to it so a
    # single hoster scan at the end sees everything.
    data = httptools.downloadpage(item.url).data
    pattern = '<li id=[^=]+="dooplay_player_option[^=]+="([^"]+)" data-nume="([^"]+)"'
    player_refs = re.compile(pattern, re.DOTALL).findall(data)
    ajax_url = "%sstream/wp-admin/admin-ajax.php" % host
    for post_id, player_num in player_refs:
        payload = urllib.urlencode({'action': 'doo_player_ajax', 'post': post_id, 'nume': player_num})
        data += httptools.downloadpage(ajax_url, post=payload).data
    itemlist = servertools.find_video_items(data=data)
    for videoitem in itemlist:
        server_label = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = '[[COLOR orange]' + server_label.capitalize() + '[/COLOR]] - ' + item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.plot = item.plot
        videoitem.channel = __channel__
    return itemlist
# ==================================================================================================================================================
def play(item):
    """Resolve a playable URL, solving the site's captcha gate when present."""
    logger.info("[streamondemand-pureita mondolunatico_new] play")
    itemlist = []
    if item.server == 'captcha':
        headers = [['Referer', item.url]]
        # Download the page
        data = httptools.downloadpage(item.url, headers=headers).data
        if 'CaptchaSecurityImages.php' in data:
            # Download the captcha image to disk so the keyboard dialog can show it.
            # NOTE(review): captcha_url is a module-level global defined
            # elsewhere in this file — confirm it is set before play() runs.
            img_content = httptools.downloadpage(captcha_url, headers=headers).data
            captcha_fname = os.path.join(config.get_data_path(), __channel__ + "captcha.img")
            with open(captcha_fname, 'wb') as ff:
                ff.write(img_content)
            # Ask the user to type the captcha text, then re-request the page
            # with the solution posted.
            from platformcode import captcha
            keyb = captcha.Keyboard(heading='', captcha=captcha_fname)
            keyb.doModal()
            if keyb.isConfirmed():
                captcha_text = keyb.getText()
                post_data = urllib.urlencode({'submit1': 'Invia', 'security_code': captcha_text})
                data = httptools.downloadpage(item.url, post=post_data, headers=headers).data
            # Best-effort cleanup of the temporary captcha image.
            try:
                os.remove(captcha_fname)
            except:
                pass
        itemlist.extend(servertools.find_video_items(data=data))
        for videoitem in itemlist:
            videoitem.title = item.title
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.show = item.show
            videoitem.plot = item.plot
            videoitem.channel = __channel__
    else:
        # Non-captcha servers are already playable as-is.
        itemlist.append(item)
    return itemlist
# ==================================================================================================================================================
# ==================================================================================================================================================
# ==================================================================================================================================================
def findvideos_movie(item):
    """Resolve every recognisable video link on a movie page."""
    page = httptools.downloadpage(item.url).data
    itemlist = list(servertools.find_video_items(data=page))
    for videoitem in itemlist:
        label = re.sub(r'[-\[\]\s]+', '', videoitem.title)
        videoitem.title = '[[COLOR orange]' + label.capitalize() + '[/COLOR]] - ' + item.title
        videoitem.fulltitle = item.fulltitle
        videoitem.show = item.show
        videoitem.thumbnail = item.thumbnail
        videoitem.plot = item.plot
        videoitem.channel = __channel__
    return itemlist
# ==================================================================================================================================================
def findvideos_x(item):
    """Gather embedded players from an adult-section page and resolve them."""
    logger.info("[streamondemand-pureita mondolunatico_new] findvideos_x")
    itemlist = []
    # Download the page.
    data = httptools.downloadpage(item.url, headers=headers).data
    # Every embedded frame contributes to the blob that is scanned below:
    # redirect-style links ("dir?") contribute their final URL, the rest
    # contribute their page body.
    frame_urls = re.compile('src="([^"]+)" frameborder="0"', re.DOTALL).findall(data)
    for frame_url in frame_urls:
        if "dir?" in frame_url:
            data += httptools.downloadpage(frame_url).url
        else:
            data += httptools.downloadpage(frame_url).data
    for videoitem in servertools.find_video_items(data=data):
        videoitem.title = '[COLOR orange]' + videoitem.title + ' [COLOR azure]- ' + item.title + '[/COLOR]'
        videoitem.fulltitle = item.fulltitle
        videoitem.thumbnail = item.thumbnail
        videoitem.show = item.show
        videoitem.plot = item.plot
        videoitem.channel = __channel__
        itemlist.append(videoitem)
    return itemlist
|
# %%
from recognize.source.contours import findContours
from recognize.source.points import firstPoint, secondPoint, thirdPoint
import numpy as np
from math import acos, pi, sqrt
# %%
def _point_or_origin(extractor, contours, side, part):
    """Apply *extractor* to contours[side][part]; return [0, 0] when the
    contour is missing or the extractor fails."""
    try:
        x, y = extractor(contours[side][part])
    except Exception:
        x, y = 0, 0
    return [x, y]


def findPoints(image):
    """Extract the three feature point pairs from an RGB image.

    1. findContours() (contours.py) detects the contour features.
    2. firstPoint()/secondPoint()/thirdPoint() (points.py) pick the three
       key points from those contours for the left and right side.

    Parameters
    ----------
    image : numpy.array
        RGB image.

    Returns
    -------
    list
        Shape (3, 2, 2):
        [[left point, right point],
         [left point, right point],
         [left point, right point]]
        A point is [0, 0] when it could not be detected.
    """
    try:
        contours = findContours(image)
    except Exception:
        # No contours at all: three pairs of origin points (distinct lists,
        # like the original code returned).
        return [[[0, 0], [0, 0]] for _ in range(3)]
    # (extractor, contour part index) for each of the three key points;
    # the left side is contours[0], the right side contours[1].
    plan = ((firstPoint, 0), (secondPoint, 1), (thirdPoint, 0))
    points = []
    for extractor, part in plan:
        points.append([_point_or_origin(extractor, contours, 0, part),
                       _point_or_origin(extractor, contours, 1, part)])
    return points  # (3, 2, 2)
# %%
def _segment_angle(v1, v2):
    """Return 180 minus the angle in degrees between vectors *v1* and *v2*.

    Returns 0 (with the original "Zero divisor!" message) when a vector has
    zero length or the degenerate dot product makes the conversion fail.
    """
    sq1 = v1[0] * v1[0] + v1[1] * v1[1]
    sq2 = v2[0] * v2[0] + v2[1] * v2[1]
    dot = v1[0] * v2[0] + v1[1] * v2[1]
    try:
        return 180 - int(acos(dot / sqrt(sq1) / sqrt(sq2)) / pi * 180)
    except (ZeroDivisionError, ValueError):
        # Repeated points give 0/0 (NaN) which int() rejects with ValueError.
        print("Zero divisor!")
        return 0


def angleCalculate(points):
    """Compute, for each side, the angle formed by the two segments joining
    the three key points.

    The duplicated left/right inline computation was factored into
    _segment_angle, and the bare excepts were narrowed to the two real
    failure modes.

    Parameters
    ----------
    points : numpy.array
        Pixel coordinates of the key points, shape (3, 2, 2).

    Returns
    -------
    list of int
        [left angle, right angle] in degrees; 0 when undefined.
    """
    points = np.asarray(points)
    vl1 = points[1][0] - points[0][0]
    print("vl1:{}".format(vl1))
    vl2 = points[2][0] - points[1][0]
    print("vl2:{}".format(vl2))
    vr1 = points[1][1] - points[0][1]
    print("vr1:{}".format(vr1))
    vr2 = points[2][1] - points[1][1]
    print("vr2:{}".format(vr2))
    return [_segment_angle(vl1, vl2), _segment_angle(vr1, vr2)]
# %%
def predict(points):
    """Classify each side as Normal / Pronation / Supination.

    Calls angleCalculate() on the key points; an angle strictly between 0
    and 170 is reported as pronation, one above 190 as supination, and
    everything else (including the 0 failure sentinel) as normal.

    Parameters
    ----------
    points : numpy.array
        Pixel coordinates of the key points.

    Returns
    -------
    list of int
        The two computed angles.
    list of str
        The per-side classification.
    """
    angle = angleCalculate(points)
    prediction = []
    for value in angle:
        if 0 < value < 170:
            prediction.append("Pronation")
        elif value > 190:
            prediction.append("Supination")
        else:
            prediction.append("Normal")
    return angle, prediction
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request, FormRequest
from Scrapy_WDZJ.items import *
import datetime
from Scrapy_WDZJ.tools.strtools import *
import requests
from lxml import etree
import json
import math
import re
from scrapy.loader import ItemLoader
import time
import logging
class PlatformEvaluation_Part1Spider(scrapy.Spider):
    """Collect per-platform review counts (good/bad/neutral) from wdzj.com."""
    name = 'PlatformEvaluation_Part1'
    start_urls = ['https://www.wdzj.com/dangan/search?filter']

    def parse(self, response):
        """Fan out one review-list API request per platform id.

        :param response: response of the start url (only used as a trigger).
        :return: one Request per platform id in [40, 60).
        """
        for platId in range(40, 60):
            # Bugfix: the query string contained the mojibake "¤tPage="
            # (the HTML entity &curren; had swallowed "&curr" of
            # "&currentPage"), producing an invalid parameter name.
            url = ('https://www.wdzj.com/plat-center/platReview/getPlatReviewList'
                   '?platId={0}&currentPage=1&pageSize=2&orderType=0').format(platId)
            yield Request(url, meta={'platId': platId}, callback=self.parse_PlatformEvaluation_Part1)

    def parse_PlatformEvaluation_Part1(self, response):
        """Parse the review-list JSON payload into an item."""
        j_data = json.loads(response.body)
        evaluation = j_data['data']['platReviewEvaluation']
        item = PlatformEvaluation_Part1()
        item['platId'] = response.meta['platId']
        item['good'] = evaluation['good']
        item['bad'] = evaluation['bad']
        item['midd'] = evaluation['midd']
        yield item
|
from math import log
def term_frequency(term, doc):
    """Return the sublinearly scaled term frequency of *term* in *doc*.

    Computes 1 + log(count(term) / len(doc)). Returns 0 when the term does
    not occur, and — bugfix — also when *doc* is empty (previously this
    raised ZeroDivisionError).
    """
    if not doc:
        return 0
    res = doc.count(term) / len(doc)
    if not res:
        return 0
    return 1 + log(res)
def tf_idf(term, doc, idf):
    """TF-IDF weight: sublinear term frequency scaled by the given *idf*."""
    tf = term_frequency(term, doc)
    return tf * idf
def doc_len(doc):
    """Length of *doc* counting only non-space elements."""
    return sum(1 for token in doc if token != ' ')
def BM25(term, doc, idf, params=None, L=0):
    """Simplified BM25 score of *term* against *doc*.

    params: (k1, b) saturation / length-normalization constants; defaults to
    (2, 0.75). Bugfix: the previous mutable default argument ([2, 0.75]) was
    shared across calls; passing a list still works.
    L: document length; computed from *doc* when left at 0.
    """
    if params is None:
        params = (2, 0.75)
    tf = term_frequency(term, doc)
    if L == 0:
        L = doc_len(doc)
    return tf * idf / (tf + params[0] * (params[1] + L * (1 - params[1])))
def count_inverse(query, passage):
    """Count pairs of passage terms whose order disagrees with *query*.

    *passage* is a sequence of [term, position] pairs; each term is mapped
    to its index in *query* and the number of inversions is returned.
    """
    positions = [query.index(entry[0]) for entry in passage]
    inversions = 0
    for j in range(1, len(positions)):
        for i in range(j):
            if positions[i] > positions[j]:
                inversions += 1
    return inversions
def passage_tfidf(passage, doc, idfs):
    """Sum of the tf-idf scores of the passage terms against *doc*."""
    total = 0
    for k in range(len(passage)):
        total += tf_idf(passage[k][0], doc, idfs[k])
    return total
def passage_algorithm(doc, terms, parameters):
    """Find the best-scoring passage of *doc* covering the query *terms*.

    Returns (score, passage_tokens) where passage_tokens is the slice of
    *doc* between the first and last matched term of the winning passage,
    or [0, []] when no query term occurs in the document.
    """
    #Extract passages: every time a query term is seen, snapshot the latest
    #known position of each term seen so far.
    # NOTE(review): `passage` is never reset, so each snapshot carries (and
    # overwrites) positions from earlier in the document — confirm this
    # accumulate-and-overwrite behaviour is intended.
    passages = []
    passage = {}
    for inddocterm in range(len(doc)):
        if doc[inddocterm] in terms:
            passage[doc[inddocterm]] = inddocterm
            passages.append([[i, passage[i]] for i in passage.keys()])
    #Compute metrics
    L = float(doc_len(doc))
    m_value = 0
    if len(passages) == 0:
        return [0, []]
    best_pas = passages[0]
    for passage in passages:
        metric = []
        #metric.append(passage_tfidf(passage, doc, idfs))
        # Fraction of the query terms covered by this passage.
        metric.append(len(passage) / len(terms))
        # Earlier first-match positions score higher.
        metric.append(1 -min(passage, key=lambda x: x[1])[1] / L)
        # Narrower spans (first to last matched term) score higher.
        metric.append(1 -(max(passage, key=lambda x: x[1])[1] - min(passage, key=lambda x: x[1])[1]) / L)
        # Weighted combination of the metrics.
        val = sum([metric[i] * parameters[i] for i in range(len(metric))])
        if m_value < val:
            m_value = val
            best_pas = passage
    # Return the document slice spanned by the best passage.
    lit = min(best_pas, key=lambda x: x[1])[1]
    rit = max(best_pas, key=lambda x : x[1])[1] + 1
    return m_value, doc[lit:rit]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Spotify AB
import unittest
from graphwalker import tgf
class TestTGF(unittest.TestCase):
    """Unit tests for the Trivial Graph Format (TGF) deserializer."""
    # TGF fixture: vertex lines ("id label"), a lone "#" separator, then
    # edge lines ("from to label").
    data0 = """\
n0 Start
n1 v_a
n2 v_b
n3 v_c
#
n0 n1 e_zero
n1 n2 e_one
n2 n3 e_two
"""
    def test_example_abz(self):
        """deserialize() yields the vertex list and labelled edge tuples."""
        verts, edges = tgf.deserialize(self.data0)
        self.assertEqual(
            sorted(verts),
            [['n0', 'Start'],
             ['n1', 'v_a'],
             ['n2', 'v_b'],
             ['n3', 'v_c']])
        # Edges come back as (id, label, from, to) in file order.
        self.assertEqual(
            edges,
            [('e_zero', 'e_zero', 'n0', 'n1'),
             ('e_one', 'e_one', 'n1', 'n2'),
             ('e_two', 'e_two', 'n2', 'n3')])
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""SuperBench CLI command handler."""
from pathlib import Path
from knack.util import CLIError
from omegaconf import OmegaConf
import superbench
from superbench.runner import SuperBenchRunner
from superbench.executor import SuperBenchExecutor
from superbench.common.utils import create_sb_output_dir, get_sb_config
def check_argument_file(name, file):
    """Validate a file path given on the command line.

    Args:
        name (str): argument name (used in the error message).
        file (str): file path; falsy values are passed through untouched.

    Returns:
        str: Absolute file path if it exists (falsy input returned as-is).

    Raises:
        CLIError: If file does not exist.
    """
    if not file:
        return file
    path = Path(file)
    if not path.exists():
        raise CLIError('{} {} does not exist.'.format(name, file))
    return str(path.resolve())
def split_docker_domain(name):
    """Split a Docker image reference into registry domain and remainder.

    Ported from
    https://github.com/distribution/distribution/blob/v2.7.1/reference/normalize.go#L62-L76.

    Args:
        name (str): Docker image name.

    Returns:
        str: Docker registry domain.
        str: Remainder part.
    """
    legacy_default_domain = 'index.docker.io'
    default_domain = 'docker.io'
    slash = name.find('/')
    prefix = name[:slash]
    # Without a slash, or when the first component has no '.'/':' and is not
    # "localhost", the image lives on the default registry.
    if slash == -1 or ('.' not in prefix and ':' not in prefix and prefix != 'localhost'):
        domain = default_domain
        remainder = name
    else:
        domain = prefix
        remainder = name[slash + 1:]
    if domain == legacy_default_domain:
        domain = default_domain
    # Bare official images get the implicit "library/" namespace.
    if domain == default_domain and '/' not in remainder:
        remainder = 'library/{}'.format(remainder)
    return domain, remainder
def process_config_arguments(config_file=None, config_override=None, output_dir=None):
    """Resolve the SuperBench configuration and output directory.

    Args:
        config_file (str, optional): Path to SuperBench config file. Defaults to None.
        config_override (str, optional): Hydra-style dotlist overrides applied
            on top of config_file
            ([Hydra syntax](https://hydra.cc/docs/advanced/override_grammar/basic)).
            Defaults to None.
        output_dir (str, optional): Path to output directory. Defaults to None.

    Returns:
        DictConfig: SuperBench config object.
        str: Dir for output.

    Raises:
        CLIError: If input arguments are invalid.
    """
    config_file = check_argument_file('config_file', config_file)
    sb_config = get_sb_config(config_file)
    # Dotlist overrides win over values loaded from the file.
    if config_override:
        sb_config = OmegaConf.merge(sb_config, OmegaConf.from_dotlist(config_override))
    return sb_config, create_sb_output_dir(output_dir)
def process_runner_arguments(
    docker_image='superbench/superbench',
    docker_username=None,
    docker_password=None,
    host_file=None,
    host_list=None,
    host_username=None,
    host_password=None,
    private_key=None,
    output_dir=None,
    config_file=None,
    config_override=None
):
    """Process runner related arguments.

    Args:
        docker_image (str, optional): Docker image URI. Defaults to superbench/superbench:latest.
        docker_username (str, optional): Docker registry username if authentication is needed. Defaults to None.
        docker_password (str, optional): Docker registry password if authentication is needed. Defaults to None.
        host_file (str, optional): Path to Ansible inventory host file. Defaults to None.
        host_list (str, optional): Comma separated host list. Defaults to None.
        host_username (str, optional): Host username if needed. Defaults to None.
        host_password (str, optional): Host password or key passphase if needed. Defaults to None.
        private_key (str, optional): Path to private key if needed. Defaults to None.
        output_dir (str, optional): Path to output directory. Defaults to None.
        config_file (str, optional): Path to SuperBench config file. Defaults to None.
        config_override (str, optional): Extra arguments to override config_file,
            following [Hydra syntax](https://hydra.cc/docs/advanced/override_grammar/basic). Defaults to None.

    Returns:
        DictConfig: SuperBench config object.
        DictConfig: Docker config object.
        DictConfig: Ansible config object.
        str: Dir for output.

    Raises:
        CLIError: If input arguments are invalid.
    """
    # Docker credentials must come as a pair — one without the other is a mistake.
    if bool(docker_username) != bool(docker_password):
        raise CLIError('Must specify both docker_username and docker_password if authentication is needed.')
    if not (host_file or host_list):
        raise CLIError('Must specify one of host_file or host_list.')
    host_file = check_argument_file('host_file', host_file)
    private_key = check_argument_file('private_key', private_key)
    # Docker config
    docker_config = OmegaConf.create(
        {
            'image': docker_image,
            'username': docker_username,
            'password': docker_password,
            # Registry domain derived from the image reference (docker.io by default).
            'registry': split_docker_domain(docker_image)[0],
        }
    )
    # Ansible config
    ansible_config = OmegaConf.create(
        {
            'host_file': host_file,
            'host_list': host_list,
            'host_username': host_username,
            'host_password': host_password,
            'private_key': private_key,
        }
    )
    sb_config, sb_output_dir = process_config_arguments(
        config_file=config_file,
        config_override=config_override,
        output_dir=output_dir,
    )
    return docker_config, ansible_config, sb_config, sb_output_dir
def version_command_handler():
    """Print the current SuperBench tool version.

    Returns:
        str: current SuperBench tool version.
    """
    # The version string is defined by the superbench package itself.
    return superbench.__version__
def exec_command_handler(config_file=None, config_override=None, output_dir=None):
    """Run the SuperBench benchmarks on the local node.

    Args:
        config_file (str, optional): Path to SuperBench config file. Defaults to None.
        config_override (str, optional): Hydra-style overrides applied on top
            of config_file
            ([Hydra syntax](https://hydra.cc/docs/advanced/override_grammar/basic)).
            Defaults to None.
        output_dir (str, optional): Path to output directory. Defaults to None.

    Raises:
        CLIError: If input arguments are invalid.
    """
    sb_config, sb_output_dir = process_config_arguments(
        config_file=config_file,
        config_override=config_override,
        output_dir=output_dir,
    )
    SuperBenchExecutor(sb_config, sb_output_dir).exec()
def deploy_command_handler(
    docker_image='superbench/superbench',
    docker_username=None,
    docker_password=None,
    host_file=None,
    host_list=None,
    host_username=None,
    host_password=None,
    output_dir=None,
    private_key=None
):
    """Deploy the SuperBench environments to all given nodes.

    Deploy SuperBench environments on all nodes, including:
    1. check drivers
    2. install required system dependencies
    3. install Docker and container runtime
    4. pull Docker image

    Args:
        docker_image (str, optional): Docker image URI. Defaults to superbench/superbench:latest.
        docker_username (str, optional): Docker registry username if authentication is needed. Defaults to None.
        docker_password (str, optional): Docker registry password if authentication is needed. Defaults to None.
        host_file (str, optional): Path to Ansible inventory host file. Defaults to None.
        host_list (str, optional): Comma separated host list. Defaults to None.
        host_username (str, optional): Host username if needed. Defaults to None.
        host_password (str, optional): Host password or key passphase if needed. Defaults to None.
        output_dir (str, optional): Path to output directory. Defaults to None.
        private_key (str, optional): Path to private key if needed. Defaults to None.

    Raises:
        CLIError: If input arguments are invalid.
    """
    # All argument validation happens inside process_runner_arguments; note
    # that no config_file/config_override is passed, so the default config is used.
    docker_config, ansible_config, sb_config, sb_output_dir = process_runner_arguments(
        docker_image=docker_image,
        docker_username=docker_username,
        docker_password=docker_password,
        host_file=host_file,
        host_list=host_list,
        host_username=host_username,
        host_password=host_password,
        output_dir=output_dir,
        private_key=private_key,
    )
    runner = SuperBenchRunner(sb_config, docker_config, ansible_config, sb_output_dir)
    runner.deploy()
def run_command_handler(
    docker_image='superbench/superbench',
    docker_username=None,
    docker_password=None,
    host_file=None,
    host_list=None,
    host_username=None,
    host_password=None,
    output_dir=None,
    private_key=None,
    config_file=None,
    config_override=None
):
    """Run the SuperBench benchmarks distributedly on all given nodes.

    Args:
        docker_image (str, optional): Docker image URI. Defaults to superbench/superbench:latest.
        docker_username (str, optional): Docker registry username if authentication is needed. Defaults to None.
        docker_password (str, optional): Docker registry password if authentication is needed. Defaults to None.
        host_file (str, optional): Path to Ansible inventory host file. Defaults to None.
        host_list (str, optional): Comma separated host list. Defaults to None.
        host_username (str, optional): Host username if needed. Defaults to None.
        host_password (str, optional): Host password or key passphase if needed. Defaults to None.
        output_dir (str, optional): Path to output directory. Defaults to None.
        private_key (str, optional): Path to private key if needed. Defaults to None.
        config_file (str, optional): Path to SuperBench config file. Defaults to None.
        config_override (str, optional): Extra arguments to override config_file,
            following [Hydra syntax](https://hydra.cc/docs/advanced/override_grammar/basic). Defaults to None.

    Raises:
        CLIError: If input arguments are invalid.
    """
    docker_config, ansible_config, sb_config, sb_output_dir = process_runner_arguments(
        docker_image=docker_image,
        docker_username=docker_username,
        docker_password=docker_password,
        host_file=host_file,
        host_list=host_list,
        host_username=host_username,
        host_password=host_password,
        output_dir=output_dir,
        private_key=private_key,
        config_file=config_file,
        config_override=config_override,
    )
    SuperBenchRunner(sb_config, docker_config, ansible_config, sb_output_dir).run()
|
from django.conf.urls import url
from django.views.generic import (YearArchiveView, MonthArchiveView,
DayArchiveView)
from django.views.decorators.cache import cache_page
from news.models import Story
from news.views import StoryDetailView, StoryListView
# URL routes for the news app; list pages are cached for 15 minutes.
urlpatterns = [
    # Individual story detail page.
    url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/(?P<slug>[\w-]+)/$',
        StoryDetailView.as_view()),
    # Daily archive of live stories.
    url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/(?P<day>\w{1,2})/$',
        cache_page(60 * 15)(
        DayArchiveView.as_view(
            queryset=Story.live.all(),
            date_field="pub_date",
            template_name="news/story_archive_day.html",
            context_object_name="story_list",))),
    # Monthly archive of live stories.
    url(r'^(?P<year>\d{4})/(?P<month>[a-z]{3})/$',
        cache_page(60 * 15)(
        MonthArchiveView.as_view(
            queryset=Story.live.all(),
            date_field="pub_date",
            template_name="news/story_archive_month.html",
            context_object_name="story_list",))),
    # Yearly archive; make_object_list exposes the stories, not just dates.
    url(r'^(?P<year>\d{4})/$', cache_page(60 * 15)(YearArchiveView.as_view(
        queryset=Story.live.all(),
        date_field="pub_date",
        template_name="news/story_archive_year.html",
        context_object_name="story_list",
        make_object_list=True,
    ))),
    # Front page: latest stories.
    url(r'^$', cache_page(60 * 15)(StoryListView.as_view())),
]
|
#Write a program to implement the concept of class and object creation.
# NOTE: no docstring is added to Person on purpose — Person.__doc__ is
# printed below and must remain None.
class Person:
    age = 10

    def greet(self):
        print('Hello')

# Access the class attribute, the method object and the (absent) docstring
# directly through the class.
print(Person.age)
print(Person.greet)
print(Person.__doc__)

# Redefine the class, then access greet both as an unbound function (via the
# class) and as a bound method (via an instance).
class Person:
    age = 10

    def greet(self):
        print('Hello')

harry = Person()
print(Person.greet)
print(harry.greet)
harry.greet()
import os
import sys
import json
import ast
import random
import time
import uuid
from urlparse import parse_qs, urlparse
if(os.path.isfile('last_record.txt') ):
f=open('last_record.txt','r')
var=f.read().split(',')
last_record=var[0]
counter=int(var[1])
numberofRecords=int(var[2])
f.close()
else:
last_record=1
counter=0
print type(counter)
print counter
count=0
#Creating Payload
pdata={}
payload={}
sensorName="OBD-II"
sensorID=str(uuid.uuid4())
sensorType="Analog"
sensorTimeStamp=int(time.time())
payload['DeviceName']=sensorName
payload['DeviceID']=sensorID
payload['DeviceType']=sensorType
payload['DeviceTimeStamp']=sensorTimeStamp
with open('webhook_output.json') as data_file:
data = json.load(data_file)
#last_record=data['last_cursor']
extract_data=data['items']
for row in extract_data[counter:]:
var="/?"
var=var+row['query']
print "-\n"
dat=parse_qs(urlparse(var).query, keep_blank_values=True)
payload['unique_id']=str(uuid.uuid4())
payload['session']=dat['session'][0]
payload['email']=dat['eml'][0]
payload['id']=dat['id'][0]
payload['time']=dat['time'][0]
if 'kff1005' not in dat:
count=count+1
continue;
payload['Longitude']=dat['kff1005'][0]
payload['Latitude']=dat['kff1006'][0]
payload['GPSSpeed']=dat['kff1001'][0]
payload['kff1007']=dat['kff1007'][0]
payload['Acceleration']=dat['kff1223'][0]
payload['Costpermile/km']=dat['kff126d'][0]
payload['CO2ing/km']=dat['kff1258'][0]
payload['Distancetoempty']=dat['kff126a'][0]
payload['k4']=dat['k4'][0]
payload['kc']=dat['kc'][0]
pdata['DevicePulse']=payload
print json.dumps(pdata)
CurlString=""
os.system(CurlString)
#time.sleep(5)
counter=counter+int(numberofRecords)
fo=open("last_record.txt","w")
fo.write(str(last_record)+","+str(counter)+","+str(numberofRecords))
fo.close()
print "\n",count,"Done"
|
#! /usr/bin/env python2
"""
Demonstrates how to hide passwords in logging messages, based on a tip in https://stackoverflow.com/questions/48380452/mask-out-sensitive-information-in-python-log
"""
import re
import sys
import logging
class SensitiveFormatter(logging.Formatter):
    """Logging formatter that masks password values in formatted records."""

    # Matches PASSWD / PASSWORD (any case) followed by an optional quote,
    # separators, optional (u-prefixed) opening quote, and captures the value.
    regexp = re.compile('PASSW(?:OR)?D[\'"]?[ :\t=]*(?:(?:u?[\'"])?)([-a-zA-Z0-9!@#$%^&*()_+=[\]\\|;:,./<>?]+)', re.IGNORECASE)

    @classmethod
    def _filter(cls, s):
        # Replace matches right-to-left so earlier match offsets stay valid.
        for hit in reversed(list(cls.regexp.finditer(s))):
            sys.stderr.write('Processing {groups} at {start}:{end}\n'.format(
                groups=hit.groups(),
                start=hit.start(1),
                end=hit.end(1),
            ))
            s = s[:hit.start(1)] + '*' * 8 + s[hit.end(1):]
        return s

    def format(self, record):
        # Format normally, then scrub the rendered message.
        rendered = super(SensitiveFormatter, self).format(record)
        return self._filter(rendered)
LOG_FORMAT = '%(asctime)s %(levelname)s %(pathname)s:%(lineno)d %(msg)s'
logging.basicConfig(format=LOG_FORMAT)
log = logging.getLogger()
# Swap the masking formatter onto every root handler installed by basicConfig.
for handler in logging.root.handlers:
    handler.setFormatter(SensitiveFormatter(LOG_FORMAT))
log.setLevel(logging.DEBUG)
# Demo payloads: passwords inside dicts, plain strings and nested unicode
# structures should all come out masked.
obj = {
    'password': 'secret',
    'list': [
        {
            'PASSWD': 'secret',
        },
    ],
}
logging.info(obj)
logging.info('Password=shhhhhh')
creds = {u'cimc': {u'login': {u'password': u'Foobar@123', u'user': u'admin'}, u'reason': u'Credentials from /etc/creds', u'scp': {u'ip': '192.200.0.1', u'password': u'Foobar@123', u'user': u'stack'}}, u'em': {u'login': {u'password': u'Foobar@123', u'user': u'ubuntu'}, u'reason': u'Credentials from /etc/creds'}, u'shell': {}, u'sosreport': {}, u'nexus': {u'login': {u'password': u'Foobar@123', u'user': u'admin'}, u'reason': u'Credentials from /etc/creds'}, u'uas': {u'login': {u'password': u'Foobar@123', u'user': u'ubuntu'}, u'reason': u'Credentials from /etc/creds'}, u'cf': {u'login': {u'password': u'Foobar@123', u'user': u'admin'}, u'reason': u'Credentials from /etc/creds'}, u'esc': {u'login': {u'password': u'Foobar@123', u'user': u'admin'}, u'reason': u'Credentials from /etc/creds'}}
logging.info('Credentials: {creds}'.format(**locals()))
|
import matplotlib.pyplot as plt

# Language-popularity pie chart demo.
slice_names = ["Python", "Java", "C#", "C"]
slice_sizes = [50, 90, 20, 14]
# plt.pie returns (patches, texts, autotexts) when autopct is given; the
# wedge patches ([0]) are reused as legend colour swatches.
color_swatches = plt.pie(
    slice_sizes,
    labels=slice_names,
    autopct="%1.1f%%",   # render each slice's percentage
    explode=[0.1, 0, 0, 0],   # pull the first slice out slightly
    shadow=True,
    colors=["#ff0000", "#00ff00", "#0000FF", "#FFFF00"]
)[0]
plt.legend(color_swatches, slice_names, loc="best")
plt.show()
|
from django.urls import path
from .views import *

app_name = 'core'
# Routes for the movie-voting app: listings, detail, voting, upload, top-10.
urlpatterns = [
    path('movies/', MovieList.as_view(), name='MovieList'),
    path('movie/<int:pk>', MovieDetail.as_view(), name='MovieDetail'),
    # First vote is a create; re-votes go through the update route.
    path('movie/<int:movie_id>/vote', CreateVote.as_view(), name='CreateVote'),
    path('movie/<int:movie_id>/vote/<int:pk>', UpdateVote.as_view(), name='UpdateVote'),
    path('movie/<int:movie_id>/upload', MovieUpload.as_view(), name='MovieUpload'),
    path('movies/top_10/', TopView.as_view(), name='Top10MoviesList')
]
from django.contrib import admin
from django.conf import settings

from .models import User

# Register the custom User model in the admin only when the project opts in
# via settings.EMAIL_MODEL_ADMIN; a missing setting means "don't register".
try:
    if settings.EMAIL_MODEL_ADMIN:
        admin.site.register(User)
except AttributeError:
    pass
|
from django.db import models
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
class EventMap(models.Model):
    """A named map grouping Event and Observer rows."""
    #id = models.AutoField(primary_key=True)
    name = models.CharField(max_length=256)

    def __str__(self):
        # Subject to change
        return "{}: {}".format(self.id, self.name)
def validate_lat(value):
    """Model-field validator: reject latitudes outside [-90, 90]."""
    if not -90 <= value <= 90:
        raise ValidationError(_('%(value)s is not a valid latitude'), params={'value':value},)
def validate_lon(value):
    """Model-field validator: reject longitudes outside [-180, 180]."""
    if not -180 <= value <= 180:
        raise ValidationError(_('%(value)s is not a valid longitude'), params={'value':value},)
class Event(models.Model):
    """A geolocated, time-bounded event belonging to an EventMap."""
    #id = models.AutoField(primary_key=True)
    # Coordinates are range-checked by the validators above.
    lon = models.DecimalField(max_digits=9, decimal_places=6, validators=[validate_lon])
    lat = models.DecimalField(max_digits=9, decimal_places=6, validators=[validate_lat])
    locname = models.CharField(max_length=256)  # human-readable location name
    title = models.CharField(max_length=256)
    desc = models.CharField(max_length=256)
    catlist = models.CharField(max_length=256) # catlist is not a list but a string
    stime = models.DateTimeField()  # start time
    to = models.DateTimeField()  # end time
    # presumably when the event is announced — TODO confirm with callers
    timetoann = models.DateTimeField()
    Map = models.ForeignKey(EventMap, on_delete=models.CASCADE)

    def __str__(self):
        # Subject to change
        return "{}: {}".format(self.id, self.title)
class Observer(models.Model):
    """A subscription: a session watching one category inside a bounding box
    (top-left / bottom-right corners) on a given map."""
    lon_topleft = models.DecimalField(max_digits=9, decimal_places=6, validators=[validate_lon])
    lat_topleft = models.DecimalField(max_digits=9, decimal_places=6, validators=[validate_lat])
    lon_botright = models.DecimalField(max_digits=9, decimal_places=6, validators=[validate_lon])
    lat_botright = models.DecimalField(max_digits=9, decimal_places=6, validators=[validate_lat])
    category = models.CharField(max_length=256)
    Map = models.ForeignKey(EventMap, on_delete=models.CASCADE)
    session = models.CharField(max_length=256)  # identifies the subscribing client
|
# coding: utf-8
# Notebook export: a hand-rolled logistic-regression text classifier on
# pickled "real" (r) vs "fake" (b) datasets.
# In[1]:
import numpy as np
import pickle
from numpy.linalg import norm
# In[2]:
import os
print(os.listdir("."))
# Each pickle holds a sequence of whitespace-separated text lines.
train_r = pickle.load(open("train_r.pkl", "rb"))
train_b = pickle.load(open("train_b.pkl", "rb"))
valid_r = pickle.load(open("valid_r.pkl", "rb"))
valid_b = pickle.load(open("valid_b.pkl", "rb"))
# In[3]:
from sklearn.linear_model import LogisticRegression
# In[4]:
# NOTE(review): `lr` is never used below — training is done with the
# hand-rolled gradient descent instead. Confirm it can be removed.
lr = LogisticRegression()
# In[5]:
print(valid_r[:3])
# In[6]:
# Labels: real = 1, fake = 0; X holds raw text, Y the labels.
one_label = np.ones(len(train_r))
zeros_label = np.zeros(len(train_b))
one_test = np.ones(len(valid_r))
zero_test = np.zeros(len(valid_b))
trainX = np.concatenate((train_r, train_b))
trainY = np.concatenate((one_label, zeros_label))
validX = np.concatenate((valid_r, valid_b))
validY = np.concatenate((one_test, zero_test))
print(validX[:3], len(validX))
# In[7]:
# generate feature vectors: build the vocabulary from the training set only
total_words = {}
for line in trainX:
    words = line.split(" ")
    for word in words:
        if word == "":
            continue
        try:
            total_words[word]
        except:
            total_words[word] = True
print(len(total_words.keys()))
#dont add words in valid and test sets
# In[8]:
#create training feats
#print(len(validX))
unique_keys = list(total_words.keys())
unique_keys.sort()
#print(unique_keys[:10])
#account for a bias
# One-hot bag-of-words: feats[i][j] = 1 iff vocabulary word j occurs in doc i.
# NOTE(review): unique_keys.index(word) is O(vocabulary) per lookup — a
# word->index dict would make these loops far faster; behaviour unchanged.
train_feats = np.zeros((len(trainX), len(total_words.keys()) ))
for i, line in enumerate(trainX):
    words = line.split(" ")
    #print(words)
    for word in words:
        if word =="":
            continue
        #print(unique_keys.index(word))
        ind = unique_keys.index(word)
        #plus one is so that the very first is the bias (here a bias of zero)
        train_feats[i][ind] = 1
#create testing features
valid_feats = np.zeros((len(validX), len(total_words.keys())))
for i , line in enumerate(validX):
    words = line.split(" ")
    #print(len(words), end = " ")
    #if i ==0:
    #print(words)
    #print(words)
    for word in words:
        if word =="":
            continue
        #account for value error: words unseen during training are skipped
        try:
            ind = unique_keys.index(word)
            #print(ind)
            valid_feats[i][ind] = 1
        except:
            continue
#inds = np.where(valid_feats ==1)[1]
#print("\n", inds[:34])
#logistic regression model from assignment 1.
def grad_descent(f, df, x, y, reg, init_t, alpha, EPS=1e-4):
    """Fixed-step gradient descent.

    f: loss function (unused here; kept for interface compatibility).
    df: gradient, called as df(x, y, t, reg).
    init_t: initial parameter vector (copied; not modified in place).
    alpha: learning rate.  EPS: stop when the parameter step is below this.
    Stops after at most 300 iterations and returns the final parameters.
    """
    print(reg)
    #EPS = 1e-4   #EPS = 10**(-5)
    prev_t = init_t-10*EPS
    t = init_t.copy()
    max_iter = 300
    # Renamed from `iter`, which shadowed the builtin.
    iteration = 0
    while norm(t - prev_t) > EPS and iteration < max_iter:
        prev_t = t.copy()
        t -= alpha*df(x, y, t, reg)
        if iteration % 50 == 0:
            print("Iter", iteration)
        iteration += 1
    print("Iter", iteration)
    return t
# In[75]:
def softmax(x):
    """Logistic sigmoid 1/(1+e^-x). (Historically named `softmax` in this
    file; it is the binary sigmoid, not the multi-class softmax.)"""
    return 1./(1+np.exp(-1*x))
#assume here that x is already flattened
def f_log(x, y, theta, reg):
    """L2-regularised logistic-regression loss.

    x: (n_samples, n_features) design matrix (bias column is added here).
    y: (n_samples,) binary labels. theta: (n_features+1,) with theta[0] bias.
    Returns cross-entropy + reg * sum(theta^2).
    """
    bias = np.ones( (1, x.shape[0])).T
    x = np.vstack( (bias.T, x.T))
    val = softmax(np.dot(theta.T, x))
    # Cross-entropy is -[y*log(p) + (1-y)*log(1-p)]; the previous code
    # computed (y - log(p)) for the first term, which is not a likelihood.
    vals = np.sum(-y*np.log(val) - (1.-y)*np.log(1-val)) + reg*np.sum(theta**2)
    return vals
def df_log(x, y, theta, reg):
    """Gradient of the L2-regularised logistic loss with respect to theta.

    Uses the identity grad = (sigmoid(theta.X) - y) . X^T + 2*reg*theta,
    where X is the design matrix with a leading bias row.
    """
    n_samples = x.shape[0]
    design = np.vstack((np.ones((1, n_samples)), x.T))
    residual = softmax(np.dot(theta.T, design)) - y
    grad = np.dot(residual, design.T) + 2. * reg * theta
    return grad
#return -2*np.sum((y-np.dot(theta.T, x))*x, 1) + 2.*reg*theta
# In[93]:
#assume here that x is already flattened
def f_l1(x, y, theta, reg):
    """L1-regularised logistic-regression loss (lasso flavour of f_log).

    Returns cross-entropy + reg * sum(|theta|).
    """
    bias = np.ones( (1, x.shape[0])).T
    x = np.vstack( (bias.T, x.T))
    val = softmax(np.dot(theta.T, x))
    # Two fixes: the data term used (y - log(p)) instead of -y*log(p), and the
    # L1 penalty summed raw (signed) weights instead of their absolute values.
    vals = np.sum(-y*np.log(val) - (1.-y)*np.log(1-val)) + reg*np.sum(np.abs(theta))
    return vals
def df_l1(x, y, theta, reg):
    """(Sub)gradient of the L1-regularised logistic loss w.r.t. theta."""
    bias = np.ones((1, x.shape[0])).T
    x = np.vstack((bias.T, x.T))
    val = softmax(np.dot(theta.T, x)) - y
    val = np.dot(val, x.T)
    # The subgradient of reg*sum(|theta|) is reg*sign(theta); the previous
    # `val += reg` pushed every weight the same way regardless of its sign.
    val += reg * np.sign(theta)
    return val
# In[76]:
np.random.seed(0)
#this is used to create the two differentn types of images, uncomment the one that you dont want
# Initial parameter vector: one weight per vocabulary word plus a bias term.
theta0 = np.random.random((len(unique_keys)+1))*5
theta0 = np.ones_like(theta0)
# In[77]:
# Train L2-regularised logistic regression (reg=0.15, step size 0.0005).
theta = grad_descent(f_log, df_log, train_feats, trainY,0.15, theta0, 0.0005, EPS=5e-4)
# In[78]:
#learning curve
# Prepend a bias column of ones so the dot product includes theta[0].
valid_data_ones = np.concatenate((np.ones((1, valid_feats.shape[0])).T, valid_feats), axis = 1)
preds = np.dot( valid_data_ones, theta)
print(valid_data_ones.shape, theta.shape, preds.shape)
preds_binary= np.zeros_like(preds)
#preds_binary[preds<0.5] = 0
# NOTE(review): `preds` are raw linear scores (no sigmoid applied), so the
# 0.5 cutoff is not a probability threshold — confirm this is intended.
preds_binary[preds>0.5] = 1
#print(preds)
# Validation accuracy: count of matching labels, then the matching fraction.
print(len(np.where(validY ==preds_binary)[0]), validY.shape[0])
print(len(np.where(validY ==preds_binary)[0])/ validY.shape[0])
#print(validY)
#l2error function
#logistic regression model from assignment 1.
def track_grad_descent(f, df, x, y,reg, init_t, alpha, EPS=1e-4):
    """Gradient descent that additionally records train/validation accuracy
    every `thresh` iterations.

    Note: `f` is accepted but never called (only the gradient `df` is used),
    and the accuracy bookkeeping reads the module-level globals
    train_feats / trainY / valid_feats / validY plus `norm` rather than
    taking them as parameters.

    Returns (theta, train_accuracy_history, valid_accuracy_history); the
    histories start with a dummy 0 entry for iteration 0.
    """
    #EPS = 1e-4 #EPS = 10**(-5)
    prev_t = init_t-10*EPS
    t = init_t.copy()
    #times = np.linspace()
    max_iter = 2500
    iter = 0
    thresh = 25
    iterations = np.arange(max_iter//thresh +1)*thresh
    learn_train = [0]#np.zeros_like(iterations)
    learn_valid = [0]#np.zeros_like(iterations)
    counter = 1
    # Stop when the parameter step drops below EPS or after max_iter updates.
    while norm(t - prev_t) > EPS and iter < max_iter:
        prev_t = t.copy()
        t -= alpha*df(x, y, t, reg)
        if iter % 500 == 0:
            print("Iter", iter)
        # Every `thresh` iterations, snapshot accuracy on train and validation
        # using the pre-update parameters (prev_t).
        if (iter%thresh==thresh-1):
            #train scores
            train_data_ones = np.concatenate((np.ones((1, train_feats.shape[0])).T, train_feats), axis = 1)
            preds = np.dot(train_data_ones, prev_t)
            preds_binary= np.zeros_like(preds)
            preds_binary[preds<0.5] = 0
            preds_binary[preds>0.5] = 1
            val = len(np.where(trainY ==preds_binary)[0])/ float(trainY.shape[0])
            print(val)
            learn_train.append(val)
            valid_data_ones = np.concatenate((np.ones((1, valid_feats.shape[0])).T, valid_feats), axis = 1)
            preds = np.dot( valid_data_ones, prev_t)
            preds_binary= np.zeros_like(preds)
            preds_binary[preds<0.5] = 0
            preds_binary[preds>0.5] = 1
            val =len(np.where(validY ==preds_binary)[0])/ float(validY.shape[0])
            print(val)
            learn_valid.append(val)
            #learn_valid[counter] = val
            #print(learn_valid)
            #counter +=1
        iter += 1
    print(learn_train, learn_valid)
    print("Iter", iter)
    return t, learn_train, learn_valid
# In[94]:
#%%time
#done with l2 regularization
np.random.seed(0)
theta0 = np.random.random((len(unique_keys)+1))
theta, train_learn, valid_learn = track_grad_descent(f_log, df_log, train_feats, trainY, 5, theta0, 0.0005, EPS=5e-4)
# In[98]:
import matplotlib
from matplotlib import pyplot as plt
# Global plot styling for the learning-curve figures below.
font = {'family' : 'normal',
        'weight' : 'bold',
        'size'   : 20}
#matplotlib.rc('text', usetex=True)
matplotlib.rc('font', **font)
# In[101]:
# NOTE(review): 1176 is the hard-coded iteration count where the L2 run
# stopped; it must match the length of train_learn/valid_learn — regenerate
# if training parameters change.
iterations = np.arange(1176//25 +1)*25
plt.figure(figsize = (10, 7))
plt.plot(iterations, train_learn, marker = ".", label = "Training Accuracy")
plt.plot(iterations, valid_learn, marker = ".", label = "Testing Accuracy")
plt.legend()
plt.show()
#print(train_learn)
# In[102]:
#%%time
#done with l1 regularization
np.random.seed(0)
theta0 = np.random.random((len(unique_keys)+1))
theta, train_learn, valid_learn = track_grad_descent(f_l1, df_l1, train_feats, trainY, 5, theta0, 0.0005, EPS=5e-4)
# In[103]:
# The L1 run hits the full 2500-iteration budget.
iterations = np.arange(2500//25 +1)*25
plt.figure(figsize = (10, 7))
plt.plot(iterations, train_learn, marker = ".", label = "Training Accuracy")
plt.plot(iterations, valid_learn, marker = ".", label = "Testing Accuracy")
plt.legend()
plt.show()
#print(train_learn)
# #write up part 5
# In[ ]:
# In[ ]:
# Its reasonable to use magnitude for this, since the different scale for the features have meaning, if one is larger, it means that it occured more often(native bayes method)
# For this one, the individual features are already normalized, since they're either zero if that word is not present, or are one if they are.
# In[ ]:
|
import sqlite3
class DBHelper():
    """Thin SQLite wrapper around a per-user `items` todo table.

    Each instance owns one connection; every mutating call commits
    immediately.
    """
    def __init__(self, name="todo.sqlite"):
        """Open (or create) the database file `name`."""
        self.name = name
        self.conn = sqlite3.connect(name)
    def create(self):
        """Create the items table and its two lookup indexes if missing."""
        statements = (
            'CREATE TABLE IF NOT EXISTS items (user text, description text)',
            'CREATE INDEX IF NOT EXISTS userIndex ON items (user ASC)',
            'CREATE INDEX IF NOT EXISTS itemIndex ON items (description ASC)',
        )
        for statement in statements:
            self.conn.execute(statement)
        self.conn.commit()
    def add_item(self, user, item):
        """Append one todo item for `user` (parameterised, injection-safe)."""
        self.conn.execute('INSERT INTO items (user, description) VALUES (?,?)', (user, item))
        self.conn.commit()
    def delete_item(self, user, item):
        """Delete every row matching this exact (user, description) pair."""
        self.conn.execute('DELETE FROM items WHERE user = (?) AND description = (?)', (user, item))
        self.conn.commit()
    def get_items(self, user):
        """Return the list of item descriptions stored for `user`."""
        rows = self.conn.execute('SELECT description FROM items WHERE user = (?)', (user, ))
        return [row[0] for row in rows]
|
from django.urls import path
from messenger_bot.views import MessengerWebhookAPIView
# URL routing for the messenger bot app: a single webhook endpoint that the
# messaging platform calls back into.
urlpatterns = [
    path('webhook/', MessengerWebhookAPIView.as_view(), name='messenger_bot_webhook'),
]
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomABACUser(AbstractUser):
    """Django user extended with the subject attributes consumed by the
    ABAC (attribute-based access control) Policy model below."""
    designation = models.CharField(blank=True, max_length=20)
    # NOTE(review): age is stored as free text (CharField), not an integer —
    # confirm that is intentional.
    age = models.CharField(blank=True, max_length=20)
    role = models.CharField(blank=True, max_length=120)
    organization = models.CharField(blank=True, max_length=120)
    # Location attributes, usable as subject/environment matching values.
    suburb = models.CharField(blank=True, max_length=120)
    city = models.CharField(blank=True, max_length=120)
    state = models.CharField(blank=True, max_length=120)
    country = models.CharField(blank=True, max_length=120)
    def __str__(self):
        return self.username
class Policy(models.Model):
    """One ABAC rule owned by a user. Each of subject / action / resource /
    environment is stored as a free-text (name, value) attribute pair that
    is matched during access evaluation."""
    user = models.ForeignKey(CustomABACUser, on_delete=models.CASCADE)
    policy_name = models.CharField(blank=True, null=True, max_length=1000)
    policy_description = models.CharField(blank=True, null=True, max_length=1000)
    policy_version = models.CharField(blank=True, null=True, max_length=1000)
    subject_name = models.CharField(blank=True, null=True, max_length=1000)
    subject_value = models.CharField(blank=True, null=True, max_length=1000)
    action_name = models.CharField(blank=True, null=True, max_length=1000)
    action_value = models.CharField(blank=True, null=True, max_length=1000)
    resource_name = models.CharField(blank=True, null=True, max_length=1000)
    resource_value = models.CharField(blank=True, null=True, max_length=1000)
    environment_name = models.CharField(blank=True, null=True, max_length=1000)
    environment_value = models.CharField(blank=True, null=True, max_length=1000)
    def __str__(self):
        # NOTE(review): policy_name is nullable, so str() of an unnamed
        # policy returns None and crashes e.g. the admin — consider `or ''`.
        return self.policy_name
# TRUE_FALSE_CHOICES = (
# (True, 'True'),
# (False, 'False')
# )
class Resource(models.Model):
    """A protected resource plus its current access-decision flags."""
    resource_name = models.CharField(blank=True, null=True, max_length=1000)
    # access/viewable hold the literal strings "True"/"False", not booleans
    # (see the commented-out TRUE_FALSE_CHOICES above) — compare as strings.
    access = models.CharField(default="False", max_length=10)
    viewable = models.CharField(default="False", max_length=10)
    policy = models.ForeignKey(Policy, blank=True, null=True, on_delete=models.CASCADE)
    def __str__(self):
        return self.resource_name
class ResourceDescription(models.Model):
    """Up to five free-text description slots for a Resource.

    One-to-one with Resource and shares its primary key, so each resource
    has at most one description record."""
    resource = models.OneToOneField(Resource, on_delete=models.CASCADE, primary_key=True)
    creator = models.ForeignKey(CustomABACUser, on_delete=models.CASCADE)
    resource_description_1 = models.CharField(blank=True, null=True, max_length=1000)
    resource_description_2 = models.CharField(blank=True, null=True, max_length=1000)
    resource_description_3 = models.CharField(blank=True, null=True, max_length=1000)
    resource_description_4 = models.CharField(blank=True, null=True, max_length=1000)
    resource_description_5 = models.CharField(blank=True, null=True, max_length=1000)
    def __str__(self):
        # Delegates to the owning resource's name (may be None, like Policy).
        return self.resource.resource_name
def solution(s):
    """Return the lexicographically largest word obtainable by taking the
    characters of *s* in order and placing each at either end of the result
    (the "Last Word" construction)."""
    word = s[0]
    for ch in s[1:]:
        # A character >= the current front maximises the word when prepended;
        # anything smaller goes to the back.
        word = ch + word if ch >= word[0] else word + ch
    return word
# Drive `solution` over the standard Code Jam file format: first line is the
# number of cases, each following line one input word.
# Fixes: the line's trailing '\n' used to be fed into solution() as if it were
# a character of the word (only producing separated output by accident), and
# the last line of a file without a final newline produced run-together output.
with open('input', 'r') as infile, open('output', 'w') as out:
    num_testcases = int(next(infile))
    for i in range(num_testcases):
        out.write("Case #{0}: {1}\n".format(i+1, solution(next(infile).strip())))
|
class TestData:
    """Static fixtures for the gidmenu.com UI tests: page URLs, login
    credentials, and the restaurant collection used to parametrise
    per-restaurant page checks."""
    HOME_PAGE_URL = 'http://www.gidmenu.com'
    BREAKFASTS_PAGE_URL = HOME_PAGE_URL + '/restaurants/zavtraki/'
    # Valid credentials plus deliberately-broken variants for negative tests.
    ACCOUNT = {
        'email': 'john.doe@mail.com',
        'password': 'Hilton101',
        'invalid_email': 'john.doe@mail.con',
        'invalid_password': 'Hilton102'
    }
    # Each entry: machine-readable name, direct menu URL, expected page title.
    RESTAURANTS_COLLECTION = [
        {
            'restaurant_name': 'PATRIARSHIE_PRUDY',
            'url': 'https://gidmenu.com/restaurants/patriarshie-prudy/?show_dish_rest_groups=N',
            'title': 'Патриаршие Пруды'
        },
        {
            'restaurant_name': 'MOSKVA_CITY',
            'url': 'https://gidmenu.com/restaurants/moskva-siti/?show_dish_rest_groups=N',
            'title': 'Москва сити',
        },
        {
            'restaurant_name': 'DEPO',
            'url': 'https://gidmenu.com/restaurants/depo/?show_dish_rest_groups=N',
            'title': 'Депо',
        },
        {
            'restaurant_name': 'TSENTRALNYY-RYNOK',
            'url': 'https://gidmenu.com/restaurants/tsentralnyy-rynok/?show_dish_rest_groups=N',
            'title': 'Центральный рынок',
        },
        {
            'restaurant_name': 'DANILOVSKIY-RYNOK',
            'url': 'https://gidmenu.com/restaurants/danilovskiy-rynok/?show_dish_rest_groups=N',
            'title': 'Даниловский рынок',
        },
        {
            'restaurant_name': 'GASTROFERMA-N1-NA-BAUMANSKOY',
            'url': 'https://gidmenu.com/restaurants/gastroferma-n1-na-baumanskoy/?show_dish_rest_groups=N',
            'title': '«Гастроферма N1» на Бауманской',
        },
        {
            'restaurant_name': 'GASTROMARKET-VOKRUG-SVETA-NA-NIKOLSKOY',
            'url': 'https://gidmenu.com/restaurants/gastromarket-vokrug-sveta-na-nikolskoy/?show_dish_rest_groups=N',
            'title': 'Гастромаркет «Вокруг света»',
        },
        {
            'restaurant_name': 'RYNOK-NA-MAROSEYKE',
            'url': 'https://gidmenu.com/restaurants/rynok-na-maroseyke/?show_dish_rest_groups=N',
            'title': 'Рынок на Маросейке',
        },
        {
            'restaurant_name': 'USACHEVSKIY-RYNOK',
            'url': 'https://gidmenu.com/restaurants/usachevskiy-rynok/?show_dish_rest_groups=N',
            'title': 'Усачевский рынок',
        },
    ]
    # Convenience list of just the names, e.g. for pytest parametrize ids.
    LIST_OF_RESTAURANTS_NAMES = [x['restaurant_name'] for x in RESTAURANTS_COLLECTION]
    # Expected login-failure banner ("Invalid login or password.", Russian).
    LOGIN_ERROR_MESSAGE_TEXT = 'Неверный логин или пароль.'
|
import logging
logging.basicConfig(filename='app.log', filemode='a',format='%(asctime)s - %(message)s', level=logging.INFO)
# logging.info('Admin logged in')
def print_log(*para):
    """Echo the arguments to stdout exactly like print() and mirror the same
    line to the configured log file via logging.info()."""
    content = ' '.join('%s' % item for item in para)
    print(content)
    logging.info(content)
if __name__ =="__main__":
# ham goc
print(1,2,'nghiahsgs')
# ham thay the
print_log(1,2,'nghiahsgs')
|
import re
# Matches "Policeman" or "Policewoman": the optional group (wo) captures the
# feminine infix when present, otherwise group(1) is None.
copRegex = re.compile(r'Police(wo)?man')
sentence = 'Policewoman and Policeman bad.'
# search() returns the first (leftmost) match only.
found = copRegex.search(sentence)
print('The sentence is: "' + sentence +'"')
if found is not None:  # idiomatic identity test instead of `!= None`
    print(f'Found word: {found.group()}')
    print(f'found.group(1) = {found.group(1)}')
else:
    print('Ne e pronajden zbor.')
|
from chess.agent import BaseAgent
from chess.pieces import Empty
from chess.board import Board
class HumanAgent(BaseAgent):
    """A human agent."""
    def policy(self, game):
        """Prompt the user for a move and validate it against the board.

        Returns a (from, to) coordinate pair on success, or None for any
        invalid input so the caller can re-prompt.
        """
        print()
        str_from = input('Move from : ')
        str_to = input('Move to : ')
        print()
        # Board.translate maps user notation to a board coordinate; the check
        # below implies it returns None for unparsable input.
        from_ = Board.translate(str_from)
        to = Board.translate(str_to)
        if from_ is None or to is None:
            print("Invalid move...\n")
            return None
        if isinstance(game.board[from_], Empty):
            print("Not a piece...\n")
            return None
        # NOTE(review): this only triggers if board lookup yields None for
        # off-board squares; empty squares were already rejected above.
        if game.board[from_] is None:
            print("Not on the board...\n")
            return None
        # The chosen destination must be among the piece's legal moves.
        if to not in list(game.board[from_].moves()):
            print("Can't move this piece to here...\n")
            return None
        # Reject moving the opponent's piece.
        if game.board[from_].color != self.color:
            print('Not this players turn...\n')
            return None
        return from_, to
|
#!/usr/bin/env python
import os
import time
# Device: Pipe/0/ppl
PRESS_DELAY = 0.025
TILT_DELAY = 0.05
HOLD_DELAY = 0.8
NAMED_PIPE = os.path.expanduser('~/Library/Application Support/Dolphin/Pipes/ppl')
PIPE = None
def initialize():
    # Create the FIFO on first use and open it for writing; Dolphin reads
    # controller commands from this named pipe.
    global PIPE
    print '[controller.py] Initializing named pipe writer...'
    if not os.path.exists(NAMED_PIPE):
        os.mkfifo(NAMED_PIPE)
    PIPE = open(NAMED_PIPE, 'w')
##### Call these #####
# One thin wrapper per GameCube pad input: buttons tap via press(), the
# analog sticks deflect via the direction helpers defined below.
def A(): press('A')
def B(): press('B')
def X(): press('X')
def Y(): press('Y')
def Z(): press('Z')
def START(): press('START')
def L(): press('L')
def R(): press('R')
def D_UP(): press('D_UP')
def D_DOWN(): press('D_DOWN')
def D_RIGHT(): press('D_RIGHT')
def D_LEFT(): press('D_LEFT')
def MAIN_UP(): up('MAIN')
def MAIN_DOWN(): down('MAIN')
def MAIN_LEFT(): left('MAIN')
def MAIN_RIGHT(): right('MAIN')
# Unprefixed direction aliases default to the MAIN stick.
def UP(): MAIN_UP()
def DOWN(): MAIN_DOWN()
def LEFT(): MAIN_LEFT()
def RIGHT(): MAIN_RIGHT()
def C_UP(): up('C')
def C_DOWN(): down('C')
def C_LEFT(): left('C')
def C_RIGHT(): right('C')
# MASH_* variants hold the deflection longer than the default TILT_DELAY.
def MASH_UP(): up('MAIN', .35)
def MASH_DOWN(): down('MAIN', .35)
def MASH_RIGHT(): right('MAIN', .25)
def MASH_LEFT(): left('MAIN', .25)
##### End ######
# Stick coordinates are in [0, 1] with (.5, .5) meaning centred.
def up(stick, delay=TILT_DELAY): tilt(stick, .5, 1, delay)
def down(stick, delay=TILT_DELAY): tilt(stick, .5, 0, delay)
def left(stick, delay=TILT_DELAY): tilt(stick, 0, .5, delay)
def right(stick, delay=TILT_DELAY): tilt(stick, 1, .5, delay)
def press(button):
    # Tap `button`: write PRESS, pause, write RELEASE, pause again so
    # Dolphin registers both edges.
    for line in ('PRESS %s\n' % button, 'RELEASE %s\n' % button):
        PIPE.write(line)
        PIPE.flush()
        time.sleep(PRESS_DELAY)
def tilt(stick, x, y, delay=TILT_DELAY):
    # Deflect `stick` to (x, y), hold it for `delay`, then recentre.
    deflect = 'SET %s %.1f %.1f\n' % (stick, x, y)
    recentre = 'SET %s .5 .5\n' % stick
    PIPE.write(deflect)
    PIPE.flush()
    time.sleep(delay)
    PIPE.write(recentre)
    PIPE.flush()
    # Only the default delay also pauses after recentring; custom delays
    # (the MASH_* helpers) return immediately.
    if delay == TILT_DELAY:
        time.sleep(delay)
def holdR():
    # Hold the R trigger for HOLD_DELAY seconds (much longer than press()).
    PIPE.write('PRESS R\n')
    PIPE.flush()
    time.sleep(HOLD_DELAY)
    PIPE.write('RELEASE R\n')
    PIPE.flush()
    time.sleep(HOLD_DELAY)
def shutdown():
    # Close the pipe once no more commands will be sent.
    PIPE.close()
    print '[controller.py] Closed named pipe writer.'
def test():
    # Smoke test: open the pipe, tap A, close.
    initialize()
    A()
    shutdown()
if __name__ == '__main__':
    test()
|
import io
import os
import numpy as np
import tensorflow as tf
from datetime import datetime
import tensorflow.keras as keras
# from PIL import Image
# from sklearn.metrics import roc_auc_score
# import PIL.Image
# import cv2
from config import config
from utils.dataset import get_dataset
from utils.custom_model import MySequential
from utils.custom_metrics import EuclideanDistanceMetric
import yaml
import math
import argparse
# Fixing seeds in order to fix this problem: https://stackoverflow.com/questions/48979426/keras-model-accuracy-differs-after-loading-the-same-saved-model
from numpy.random import seed
seed(42) # keras seed fixing
tf.random.set_seed(42) # tensorflow seed fixing
os.makedirs(config.SAVE_DIR, exist_ok=True)
os.makedirs(config.LOG_DIR, exist_ok=True)
def train(args):
    """Fine-tune an ImageNet-pretrained backbone on the dataset described by
    `<dataset_dir>/dataset_info.yml`.

    The backbone is chosen by substring match on args.network, a fresh dense
    head is attached (linear for regression, sigmoid for classification), and
    the whole network is trained with RMSprop, checkpointing every epoch and
    logging to TensorBoard.
    """
    training_net = args.network
    batch_size = args.batch_size
    # (substring key, input size, constructor, forced batch size or None).
    # The original code was a cascade of independent `if key in training_net`
    # blocks, so a LATER match overrode an earlier one; the loop below keeps
    # that behaviour. The 'resetNetV2' typo is preserved deliberately: it is
    # the runtime matching key users pass on the command line.
    backbones = [
        ('EfficientNetB3', 320, tf.keras.applications.EfficientNetB3, None),
        ('NASNetMobile', 224, tf.keras.applications.NASNetMobile, None),
        ('DenseNet201', 224, tf.keras.applications.densenet.DenseNet201, None),
        ('DenseNet121', 224, tf.keras.applications.densenet.DenseNet121, None),
        ('DenseNet169', 224, tf.keras.applications.densenet.DenseNet169, None),
        ('NASNetLarge', 331, tf.keras.applications.NASNetLarge, 6),
        ('resnet152', 224, tf.keras.applications.ResNet152, None),
        ('Xception', 299, tf.keras.applications.Xception, None),
        ('inception-resnet', 299, tf.keras.applications.InceptionResNetV2, None),
        ('resetNetV2', 224, tf.keras.applications.ResNet152V2, None),
        ('mobilenet_v2', 224, tf.keras.applications.MobileNetV2, None),
        ('vgg19', 224, tf.keras.applications.VGG19, None),
        ('EfficientNetB0', 224, tf.keras.applications.EfficientNetB0, None),
    ]
    selected = None
    for key, size, factory, forced_batch in backbones:
        if key in training_net:
            selected = (size, factory, forced_batch)
    if selected is None:
        # Previously an unknown name crashed later with NameError on base_model.
        raise ValueError('Unknown network architecture: %s' % training_net)
    IMG_SIZE, model_factory, forced_batch = selected
    if forced_batch is not None:
        # NASNetLarge is too big for the configured batch size.
        batch_size = forced_batch
    IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
    base_model = model_factory(input_shape=IMG_SHAPE,
                               include_top=False,
                               weights='imagenet')
    img_height = img_width = IMG_SIZE
    # Dataset metadata (split sizes, task type, label count) from YAML.
    data_info_file = os.path.join(args.dataset_dir, 'dataset_info.yml')
    with open(data_info_file) as file:
        dataset_info = yaml.load(file, Loader=yaml.FullLoader)
    num_train_entries = dataset_info['train_size']
    num_val_entries = dataset_info['validation_size']
    regression = dataset_info['regression']
    num_labels = dataset_info['num_labels']
    # Propagate preprocessing options to the input pipeline.
    dataset_info['img_width'] = img_width
    dataset_info['img_height'] = img_height
    dataset_info['zero_norm'] = args.normalization_zero
    # base_model.summary()
    # Attach a fresh head: global average pooling + one dense layer.
    last_layer = base_model.layers[-1]  # layer that you want to connect your new FC layer to
    global_average_layer = tf.keras.layers.GlobalAveragePooling2D()(last_layer.output)
    if regression:  # regression: raw linear outputs
        new_top_layer = tf.keras.layers.Dense(num_labels)(global_average_layer)
    else:  # classification: independent sigmoid per label
        new_top_layer = tf.keras.layers.Dense(num_labels, activation='sigmoid')(global_average_layer)
    # Checkpoint after every epoch, tagged with network/task/epoch number.
    filepath = "./models" + "/saved-model-" + training_net + "-" + args.task_key + "-{epoch:02d}.hdf5"
    model_checkpoint = keras.callbacks.ModelCheckpoint(filepath, save_freq='epoch', verbose=1)
    logs = "logs/" + training_net + '-' + args.task_key + '-' + datetime.now().strftime("%Y%m%d-%H%M%S")
    config.file_writer = tf.summary.create_file_writer(logs)
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logs, write_graph=True,
                                                       update_freq='epoch')
    model = tf.keras.models.Model(base_model.input, new_top_layer)
    # model.summary()
    model = MySequential(model)
    model.trainable = True  # fine-tune the whole backbone, not just the head
    model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=args.lr),
                  loss=tf.keras.losses.BinaryCrossentropy() if not regression else tf.keras.losses.MeanSquaredError(),
                  metrics=['accuracy', tf.keras.metrics.AUC(num_thresholds=10000)] if not regression else EuclideanDistanceMetric(is_training=True))
    # model.summary()
    # one, get training and validation data
    train_ds = get_dataset(args.dataset_dir, 'train', batch_size, dataset_info, config.file_writer,
                           add_image_summaries=True)
    val_ds = get_dataset(args.dataset_dir, 'dev', batch_size, dataset_info, config.file_writer)
    history = model.fit_generator(train_ds,
                                  steps_per_epoch=math.ceil(num_train_entries / batch_size),
                                  epochs=args.epochs,
                                  validation_data=val_ds,
                                  validation_steps=math.ceil(num_val_entries / batch_size),
                                  callbacks=[model_checkpoint, tensorboard_callback])
def main():
    """Parse command-line options and kick off training."""
    parser = argparse.ArgumentParser(description='Process some integers.')
    add = parser.add_argument
    add('-i', '--dataset_dir',
        default='/home/hassan/ClusterGPU/data_GPU/hassan/REFUGE2/data/'
                'refuge_data/datasets_fovea/',
        help='directory containing the tf records')
    add('-o', '--network', default='DenseNet169',
        help='The network architecture to use in the training')
    add('-tk', '--task_key', default='Localisation',
        help='The key to use for generated files/folders')
    add('-ni', '--normalization_zero', action='store_true',
        help='Zero normalization on batch of images')
    add('-n', '--batch_size', type=int, default=10,
        help='batch size to update the weights')
    add('-ep', '--epochs', type=int, default=100,
        help='Number of epoch to train the dataset')
    add('-lrate', '--lr', type=float, default=0.00001,
        help='Number of epoch to train the dataset')
    train(parser.parse_args())
if __name__ == '__main__':
    main()
|
from .._tier0 import execute
from .._tier0 import plugin_function
from .._tier0 import Image
@plugin_function
def copy_horizontal_slice(source : Image, destination : Image = None, slice_index : int = 0) -> Image:
    """This method has two purposes:
    It copies a 2D image to a given slice y position in a 3D image stack or
    It copies a given slice at position y in an image stack to a 2D image.

    The direction is chosen from the dimensionality of `destination`:
    a 3D destination receives the 2D `source` at row `slice_index`;
    otherwise the given slice of the 3D `source` is extracted into 2D.

    Parameters
    ----------
    source : Image
    destination : Image, optional
    slice_index : Number, optional

    Returns
    -------
    destination

    Examples
    --------
    >>> import pyclesperanto_prototype as cle
    >>> cle.copy_horizontal_slice(source, destination, slice_index)

    References
    ----------
    .. [1] https://clij.github.io/clij2-docs/reference_copySlice
    """
    # Kernel arguments; slice_index is coerced to int for the OpenCL kernel.
    parameters = {
        "dst":destination,
        "src":source,
        "slice":int(slice_index)
    }
    if (len(destination.shape) == 3):
        execute(__file__, 'copy_horizontal_slice_to_3d_x.cl', 'copy_horizontal_slice_to_3d', [1, source.shape[0], source.shape[1]], parameters)
    else:
        execute(__file__, 'copy_horizontal_slice_from_3d_x.cl', 'copy_horizontal_slice_from_3d', destination.shape, parameters)
    return destination
|
import sys
import numpy as np
from atom import atom
from msa import msa
from protein import protein
import common.commp as cp
def addscore(msacol, sm):
    # Accumulate per-amino-acid counts for one MSA column into `sm`,
    # counting only the standard residue codes listed in cp.aa201.
    for residue in msacol:
        code = residue.upper()
        #if aa.upper() not in cp.abaa:
        if code in cp.aa201:
            sm[code] += 1
# check whether there is unmatched residue in the map file
def validncg(ncg, rtmap):
    # A contact group is usable only if every atom has a standard residue
    # name, a non-negative residue number, and a resolved MSA column.
    for a in ncg:
        if a.resName.upper() in cp.abaa or a.resSeq <0:
            return False
        # Residue key, e.g. 'A9' = chain A, residue 9.
        k = '%s%d' % (a.chainID, a.resSeq)
        # rtmap values look like (msa_column, residue); -1 marks unmatched.
        if rtmap[k][0] == -1:
            return False
    return True
# a chuck of output produced from a pfam msa
def cgmsa_composition(pdbfile, msafile, mapfile):
    '''
    find corresponding columns in msa

    For every pairwise residue contact group in the PDB structure, locate the
    matching MSA columns (via the residue-to-column map file) and emit one
    line: "pdbfile msafile pdb_contact msa_columns composition mass volume
    polarity". Lines for all valid groups are joined with newlines; note the
    returned string has no trailing newline.
    '''
    cgsize = 2 # pairwised contact
    # init score
    m = msa(msafile)
    # matrix format of msa
    # for columnwise access
    msamat = np.array([list(s[1]) for s in m.msaArray])
    # 'A9': (14, 'V')
    rtmap = cp.getPDBUniprotMap(mapfile)
    p = protein(pdbfile)
    ret = []
    # each ncg yields a result
    for ncg in p.contactbynearest(p.atoms,cgsize):
        # Skip groups with non-standard residues or unmapped MSA columns.
        if validncg(ncg, rtmap) == False:
            #print 'skip invalid ncg: %s' % ' '.join(['%s%d' % (a.chainID, a.resSeq) for a in ncg])
            continue
        pdbcg = ['%s%d' % (a.chainID, a.resSeq) for a in ncg]
        pdbcgstr = ','.join(pdbcg)
        # Physico-chemical totals over the contact group, from cp.aadef.
        mass = sum([cp.aadef[cp.aa2a[a.resName]][1] for a in ncg])
        volume = sum([cp.aadef[cp.aa2a[a.resName]][2] for a in ncg])
        polar = sum([cp.aadef[cp.aa2a[a.resName]][3] for a in ncg])
        # a list (two for pairwise) of residues in the contact. e.g. (A6,A112)
        msacg = [rtmap[k][0] for k in pdbcg]
        msacgstr = ','.join([str(col) for col in msacg])
        # Amino-acid composition accumulated over the mapped MSA columns.
        sm = {}
        for aa in cp.aa201:
            sm[aa]=0
        for col in msacg:
            addscore(list(msamat[:,col]), sm)
        #print sm[aa], 2*len(m.msaArray)
        # Counts scaled by 5/num_sequences and truncated to ints.
        compstr = ','.join([str(int(5.0*sm[aa]/len(m.msaArray))) for aa in cp.aa201])
        ret.append('%s %s %s %s %s %.2f %.2f %.2f' % (pdbfile, msafile, pdbcgstr, msacgstr, compstr, mass, volume, polar))
        #print '%s %s %s %s %s %.2f %.2f %.2f\n' % (pdbfile, msafile, pdbcgstr, msacgstr, compstr, mass, volume, polar)
    return '\n'.join(ret)
def main():
'''
For residue contact msa column composition
covariate data
read pdb, msa
output pdb+pfam+composition
'''
if len(sys.argv) < 2:
print 'Usage: python proc_cv_composition 13-talign-ok.tsv'
return
blackboard = sys.argv[1]
outfile = 'covariate.%s.rpcomp' % blackboard
fout = open(outfile, 'w')
with open(blackboard) as fp:
for line in fp:
strArray = line.strip().split(' ')
pdbfile = '%s-%s.rpdb.tip' % (strArray[0].lower(), strArray[1])
pfamfile = '%s_full.txt.rseq' % strArray[3]
rtmapfile = '%s-%s-%s-%s.map' % (strArray[0].lower(), strArray[1], strArray[3], strArray[6])
#1a0p-A-PF00589-XERD_ECOLI.map
fout.write(cgmsa_composition(pdbfile, pfamfile, rtmapfile))
fout.close()
print 'save to %s.' % outfile
if __name__ == '__main__':
main() |
import tkinter as tk
from tkinter import messagebox
# Main application window.
root = tk.Tk()
#root.attributes('-fullscreen', True)
root.geometry('2000x2000')
root.title('AppCop 1.0')
# Row of four equally-weighted buttons across the top of the window.
button_frame = tk.Frame(root)
button_frame.pack(fill=tk.X, side=tk.TOP)
button_frame.columnconfigure(0, weight=1)
button_frame.columnconfigure(1, weight=1)
button_frame.columnconfigure(2, weight=1)
button_frame.columnconfigure(3, weight=1)
# Logo shown beneath the button row; the module-level name keeps the
# PhotoImage referenced for the lifetime of the window.
image = tk.PhotoImage(file="AppCop_logo.PNG")
label = tk.Label(image=image)
label.pack()
def f_HostnameIP():
    # Show the local hostname/IP reported by AppCopUtil in a dialog.
    import AppCopUtil as f_machine_details
    host_machine_detail = ''
    host_machine_detail = f_machine_details.get_Host_IP()
    messagebox.showinfo("Logged in User Hostname & IP",host_machine_detail)
def f_PasswordGen():
    # Show a freshly generated password in a dialog.
    import AppCopUtil as f_auto_password
    auto_password_gen = ''
    auto_password_gen = f_auto_password.gen_Password()
    messagebox.showinfo("Auto Generated Password",auto_password_gen)
def f_CardValidate():
    # Validate a card number. The input widget is not implemented yet, so an
    # empty card number is currently passed through.
    import AppCopUtil as f_card_validate
    Card_Details = ''
    Card_Number = ''
    # Create the Entry widget PENDING WORK
    ## Card_Number = input ('Enter Card Number to Validate >>>')
    Card_Details = f_card_validate.card_validate(Card_Number)
    messagebox.showinfo("Card Validation Check",Card_Details)
# Wire each button to its handler and lay them out in the frame's grid.
Ip_button = tk.Button(button_frame, text='Get Hostname & IP Address of Local Machine', command = f_HostnameIP)
Pass_button = tk.Button(button_frame, text='Automated Password Generator', command = f_PasswordGen)
Card_button = tk.Button(button_frame, text='Validate Your Card Number', command = f_CardValidate)
Exit_button = tk.Button(button_frame, text='Exit', command = exit)
Ip_button.grid(row=3, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
Pass_button.grid(row=3, column=1, sticky=tk.N+tk.S+tk.E+tk.W)
Card_button.grid(row=3, column=2, sticky=tk.N+tk.S+tk.E+tk.W)
Exit_button.grid(row=3, column=3, sticky=tk.N+tk.S+tk.E+tk.W)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 4 13:39:00 2018
@author: Student
STATUS : incomplete, abandoned
Assignment 5
1. Use Hunt's Algorithm to create and test a decision tree on Car Evaluation Dataset.
ref: https://archive.ics.uci.edu/ml/datasets/car+evaluation
https://www-users.cs.umn.edu/~kumar001/dmbook/ch4.pdf
"""
import csv #for importing csv data file
#Read the CSV file into the python environment
data_list = []
with open('data.txt', 'rt') as csvfile:
    read_obj = csv.reader(csvfile, delimiter = ',')
    for row in read_obj:
        data_list.append(row)
# First row holds the column names; drop it from the data records.
field_headings = data_list[0]
data_list.remove(data_list[0])
# NOTE(review): complete_data aliases data_list (no copy) — mutating or
# shuffling one later also changes the other.
complete_data = data_list
def pre_process_data(string_data):
    """Map the car-evaluation dataset's categorical strings to ordinal ints.

    Returns a new list of converted records; the input is left unmodified.
    Raises KeyError on any value outside the known vocabulary.
    """
    convertor = {
        'vhigh': 1,'high': 2,
        'med' : 3,
        'low' : 4,
        '5more' : 6,
        '1' : 1,
        '2' : 2,
        '3' : 3,
        '4' : 4,
        '5' : 5,
        'more' : 5,
        'small' : 2,
        'big' : 4,
        'unacc' : 1,
        'acc' : 2,
        'good' : 3,
        'vgood' : 4}
    return [[convertor[value] for value in record] for record in string_data]
data_list = complete_data
print("Attribute headings are : ",field_headings)
#PREPROCESSING DATA
clean_data = pre_process_data(data_list)
TOTAL_RECORDS = len(clean_data)
# 10% train / 90% test split, as in the original experiment.
TRAINING_SIZE = int(0.1 * TOTAL_RECORDS)
TESTING_SIZE = TOTAL_RECORDS - TRAINING_SIZE
print("TRAINING SIZE: ",TRAINING_SIZE, " TESTING SIZE : ",TESTING_SIZE)
#shuffle data
import random
clean_data_shuffled = clean_data
random.shuffle(clean_data_shuffled)
#divide data into two parts
training_data_list = clean_data_shuffled[0:TRAINING_SIZE]
# Fix: the old slice [TRAINING_SIZE:TESTING_SIZE] silently dropped the final
# TRAINING_SIZE records; the test split is everything after the training split.
testing_data_list = clean_data_shuffled[TRAINING_SIZE:]
# Columns 0-5 are attributes, column 6 is the class label.
training_data = [record[0:6] for record in training_data_list]
training_data_labels = [record[6] for record in training_data_list]
#build tree
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf = clf.fit(training_data, training_data_labels)
#plot data
import graphviz
plot = tree.export_graphviz(clf,out_file=None,filled=True,leaves_parallel=False,proportion=True,rotate=True,rounded=True)
graph = graphviz.Source(plot)
graph.render("dt")
#testing model
testing_data = [record[0:6] for record in testing_data_list]
testing_data_labels = [record[6] for record in testing_data_list]
# Fix: the counters used to start at -1, dropping one correct and one
# incorrect prediction from the final tally.
true_predictions = 0
false_predictions = 0
for record, label in zip(testing_data, testing_data_labels):
    # Fix: predict_proba expects a 2-D array of samples, not a bare record.
    pred = clf.predict_proba([record])
    # NOTE(review): indexing probabilities by label-1 assumes clf.classes_
    # is exactly [1, 2, 3, 4]; verify all classes appear in the training split.
    if pred[0][label-1] == 1:
        true_predictions = true_predictions + 1
    else:
        false_predictions = false_predictions + 1
print("\n\nCorrect: ", true_predictions, " Incorrect : ",false_predictions, "Accuracy : ", true_predictions/(true_predictions+false_predictions), "\n\n\n\n")
|
import numpy as np
import random
import sys
#import ipdb
import logging
def random_based(self):
    """ Perform step 4 of Alon algorithm, performing the refinement of the pairs, processing nodes in a random way. Some heuristic is applied in order to speed up the process.
    """
    # Not implemented yet: placeholder so callers can already reference this
    # refinement strategy by name.
    pass
def partition_correct(self):
    """ Checks if the partition cardinalities are valid

    :returns: True if every class 1..k contains exactly
        self.classes_cardinality vertices, False otherwise
    """
    return all(
        np.where(self.classes == cls_id)[0].size == self.classes_cardinality
        for cls_id in range(1, self.k + 1)
    )
##########################################################################
######################## INDEGREE REFINEMENT #############################
##########################################################################
def density(self, indices_a, indices_b):
    """ Calculates the density between two sets of vertices

    :param indices_a: np.array(), the indices of the first set
    :param indices_b: np.array(), the indices of the second set
    :returns: 0 for empty sets or two singletons; otherwise edges/max_edges
    """
    size_a, size_b = indices_a.size, indices_b.size
    if size_a == 0 or size_b == 0:
        return 0
    if size_a == size_b == 1:
        return 0
    # [TODO] performance issue: comparing all the indices? maybe add a parameter to the function
    if np.array_equal(indices_a, indices_b):
        # Internal density: count each undirected edge once (lower triangle).
        n = size_a
        max_edges = (n*(n-1))/2
        n_edges = np.tril(self.adj_mat[np.ix_(indices_a, indices_a)], -1).sum()
        #n_edges = np.tril(self.sim_mat[np.ix_(indices_a, indices_a)], -1).sum()
        return n_edges / max_edges
    # Bipartite density between two distinct sets.
    n_edges = self.adj_mat[np.ix_(indices_a, indices_b)].sum()
    #n_edges = self.sim_mat[np.ix_(indices_a, indices_b)].sum()
    return n_edges / (size_a * size_b)
def compute_indensities(self):
    """ Compute the inside density for each class of a given partition

    :returns: np.array(float32) of densities, indexed by class id 0..k
    """
    densities = np.zeros(self.k + 1, dtype='float32')
    for cls_id in range(0, self.k + 1):
        members = np.where(self.classes == cls_id)[0]
        # Empty classes contribute a density of zero.
        densities[cls_id] = density(self, members, members) if members.size else 0
    return densities
def choose_candidate(self, in_densities, s, irregulars):
    """ This function chooses a class between the irregular ones (d(ci,cj), 1-|d(ci,ci)-d(cj,cj)|)

    :param in_densities: list(float), precomputed densities to speed up the calculations
    :param s: int, the class which all the other classes are compared to
    :param irregulars: list(int), the list of irregular classes
    :returns: the irregular class maximising the pairing score, or -1 if
        `irregulars` is empty
    """
    candidate_idx = -1
    candidate = -1
    # Exploit the precalculated densities
    s_dens = in_densities[s]
    # Hoisted out of the loop: the member indices of class s are invariant
    # across candidates, so recomputing them per iteration was wasted work.
    s_indices = np.where(self.classes == s)[0]
    for r in irregulars:
        r_indices = np.where(self.classes == r)[0]
        # Score favours high inter-density and similar internal densities.
        r_idx = density(self, s_indices, r_indices) + (1 - abs(s_dens - in_densities[r]))
        if r_idx > candidate_idx:
            candidate_idx = r_idx
            candidate = r
    return candidate
def fill_new_set(self, new_set, compls, maximize_density):
    """ Grow *new_set* up to self.classes_cardinality by moving nodes from *compls*.

    Nodes connected (or disconnected, depending on *maximize_density*) to the
    current set are moved first, ordered by how many set members they touch;
    any remaining gap is filled with arbitrary complement nodes.
    :param new_set: np.array(), array of indices of the set that must be augmented
    :param compls: np.array(), array of indices used to augment the new_set
    :param maximize_density: bool, used to augment or decrement density
    :returns: (new_set, compls) after the moves
    """
    target = 1.0 if maximize_density else 0.0
    matches = self.adj_mat[np.ix_(new_set, compls)] == target
    # Candidate nodes with how many members of new_set they match.
    candidates, hits = np.unique(np.tile(compls, (len(new_set), 1))[matches], return_counts=True)
    order = hits.argsort()
    if not maximize_density:
        # Reverse so the least-connected candidates sit at the tail (pop side).
        order = order[::-1]
    to_add = candidates[order]
    while new_set.size < self.classes_cardinality:
        if to_add.size > 0:
            # Best remaining candidate is at the tail of to_add.
            node, to_add = to_add[-1], to_add[:-1]
            new_set = np.append(new_set, node)
            compls = np.delete(compls, np.argwhere(compls == node))
        else:
            # Out of candidates: top up with arbitrary complement nodes
            # until the desired cardinality is reached.
            node, compls = compls[-1], compls[:-1]
            new_set = np.append(new_set, node)
    return new_set, compls
def indeg_guided(self):
    """ In-degree based refinement. Exploits the internal structure of the
    classes of the current partition: each class is split in two (halving
    classes_cardinality and doubling k). Irregular pairs are refined together
    via their certificates/complements; regular classes are split by an
    in-degree "unzip". Surplus nodes are parked in the exceptional class 0.
    :returns: True if the new partition is valid, False otherwise
    """
    threshold = 0.5
    to_be_refined = list(range(1, self.k + 1))
    old_cardinality = self.classes_cardinality
    self.classes_cardinality //= 2
    in_densities = compute_indensities(self)
    # New classes get temporary negative labels; flipped to positive at the end.
    new_k = 0
    while to_be_refined:
        s = to_be_refined.pop(0)
        irregular_r_indices = []
        for r in to_be_refined:
            if self.certs_compls_list[r - 2][s - 1][0][0]:
                irregular_r_indices.append(r)
        # If class s has irregular classes
        if irregular_r_indices:
            # Choose candidate based on the inside-outside density index
            r = choose_candidate(self, in_densities, s, irregular_r_indices)
            to_be_refined.remove(r)
            s_certs = np.array(self.certs_compls_list[r - 2][s - 1][0][1]).astype('int32')
            s_compls = np.array(self.certs_compls_list[r - 2][s - 1][1][1]).astype('int32')
            assert s_certs.size + s_compls.size == old_cardinality
            r_compls = np.array(self.certs_compls_list[r - 2][s - 1][1][0]).astype('int32')
            r_certs = np.array(self.certs_compls_list[r - 2][s - 1][0][0]).astype('int32')
            assert r_certs.size + r_compls.size == old_cardinality
            # Merging the two complements
            compls = np.append(s_compls, r_compls)
            # Calculating certificates densities
            dens_s_cert = density(self, s_certs, s_certs)
            dens_r_cert = density(self, r_certs, r_certs)
            for cert, dens in [(s_certs, dens_s_cert), (r_certs, dens_r_cert)]:
                # Indices of the cert ordered by in-degree; reversing is harmless
                # since the list is only unzipped afterwards.
                degs = self.adj_mat[np.ix_(cert, cert)].sum(1).argsort()[::-1]
                if dens > threshold:
                    # Certificates high density branch:
                    # unzip in half (alternating by degree) to preserve seeds.
                    set1 = cert[degs[0:][::2]]
                    set2 = cert[degs[1:][::2]]
                    # Adjust cardinality of the new sets to the desired cardinality
                    set1, compls = fill_new_set(self, set1, compls, True)
                    set2, compls = fill_new_set(self, set2, compls, True)
                    # Handling of odd classes: surplus node goes to class 0
                    new_k -= 1
                    self.classes[set1] = new_k
                    if set1.size > self.classes_cardinality:
                        self.classes[set1[-1]] = 0
                    new_k -= 1
                    self.classes[set2] = new_k
                    if set2.size > self.classes_cardinality:
                        self.classes[set2[-1]] = 0
                else:
                    # Certificates low density branch: split randomly in half
                    set1 = np.random.choice(cert, len(cert)//2, replace=False)
                    set2 = np.setdiff1d(cert, set1)
                    # Adjust cardinality of the new sets to the desired cardinality
                    set1, compls = fill_new_set(self, set1, compls, False)
                    set2, compls = fill_new_set(self, set2, compls, False)
                    # Handling of odd classes
                    new_k -= 1
                    self.classes[set1] = new_k
                    if set1.size > self.classes_cardinality:
                        self.classes[set1[-1]] = 0
                    new_k -= 1
                    self.classes[set2] = new_k
                    if set2.size > self.classes_cardinality:
                        self.classes[set2[-1]] = 0
            # Handle special case when there are still some complements not assigned
            if compls.size > 0:
                self.classes[compls] = 0
        else:
            # The class is e-reg with all the others or it does not have
            # irregular classes: sort by in-degree and unzip the structure.
            s_indices = np.where(self.classes == s)[0]
            s_indegs = self.adj_mat[np.ix_(s_indices, s_indices)].sum(1).argsort()
            set1 = s_indices[s_indegs[0:][::2]]
            set2 = s_indices[s_indegs[1:][::2]]
            # Handling of odd classes
            new_k -= 1
            self.classes[set1] = new_k
            if set1.size > self.classes_cardinality:
                self.classes[set1[-1]] = 0
            new_k -= 1
            self.classes[set2] = new_k
            # BUG FIX: this guard previously re-checked set1 (copy-paste), so an
            # oversized set2 was never trimmed — compare the parallel branches above.
            if set2.size > self.classes_cardinality:
                self.classes[set2[-1]] = 0
    self.k *= 2
    # Check validity of class C0: if it is too large but has enough nodes,
    # distribute k of them as seeds of the new (negative-labelled) classes.
    c0_indices = np.where(self.classes == 0)[0]
    if c0_indices.size >= (self.epsilon * self.adj_mat.shape[0]):
        if c0_indices.size > self.k:
            self.classes[c0_indices[:self.k]] = np.array(range(1, self.k+1))*-1
        else:
            print('[ refinement ] Invalid cardinality of C_0')
            return False
    # Flip the temporary negative labels to the final positive ones.
    self.classes *= -1
    # NOTE(review): live debugger hook kept from the original code.
    if not partition_correct(self):
        ipdb.set_trace()
    return True
##########################################################################
######################## PAIR DEGREE REFINEMENT ##########################
##########################################################################
def within_degrees(self, c):
    """ Per-node in-degrees computed only inside class *c*.

    :param c: int, class label
    :returns: np.array(int16) of length n; members of c carry their in-degree
              within the class, every other position is 0
    """
    members = np.where(self.classes == c)[0]
    result = np.zeros(len(self.degrees), dtype='int16')
    result[members] = self.adj_mat[np.ix_(members, members)].sum(1)
    return result
def get_s_r_degrees(self, s, r):
    """ Cross-pair degree vector for classes s and r.

    Members of s are scored by their edges towards r and vice versa; every
    node outside the pair keeps degree 0.
    :param s: int, class s
    :param r: int, class r
    :returns: np.array(int16), degree vector
    """
    pair_degs = np.zeros(len(self.degrees), dtype='int16')
    # Member indices of each class of the pair.
    idx_s = np.where(self.classes == s)[0]
    idx_r = np.where(self.classes == r)[0]
    # Each side is scored against the opposite side.
    pair_degs[idx_s] = self.adj_mat[np.ix_(idx_s, idx_r)].sum(1)
    pair_degs[idx_r] = self.adj_mat[np.ix_(idx_r, idx_s)].sum(1)
    return pair_degs
def degree_based(self):
    """ Step 4 of the Alon et al. algorithm: refine the pairs of the partition,
    processing nodes according to their degree. Some heuristics are applied in
    order to speed up the process.
    :returns: True if the refined partition is valid, False otherwise
    """
    #ipdb.set_trace()
    to_be_refined = list(range(1, self.k + 1))
    irregular_r_indices = []
    # Remember parity before halving so odd classes can drop one node into C0.
    is_classes_cardinality_odd = self.classes_cardinality % 2 == 1
    self.classes_cardinality //= 2
    while to_be_refined:
        s = to_be_refined.pop(0)
        # Collect the classes that form an irregular (certified) pair with s.
        for r in to_be_refined:
            if self.certs_compls_list[r - 2][s - 1][0][0]:
                irregular_r_indices.append(r)
        if irregular_r_indices:
            # Fixed seeds make the candidate choice reproducible across runs.
            np.random.seed(314)
            random.seed(314)
            chosen = random.choice(irregular_r_indices)
            to_be_refined.remove(chosen)
            irregular_r_indices = []
            # Degrees wrt to each other class
            s_r_degs = get_s_r_degrees(self, s, chosen)
            # i = 0 for r, i = 1 for s
            for i in [0, 1]:
                cert_length = len(self.certs_compls_list[chosen - 2][s - 1][0][i])
                compl_length = len(self.certs_compls_list[chosen - 2][s - 1][1][i])
                # Keep the larger of certificate/complement; the smaller is
                # discarded into the exceptional class C0.
                greater_set_ind = np.argmax([cert_length, compl_length])
                lesser_set_ind = np.argmin([cert_length, compl_length]) if cert_length != compl_length else 1 - greater_set_ind
                greater_set = self.certs_compls_list[chosen - 2][s - 1][greater_set_ind][i]
                lesser_set = self.certs_compls_list[chosen - 2][s - 1][lesser_set_ind][i]
                self.classes[lesser_set] = 0
                # Trim the kept set down to the (halved) class cardinality.
                difference = len(greater_set) - self.classes_cardinality
                # retrieve the first <difference> nodes sorted by degree.
                # N.B. NODES ARE SORTED IN DESCENDING ORDER
                difference_nodes_ordered_by_degree = sorted(greater_set, key=lambda el: s_r_degs[el], reverse=True)[0:difference]
                #difference_nodes_ordered_by_degree = sorted(greater_set, key=lambda el: np.where(self.degrees == el)[0], reverse=True)[0:difference]
                self.classes[difference_nodes_ordered_by_degree] = 0
        else:
            # s has no irregular pair: carve a brand-new class out of its nodes.
            self.k += 1
            # TODO: cannot compute the r_s_degs since the candidate does not have any e-regular pair <14-11-17, lakj>
            s_indices_ordered_by_degree = sorted(list(np.where(self.classes == s)[0]), key=lambda el: np.where(self.degrees == el)[0], reverse=True)
            #s_indices_ordered_by_degree = sorted(list(np.where(self.classes == s)[0]), key=lambda el: s_r_degs[el], reverse=True)
            if is_classes_cardinality_odd:
                # Drop one node into C0 so the split stays even.
                self.classes[s_indices_ordered_by_degree.pop(0)] = 0
            self.classes[s_indices_ordered_by_degree[0:self.classes_cardinality]] = self.k
    # Recycle C0 into as many full new classes as its size allows.
    C0_cardinality = np.sum(self.classes == 0)
    num_of_new_classes = C0_cardinality // self.classes_cardinality
    nodes_in_C0_ordered_by_degree = np.array([x for x in self.degrees if x in np.where(self.classes == 0)[0]])
    for i in range(num_of_new_classes):
        self.k += 1
        self.classes[nodes_in_C0_ordered_by_degree[
            (i * self.classes_cardinality):((i + 1) * self.classes_cardinality)]] = self.k
    # The leftover C0 must stay below epsilon * N for the partition to be valid.
    C0_cardinality = np.sum(self.classes == 0)
    if C0_cardinality > self.epsilon * self.N:
        #sys.exit("Error: not enough nodes in C0 to create a new class.Try to increase epsilon or decrease the number of nodes in the graph")
        #print("Error: not enough nodes in C0 to create a new class. Try to increase epsilon or decrease the number of nodes in the graph")
        if not partition_correct(self):
            ipdb.set_trace()
        return False
    if not partition_correct(self):
        ipdb.set_trace()
    return True
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import unittest, sys, os, datetime,socket,redis
from conf.setting import RESULT_PATH
from HTMLTestRunner import HTMLTestRunner
from test_case.utils.Tools import send_mail
from test_case.action.k_detail import test_k_deail_head_Action
BASE_PATH = os.path.dirname(os.path.dirname(
os.path.abspath(__file__)))
def get_all_case(su):
    """Discover every test_*.py case under the action directory and add it to *su*.

    :param su: unittest.TestSuite to populate
    :returns: the same suite, for chaining
    """
    action_dir = os.path.join(BASE_PATH, 'LittleLove_UIpro/test_case/action')
    discovered = unittest.defaultTestLoader.discover(action_dir, pattern='test_*.py', top_level_dir=None)
    su.addTest(discovered)
    return su
def get_case(case):
    """Look up a runnable test case (and its description) by key.

    :param case: str, key into the registry below
    :returns: dict with "case" (the test) and "desc" entries
    :raises KeyError: if *case* is not registered
    """
    registry = {
        "test_k_detail_head": {
            "case": test_k_deail_head_Action.UiTest('test_k_detail_head'),
            "desc": "详情页头部调整验证"
        },
        "desc": "微爱UI测试"
    }
    return registry[case]
# Shared Redis client used by main() to persist the "ui-test" running flag.
r = redis.Redis(host='127.0.0.1', port=6379, encoding='utf8', decode_responses=True)
def execute_case():
    """Build the test suite, run it through HTMLTestRunner and mail the report.

    With no CLI arguments every discovered case runs; otherwise each argument
    is looked up via get_case() and only those cases are executed.
    """
    # Pick the report location: fixed path on the CI server, a timestamped
    # file under RESULT_PATH everywhere else.
    if socket.gethostname() == 'cn-hangzhou-qsebao-vpc-back-aj-test-001':
        # BUG FIX: the report stream and the mailed attachment previously
        # pointed at two different files ('LittleLove-ui.html' was written but
        # 'ebao-ui.html' was mailed). Use a single path for both.
        file_name = '/var/www/report/LittleLove-ui.html'
    else:
        this_name = 'result_%s' % datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        file_name = os.path.join(RESULT_PATH, this_name + '.html')
    # "with" guarantees the report file is closed even if a test run raises.
    with open(file_name, 'wb') as fp:
        runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title=u'UI自动化测试', description=u'用例测试情况')
        su = unittest.TestSuite()
        # No CLI arguments -> run everything; otherwise run the named cases only.
        if len(sys.argv[1:]) == 0:
            get_all_case(su)
        else:
            for k in sys.argv[1:]:
                entry = get_case(k)["case"]
                if isinstance(entry, list):
                    for case in entry:
                        su.addTest(case)
                else:
                    su.addTest(entry)
        runner.run(su)
    # Mail the report that was just written.
    send_mail(file_name)
def main():
    """Entry point: guard server runs with a Redis flag, run directly elsewhere.

    On the CI server the "ui-test" Redis key serves as a mutex: 1 while a run
    is in progress, 0 (or absent) when idle.
    """
    if socket.gethostname() == 'cn-hangzhou-qsebao-vpc-back-aj-test-001':
        try:
            ui_state = r.get("ui-test")
            # BUG FIX: on a fresh Redis the key is absent, so int(None) raised
            # TypeError and the very first server run never executed the tests.
            # Treat a missing flag the same as the idle state 0.
            if ui_state is None or int(ui_state) != 1:
                r.set('ui-test', 1)
                execute_case()
                r.set('ui-test', 0)
            else:
                print('working...')
        except Exception as e:
            # Best-effort: log the error and clear the flag so the next
            # scheduled run is not blocked by a stale "running" state.
            print(e)
            r.set('ui-test', 0)
    else:
        execute_case()
# Script entry point.
if __name__ == '__main__':
    main()
|
# Based on common_variables in https://github.com/opensafely/post-covid-vaccinated/blob/main/analysis/common_variables.py
# Import statements
## Cohort extractor
from cohortextractor import (
patients,
codelist,
filter_codes_by_category,
combine_codelists,
codelist_from_csv,
)
#study dates
from grouping_variables import (
study_dates,
days)
## Codelists from codelist.py (which pulls them from the codelist folder)
from codelists import *
## Datetime functions
from datetime import date
## Study definition helper
import study_definition_helper_functions as helpers
# Define pandemic_start: start of the COVID-19 pandemic taken from the shared
# study_dates mapping; used as the earliest plausible date in expectations below.
pandemic_start = study_dates["pandemic_start"]
# Define common variables function
def generate_common_variables(index_date_variable, exposure_end_date_variable, outcome_end_date_variable):
dynamic_variables = dict(
# DEFINE EXPOSURES ------------------------------------------------------
## Date of positive SARS-COV-2 PCR antigen test
tmp_exp_date_covid19_confirmed_sgss=patients.with_test_result_in_sgss(
pathogen="SARS-CoV-2",
test_result="positive",
returning="date",
find_first_match_in_period=True,
date_format="YYYY-MM-DD",
between=[f"{index_date_variable}",f"{exposure_end_date_variable}"],
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.2,
},
),
## First COVID-19 code (diagnosis, positive test or sequalae) in primary care
tmp_exp_date_covid19_confirmed_snomed=patients.with_these_clinical_events(
combine_codelists(
covid_primary_care_code,
covid_primary_care_positive_test,
covid_primary_care_sequalae,
),
returning="date",
between=[f"{index_date_variable}",f"{exposure_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.2,
},
),
## Start date of episode with confirmed diagnosis in any position
tmp_exp_date_covid19_confirmed_hes=patients.admitted_to_hospital(
with_these_diagnoses=covid_codes,
returning="date_admitted",
between=[f"{index_date_variable}",f"{exposure_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.1,
},
),
## Date of death with SARS-COV-2 infection listed as primary or underlying cause
tmp_exp_date_covid19_confirmed_death=patients.with_these_codes_on_death_certificate(
covid_codes,
returning="date_of_death",
between=[f"{index_date_variable}",f"{exposure_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.2
},
),
## Generate variable to identify first date of confirmed COVID
exp_date_covid19_confirmed=patients.minimum_of(
"tmp_exp_date_covid19_confirmed_sgss","tmp_exp_date_covid19_confirmed_snomed","tmp_exp_date_covid19_confirmed_hes","tmp_exp_date_covid19_confirmed_death"
),
# POPULATION SELECTION VARIABLES ------------------------------------------------------
has_follow_up_previous_6months=patients.registered_with_one_practice_between(
start_date=f"{index_date_variable} - 6 months",
end_date=f"{index_date_variable}",
return_expectations={"incidence": 0.95},
),
has_died = patients.died_from_any_cause(
on_or_before = f"{index_date_variable}",
returning="binary_flag",
return_expectations={"incidence": 0.01}
),
registered_at_start = patients.registered_as_of(f"{index_date_variable}",
),
# Deregistration date
dereg_date=patients.date_deregistered_from_all_supported_practices(
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format = 'YYYY-MM-DD',
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest": "today"},
"rate": "uniform",
"incidence": 0.01
},
),
# Define subgroups (for variables that don't have a corresponding covariate only)
## COVID-19 severity
sub_date_covid19_hospital = patients.admitted_to_hospital(
with_these_primary_diagnoses=covid_codes,
returning="date_admitted",
on_or_after="exp_date_covid19_confirmed",
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
## History of COVID-19
### Positive SARS-COV-2 PCR antigen test
tmp_sub_bin_covid19_confirmed_history_sgss=patients.with_test_result_in_sgss(
pathogen="SARS-CoV-2",
test_result="positive",
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.2},
),
### COVID-19 code (diagnosis, positive test or sequalae) in primary care
tmp_sub_bin_covid19_confirmed_history_snomed=patients.with_these_clinical_events(
combine_codelists(
covid_primary_care_code,
covid_primary_care_positive_test,
covid_primary_care_sequalae,
),
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.2},
),
### Hospital episode with confirmed diagnosis in any position
tmp_sub_bin_covid19_confirmed_history_hes=patients.admitted_to_hospital(
with_these_diagnoses=covid_codes,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.2},
),
## Generate variable to identify first date of confirmed COVID
sub_bin_covid19_confirmed_history=patients.maximum_of(
"tmp_sub_bin_covid19_confirmed_history_sgss","tmp_sub_bin_covid19_confirmed_history_snomed","tmp_sub_bin_covid19_confirmed_history_hes"
),
## Age
cov_num_age = patients.age_as_of(
f"{index_date_variable} - 1 day",
return_expectations = {
"rate": "universal",
"int": {"distribution": "population_ages"},
"incidence" : 0.001
},
),
## Ethnicity
cov_cat_ethnicity=patients.categorised_as(
helpers.generate_ethnicity_dictionary(6),
cov_ethnicity_sus=patients.with_ethnicity_from_sus(
returning="group_6", use_most_frequent_code=True
),
cov_ethnicity_gp_opensafely=patients.with_these_clinical_events(
opensafely_ethnicity_codes_6,
on_or_before=f"{index_date_variable} - 1 day",
returning="category",
find_last_match_in_period=True,
),
cov_ethnicity_gp_primis=patients.with_these_clinical_events(
primis_covid19_vacc_update_ethnicity,
on_or_before=f"{index_date_variable} -1 day",
returning="category",
find_last_match_in_period=True,
),
cov_ethnicity_gp_opensafely_date=patients.with_these_clinical_events(
opensafely_ethnicity_codes_6,
on_or_before=f"{index_date_variable} -1 day",
returning="category",
find_last_match_in_period=True,
),
cov_ethnicity_gp_primis_date=patients.with_these_clinical_events(
primis_covid19_vacc_update_ethnicity,
on_or_before=f"{index_date_variable} - 1 day",
returning="category",
find_last_match_in_period=True,
),
return_expectations=helpers.generate_universal_expectations(5,True),
),
## Deprivation
cov_cat_deprivation=patients.categorised_as(
helpers.generate_deprivation_ntile_dictionary(10),
index_of_multiple_deprivation=patients.address_as_of(
f"{index_date_variable} - 1 day",
returning="index_of_multiple_deprivation",
round_to_nearest=100,
),
return_expectations=helpers.generate_universal_expectations(10,False),
),
## Region
cov_cat_region=patients.registered_practice_as_of(
f"{index_date_variable} - 1 day",
returning="nuts1_region_name",
return_expectations={
"rate": "universal",
"category": {
"ratios": {
"North East": 0.1,
"North West": 0.1,
"Yorkshire and The Humber": 0.1,
"East Midlands": 0.1,
"West Midlands": 0.1,
"East": 0.1,
"London": 0.2,
"South East": 0.1,
"South West": 0.1,
},
},
},
),
## Smoking status
cov_cat_smoking_status=patients.categorised_as(
{
"S": "most_recent_smoking_code = 'S'",
"E": """
most_recent_smoking_code = 'E' OR (
most_recent_smoking_code = 'N' AND ever_smoked
)
""",
"N": "most_recent_smoking_code = 'N' AND NOT ever_smoked",
"M": "DEFAULT",
},
return_expectations={
"category": {"ratios": {"S": 0.6, "E": 0.1, "N": 0.2, "M": 0.1}}
},
most_recent_smoking_code=patients.with_these_clinical_events(
smoking_clear,
find_last_match_in_period=True,
on_or_before=f"{index_date_variable} -1 day",
returning="category",
),
ever_smoked=patients.with_these_clinical_events(
filter_codes_by_category(smoking_clear, include=["S", "E"]),
on_or_before=f"{index_date_variable} -1 day",
),
),
## Care home status
cov_bin_carehome_status=patients.care_home_status_as_of(
f"{index_date_variable} -1 day",
categorised_as={
"TRUE": """
IsPotentialCareHome
AND LocationDoesNotRequireNursing='Y'
AND LocationRequiresNursing='N'
""",
"TRUE": """
IsPotentialCareHome
AND LocationDoesNotRequireNursing='N'
AND LocationRequiresNursing='Y'
""",
"TRUE": "IsPotentialCareHome",
"FALSE": "DEFAULT",
},
return_expectations={
"rate": "universal",
"category": {"ratios": {"TRUE": 0.30, "FALSE": 0.70},},
},
),
## Acute myocardial infarction
### Primary care
tmp_cov_bin_ami_snomed=patients.with_these_clinical_events(
ami_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_ami_prior_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=ami_prior_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
tmp_cov_bin_ami_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=ami_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_ami=patients.maximum_of(
"tmp_cov_bin_ami_snomed", "tmp_cov_bin_ami_prior_hes", "tmp_cov_bin_ami_hes",
),
## All stroke
### Primary care
tmp_cov_bin_stroke_isch_snomed=patients.with_these_clinical_events(
stroke_isch_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
tmp_cov_bin_stroke_sah_hs_snomed=patients.with_these_clinical_events(
stroke_sah_hs_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_stroke_isch_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=stroke_isch_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
tmp_cov_bin_stroke_sah_hs_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=stroke_sah_hs_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_all_stroke=patients.maximum_of(
"tmp_cov_bin_stroke_isch_hes", "tmp_cov_bin_stroke_isch_snomed", "tmp_cov_bin_stroke_sah_hs_hes", "tmp_cov_bin_stroke_sah_hs_snomed",
),
# ### Combined Stroke Ischeamic
# cov_bin_stroke_isch=patients.maximum_of(
# "tmp_cov_bin_stroke_isch_hes", "tmp_cov_bin_stroke_isch_snomed",
# ),
## Other Arterial Embolism
### Primary care
tmp_cov_bin_other_arterial_embolism_snomed=patients.with_these_clinical_events(
other_arterial_embolism_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_other_arterial_embolism_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=ami_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_other_arterial_embolism=patients.maximum_of(
"tmp_cov_bin_other_arterial_embolism_snomed", "tmp_cov_bin_other_arterial_embolism_hes",
),
## Venous thrombolism events
### Primary care
tmp_cov_bin_vte_snomed=patients.with_these_clinical_events(
all_vte_codes_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_vte_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=all_vte_codes_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_vte=patients.maximum_of(
"tmp_cov_bin_vte_snomed", "tmp_cov_bin_vte_hes",
),
## Heart failure
### Primary care
tmp_cov_bin_hf_snomed=patients.with_these_clinical_events(
hf_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_hf_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=hf_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_hf=patients.maximum_of(
"tmp_cov_bin_hf_snomed", "tmp_cov_bin_hf_hes",
),
## Angina
### Primary care
tmp_cov_bin_angina_snomed=patients.with_these_clinical_events(
angina_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_angina_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=angina_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_angina=patients.maximum_of(
"tmp_cov_bin_angina_snomed", "tmp_cov_bin_angina_hes",
),
# ## Dementia
# ### Primary care
# tmp_cov_bin_dementia_snomed=patients.with_these_clinical_events(
# dementia_snomed_clinical,
# returning='binary_flag',
# on_or_before=f"{index_date_variable} - 1 day",
# return_expectations={"incidence": 0.1},
# ),
# ### HES APC (Hospital Episode Statistics Admitted Patient Care)
# tmp_cov_bin_dementia_hes=patients.admitted_to_hospital(
# returning='binary_flag',
# with_these_diagnoses=dementia_icd10,
# on_or_before=f"{index_date_variable} - 1 day",
# return_expectations={"incidence": 0.1},
# ),
# ### Primary care - vascular
# tmp_cov_bin_dementia_vascular_snomed=patients.with_these_clinical_events(
# dementia_vascular_snomed_clinical,
# returning='binary_flag',
# on_or_before=f"{index_date_variable} - 1 day",
# return_expectations={"incidence": 0.1},
# ),
# ### HES APC - vascular
# tmp_cov_bin_dementia_vascular_hes=patients.admitted_to_hospital(
# returning='binary_flag',
# with_these_diagnoses=dementia_vascular_icd10,
# on_or_before=f"{index_date_variable} - 1 day",
# return_expectations={"incidence": 0.1},
# ),
# ### Combined
# cov_bin_dementia=patients.maximum_of(
# "tmp_cov_bin_dementia_snomed", "tmp_cov_bin_dementia_hes", "tmp_cov_bin_dementia_vascular_snomed", "tmp_cov_bin_dementia_vascular_hes",
# ),
## Liver disease
### Primary care
tmp_cov_bin_liver_disease_snomed=patients.with_these_clinical_events(
liver_disease_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_liver_disease_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=liver_disease_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_liver_disease=patients.maximum_of(
"tmp_cov_bin_liver_disease_snomed", "tmp_cov_bin_liver_disease_hes",
),
## Chronic kidney disease
### Primary care
tmp_cov_bin_chronic_kidney_disease_snomed=patients.with_these_clinical_events(
ckd_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_chronic_kidney_disease_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=ckd_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_chronic_kidney_disease=patients.maximum_of(
"tmp_cov_bin_chronic_kidney_disease_snomed", "tmp_cov_bin_chronic_kidney_disease_hes",
),
## Cancer
### Primary care
tmp_cov_bin_cancer_snomed=patients.with_these_clinical_events(
cancer_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_cancer_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=cancer_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_cancer=patients.maximum_of(
"tmp_cov_bin_cancer_snomed", "tmp_cov_bin_cancer_hes",
),
## Hypertension
### Primary care
tmp_cov_bin_hypertension_snomed=patients.with_these_clinical_events(
hypertension_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_hypertension_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=hypertension_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### DMD
tmp_cov_bin_hypertension_drugs_dmd=patients.with_these_medications(
hypertension_drugs_dmd,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_hypertension=patients.maximum_of(
"tmp_cov_bin_hypertension_snomed", "tmp_cov_bin_hypertension_hes", "tmp_cov_bin_hypertension_drugs_dmd",
),
## Type 1 diabetes primary care
cov_bin_diabetes_type1_snomed=patients.with_these_clinical_events(
diabetes_type1_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Type 1 diabetes HES
cov_bin_diabetes_type1_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=diabetes_type1_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Type 2 diabetes primary care
cov_bin_diabetes_type2_snomed=patients.with_these_clinical_events(
diabetes_type2_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Type 2 diabetes HES
cov_bin_diabetes_type2_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=diabetes_type2_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Other or non-specific diabetes
cov_bin_diabetes_other=patients.with_these_clinical_events(
diabetes_other_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Gestational diabetes
cov_bin_diabetes_gestational=patients.with_these_clinical_events(
diabetes_gestational_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Diabetes medication
tmp_cov_bin_insulin_snomed=patients.with_these_medications(
insulin_snomed_clinical,
returning="binary_flag",
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.05},
),
tmp_cov_bin_antidiabetic_drugs_snomed=patients.with_these_medications(
antidiabetic_drugs_snomed_clinical,
returning="binary_flag",
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.05},
),
## Any diabetes covariate
cov_bin_diabetes=patients.maximum_of(
"cov_bin_diabetes_type1_snomed", "cov_bin_diabetes_type1_hes",
"cov_bin_diabetes_type2_snomed", "cov_bin_diabetes_type2_hes",
"cov_bin_diabetes_other", "cov_bin_diabetes_gestational",
"tmp_cov_bin_insulin_snomed", "tmp_cov_bin_antidiabetic_drugs_snomed",
),
## Prediabetes
cov_bin_prediabetes=patients.with_these_clinical_events(
prediabetes_snomed,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Obesity
### Primary care
tmp_cov_bin_obesity_snomed=patients.with_these_clinical_events(
bmi_obesity_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} -1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_obesity_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses=bmi_obesity_icd10,
on_or_before=f"{index_date_variable} -1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_obesity=patients.maximum_of(
"tmp_cov_bin_obesity_snomed", "tmp_cov_bin_obesity_hes",
),
## Chronic obstructive pulmonary disease
### Primary care
tmp_cov_bin_chronic_obstructive_pulmonary_disease_snomed=patients.with_these_clinical_events(
copd_snomed_clinical,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### HES APC
tmp_cov_bin_chronic_obstructive_pulmonary_disease_hes=patients.admitted_to_hospital(
returning='binary_flag',
with_these_diagnoses= copd_icd10,
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
### Combined
cov_bin_chronic_obstructive_pulmonary_disease=patients.maximum_of(
"tmp_cov_bin_chronic_obstructive_pulmonary_disease_snomed", "tmp_cov_bin_chronic_obstructive_pulmonary_disease_hes",
),
## Lipid medications
cov_bin_lipid_medications=patients.with_these_medications(
lipid_lowering_dmd,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Antiplatelet_medications
cov_bin_antiplatelet_medications=patients.with_these_medications(
antiplatelet_dmd,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Anticoagulation_medications
cov_bin_anticoagulation_medications=patients.with_these_medications(
anticoagulant_dmd,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Combined oral contraceptive pill
### dmd: dictionary of medicines and devices
cov_bin_combined_oral_contraceptive_pill=patients.with_these_medications(
cocp_dmd,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Hormone replacement therapy
cov_bin_hormone_replacement_therapy=patients.with_these_medications(
hrt_dmd,
returning='binary_flag',
on_or_before=f"{index_date_variable} - 1 day",
return_expectations={"incidence": 0.1},
),
## Total Cholesterol
tmp_cov_num_cholesterol=patients.max_recorded_value(
cholesterol_snomed,
on_most_recent_day_of_measurement=True,
between=[f"{index_date_variable} - 5years", f"{index_date_variable} -1 day"],
date_format="YYYY-MM-DD",
return_expectations={
"float": {"distribution": "normal", "mean": 5.0, "stddev": 2.5},
"date": {"earliest":study_dates["earliest_expec"], "latest": "today"}, ##return_expectations can't take dynamic variable se default are kept here!
"incidence": 0.80,
},
),
## HDL Cholesterol
tmp_cov_num_hdl_cholesterol=patients.max_recorded_value(
hdl_cholesterol_snomed,
on_most_recent_day_of_measurement=True,
between=[f"{index_date_variable}- 5years", f"{index_date_variable} -1 day"],
date_format="YYYY-MM-DD",
return_expectations={
"float": {"distribution": "normal", "mean": 2.0, "stddev": 1.5},
"date": {"earliest": study_dates["earliest_expec"] , "latest": "today"},
"incidence": 0.80,
},
),
## BMI
# taken from: https://github.com/opensafely/BMI-and-Metabolic-Markers/blob/main/analysis/common_variables.py
cov_num_bmi=patients.most_recent_bmi(
on_or_before=f"{index_date_variable} -1 day",
minimum_age_at_measurement=18,
include_measurement_date=True,
date_format="YYYY-MM",
return_expectations={
"date": {"earliest": "2010-02-01", "latest": "2022-02-01"}, ##How do we obtain these dates ?
"float": {"distribution": "normal", "mean": 28, "stddev": 8},
"incidence": 0.7,
},
),
### Categorising BMI
cov_cat_bmi_groups = patients.categorised_as(
{
"Underweight": "cov_num_bmi < 18.5 AND cov_num_bmi > 12",
"Healthy_weight": "cov_num_bmi >= 18.5 AND cov_num_bmi < 25",
"Overweight": "cov_num_bmi >= 25 AND cov_num_bmi < 30",
"Obese": "cov_num_bmi >=30 AND cov_num_bmi <70",
"Missing": "DEFAULT",
},
return_expectations = {
"rate": "universal",
"category": {
"ratios": {
"Underweight": 0.05,
"Healthy_weight": 0.25,
"Overweight": 0.4,
"Obese": 0.3,
}
},
},
),
# Define quality assurances
## Prostate cancer
### Primary care
prostate_cancer_snomed=patients.with_these_clinical_events(
prostate_cancer_snomed_clinical,
returning='binary_flag',
return_expectations={
"incidence": 0.03,
},
),
### HES APC
prostate_cancer_hes=patients.admitted_to_hospital(
with_these_diagnoses=prostate_cancer_icd10,
returning='binary_flag',
return_expectations={
"incidence": 0.03,
},
),
### ONS
prostate_cancer_death=patients.with_these_codes_on_death_certificate(
prostate_cancer_icd10,
returning='binary_flag',
return_expectations={
"incidence": 0.02
},
),
### Combined
qa_bin_prostate_cancer=patients.maximum_of(
"prostate_cancer_snomed", "prostate_cancer_hes", "prostate_cancer_death"
),
## Pregnancy
qa_bin_pregnancy=patients.with_these_clinical_events(
pregnancy_snomed_clinical,
returning='binary_flag',
return_expectations={
"incidence": 0.03,
},
),
## Year of birth
qa_num_birth_year=patients.date_of_birth(
date_format="YYYY",
return_expectations={
"date": {"earliest": study_dates["earliest_expec"], "latest": "today"},
"rate": "uniform",
},
),
# Define fixed covariates other than sex
# NB: sex is required to determine vaccine eligibility covariates so is defined in study_definition_electively_unvaccinated.py
## 2019 consultation rate
cov_num_consulation_rate=patients.with_gp_consultations(
between=[days(study_dates["pandemic_start"],-365), days(study_dates["pandemic_start"],-1)],
returning="number_of_matches_in_period",
return_expectations={
"int": {"distribution": "poisson", "mean": 5},
},
),
## Healthcare worker
cov_bin_healthcare_worker=patients.with_healthcare_worker_flag_on_covid_vaccine_record(
returning='binary_flag',
return_expectations={"incidence": 0.01},
),
##############################################################################################################################
## Define autoimune outcomes ##
##############################################################################################################################
#################################################################################################
## Outcome group 1: Inflammatory arthritis ##
#################################################################################################
    ## Rheumatoid arthritis
# Primary
tmp_out_date_ra_snomed = patients.with_these_clinical_events(
ra_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
# HES
tmp_out_date_ra_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=ra_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
# ONS
tmp_out_date_ra_death=patients.with_these_codes_on_death_certificate(
ra_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
    ## Rheumatoid arthritis combining primary care and secondary care
out_date_ra=patients.minimum_of(
"tmp_out_date_ra_snomed", "tmp_out_date_ra_hes", "tmp_out_date_ra_death",
),
    ## Undifferentiated inflammatory arthritis - primary care
out_date_undiff_eia = patients.with_these_clinical_events(
undiff_eia_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
    ## Undifferentiated inflammatory arthritis - no secondary care code
## Psoriatic arthritis - snomed
tmp_out_date_pa_snomed= patients.with_these_clinical_events(
pa_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
## Psoriatic arthritis - hes
tmp_out_date_pa_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=pa_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
# ONS
tmp_out_date_pa_death=patients.with_these_codes_on_death_certificate(
pa_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
## Psoriatic arthritis combining primary care and secondary care
out_date_pa=patients.minimum_of(
"tmp_out_date_pa_snomed", "tmp_out_date_pa_hes", "tmp_out_date_pa_death",
),
## Axial spondyloarthritis - primary care
tmp_out_date_axial_snomed= patients.with_these_clinical_events(
axial_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Axial spondyloarthritis - hes
tmp_out_date_axial_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=axial_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_axial_death=patients.with_these_codes_on_death_certificate(
axial_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.5,
},
),
## Axial spondyloarthritis - combining primary care and secondary care
out_date_axial=patients.minimum_of(
"tmp_out_date_axial_snomed",
"tmp_out_date_axial_hes", "tmp_out_date_axial_death",
),
## Outcome group 1
out_date_grp1_ifa=patients.minimum_of(
"tmp_out_date_ra_snomed", "tmp_out_date_ra_hes", "tmp_out_date_ra_death",
"out_date_undiff_eia",
"tmp_out_date_pa_snomed",
"tmp_out_date_pa_hes", "tmp_out_date_pa_death",
"tmp_out_date_axial_snomed",
"tmp_out_date_axial_hes", "tmp_out_date_axial_death",
#"out_date_ra", "out_date_undiff_eia", "out_date_pa", "out_date_axial"
),
#################################################################################################
## Outcome group 2: Connective tissue disorders ##
#################################################################################################
## Systematic lupus erythematosus - snomed
tmp_out_date_sle_snomed= patients.with_these_clinical_events(
sle_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Systematic lupus erythematosus - hes
tmp_out_date_sle_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=sle_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_sle_death=patients.with_these_codes_on_death_certificate(
sle_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Systematic lupus erythematosus - combining primary care and secondary care
out_date_sle=patients.minimum_of(
"tmp_out_date_sle_snomed", "tmp_out_date_sle_hes", "tmp_out_date_sle_death",
),
## Sjogren’s syndrome - snomed
tmp_out_date_sjs_snomed= patients.with_these_clinical_events(
sjs_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Sjogren’s syndrome - hes
tmp_out_date_sjs_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=sjs_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_sjs_death=patients.with_these_codes_on_death_certificate(
sjs_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Sjogren’s syndrome - combining primary care and secondary care
out_date_sjs=patients.minimum_of(
"tmp_out_date_sjs_snomed", "tmp_out_date_sjs_hes", "tmp_out_date_sjs_death",
),
## Systemic sclerosis/scleroderma - snomed
tmp_out_date_sss_snomed= patients.with_these_clinical_events(
sss_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Systemic sclerosis/scleroderma - hes
tmp_out_date_sss_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=sss_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_sss_death=patients.with_these_codes_on_death_certificate(
sss_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Systemic sclerosis/scleroderma - combining primary care and secondary care
out_date_sss=patients.minimum_of(
"tmp_out_date_sss_snomed", "tmp_out_date_sss_hes", "tmp_out_date_sss_death",
),
    ## Inflammatory myositis/polymyositis/dermatomyositis - snomed
tmp_out_date_im_snomed = patients.with_these_clinical_events(
im_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Inflammatory myositis/polymyositis/dermatolomyositis - hes
tmp_out_date_im_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=im_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_im_death=patients.with_these_codes_on_death_certificate(
im_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Inflammatory myositis/polymyositis/dermatolomyositis - combining primary care and secondary care
out_date_im=patients.minimum_of(
"tmp_out_date_im_snomed", "tmp_out_date_im_hes", "tmp_out_date_im_death",
),
## Mixed Connective Tissue Disease - snomed
tmp_out_date_mctd_snomed= patients.with_these_clinical_events(
mctd_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Mixed Connective Tissue Disease - hes
tmp_out_date_mctd_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=mctd_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_mctd_death=patients.with_these_codes_on_death_certificate(
mctd_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Mixed Connective Tissue Disease - combining primary care and secondary care
out_date_mctd=patients.minimum_of(
"tmp_out_date_mctd_snomed", "tmp_out_date_mctd_hes", "tmp_out_date_mctd_death",
),
## Antiphospholipid syndrome - snomed
out_date_as = patients.with_these_clinical_events(
as_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Antiphospholipid syndrome - no icd10 code
## Outcome group 2
out_date_grp2_ctd=patients.minimum_of(
"tmp_out_date_sle_snomed", "tmp_out_date_sle_hes", "tmp_out_date_sle_death",
"tmp_out_date_sjs_snomed", "tmp_out_date_sjs_hes", "tmp_out_date_sjs_death",
"tmp_out_date_sss_snomed", "tmp_out_date_sss_hes", "tmp_out_date_sss_death",
"tmp_out_date_im_snomed", "tmp_out_date_im_hes", "tmp_out_date_im_death",
"tmp_out_date_mctd_snomed", "tmp_out_date_mctd_hes", "tmp_out_date_mctd_death",
"out_date_as",
#"out_date_sle", "out_date_sjs", "out_date_sss", "out_date_im", "out_date_mctd", "out_date_as"
),
#################################################################################################
## Outcome group 3: Inflammatory skin disease ##
#################################################################################################
## Psoriasis - primary care - ctv3
tmp_out_date_psoriasis_ctv= patients.with_these_clinical_events(
psoriasis_code_ctv,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Psoriasis - primary care - hes
tmp_out_date_psoriasis_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=psoriasis_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_psoriasis_death=patients.with_these_codes_on_death_certificate(
psoriasis_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Psoriasis - combining primary care and secondary care
out_date_psoriasis=patients.minimum_of(
"tmp_out_date_psoriasis_ctv", "tmp_out_date_psoriasis_hes", "tmp_out_date_psoriasis_death",
),
    ## Hidradenitis suppurativa - primary care - ctv3
tmp_out_date_hs_ctv= patients.with_these_clinical_events(
hs_code_ctv,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Hydradenitis suppurativa - secondary care - hes
tmp_out_date_hs_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=hs_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_hs_death=patients.with_these_codes_on_death_certificate(
hs_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Hydradenitis suppurativa - combining primary care and secondary care
out_date_hs =patients.minimum_of(
"tmp_out_date_hs_ctv", "tmp_out_date_hs_hes", "tmp_out_date_hs_death",
),
## Outcome group 3: Inflammatory skin disease
out_date_grp3_isd=patients.minimum_of(
"tmp_out_date_psoriasis_ctv", "tmp_out_date_psoriasis_hes", "tmp_out_date_psoriasis_death",
"tmp_out_date_hs_ctv", "tmp_out_date_hs_hes", "tmp_out_date_hs_death",
#"out_date_psoriasis", "out_date_hs"
),
##################################################################################################
## Outcome group 4: Autoimmune GI / Inflammatory bowel disease ##
##################################################################################################
## Inflammatory bowel disease (combined UC and Crohn's) - SNOMED
tmp_out_date_ibd_snomed= patients.with_these_clinical_events(
ibd_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Inflammatory bowel disease (combined UC and Crohn's) - CTV3
tmp_out_date_ibd_ctv= patients.with_these_clinical_events(
ibd_code_ctv3,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Inflammatory bowel disease (combined UC and Crohn's) - secondary care - hes
tmp_out_date_ibd_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=ibd_code_icd,
on_or_after=f"{index_date_variable}",
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_ibd_death=patients.with_these_codes_on_death_certificate(
ibd_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Inflammatory bowel disease combined
out_date_ibd=patients.minimum_of(
"tmp_out_date_ibd_snomed", "tmp_out_date_ibd_ctv", "tmp_out_date_ibd_hes", "tmp_out_date_ibd_death",
),
## Crohn’s disease snomed
tmp_out_date_crohn_snomed= patients.with_these_clinical_events(
crohn_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Crohn’s disease - secondary care - hes
tmp_out_date_crohn_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=crohn_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_crohn_death=patients.with_these_codes_on_death_certificate(
crohn_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Crohn’s disease combined
out_date_crohn=patients.minimum_of(
"tmp_out_date_crohn_snomed", "tmp_out_date_crohn_hes", "tmp_out_date_crohn_death",
),
## Ulcerative colitis - snomed
tmp_out_date_uc_snomed= patients.with_these_clinical_events(
uc_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Ulcerative colitis - secondary care - hes
tmp_out_date_uc_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=uc_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_uc_death=patients.with_these_codes_on_death_certificate(
uc_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Ulcerative colitis combined
out_date_uc=patients.minimum_of(
"tmp_out_date_uc_snomed", "tmp_out_date_uc_hes", "tmp_out_date_uc_death",
),
## Celiac disease - snomed
tmp_out_date_celiac_snomed= patients.with_these_clinical_events(
celiac_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Celiac disease - hes
tmp_out_date_celiac_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=celiac_code_icd ,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_celiac_death=patients.with_these_codes_on_death_certificate(
celiac_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Celiac disease combined
out_date_celiac=patients.minimum_of(
"tmp_out_date_celiac_snomed", "tmp_out_date_celiac_hes", "tmp_out_date_celiac_death",
),
## Outcome group 4: Autoimmune GI / Inflammatory bowel disease
out_date_grp4_agi_ibd=patients.minimum_of(
"tmp_out_date_ibd_snomed", "tmp_out_date_ibd_ctv", "tmp_out_date_ibd_hes", "tmp_out_date_ibd_death",
"tmp_out_date_crohn_snomed", "tmp_out_date_crohn_hes", "tmp_out_date_crohn_death",
"tmp_out_date_uc_snomed", "tmp_out_date_uc_hes", "tmp_out_date_uc_death",
"tmp_out_date_celiac_snomed", "tmp_out_date_celiac_hes", "tmp_out_date_celiac_death",
#"out_date_crohn", "out_date_uc", "out_date_celiac"
),
##################################################################################################
## Outcome group 5: Thyroid diseases #
##################################################################################################
## Addison’s disease - primary care
tmp_out_date_addison_snomed= patients.with_these_clinical_events(
addison_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Addison’s disease - hes
tmp_out_date_addison_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=addison_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_addison_death=patients.with_these_codes_on_death_certificate(
addison_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Addison’s disease combined
out_date_addison=patients.minimum_of(
"tmp_out_date_addison_snomed", "tmp_out_date_addison_hes", "tmp_out_date_addison_death",
),
## Grave’s disease - primary care
tmp_out_date_grave_snomed= patients.with_these_clinical_events(
grave_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Grave’s disease - hes
tmp_out_date_grave_hes=patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=grave_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_grave_death=patients.with_these_codes_on_death_certificate(
grave_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Grave’s disease combined
out_date_grave=patients.minimum_of(
"tmp_out_date_grave_snomed", "tmp_out_date_grave_hes", "tmp_out_date_grave_death",
),
## Hashimoto’s thyroiditis - snomed
tmp_out_date_hashimoto_thyroiditis_snomed = patients.with_these_clinical_events(
hashimoto_thyroiditis_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Hashimoto’s thyroiditis - hes
tmp_out_date_hashimoto_thyroiditis_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses=hashimoto_thyroiditis_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_hashimoto_thyroiditis_death=patients.with_these_codes_on_death_certificate(
hashimoto_thyroiditis_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Hashimoto’s thyroiditis combined
out_date_hashimoto_thyroiditis=patients.minimum_of(
"tmp_out_date_hashimoto_thyroiditis_snomed", "tmp_out_date_hashimoto_thyroiditis_hes", "tmp_out_date_hashimoto_thyroiditis_death",
),
## Thyroid toxicosis / hyper thyroid - YW: This seems to have been taken out from the excel spreadsheet, 13/Dec/2022
    ## Outcome group 5: Thyroid diseases - to be expanded once the other outcome components are available
out_date_grp5_atv=patients.minimum_of(
"tmp_out_date_addison_snomed", "tmp_out_date_addison_hes","tmp_out_date_addison_death",
"tmp_out_date_grave_snomed", "tmp_out_date_grave_hes", "tmp_out_date_grave_death",
"tmp_out_date_hashimoto_thyroiditis_snomed", "tmp_out_date_hashimoto_thyroiditis_hes", "tmp_out_date_hashimoto_thyroiditis_death",
#"out_date_addison", "out_date_grave", "out_date_hashimoto_thyroiditis"
),
##################################################################################################
## Outcome group 6: Autoimmune vasculitis ##
##################################################################################################
## ANCA-associated - snomed
tmp_out_date_anca_snomed= patients.with_these_clinical_events(
anca_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## ANCA-associated - hes
tmp_out_date_anca_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses= anca_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_anca_death=patients.with_these_codes_on_death_certificate(
anca_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## ANCA-associated - combined
out_date_anca =patients.minimum_of(
"tmp_out_date_anca_snomed", "tmp_out_date_anca_hes", "tmp_out_date_anca_death",
),
## Giant cell arteritis - snomed
tmp_out_date_gca_snomed= patients.with_these_clinical_events(
gca_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Giant cell arteritis - hes
tmp_out_date_gca_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses= gca_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_gca_death=patients.with_these_codes_on_death_certificate(
gca_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Giant cell arteritis - combined
out_date_gca=patients.minimum_of(
"tmp_out_date_gca_snomed", "tmp_out_date_gca_hes", "tmp_out_date_gca_death",
),
## IgA (immunoglobulin A) vasculitis - snomed
tmp_out_date_iga_vasculitis_snomed= patients.with_these_clinical_events(
iga_vasculitis_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## IgA (immunoglobulin A) vasculitis - hes
tmp_out_date_iga_vasculitis_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses= iga_vasculitis_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_iga_vasculitis_death=patients.with_these_codes_on_death_certificate(
iga_vasculitis_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## IgA (immunoglobulin A) vasculitis - combined
out_date_iga_vasculitis=patients.minimum_of(
"tmp_out_date_iga_vasculitis_snomed", "tmp_out_date_iga_vasculitis_hes", "tmp_out_date_iga_vasculitis_death",
),
## Polymyalgia Rheumatica (PMR) - snomed
tmp_out_date_pmr_snomed= patients.with_these_clinical_events(
pmr_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Polymyalgia Rheumatica (PMR) - hes
tmp_out_date_pmr_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses= pmr_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_pmr_death=patients.with_these_codes_on_death_certificate(
pmr_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
    ## Polymyalgia Rheumatica (PMR) - combined
out_date_pmr=patients.minimum_of(
"tmp_out_date_pmr_snomed", "tmp_out_date_pmr_hes", "tmp_out_date_pmr_death",
),
    ## Outcome group 6: Autoimmune vasculitis - to be expanded once the other outcome components are available
out_date_grp6_trd=patients.minimum_of(
"tmp_out_date_anca_snomed", "tmp_out_date_anca_hes", "tmp_out_date_anca_death",
"tmp_out_date_gca_snomed", "tmp_out_date_gca_hes", "tmp_out_date_gca_death",
"tmp_out_date_iga_vasculitis_snomed", "tmp_out_date_iga_vasculitis_hes", "tmp_out_date_iga_vasculitis_death",
"tmp_out_date_pmr_snomed", "tmp_out_date_pmr_hes", "tmp_out_date_pmr_death",
#"out_date_anca", "out_date_gca","out_date_iga_vasculitis","out_date_pmr"
),
##################################################################################################
## Outcome group 7: Hematologic Diseases ##
##################################################################################################
## Immune thrombocytopenia (formerly known as idiopathic thrombocytopenic purpura) - snomed
tmp_out_date_immune_thromb_snomed= patients.with_these_clinical_events(
immune_thromb_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Immune thrombocytopenia (formerly known as idiopathic thrombocytopenic purpura) - hes
tmp_out_date_immune_thromb_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses= immune_thromb_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_immune_thromb_death=patients.with_these_codes_on_death_certificate(
immune_thromb_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# Immune thrombocytopenia (formerly known as idiopathic thrombocytopenic purpura) - combined
out_date_immune_thromb=patients.minimum_of(
"tmp_out_date_immune_thromb_snomed", "tmp_out_date_immune_thromb_hes", "tmp_out_date_immune_thromb_death",
),
## Pernicious anaemia - snomed
tmp_out_date_pernicious_anaemia_snomed= patients.with_these_clinical_events(
pernicious_anaemia_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Pernicious anaemia - hes
tmp_out_date_pernicious_anaemia_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses= pernicious_anaemia_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_pernicious_anaemia_death=patients.with_these_codes_on_death_certificate(
pernicious_anaemia_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Pernicious anaemia combined
out_date_pernicious_anaemia=patients.minimum_of(
"tmp_out_date_pernicious_anaemia_snomed", "tmp_out_date_pernicious_anaemia_hes", "tmp_out_date_pernicious_anaemia_death",
),
## Aplastic Anaemia - snomed
tmp_out_date_apa_snomed= patients.with_these_clinical_events(
apa_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Aplastic Anaemia - ctv3
tmp_out_date_apa_ctv= patients.with_these_clinical_events(
apa_code_ctv,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Aplastic Anaemia - hes
tmp_out_date_apa_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses= apa_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_apa_death=patients.with_these_codes_on_death_certificate(
apa_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Aplastic Anaemia combined
out_date_apa=patients.minimum_of(
"tmp_out_date_apa_snomed", "tmp_out_date_apa_ctv", "tmp_out_date_apa_hes", "tmp_out_date_apa_death",
),
## Autoimmune haemolytic anaemia - snomed
tmp_out_date_aha_snomed= patients.with_these_clinical_events(
aha_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Autoimmune haemolytic anaemia - hes
tmp_out_date_aha_hes =patients.admitted_to_hospital(
returning="date_admitted",
with_these_primary_diagnoses= aha_code_icd,
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_aha_death=patients.with_these_codes_on_death_certificate(
aha_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Autoimmune haemolytic anaemia combined
out_date_aha =patients.minimum_of(
"tmp_out_date_aha_snomed", "tmp_out_date_aha_hes", "tmp_out_date_aha_death",
),
    ## Outcome group 7: Hematologic Diseases - to be expanded once the other outcome components are available
out_date_grp7_htd=patients.minimum_of(
"tmp_out_date_immune_thromb_snomed", "tmp_out_date_immune_thromb_hes", "tmp_out_date_immune_thromb_death",
"tmp_out_date_pernicious_anaemia_snomed", "tmp_out_date_pernicious_anaemia_hes", "tmp_out_date_pernicious_anaemia_death",
"tmp_out_date_apa_snomed", "tmp_out_date_apa_ctv", "tmp_out_date_apa_hes", "tmp_out_date_apa_death",
"tmp_out_date_aha_snomed", "tmp_out_date_aha_hes", "tmp_out_date_aha_death",
#"out_date_immune_thromb", "out_date_pernicious_anaemia", "out_date_apa", "out_date_aha"
),
##################################################################################################
## Outcome group 8: Inflammatory neuromuscular disease ##
##################################################################################################
## Guillain Barre - snomed
tmp_out_date_glb_snomed= patients.with_these_clinical_events(
glb_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Guillain Barre - icd10
tmp_out_date_glb_hes= patients.admitted_to_hospital(
with_these_diagnoses=glb_code_icd,
returning="date_admitted",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_glb_death=patients.with_these_codes_on_death_certificate(
glb_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Guillain Barre combined
out_date_glb=patients.minimum_of(
"tmp_out_date_glb_snomed", "tmp_out_date_glb_hes", "tmp_out_date_glb_death",
),
## Multiple Sclerosis - snomed
tmp_out_date_multiple_sclerosis_snomed= patients.with_these_clinical_events(
multiple_sclerosis_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Multiple Sclerosis - hes
tmp_out_date_multiple_sclerosis_hes= patients.admitted_to_hospital(
with_these_diagnoses=multiple_sclerosis_code_icd,
returning="date_admitted",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_multiple_sclerosis_death=patients.with_these_codes_on_death_certificate(
multiple_sclerosis_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Multiple Sclerosis combined
out_date_multiple_sclerosis=patients.minimum_of(
"tmp_out_date_multiple_sclerosis_snomed", "tmp_out_date_multiple_sclerosis_hes", "tmp_out_date_multiple_sclerosis_death",
),
## Myasthenia gravis - snomed
tmp_out_date_myasthenia_gravis_snomed= patients.with_these_clinical_events(
myasthenia_gravis_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Myasthenia gravis - hes
tmp_out_date_myasthenia_gravis_hes= patients.admitted_to_hospital(
with_these_diagnoses=myasthenia_gravis_code_icd,
returning="date_admitted",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_myasthenia_gravis_death=patients.with_these_codes_on_death_certificate(
myasthenia_gravis_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Myasthenia gravis combined
out_date_myasthenia_gravis=patients.minimum_of(
"tmp_out_date_myasthenia_gravis_snomed", "tmp_out_date_myasthenia_gravis_hes", "tmp_out_date_myasthenia_gravis_death",
),
## Longitudinal myelitis - snomed
tmp_out_date_longit_myelitis_snomed= patients.with_these_clinical_events(
longit_myelitis_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Longitudinal myelitis - hes
tmp_out_date_longit_myelitis_hes= patients.admitted_to_hospital(
with_these_diagnoses=longit_myelitis_code_icd,
returning="date_admitted",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_longit_myelitis_death=patients.with_these_codes_on_death_certificate(
longit_myelitis_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Longitudinal myelitis combined
out_date_longit_myelitis=patients.minimum_of(
"tmp_out_date_longit_myelitis_snomed", "tmp_out_date_longit_myelitis_hes", "tmp_out_date_longit_myelitis_death",
),
## Clinically isolated syndrome - snomed
tmp_out_date_cis_snomed= patients.with_these_clinical_events(
cis_code_snomed,
returning="date",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Clinically isolated syndrome - hes
tmp_out_date_cis_hes= patients.admitted_to_hospital(
with_these_diagnoses=cis_code_icd,
returning="date_admitted",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
date_format="YYYY-MM-DD",
find_first_match_in_period=True,
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
# ONS
tmp_out_date_cis_death=patients.with_these_codes_on_death_certificate(
cis_code_icd,
returning="date_of_death",
between=[f"{index_date_variable}",f"{outcome_end_date_variable}"],
match_only_underlying_cause=True,
date_format="YYYY-MM-DD",
return_expectations={
"date": {"earliest": study_dates["pandemic_start"], "latest" : "today"},
"rate": "uniform",
"incidence": 0.3,
},
),
## Clinically isolated syndrome combined
out_date_cis=patients.minimum_of(
"tmp_out_date_cis_snomed", "tmp_out_date_cis_hes", "tmp_out_date_cis_death",
),
    ## Outcome group 8: Inflammatory neuromuscular disease - to be expanded once codelists for the other outcome components are available
out_date_grp8_ind=patients.minimum_of(
"tmp_out_date_glb_snomed", "tmp_out_date_glb_hes", "tmp_out_date_glb_death",
"tmp_out_date_multiple_sclerosis_snomed", "tmp_out_date_multiple_sclerosis_hes", "tmp_out_date_multiple_sclerosis_death",
"tmp_out_date_myasthenia_gravis_snomed", "tmp_out_date_myasthenia_gravis_hes", "tmp_out_date_myasthenia_gravis_death",
"tmp_out_date_longit_myelitis_snomed", "tmp_out_date_longit_myelitis_hes", "tmp_out_date_longit_myelitis_death",
"tmp_out_date_cis_snomed", "tmp_out_date_cis_hes", "tmp_out_date_cis_death",
#"out_date_glb", "out_date_multiple_sclerosis","out_date_myasthenia_gravis","out_date_longit_myelitis", "out_date_cis"
),
# Define primary outcome: composite auto-immune outcome
# out_date_composite_ai=patients.minimum_of(
# "out_date_grp1_ifa", "out_date_grp2_ctd", "out_date_grp3_isd", "out_date_grp4_agi_ibd",
# "out_date_grp5_atv", "out_date_grp6_trd", "out_date_grp7_htd", "out_date_grp8_ind"
# ),
## Define primary outcome: composite auto-immune outcome
out_date_composite_ai=patients.minimum_of(
"tmp_out_date_ra_snomed", "tmp_out_date_ra_hes", "tmp_out_date_ra_death",
"out_date_undiff_eia",
#"tmp_out_date_pa_snomed",
"tmp_out_date_pa_hes", #"tmp_out_date_pa_death,"
#"tmp_out_date_axial_snomed",
"tmp_out_date_axial_hes", "tmp_out_date_axial_death",
"tmp_out_date_sle_snomed", "tmp_out_date_sle_hes", "tmp_out_date_sle_death",
"tmp_out_date_sjs_snomed", "tmp_out_date_sjs_hes", "tmp_out_date_sjs_death",
"tmp_out_date_sss_snomed", "tmp_out_date_sss_hes", "tmp_out_date_sss_death",
"tmp_out_date_im_snomed", "tmp_out_date_im_hes", "tmp_out_date_im_death",
"tmp_out_date_mctd_snomed", "tmp_out_date_mctd_hes", "tmp_out_date_mctd_death",
"out_date_as",
"tmp_out_date_psoriasis_ctv", "tmp_out_date_psoriasis_hes", "tmp_out_date_psoriasis_death",
"tmp_out_date_hs_ctv", "tmp_out_date_hs_hes", "tmp_out_date_hs_death",
"tmp_out_date_ibd_snomed", "tmp_out_date_ibd_ctv", "tmp_out_date_ibd_hes", "tmp_out_date_ibd_death",
"tmp_out_date_crohn_snomed", "tmp_out_date_crohn_hes", "tmp_out_date_crohn_death",
"tmp_out_date_uc_snomed", "tmp_out_date_uc_hes", "tmp_out_date_uc_death",
"tmp_out_date_celiac_snomed", "tmp_out_date_celiac_hes", "tmp_out_date_celiac_death",
"tmp_out_date_addison_snomed", "tmp_out_date_addison_hes","tmp_out_date_addison_death",
"tmp_out_date_grave_snomed", "tmp_out_date_grave_hes", "tmp_out_date_grave_death",
"tmp_out_date_hashimoto_thyroiditis_snomed", "tmp_out_date_hashimoto_thyroiditis_hes", "tmp_out_date_hashimoto_thyroiditis_death",
"tmp_out_date_anca_snomed", "tmp_out_date_anca_hes", "tmp_out_date_anca_death",
"tmp_out_date_gca_snomed", "tmp_out_date_gca_hes", "tmp_out_date_gca_death",
"tmp_out_date_iga_vasculitis_snomed", "tmp_out_date_iga_vasculitis_hes", "tmp_out_date_iga_vasculitis_death",
"tmp_out_date_pmr_snomed", "tmp_out_date_pmr_hes", "tmp_out_date_pmr_death",
"tmp_out_date_immune_thromb_snomed", "tmp_out_date_immune_thromb_hes", "tmp_out_date_immune_thromb_death",
"tmp_out_date_pernicious_anaemia_snomed", "tmp_out_date_pernicious_anaemia_hes", "tmp_out_date_pernicious_anaemia_death",
"tmp_out_date_apa_snomed", "tmp_out_date_apa_ctv", "tmp_out_date_apa_hes", "tmp_out_date_apa_death",
"tmp_out_date_aha_snomed", "tmp_out_date_aha_hes", "tmp_out_date_aha_death",
"tmp_out_date_glb_snomed", "tmp_out_date_glb_hes", "tmp_out_date_glb_death",
"tmp_out_date_multiple_sclerosis_snomed", "tmp_out_date_multiple_sclerosis_hes", "tmp_out_date_multiple_sclerosis_death",
"tmp_out_date_myasthenia_gravis_snomed", "tmp_out_date_myasthenia_gravis_hes", "tmp_out_date_myasthenia_gravis_death",
"tmp_out_date_longit_myelitis_snomed", "tmp_out_date_longit_myelitis_hes", "tmp_out_date_longit_myelitis_death",
"tmp_out_date_cis_snomed", "tmp_out_date_cis_hes", "tmp_out_date_cis_death",
#"out_date_grp1_ifa", "out_date_grp2_ctd", "out_date_grp3_isd", "out_date_grp4_agi_ibd",
#"out_date_grp5_atv", "out_date_grp6_trd", "out_date_grp7_htd", "out_date_grp8_ind"
),
)
return dynamic_variables
|
__author__ = 'admin'
import numpy as np
import pickle
from skimage.io import imread, imsave
from ProposalSizeFilter import ProposalSizeFilter
import scipy.io as sio

# Load candidate object proposals (boxes stored as [x1, y1, x2, y2] rows)
# from the MATLAB results file.
proposal_path = "/Users/admin/Desktop/NewExp/proposals.mat"
mat_cont = sio.loadmat(proposal_path)
proposals = mat_cont["res"]
proposals = proposals[0, :]

# Keep only proposals whose size falls in the accepted range (80 to 8000).
ProposalSizeFilter.initialize(80, 8000)
proposals = ProposalSizeFilter.filter(proposals)
p2 = proposals[1]

# Bug fix: the original opened the pickle file and never closed it; the
# `with` block guarantees the handle is released even if unpickling fails.
with open("/Users/admin/Desktop/NewExp/dis_mat_cosine.pkl", "rb") as f:
    dis_mat = pickle.load(f)

# For each proposal of image 2, take its smallest distance to any proposal
# of the other image, then rank proposals by that score (best match first).
# NOTE(review): assumes dis_mat is (num_proposals_img1, num_proposals_img2)
# -- confirm against the code that produced the pickle.
img2_val = np.amin(dis_mat, axis=0)
img2_index = np.argsort(img2_val)

image = imread("/Users/admin/Desktop/NewExp/2.jpg")
num_boxes_to_draw = 5  # was a magic number inside range()
for i in range(num_boxes_to_draw):
    proposal = p2[img2_index[i], :]
    # Clamp the box's far corner to the image bounds so the edge-drawing
    # assignments below cannot index past the last row/column.
    if proposal[2] >= image.shape[1]:
        proposal[2] = image.shape[1] - 1
    if proposal[3] >= image.shape[0]:
        proposal[3] = image.shape[0] - 1
    # Paint the four edges of the rectangle in green.
    image[proposal[1]:proposal[3], proposal[0], :] = [0, 255, 0]
    image[proposal[1]:proposal[3], proposal[2], :] = [0, 255, 0]
    image[proposal[1], proposal[0]:proposal[2], :] = [0, 255, 0]
    image[proposal[3], proposal[0]:proposal[2], :] = [0, 255, 0]
imsave("/Users/admin/Desktop/NewExp/5_5.jpg", image)
|
def fun(variable):
    """Return True when *variable* is one of the lowercase vowels."""
    vowels = ['a', 'e', 'o', 'i', 'u']
    return variable in vowels
sequence = ['g', 'e', 'w', 'a', 'k', 's', 'p', 'r']
# Bug fix: the original called filter(fun, variable), but `variable` is not
# defined at module level (it is only a parameter of fun), so the script
# crashed with NameError. The intended iterable is `sequence`.
filtered = filter(fun, sequence)
print("The filtered letters are:")
for s in filtered:
    print(s)
"""
Plot illustrative firing rate responses to step stimuli.
Created by Nirag Kadakia at 13:00 09-20-2018
This work is licensed under the
Creative Commons Attribution-NonCommercial-ShareAlike 4.0
International License.
To view a copy of this license,
visit http://creativecommons.org/licenses/by-nc-sa/4.0/.
"""
import scipy as sp
import sys
import os
import matplotlib.pyplot as plt
sys.path.append('../../shared_src')
from plot_formats import fig_tuning_curves_pulse, fig_tuning_curves_norm
from save_load_figure_data import save_fig
# The location of the source code for CS-variability-adaptation is listed
# in the ../../shared_src/local_methods file within src_dir()
from local_methods import src_dir
sys.path.append(src_dir())
from utils import get_flag
from load_specs import read_specs_file, compile_all_run_vars
from four_state_receptor_CS import four_state_receptor_CS
from encode_CS import single_encode_CS
# Simulation constants: 1 ms timestep over a 1 s window; a 300-unit step
# stimulus held from t=0.25 s to t=0.75 s with edges smoothed over ~5 ms;
# responses are generated for 20 distinct odor identities.
dt = 1e-3
pulse_beg = 0.25
pulse_end = 0.75
pulse_height = 300
pulse_rnd = 0.005
wind_len = 1.0
num_odors = 20

# Command line: get_flag() reads the data flag; the remaining arguments
# (argv[2:]) index the iterated variables of the specs file.
# NOTE(review): assumes argv[1] is the flag consumed by get_flag -- confirm.
data_flag = get_flag()
iter_var_idxs = list(map(int, sys.argv[2:]))

# Build the CS receptor model from the specs file's run variables and put
# it in temporal (time-trace) mode.
list_dict = read_specs_file(data_flag)
vars_to_pass = compile_all_run_vars(list_dict, iter_var_idxs)
obj = four_state_receptor_CS(**vars_to_pass)
obj.temporal_run = True

# Set signal manually: step from pulse_beg to pulse_end, rounded by pulse_rnd.
# The product of the two tanh terms gives a smooth rise at pulse_beg and a
# smooth fall at pulse_end.
Tt = sp.arange(0, wind_len, dt)
signal = pulse_height/2*(sp.tanh(((Tt - pulse_beg)/pulse_rnd)) + 1)*\
    (1 - 0.5*(sp.tanh(((Tt - pulse_end)/pulse_rnd)) + 1))
# Small offset keeps the signal strictly positive (avoids exact zeros).
signal += 1e-5
obj.signal_trace_Tt = Tt
obj.signal_trace = signal
# For each odor identity, generate time-traces of ORN responses.
# NOTE(review): obj_arr and obj_list are populated/assigned but never used
# afterwards -- candidates for removal.
obj_arr = [[] for i in range(num_odors)]
Yy_arr = [[] for i in range(num_odors)]
for seed in range(num_odors):
    # Set new odor identity and run
    print (seed)
    obj.seed_dSs = seed
    obj_list = []
    Yy_list = []
    for iT in range(len(Tt)):
        # Set estimation dSs values from signal trace -- mu_dSs must just
        # be non-negative to get sparse_idxs
        obj.mu_Ss0 = obj.signal_trace[iT]
        obj.mu_dSs = obj.signal_trace[iT]*1e-5
        obj.sigma_dSs = 0
        # Encode / decode fully first time; then just update eps and responses
        if iT == 0:
            obj = single_encode_CS(obj, list_dict['run_specs'])
            # Spread adaptation rates over the system if sigma is set
            if obj.temporal_adaptation_rate_sigma != 0:
                obj.set_ordered_temporal_adaptation_rate()
        else:
            obj.set_sparse_signals()
        # Update adapted epsilon and the measured receptor activity (Yy)
        # at this timestep; the order of these two calls matters.
        obj.set_temporal_adapted_epsilon()
        obj.set_measured_activity()
        Yy_list.append(obj.Yy)
    # Store the full activity time-trace for this odor identity.
    Yy_arr[seed] = Yy_list
# Plot, for each receptor, all normalized responses (one figure per receptor,
# one curve per odor identity, each scaled to its own peak).
for iM in range(obj.Mm):
    num_plots = 0
    fig = fig_tuning_curves_norm()
    for seed in range(num_odors):
        Yy_list = Yy_arr[seed]
        # Extract this receptor's activity trace for the current odor.
        act = []
        for Yy in Yy_list:
            act.append(Yy[iM])
        # Only can normalize if response isn't too low
        if max(act) < 1:
            continue
        # Color each odor along the inferno colormap (0.2..0.9 range).
        col_val = 0.2 + 0.7*seed/(num_odors - 1)
        color=plt.cm.inferno(col_val)
        lw = 2
        plt.plot(Tt, act/max(act), color=color, lw=lw, alpha=0.7)
        num_plots += 1
    plt.ylim(-0.05, 1.1)
    # File name encodes iterated-variable indices, pulse height, receptor.
    fig_name = '%s_int=%s_iM=%s_norm' % (iter_var_idxs, pulse_height, iM)
    save_fig(fig_name, subdir=data_flag, clear_plot=True)
# Plot, for each receptor, unnormalized responses (raw firing-rate traces,
# one figure per receptor, one curve per odor identity).
for iM in range(obj.Mm):
    fig = fig_tuning_curves_pulse()
    for seed in range(num_odors):
        Yy_list = Yy_arr[seed]
        # Extract this receptor's activity trace for the current odor.
        act = []
        for Yy in Yy_list:
            act.append(Yy[iM])
        # Color each odor along the inferno colormap (0..0.8 range).
        col_val = 0 + 0.8*seed/(num_odors - 1)
        lw = 2
        color=plt.cm.inferno(col_val)
        plt.plot(Tt, act, color=color, lw=lw, alpha=0.7)
    # Fixed y-limits slightly beyond the pulse height (300).
    plt.ylim(-5, 305)
    fig_name = '%s_int=%s_iM=%s' % (iter_var_idxs, pulse_height, iM)
    save_fig(fig_name, subdir=data_flag, clear_plot=True)
|
from app import application, engine
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import *
from flask import request, jsonify
from werkzeug.security import check_password_hash, generate_password_hash
from sqlalchemy.orm import Session
from flask_cors import CORS, cross_origin
# Allow cross-origin requests (front-end runs on a different origin).
CORS(application, support_credentials=True)
# Reflect the existing database schema into mapped classes; no models are
# declared in this file.
Base = automap_base()
Base.prepare(engine, reflect=True)
# Mapped classes for the reflected 'account' and 'books' tables.
Accounts = Base.classes.account
Books = Base.classes.books
# Module-level session and metadata shared by every request handler below.
session = Session(engine)
metadata = MetaData(engine)
@application.route('/index')
@application.route('/')
def index():
    """Landing page, served at both '/' and '/index'."""
    return 'Welcome to this page'
@application.route('/register', methods=["GET", "POST"])
def register():
    """Create an account row from the request's query parameters.

    Expects ``username``, ``email`` and ``password`` in the query string.
    The password is stored as a werkzeug hash, never in plain text.
    Returns JSON ``{'user_added': bool}``.
    """
    username = request.args.get('username')
    email = request.args.get('email')
    password = request.args.get('password')
    # Guard: generate_password_hash(None) raises, turning a bad request into
    # a 500. Report failure explicitly instead.
    if not username or not email or not password:
        return jsonify({'user_added': False})
    password_hash = generate_password_hash(password)
    account = Table('account', metadata, autoload=True)
    # Parameterized insert -- values are bound, not string-interpolated.
    engine.execute(account.insert(), username=username,
                   email=email, password=password_hash)
    return jsonify({'user_added': True})
@application.route('/sign_in', methods=["GET", "POST"])
def sign_in():
    """Check submitted credentials against the account table.

    The 'username' query parameter may hold either the username or the
    e-mail address. Returns JSON ``{'signed_in': bool}``.
    """
    login = request.args.get('username')
    password = request.args.get('password')
    account = (session.query(Accounts)
               .filter(or_(Accounts.username == login,
                           Accounts.email == login))
               .first())
    if account is None:
        return jsonify({'signed_in': False})
    ok = check_password_hash(account.password, password)
    return jsonify({'signed_in': bool(ok)})
@application.route('/add_book', methods=["GET", "POST"])
def add_book():
    """Insert one book row built from the request's query parameters."""
    fields = ('isbn', 'book_title', 'book_author',
              'publication_year', 'image_url', 'price')
    # Pull every expected column straight from the query string.
    values = {name: request.args.get(name) for name in fields}
    books = Table('books', metadata, autoload=True)
    # Parameterized insert: column values are bound, not interpolated.
    engine.execute(books.insert(), **values)
    return jsonify({'book_added': True})
@application.route('/fetch_books', methods=["GET", "POST"])
def fetch_books():
    """Return every book as a JSON array of plain dicts."""
    columns = ('isbn', 'book_title', 'book_author',
               'publication_year', 'image_url', 'price')
    payload = [{name: getattr(book, name) for name in columns}
               for book in session.query(Books).all()]
    return jsonify(payload)
|
# Type-conversion drill: each step converts a value and prints the result.

# 1. String "8.8" to a float.
a = float("8.8")
print(a)

# 2. Float 8.8 to an integer, rounded to the nearest whole number.
b = int(round(8.8))
print(b)

# 3. String "8.8" to an integer: parse as float first, then round.
c = int(round(float('8.8')))
print(c)

# 4. Float 8.8 to its string representation.
d = str(8.8)
print(d)

# 5. Integer 8 to its string representation.
e = str(8)
print(e)

# 6. Integer 8 widened to a float.
f = float(8)
print(f)

# 7. Integer 8 to a boolean (any non-zero number is truthy).
g = bool(8)
print(g)
|
# coding=utf-8
import json
import os, sys
import hashlib
import hmac
import base64
import urllib
import time
import uuid
import requests
def get_iso8601_time():
    '''Return the current UTC time as an ISO 8601 string, e.g. 2016-09-23T08:00:00Z.

    time.gmtime() already yields UTC, so the trailing 'Z' designator is
    correct without any zone conversion. (The original's TIME_ZONE local
    was unused and has been removed.)
    '''
    return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
def get_uuid():
    '''Return a fresh random version-4 UUID as a string (used as SignatureNonce).'''
    nonce = uuid.uuid4()
    return str(nonce)
def get_parameters(user_param, Action, AccessKeyId, Version):
    '''Build the complete request-parameter dict for one API call.

    user_param: caller-specific arguments, e.g.
        {"RegionId": "cn-beijing", "LoadBalancerName": "test-node1"}
    Action: API operation name, e.g. CreateLoadBalancer
    AccessKeyId: the access key ID
    Version: API version string of the target service
    '''
    # Common signature parameters required by every request.
    parameters = {
        'HTTPMethod': 'GET',
        'AccessKeyId': AccessKeyId,
        'Format': 'json',
        'Version': Version,
        'SignatureMethod': 'HMAC-SHA1',
        'Timestamp': get_iso8601_time(),
        'SignatureVersion': '1.0',
        'SignatureNonce': get_uuid(),
        'Action': Action,
    }
    # Merge in the caller's parameters (they win on key collision).
    for key in sorted(user_param):
        parameters[key] = user_param[key]
    return parameters
def get_param(parameters):
    '''Canonicalize the parameter dict into a sorted, percent-encoded query string.'''
    # Python 2 urllib.quote; safe='' so '/' is encoded too, per the signing spec.
    pairs = [urllib.quote(k, safe='') + "=" + urllib.quote(v, safe='')
             for (k, v) in sorted(parameters.items())]
    return "&".join(pairs)
def get_StringToSign(parameters, param_str):
    '''Compose the string to sign: METHOD & %2F & percent-encoded query string.'''
    # '%2F' is the encoded '/' resource path required by the signature format.
    encoded_query = urllib.quote(param_str, safe='')
    return parameters['HTTPMethod'] + "&%2F&" + encoded_query
def get_signature(StringToSign, AccessKeySecret):
    '''HMAC-SHA1 sign the canonical string and base64-encode the digest.'''
    digest = hmac.new(AccessKeySecret, StringToSign, hashlib.sha1).digest()
    # encodestring appends a newline; strip it before use (Python 2 API).
    return base64.encodestring(digest).strip()
def build_request(server_url, param_str, signature, AccessKeySecret):
'''Assemble the final request URL (canonical query string plus the
percent-encoded Signature parameter) and issue the HTTP GET.'''
# NOTE(review): this file is Python 2 (print statements, urllib.quote) and
# indentation has been stripped; the lines below belong inside the function.
Signature = "Signature=" + urllib.quote(signature)
param = param_str + "&" + Signature
request_url = server_url + param
s = requests.get(request_url)
# Debug dumps of the raw response body and the Response object.
print s.content
print s
# NOTE(review): AccessKeySecret is accepted but never used in this function.
return s
def get_regions(server_url, Action, user_param, AccessKeySecret, AccessKeyId, Version):
    '''Sign and dispatch one API request; return the requests Response.

    server_url: bare endpoint host, e.g. slb.aliyun.com
    Action: operation name, e.g. 'DescribeRegions'
    user_param: operation-specific arguments,
        e.g. {'LoadBalancerId': 'lb-2zekxu2elibyexxoo9hlw'}
    AccessKeySecret, AccessKeyId: the AK pair
    Version: service API version, e.g. '2014-05-15' for SLB
    '''
    endpoint = 'https://' + server_url + '/?'
    parameters = get_parameters(user_param, Action, AccessKeyId, Version)
    param_str = get_param(parameters)
    string_to_sign = get_StringToSign(parameters, param_str)
    # Per the signing spec the secret is suffixed with '&' before keying the HMAC.
    signature = get_signature(string_to_sign, AccessKeySecret + '&')
    return build_request(endpoint, param_str, signature, AccessKeySecret)
def describe_VServerGroups():
'''Query the virtual-server groups of one SLB instance and print the result.'''
# NOTE(review): Python 2 source with indentation stripped by extraction.
Action = 'DescribeVServerGroups'
user_param = {'RegionId': 'cn-shenzhen', # region the SLB instance lives in
'LoadBalancerId': 'lb-wz9***q5hg03lk', # ID of the SLB instance
'IncludeRule': 'true',
'IncludeListener': 'true'
}
server_url = 'slb.aliyuncs.com'
Version = '2014-05-15'
AccessKeySecret='********' # cloud AK Secret (redacted placeholder)
AccessKeyId='*********' # cloud AK ID (redacted placeholder)
message = get_regions(server_url, Action, user_param, AccessKeySecret, AccessKeyId, Version)
print "======================%s" %message
print json.dumps(message.content)
def modify_VServerGroupBackendServers(OldBackendServers,NewBackendServers):
'''Replace backend servers of a virtual-server group and print the result.

OldBackendServers / NewBackendServers: JSON strings describing the
servers to swap out and in, passed through to the API unchanged.
'''
# NOTE(review): Python 2 source with indentation stripped by extraction.
Action = 'ModifyVServerGroupBackendServers'
user_param = {'RegionId': 'cn-shenzhen',
'VServerGroupId': 'rsp-wz9n8*****rdtg', # ID of the virtual-server group
'OldBackendServers': OldBackendServers,
'NewBackendServers': NewBackendServers,
}
server_url = 'slb.aliyuncs.com'
Version = '2014-05-15'
AccessKeySecret='********' # cloud AK Secret (redacted placeholder)
AccessKeyId='*********' # cloud AK ID (redacted placeholder)
message = get_regions(server_url, Action, user_param, AccessKeySecret, AccessKeyId, Version)
print "======================%s" %message
print json.dumps(message.content)
|
# -*- coding: utf-8 -*-
import numpy as np
import argparse
import os
def maxmin(initial, final, basename, sk, res):
'''Scan the data files basename+'%05d' for i in [initial, final) and return
the global (max, min) of the second-to-last column, sampled every `res` rows.

sk: number of header lines to skip in each file.
'''
# NOTE(review): Python 2 source (print statement) with indentation stripped
# by extraction; the body below belongs inside the function and its loop.
maxdens,mindens=0,0
for i in range(initial,final):
path=basename+'%05d'%(i,)
data=np.genfromtxt(path,skip_header=sk)
# Second-to-last column holds the density; take every `res`-th row.
density=data[::res,-2]
mindens_temp = np.amin(density)
maxdens_temp = np.amax(density)
# First file seeds the running extrema; later files update them.
if i==initial:
maxdens=maxdens_temp
mindens=mindens_temp
else:
if mindens_temp<mindens:
mindens=mindens_temp
if maxdens_temp>maxdens:
maxdens=maxdens_temp
print "Iteration n°",i,"of",final-1,"Completed."
return (maxdens,mindens)
# Writes the maxmin.dat cache file or, if it already exists, offers to read
# it back and return the stored values.
def gen_maxmin_dat(initial, final, basename, sk, res):
'''Return (maxdens, mindens), either recomputed via maxmin() or read from
a previously written maxmin.dat next to the data files.'''
# NOTE(review): Python 2 source (print/raw_input) with indentation stripped
# by extraction; the body below belongs inside the function and its branches.
if '/' in basename: # derive the directory of maxmin.dat from the data path
b = basename.split('/')
b[-1] = 'maxmin.dat'
n = '/'.join(b)
else:
n = 'maxmin.dat'
if os.path.isfile(n):
print "File maxmin.dat found."
with open(n,'r') as f:
dat = f.readlines()
print "Initial: {}Final: {}Basename: {}Resolution: {}Maxdens: {}Mindens: {}".format(dat[0], dat[1], dat[2], dat[3], dat[4], dat[5])
gen = raw_input("Generate new maxmin.dat file or use current one? [new/current]\n---> ")
if gen == 'current' or gen=='c':
with open(n,'r') as f:
data = f.readlines()
# NOTE(review): values read from the file are returned as *strings*
# (rstrip'ed lines), whereas the recomputed branch returns floats --
# callers must cope with both; consider float() here.
maxdens = data[-2].rstrip()
mindens = data[-1].rstrip()
return (maxdens, mindens)
print "Generating a new maxmin.dat"
maxdens, mindens = maxmin(initial, final, basename, sk, res)
# Persist the run parameters plus the extrema, one value per line.
data = [initial, final-1, basename, res, maxdens, mindens]
with open(n,'w') as f:
for e in data:
f.write(str(e)+'\n')
return (maxdens,mindens)
# CLI entry point: compute and print the density extrema for a file series.
# NOTE(review): Python 2 print statement; indentation stripped by extraction.
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('initial', type=int)
parser.add_argument('final', type=int)
parser.add_argument('basename', type=str)
parser.add_argument('skip_header', type=int)
parser.add_argument('res', type=int)
args = parser.parse_args()
maxdens, mindens = maxmin(args.initial, args.final, args.basename, args.skip_header, args.res)
print maxdens, mindens
|
from GestionFichiers import *
from FonctionsEssaisVitesseLineaire import *
# Linear-speed traffic experiment: advance the density profile rho in time
# with the scheme from FonctionsEssaisVitesseLineaire, writing every state
# under results/essaisVitesseLineaire/.
rho_max = 250                    # maximum density
v_max = 130 / 3.6                # maximum speed, km/h converted to m/s
longueurSegment = 100000         # road-segment length (m)
dureeExperience = 900            # simulated duration (s)
facteur = 1 + 1e-2               # CFL-style safety factor (> 1)
nbPointsEspace = 25000           # number of spatial grid points

deltaX = longueurSegment / nbPointsEspace
# Time step chosen so the fastest wave crosses at most one cell per step.
deltaT = deltaX * (1 / (facteur * v_max))
nbPointsTemps = int(dureeExperience / deltaT)

# Initial density profile sampled on the spatial grid.
rho = [rho0(k * deltaX, longueurSegment) for k in range(nbPointsEspace)]

# Clear previous outputs, then record the run configuration and state 0.
nettoyage("results/essaisVitesseLineaire/config/")
nettoyage("results/essaisVitesseLineaire/calculs/")
ecrireParametresDansUnFichier("results/essaisVitesseLineaire/config/config.txt", longueurSegment, dureeExperience, nbPointsEspace, deltaX, deltaT, nbPointsTemps, v_max, rho_max, facteur)
ecrireListeDansUnFichier(rho, "results/essaisVitesseLineaire/calculs/0.txt")

# Time march: one output file per step.
temps = 0
for step in range(1, nbPointsTemps):
    temps += deltaT
    rho = calcul(rho, deltaT, deltaX, v_max, rho_max)
    ecrireListeDansUnFichier(rho, "results/essaisVitesseLineaire/calculs/" + str(step) + ".txt")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.